adjoint_structural_static_scheme.h
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: structural_mechanics_application/license.txt // // Main authors: Martin Fusseder, https://github.com/MFusseder // #if !defined(KRATOS_ADJOINT_STRUCTURAL_STATIC_SCHEME) #define KRATOS_ADJOINT_STRUCTURAL_STATIC_SCHEME // System includes #include <vector> #include <string> // External includes // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/kratos_parameters.h" #include "utilities/openmp_utils.h" #include "solving_strategies/schemes/scheme.h" #include "containers/variable.h" #include "response_functions/adjoint_response_function.h" // Application includes namespace Kratos { ///@addtogroup StructuralMechanicsApplication ///@{ ///@name Kratos Classes ///@{ /// A scheme for for adjoint equations. /** * * */ template <class TSparseSpace, class TDenseSpace> class AdjointStructuralStaticScheme : public Scheme<TSparseSpace, TDenseSpace> { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION(AdjointStructuralStaticScheme); typedef Scheme<TSparseSpace, TDenseSpace> BaseType; typedef typename BaseType::TSystemMatrixType SystemMatrixType; typedef typename BaseType::TSystemVectorType SystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::DofsArrayType DofsArrayType; ///@} ///@name Life Cycle ///@{ /// Constructor. AdjointStructuralStaticScheme(Parameters rParameters, AdjointResponseFunction::Pointer pResponseFunction) : Scheme<TSparseSpace, TDenseSpace>() { KRATOS_TRY; Parameters default_params(R"( { "scheme_type": "adjoint_structural", "rotation_dofs": false })"); rParameters.ValidateAndAssignDefaults(default_params); mpResponseFunction = pResponseFunction; mHasRotationDofs = rParameters["rotation_dofs"].GetBool(); KRATOS_CATCH(""); } /// Destructor. ~AdjointStructuralStaticScheme() override { } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ void Initialize(ModelPart& rModelPart) override { KRATOS_TRY; BaseType::Initialize(rModelPart); // Allocate auxiliary memory int num_threads = OpenMPUtils::GetNumThreads(); mAdjointValues.resize(num_threads); mpResponseFunction->Initialize(); KRATOS_CATCH(""); } void InitializeSolutionStep(ModelPart& rModelPart, SystemMatrixType& rA, SystemVectorType& rDx, SystemVectorType& rb) override { KRATOS_TRY; // Sensitivities are generally computed as a time integral. For steady // problems, we set the time step to -1.0 (minus because adjoint is // backward in time). rModelPart.GetProcessInfo()[DELTA_TIME] = -1.0; BaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb); // initialize the variables to zero. 
#pragma omp parallel for for (int k = 0; k< static_cast<int> (rModelPart.Nodes().size()); ++k) { auto it_node = rModelPart.NodesBegin() + k; noalias(it_node->FastGetSolutionStepValue(ADJOINT_DISPLACEMENT)) = ADJOINT_DISPLACEMENT.Zero(); } if(mHasRotationDofs) { #pragma omp parallel for for (int k = 0; k< static_cast<int> (rModelPart.Nodes().size()); ++k) { auto it_node = rModelPart.NodesBegin() + k; noalias(it_node->FastGetSolutionStepValue(ADJOINT_ROTATION)) = ADJOINT_ROTATION.Zero(); } } mpResponseFunction->InitializeSolutionStep(); KRATOS_CATCH(""); } void FinalizeSolutionStep(ModelPart& rModelPart, SystemMatrixType& rA, SystemVectorType& rDx, SystemVectorType& rb) override { KRATOS_TRY; BaseType::FinalizeSolutionStep(rModelPart, rA, rDx, rb); mpResponseFunction->FinalizeSolutionStep(); KRATOS_CATCH(""); } /// Update adjoint. void Update(ModelPart& rModelPart, DofsArrayType& rDofSet, SystemMatrixType& rA, SystemVectorType& rDx, SystemVectorType& rb) override { KRATOS_TRY; Communicator& r_comm = rModelPart.GetCommunicator(); if (r_comm.TotalProcesses() == 1) { int ndofs = static_cast<int>(rDofSet.size()); #pragma omp parallel for for (int i = 0; i < ndofs; ++i) { typename DofsArrayType::iterator it = rDofSet.begin() + i; if (it->IsFree() == true) it->GetSolutionStepValue() += TSparseSpace::GetValue(rDx, it->EquationId()); } } else { int ndofs = static_cast<int>(rDofSet.size()); #pragma omp parallel for for (int i = 0; i < ndofs; ++i) { typename DofsArrayType::iterator it = rDofSet.begin() + i; if (it->GetSolutionStepValue(PARTITION_INDEX) == r_comm.MyPID()) if (it->IsFree() == true) it->GetSolutionStepValue() += TSparseSpace::GetValue(rDx, it->EquationId()); } // todo: add a function Communicator::SynchronizeDofVariables() to // reduce communication here. r_comm.SynchronizeNodalSolutionStepsData(); } KRATOS_CATCH(""); } int Check(ModelPart& rModelPart) override { KRATOS_TRY; // check domain dimension and element const unsigned int working_space_dimension = rModelPart.Elements().begin()->GetGeometry().WorkingSpaceDimension(); ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); const unsigned int domain_size = static_cast<unsigned int>(r_current_process_info[DOMAIN_SIZE]); KRATOS_ERROR_IF(domain_size != 2 && domain_size != 3) << "Invalid DOMAIN_SIZE: " << domain_size << std::endl; KRATOS_ERROR_IF(domain_size != working_space_dimension) << "DOMAIN_SIZE != WorkingSpaceDimension()" << std::endl; for(auto& rnode : rModelPart.Nodes()) KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(ADJOINT_DISPLACEMENT, rnode) if(mHasRotationDofs) { for(auto& rnode : rModelPart.Nodes()) KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(ADJOINT_ROTATION, rnode) } return BaseType::Check(rModelPart); // check elements and conditions KRATOS_CATCH(""); } /// Calculate residual based element contributions to steady adjoint. void CalculateSystemContributions(Element::Pointer pCurrentElement, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, Element::EquationIdVectorType& rEquationId, ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY; int thread_id = OpenMPUtils::ThisThread(); // Get element stiffness matrix pCurrentElement->CalculateLeftHandSide(rLHS_Contribution, rCurrentProcessInfo); if (rRHS_Contribution.size() != rLHS_Contribution.size1()) rRHS_Contribution.resize(rLHS_Contribution.size1(), false); // Calculate transposed gradient of response function on element w.r.t. 
primal solution mpResponseFunction->CalculateGradient( *pCurrentElement, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); noalias(rRHS_Contribution) = -rRHS_Contribution; // Calculate system contributions in residual form. pCurrentElement->GetValuesVector(mAdjointValues[thread_id]); noalias(rRHS_Contribution) -= prod(rLHS_Contribution, mAdjointValues[thread_id]); pCurrentElement->EquationIdVector(rEquationId, rCurrentProcessInfo); KRATOS_CATCH(""); } void Calculate_LHS_Contribution(Element::Pointer pCurrentElement, LocalSystemMatrixType& rLHS_Contribution, Element::EquationIdVectorType& rEquationId, ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY; LocalSystemVectorType RHS_contribution; RHS_contribution.resize(rLHS_Contribution.size1(), false); CalculateSystemContributions( pCurrentElement, rLHS_Contribution, RHS_contribution, rEquationId, rCurrentProcessInfo); KRATOS_CATCH(""); } void Condition_CalculateSystemContributions(Condition::Pointer pCurrentCondition, LocalSystemMatrixType& rLHS_Contribution, LocalSystemVectorType& rRHS_Contribution, Condition::EquationIdVectorType& rEquationId, ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY; int thread_id = OpenMPUtils::ThisThread(); // Calculate transposed gradient of condition residual w.r.t. primal solution. pCurrentCondition->CalculateLeftHandSide(rLHS_Contribution, rCurrentProcessInfo); if (rRHS_Contribution.size() != rLHS_Contribution.size1()) rRHS_Contribution.resize(rLHS_Contribution.size1(), false); // Calculate transposed gradient of response function on condition w.r.t. primal solution. mpResponseFunction->CalculateGradient( *pCurrentCondition, rLHS_Contribution, rRHS_Contribution, rCurrentProcessInfo); noalias(rRHS_Contribution) = -rRHS_Contribution; // Calculate system contributions in residual form. pCurrentCondition->GetValuesVector(mAdjointValues[thread_id]); noalias(rRHS_Contribution) -= prod(rLHS_Contribution, mAdjointValues[thread_id]); pCurrentCondition->EquationIdVector(rEquationId, rCurrentProcessInfo); KRATOS_CATCH(""); } void Condition_Calculate_LHS_Contribution(Condition::Pointer pCurrentCondition, LocalSystemMatrixType& rLHS_Contribution, Condition::EquationIdVectorType& rEquationId, ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY; LocalSystemVectorType RHS_contribution; RHS_contribution.resize(rLHS_Contribution.size1(), false); Condition_CalculateSystemContributions( pCurrentCondition, rLHS_Contribution, RHS_contribution, rEquationId, rCurrentProcessInfo); KRATOS_CATCH(""); } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ AdjointResponseFunction::Pointer mpResponseFunction; std::vector<LocalSystemVectorType> mAdjointValues; bool mHasRotationDofs; ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class Scheme */ ///@} ///@name Type Definitions ///@{ ///@} ///@} // Structural Mechanics Application group } /* namespace Kratos.*/ #endif /* KRATOS_ADJOINT_STRUCTURAL_STATIC_SCHEME defined */
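The element routine above keeps the adjoint system in residual form: the LHS is the element stiffness and the RHS is the negated response-function gradient minus the product of that stiffness with the current adjoint values, i.e. rhs = -dJ/du - K*lambda (for the usual symmetric static stiffness, K and its transpose coincide). A minimal standalone sketch of that arithmetic, using plain std::vector containers and illustrative names (K, dJdu, lambda) rather than the Kratos types:

#include <cstddef>
#include <vector>

// rhs = -dJ/du - K * lambda   (residual form; names are illustrative only)
std::vector<double> AdjointResidual(const std::vector<std::vector<double>>& K,
                                    const std::vector<double>& dJdu,
                                    const std::vector<double>& lambda)
{
    const std::size_t n = dJdu.size();
    std::vector<double> rhs(n, 0.0);
    for (std::size_t i = 0; i < n; ++i) {
        rhs[i] = -dJdu[i];                  // negated response gradient
        for (std::size_t j = 0; j < n; ++j)
            rhs[i] -= K[i][j] * lambda[j];  // minus stiffness times current adjoint values
    }
    return rhs;
}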
lock-nested-unrelated.c
/*
 * lock-nested-unrelated.c -- Archer testcase
 */
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
//
// See tools/archer/LICENSE.txt for details.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

// RUN: %libarcher-compile-and-run-race | FileCheck %s
#include <omp.h>
#include <stdio.h>

int main(int argc, char *argv[]) {
  int var = 0;

  omp_nest_lock_t lock;
  omp_init_nest_lock(&lock);

#pragma omp parallel num_threads(2) shared(var)
  {
    omp_set_nest_lock(&lock);
    omp_set_nest_lock(&lock);
    // Dummy locking.
    omp_unset_nest_lock(&lock);
    omp_unset_nest_lock(&lock);
    var++;
  }

  omp_destroy_nest_lock(&lock);

  fprintf(stderr, "DONE\n");
}

// CHECK: WARNING: ThreadSanitizer: data race
// CHECK-NEXT: {{(Write|Read)}} of size 4
// CHECK-NEXT: #0 {{.*}}lock-nested-unrelated.c:33
// CHECK: Previous write of size 4
// CHECK-NEXT: #0 {{.*}}lock-nested-unrelated.c:33
// CHECK: DONE
// CHECK: ThreadSanitizer: reported 1 warnings
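The race reported by this test exists because var++ happens after the nested lock has already been released, so the lock/unlock pair does not order the two increments. As a point of contrast only (it would defeat the purpose of this race test), a sketch of a race-free variant keeps the update inside the locked region:

#include <omp.h>
#include <stdio.h>

int main(void) {
  int var = 0;
  omp_nest_lock_t lock;
  omp_init_nest_lock(&lock);
#pragma omp parallel num_threads(2) shared(var)
  {
    omp_set_nest_lock(&lock);
    omp_set_nest_lock(&lock); // Nested acquisition is fine; the nesting count goes to 2.
    var++;                    // Now ordered by the lock, so no data race.
    omp_unset_nest_lock(&lock);
    omp_unset_nest_lock(&lock);
  }
  omp_destroy_nest_lock(&lock);
  fprintf(stderr, "DONE\n");
  return 0;
}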
convolution_1x1_pack8_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv1x1s1_sgemm_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; const int size = w * h; Mat bottom_im2col = bottom_blob; bottom_im2col.w = size; bottom_im2col.h = 1; im2col_sgemm_pack8_fp16sa_neon(bottom_im2col, top_blob, kernel, _bias, opt); } static void conv1x1s2_sgemm_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int channels = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; const int tailstep = (w - 2 * outw + w) * 8; Mat bottom_blob_shrinked; bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < channels; p++) { const __fp16* r0 = bottom_blob.channel(p); __fp16* outptr = bottom_blob_shrinked.channel(p); for (int i = 0; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { float16x8_t _v0 = vld1q_f16(r0); float16x8_t _v1 = vld1q_f16(r0 + 16); float16x8_t _v2 = vld1q_f16(r0 + 32); float16x8_t _v3 = vld1q_f16(r0 + 48); vst1q_f16(outptr, _v0); vst1q_f16(outptr + 8, _v1); vst1q_f16(outptr + 16, _v2); vst1q_f16(outptr + 24, _v3); r0 += 64; outptr += 32; } for (; j + 1 < outw; j += 2) { float16x8_t _v0 = vld1q_f16(r0); float16x8_t _v1 = vld1q_f16(r0 + 16); vst1q_f16(outptr, _v0); vst1q_f16(outptr + 8, _v1); r0 += 32; outptr += 16; } for (; j < outw; j++) { float16x8_t _v = vld1q_f16(r0); vst1q_f16(outptr, _v); r0 += 16; outptr += 8; } r0 += tailstep; } } conv1x1s1_sgemm_pack8_fp16sa_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt); }
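The stride-2 wrapper above first "shrinks" the input by copying every second pack-8 pixel of every second row into a contiguous blob and then reuses the stride-1 sgemm; tailstep = (w - 2 * outw + w) * 8 skips the remainder of the current row plus the following (skipped) row. A scalar sketch of that shrink step, with illustrative names and no NEON intrinsics:

// Scalar sketch of the stride-2 shrink for pack size 8 (illustrative only).
static void shrink_stride2_pack8_scalar(const float* bottom, float* shrinked,
                                        int w, int outw, int outh)
{
    const int tailstep = (w - 2 * outw + w) * 8; // rest of this row + the skipped next row
    const float* r0 = bottom;
    for (int i = 0; i < outh; i++)
    {
        for (int j = 0; j < outw; j++)
        {
            for (int q = 0; q < 8; q++)   // copy one pack-8 pixel
                shrinked[q] = r0[q];
            r0 += 16;       // advance two input pixels (2 * pack8)
            shrinked += 8;  // advance one output pixel
        }
        r0 += tailstep;
    }
}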
udr-1.c
/* { dg-do run } */

extern
#ifdef __cplusplus
"C"
#endif
void abort ();

struct S { int s; struct S *t; };

void
foo (struct S *out, struct S *in)
{
  out->s += in->s;
}

void
bar (struct S *x)
{
  if (x->s != 6)
    abort ();
  x->s = 15;
}

void
baz (struct S *x, struct S *y)
{
  x->s = 6;
  x->t = x;
  (void) y;
}

#pragma omp declare reduction (foo: struct S: foo (&omp_out, &omp_in)) \
	initializer (omp_priv = { 8, &omp_priv })
#pragma omp declare reduction (foo: char, int, short: omp_out += omp_in - 4) \
	initializer (omp_priv = 4)
#pragma omp declare reduction (+: struct S: foo (&omp_out, &omp_in)) \
	initializer (baz (&omp_priv, &omp_orig))

void
test (struct S s, struct S t)
{
  int q = 0;
  #pragma omp parallel num_threads (4) reduction (+: s, q) reduction (foo: t)
  {
    if (s.s != 6 || s.t != &s || t.s != 8 || t.t != &t)
      abort ();
    s.s = 2;
    t.s = 3;
    q = 1;
  }
  if (s.s != 12 + 2 * q || t.s != 14 + 3 * q)
    abort ();
}

int
main ()
{
  struct S s, t;
  s.s = 9;
  t.s = 10;
  int h = 30, v = 2, q = 0;
  #pragma omp declare reduction (foo: struct S: omp_out.s *= omp_in.s) \
	initializer (omp_priv = omp_orig)
  {
    #pragma omp declare reduction (foo: struct S: omp_out.s += omp_in.s) \
	initializer (omp_priv = omp_orig)
    #pragma omp parallel num_threads (4) reduction (+: t, q) \
			 reduction (min: h) reduction (foo: s, v)
    {
      if (s.s != 9 || t.s != 6 || v != 4 || h != __INT_MAX__)
	abort ();
      asm volatile ("" : "+m" (s.s), "+m" (t.s));
      asm volatile ("" : "+r" (h), "+r" (v));
      h = t.s;
      s.s++; t.s++; v++; q++;
    }
  }
  if (h != 6 || s.s != 9 + q * 10 || t.s != 10 + q * 7 || v != 2 + q)
    abort ();
  s.s = 12;
  t.s = 14;
  test (s, t);
  return 0;
}
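For reference, a much smaller sketch of the declare-reduction mechanics this test exercises: omp_priv is each thread's private accumulator, the initializer clause sets it up (here via a helper, mirroring the baz-style initializer above), and the combiner merges it into omp_out. All names here are illustrative, not part of the test:

struct Acc { int sum; };

static void acc_init (struct Acc *a) { a->sum = 0; }
static void acc_merge (struct Acc *out, const struct Acc *in) { out->sum += in->sum; }

#pragma omp declare reduction (merge : struct Acc : acc_merge (&omp_out, &omp_in)) \
	initializer (acc_init (&omp_priv))

int
parallel_count (int n)
{
  struct Acc a = { 0 };
  /* Each thread gets a private Acc initialized by acc_init; the partial sums
     are combined with acc_merge at the end of the loop.  */
  #pragma omp parallel for reduction (merge : a)
  for (int i = 0; i < n; i++)
    a.sum += 1;
  return a.sum; /* == n */
}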
mapper_utilities.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Philipp Bucher, Jordi Cotela // // See Master-Thesis P.Bucher // "Development and Implementation of a Parallel // Framework for Non-Matching Grid Mapping" #if !defined(KRATOS_MAPPER_UTILITIES_H_INCLUDED) #define KRATOS_MAPPER_UTILITIES_H_INCLUDED // System includes #include <array> #include <vector> // External includes // Project includes #include "includes/model_part.h" #include "utilities/parallel_utilities.h" #include "mapping_application_variables.h" #include "custom_utilities/mapper_flags.h" #include "custom_utilities/mapper_local_system.h" namespace Kratos { namespace MapperUtilities { typedef std::size_t SizeType; typedef std::size_t IndexType; typedef Node<3> NodeType; typedef Kratos::unique_ptr<MapperInterfaceInfo> MapperInterfaceInfoUniquePointerType; typedef Kratos::shared_ptr<MapperInterfaceInfo> MapperInterfaceInfoPointerType; typedef std::vector<std::vector<MapperInterfaceInfoPointerType>> MapperInterfaceInfoPointerVectorType; typedef Kratos::unique_ptr<MapperLocalSystem> MapperLocalSystemPointer; typedef std::vector<MapperLocalSystemPointer> MapperLocalSystemPointerVector; typedef Kratos::shared_ptr<MapperLocalSystemPointerVector> MapperLocalSystemPointerVectorPointer; using BoundingBoxType = std::array<double, 6>; // using BoundingBoxContainerType = std::vector<BoundingBoxType>; template< class TVarType > static void FillFunction(const NodeType& rNode, const TVarType& rVariable, double& rValue) { rValue = rNode.FastGetSolutionStepValue(rVariable); } template< class TVarType > static void FillFunctionNonHist(const NodeType& rNode, const TVarType& rVariable, double& rValue) { rValue = rNode.GetValue(rVariable); } template< class TVarType > static std::function<void(const NodeType&, const TVarType&, double&)> GetFillFunction(const Kratos::Flags& rMappingOptions) { if (rMappingOptions.Is(MapperFlags::FROM_NON_HISTORICAL)) return &FillFunctionNonHist<TVarType>; return &FillFunction<TVarType>; } template< class TVarType > static void UpdateFunction(NodeType& rNode, const TVarType& rVariable, const double Value, const double Factor) { rNode.FastGetSolutionStepValue(rVariable) = Value * Factor; } template< class TVarType > static void UpdateFunctionWithAdd(NodeType& rNode, const TVarType& rVariable, const double Value, const double Factor) { rNode.FastGetSolutionStepValue(rVariable) += Value * Factor; } template< class TVarType > static void UpdateFunctionNonHist(NodeType& rNode, const TVarType& rVariable, const double Value, const double Factor) { rNode.GetValue(rVariable) = Value * Factor; } template< class TVarType > static void UpdateFunctionNonHistWithAdd(NodeType& rNode, const TVarType& rVariable, const double Value, const double Factor) { rNode.GetValue(rVariable) += Value * Factor; } template< class TVarType > static std::function<void(NodeType&, const TVarType&, const double, const double)> GetUpdateFunction(const Kratos::Flags& rMappingOptions) { if (rMappingOptions.Is(MapperFlags::ADD_VALUES) && rMappingOptions.Is(MapperFlags::TO_NON_HISTORICAL)) return &UpdateFunctionNonHistWithAdd<TVarType>; if (rMappingOptions.Is(MapperFlags::ADD_VALUES)) return &UpdateFunctionWithAdd<TVarType>; if (rMappingOptions.Is(MapperFlags::TO_NON_HISTORICAL)) return &UpdateFunctionNonHist<TVarType>; return &UpdateFunction<TVarType>; } template< class TVectorType, class TVarType > void 
UpdateSystemVectorFromModelPart(TVectorType& rVector, ModelPart& rModelPart, const TVarType& rVariable, const Kratos::Flags& rMappingOptions) { // Here we construct a function pointer to not have the if all the time inside the loop const auto fill_fct = MapperUtilities::GetFillFunction<TVarType>(rMappingOptions); const int num_local_nodes = rModelPart.GetCommunicator().LocalMesh().NumberOfNodes(); const auto nodes_begin = rModelPart.GetCommunicator().LocalMesh().NodesBegin(); #pragma omp parallel for for (int i=0; i<num_local_nodes; i++) { fill_fct(*(nodes_begin + i), rVariable, rVector[i]); } } template< class TVectorType, class TVarType > void UpdateModelPartFromSystemVector(const TVectorType& rVector, ModelPart& rModelPart, const TVarType& rVariable, const Kratos::Flags& rMappingOptions) { const double factor = rMappingOptions.Is(MapperFlags::SWAP_SIGN) ? -1.0 : 1.0; // Here we construct a function pointer to not have the if all the time inside the loop const auto update_fct = std::bind(MapperUtilities::GetUpdateFunction<TVarType>(rMappingOptions), std::placeholders::_1, std::placeholders::_2, std::placeholders::_3, factor); const int num_local_nodes = rModelPart.GetCommunicator().LocalMesh().NumberOfNodes(); const auto nodes_begin = rModelPart.GetCommunicator().LocalMesh().NodesBegin(); #pragma omp parallel for for (int i=0; i<num_local_nodes; i++) { update_fct(*(nodes_begin + i), rVariable, rVector[i]); } } /** * @brief Assigning INTERFACE_EQUATION_IDs to the nodes, with and without MPI * This function assigns the INTERFACE_EQUATION_IDs to the nodes, which * act as EquationIds for the MappingMatrix. This work with and without MPI, * in MPI a ScanSum is performed with the local number of nodes * @param rModelPartCommunicator The Modelpart-Communicator to be used * @author Philipp Bucher */ void AssignInterfaceEquationIds(Communicator& rModelPartCommunicator); template<class TMapperLocalSystem> void CreateMapperLocalSystemsFromNodes(const Communicator& rModelPartCommunicator, std::vector<Kratos::unique_ptr<MapperLocalSystem>>& rLocalSystems) { const std::size_t num_nodes = rModelPartCommunicator.LocalMesh().NumberOfNodes(); const auto nodes_ptr_begin = rModelPartCommunicator.LocalMesh().Nodes().ptr_begin(); if (rLocalSystems.size() != num_nodes) { rLocalSystems.resize(num_nodes); } #pragma omp parallel for for (int i = 0; i< static_cast<int>(num_nodes); ++i) { auto it_node = nodes_ptr_begin + i; rLocalSystems[i] = Kratos::make_unique<TMapperLocalSystem>((*it_node).get()); } int num_local_systems = rModelPartCommunicator.GetDataCommunicator().SumAll((int)(rLocalSystems.size())); // int bcs of MPI KRATOS_ERROR_IF_NOT(num_local_systems > 0) << "No mapper local systems were created" << std::endl; } void CreateMapperLocalSystemsFromGeometries(const MapperLocalSystem& rMapperLocalSystemPrototype, const Communicator& rModelPartCommunicator, std::vector<Kratos::unique_ptr<MapperLocalSystem>>& rLocalSystems); template <class T1, class T2> inline double ComputeDistance(const T1& rCoords1, const T2& rCoords2) { return std::sqrt( std::pow(rCoords1[0] - rCoords2[0] , 2) + std::pow(rCoords1[1] - rCoords2[1] , 2) + std::pow(rCoords1[2] - rCoords2[2] , 2) ); } template <typename T> double ComputeMaxEdgeLengthLocal(const T& rEntityContainer); double ComputeSearchRadius(const ModelPart& rModelPart, int EchoLevel); double ComputeSearchRadius(const ModelPart& rModelPart1, const ModelPart& rModelPart2, const int EchoLevel); void CheckInterfaceModelParts(const int CommRank); BoundingBoxType 
ComputeLocalBoundingBox(const ModelPart& rModelPart); BoundingBoxType ComputeGlobalBoundingBox(const ModelPart& rModelPart); void ComputeBoundingBoxesWithTolerance(const std::vector<double>& rBoundingBoxes, const double Tolerance, std::vector<double>& rBoundingBoxesWithTolerance); std::string BoundingBoxStringStream(const BoundingBoxType& rBoundingBox); bool PointIsInsideBoundingBox(const BoundingBoxType& rBoundingBox, const array_1d<double, 3>& rCoords); void KRATOS_API(MAPPING_APPLICATION) SaveCurrentConfiguration(ModelPart& rModelPart); void KRATOS_API(MAPPING_APPLICATION) RestoreCurrentConfiguration(ModelPart& rModelPart); template<class TDataType> void EraseNodalVariable(ModelPart& rModelPart, const Variable<TDataType>& rVariable) { KRATOS_TRY; block_for_each(rModelPart.Nodes(), [&](Node<3>& rNode){ rNode.Data().Erase(rVariable); }); KRATOS_CATCH(""); } void FillBufferBeforeLocalSearch(const MapperLocalSystemPointerVector& rMapperLocalSystems, const std::vector<double>& rBoundingBoxes, const SizeType BufferSizeEstimate, std::vector<std::vector<double>>& rSendBuffer, std::vector<int>& rSendSizes); void CreateMapperInterfaceInfosFromBuffer(const std::vector<std::vector<double>>& rRecvBuffer, const MapperInterfaceInfoUniquePointerType& rpRefInterfaceInfo, const int CommRank, MapperInterfaceInfoPointerVectorType& rMapperInterfaceInfosContainer); void FillBufferAfterLocalSearch(MapperInterfaceInfoPointerVectorType& rMapperInterfaceInfosContainer, const MapperInterfaceInfoUniquePointerType& rpRefInterfaceInfo, const int CommRank, std::vector<std::vector<char>>& rSendBuffer, std::vector<int>& rSendSizes); void AssignInterfaceInfosAfterRemoteSearch(const MapperInterfaceInfoPointerVectorType& rMapperInterfaceInfosContainer, MapperLocalSystemPointerVectorPointer& rpMapperLocalSystems); void DeserializeMapperInterfaceInfosFromBuffer( const std::vector<std::vector<char>>& rSendBuffer, const MapperInterfaceInfoUniquePointerType& rpRefInterfaceInfo, const int CommRank, MapperInterfaceInfoPointerVectorType& rMapperInterfaceInfosContainer); /** * @class MapperInterfaceInfoSerializer * @ingroup MappingApplication * @brief Helper class to serialize/deserialize a vector containing MapperInterfaceInfos * @details This class serializes the vector containing the MapperInterfaceInfos (Shared Ptrs) * The goal of this class is to have a more efficient/faster implementation than the * one of the Serializer by avoiding the casting that is done in the serializer when pointers * are serialized * @TODO test the performance against the Serializer * @author Philipp Bucher */ class KRATOS_API(MAPPING_APPLICATION) MapperInterfaceInfoSerializer { public: MapperInterfaceInfoSerializer(std::vector<MapperInterfaceInfoPointerType>& rMapperInterfaceInfosContainer, const MapperInterfaceInfoUniquePointerType& rpRefInterfaceInfo) : mrInterfaceInfos(rMapperInterfaceInfosContainer) , mrpRefInterfaceInfo(rpRefInterfaceInfo->Create()) { } private: std::vector<MapperInterfaceInfoPointerType>& mrInterfaceInfos; MapperInterfaceInfoPointerType mrpRefInterfaceInfo; friend class Kratos::Serializer; // Adding "Kratos::" is nedded bcs of the "MapperUtilities"-namespace virtual void save(Kratos::Serializer& rSerializer) const; virtual void load(Kratos::Serializer& rSerializer); }; } // namespace MapperUtilities. } // namespace Kratos. #endif // KRATOS_MAPPER_UTILITIES_H_INCLUDED defined
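GetFillFunction and GetUpdateFunction above resolve the flag-dependent branch once and hand a callable to the node loop, so no per-iteration if is needed. A small standalone sketch of the same pattern, with hypothetical names and plain std::function instead of the Kratos types:

#include <functional>
#include <vector>

using Accessor = std::function<double(int)>;

// Pick the accessor once, based on the mapping flag, instead of branching per node.
inline Accessor MakeAccessor(bool use_non_historical,
                             const std::vector<double>& hist,
                             const std::vector<double>& non_hist)
{
    if (use_non_historical)
        return [&non_hist](int i) { return non_hist[i]; };
    return [&hist](int i) { return hist[i]; };
}

inline void FillVector(std::vector<double>& out, const Accessor& get)
{
    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(out.size()); ++i)
        out[i] = get(i);   // branch-free inner loop; the decision was made up front
}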
constant_density_acoustic_time_scalar_3D_4.h
#ifndef __CDA_TIME_SCALAR_3D_4__ #define __CDA_TIME_SCALAR_3D_4__ #include <stdlib.h> template< typename T, int ACCURACY > void cda_time_scalar_3D_4( T* km1_u, int nr_km1_u, int nc_km1_u, // in - padded wavefield shape T* k_Phix, int nr_k_Phix, int nc_k_Phix, // in - padded wavefield shape T* k_Phiy, int nr_k_Phiy, int nc_k_Phiy, // in - padded wavefield shape T* k_Phiz, int nr_k_Phiz, int nc_k_Phiz, // in - padded wavefield shape T* k_psi, int nr_k_psi, int nc_k_psi, // in T* k_u, int nr_k_u, int nc_k_u, // in - padded wavefield shape T* C, int nr_C, int nc_C, // in - padded wavefield shape T* rhs, int nr_rhs, int nc_rhs, // in - padded wavefield shape T* xlpml, int n_xlpml, // in - length is the number of nodes inside the padding that the pml value is defined. T* xrpml, int n_xrpml, // in - length is the number of nodes inside the padding that the pml value is defined. T* ylpml, int n_ylpml, // in - length is the number of nodes inside the padding that the pml value is defined. T* yrpml, int n_yrpml, // in - length is the number of nodes inside the padding that the pml value is defined. T* zlpml, int n_zlpml, // in - length is the number of nodes inside the padding that the pml value is defined. T* zrpml, int n_zrpml, // in - length is the number of nodes inside the padding that the pml value is defined. double const& dt, // in double const& dx, // in double const& dy, // in double const& dz, // in int const& nx, // in int const& ny, // in int const& nz, // in T* kp1_Phix, int nr_kp1_Phix, int nc_kp1_Phix, // out T* kp1_Phiy, int nr_kp1_Phiy, int nc_kp1_Phiy, // out T* kp1_Phiz, int nr_kp1_Phiz, int nc_kp1_Phiz, // out T* kp1_psi, int nr_kp1_psi, int nc_kp1_psi, // out T* kp1_u, int nr_kp1_u, int nc_kp1_u ) // out { enum {MAX_FD_SHIFT = ACCURACY/2}; T lapU = 0.0; // PML variable T sigmax = 0.0; T sigmay = 0.0; T sigmaz = 0.0; // Time delta variables T dt2 = dt*dt; // Loop/index variables int idx; int xstride = nz*ny; int ystride = nz; int zstride = 1; int s = zstride; int i, k, j; // shared space step square variable T dx2 = dx*dx; T dz2 = dz*dz; T dy2 = dy*dy; // private variables //non derivatives T fac1; T fac2; //derivatives T dux , duz, duy; T dPhix, dPhiz, dPhiy; T dPsix, dPsiz, dPsiy; char* NUM = getenv("OMP_NUM_THREADS"); int Num_Th = atoi (NUM); #pragma omp parallel for private(sigmaz, sigmax, sigmay, i, k, j, idx, dux, duz, duy, dPhix, dPhiz, dPhiy, lapU, fac1, fac2, dPsix, dPsiy, dPsiz) shared(dx, dx2, dz, dz2, dy, dy2, xstride, zstride, ystride, kp1_Phix, kp1_Phiz, kp1_Phiy, k_Phix, k_Phiz, k_Phiy, n_zrpml, n_zlpml, n_yrpml, n_ylpml, n_xrpml, n_xlpml, xrpml, xlpml, zrpml, zlpml, yrpml, ylpml, s, rhs, C, dt, dt2, km1_u, k_u, kp1_u) num_threads(Num_Th) collapse(3) for(int i=0; i < nx; ++i) { for(int j=0; j < ny; ++j) { for(int k=0; k < nz; k++) { idx = i*xstride + j*ystride + k; kp1_u[idx] = 0.0; kp1_Phix[idx] = 0.0; kp1_Phiy[idx] = 0.0; kp1_Phiz[idx] = 0.0; kp1_psi[idx] = 0.0; // This handles homogeneous Dirichlet BCs and non-updating in ghost regions. 
if ((i == 0) || (i == nx-1)) continue; if ((j == 0) || (j == ny-1)) continue; if ((k == 0) || (k == nz-1)) continue; lapU = 0.0; // Do the X direction if (i==0) { //decentered derivative 2 ranks on the right dux = ((1./12.)*0.0+(-2./3.)*0.0+0.0+(2./3.)*k_u[idx+xstride]+(-1./12.)*k_u[idx+2*xstride])/dx; dPsix = ((1./12.)*0.0+(-2./3.)*0.0+0.0+(2./3.)*k_psi[idx+xstride]+(-1./12.)*k_psi[idx+2*xstride])/dx; dPhix = ((1./12.)*0.0+(-2./3.)*0.0+0.0+(2./3.)*k_Phix[idx+xstride]+(-1./12.)*k_Phix[idx+2*xstride])/ dx; lapU += ((-1./12.)*0.0+(4./3.)*0.0+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+xstride]+(-1./12.)*k_u[idx+2*xstride])/ dx2; } else if (i == 1) { //decentered derivative 1 rank on the right dux = ((1./12.)*0.0 +(-2./3.)*k_u[idx-xstride]+0.0+(2./3.)*k_u[idx+xstride]+(-1./12.)*k_u[idx+2*xstride])/ dx; dPsix = ((1./12.)*0.0 +(-2./3.)*k_psi[idx-xstride]+0.0+(2./3.)*k_psi[idx+xstride]+(-1./12.)*k_psi[idx+2*xstride])/ dx; dPhix = ((1./12.)*0.0+(-2./3.)*k_Phix[idx-xstride]+0.0+(2./3.)*k_Phix[idx+xstride]+(-1./12.)*k_Phix[idx+2*xstride])/ dx; lapU += ((-1./12.)*0.0+(4./3.)*k_u[idx-xstride]+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+xstride]+(-1./12.)*k_u[idx+2*xstride])/ dx2; // Right side } else if (i == nx-1) { //decentered derivative 2 ranks on the left dux = ((1./12.)*k_u[idx-2*xstride]+(-2./3.)*k_u[idx-xstride]+0.0+(2./3.)*0.0 +(-1./12.)*0.0)/ dx; dPsix = ((1./12.)*k_psi[idx-2*xstride]+(-2./3.)*k_psi[idx-xstride]+0.0+(2./3.)*0.0 +(-1./12.)*0.0)/ dx; dPhix = ((1./12.)*k_Phix[idx-2*xstride]+(-2./3.)*k_Phix[idx-xstride]+0.0+(2./3.)*0.0+(-1./12.)*0.0) / dx; lapU += ((-1./12.)*k_u[idx-2*xstride]+(4./3.)*k_u[idx-xstride]+(-5./2.)*k_u[idx]+(4./3.)*0.0+(-1./12.)*0.0)/ dx2; } else if (i == nx-2) { //decentered derivative 1 ranks on the left dux = ((1./12.)*k_u[idx-2*xstride]+(-2./3.)*k_u[idx-xstride]+0.0+(2./3.)*k_u[idx+xstride]+(-1./12.)*0.0)/ dx; dPsix = ((1./12.)*k_psi[idx-2*xstride]+(-2./3.)*k_psi[idx-xstride]+0.0+(2./3.)*k_psi[idx+xstride]+(-1./12.)*0.0)/ dx; dPhix = ((1./12.)*k_Phix[idx-2*xstride]+(-2./3.)*k_Phix[idx-xstride]+0.0+(2./3.)*k_Phix[idx+xstride]+(-1./12.)*0.0)/ dx; lapU += ((-1./12.)*k_u[idx-2*xstride]+(4./3.)*k_u[idx-xstride]+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+xstride]+(-1./12.)*0.0)/ dx2; } else { //classic centered derivative dux = ((1./12.)*k_u[idx-2*xstride]+(-2./3.)*k_u[idx-xstride]+0.0+(2./3.)*k_u[idx+xstride]+(-1./12.)*k_u[idx+2*xstride])/ dx; dPsix = ((1./12.)*k_psi[idx-2*xstride]+(-2./3.)*k_psi[idx-xstride]+0.0+(2./3.)*k_psi[idx+xstride]+(-1./12.)*k_psi[idx+2*xstride])/ dx; dPhix = ((1./12.)*k_Phix[idx-2*xstride]+(-2./3.)*k_Phix[idx-xstride]+0.0+(2./3.)*k_Phix[idx+xstride]+(-1./12.)*k_Phix[idx+2*xstride])/ dx; lapU += ((-1./12.)*k_u[idx-2*xstride]+(4./3.)*k_u[idx-xstride]+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+xstride]+(-1./12.)*k_u[idx+2*xstride])/ dx2; } // Do the Y direction if (j==0) { //decentered derivative 2 ranks on the right duy = ((1./12.)*0.0+(-2./3.)*0.0+0.0+(2./3.)*k_u[idx+ystride]+(-1./12.)*k_u[idx+2*ystride])/dy; dPsiy = ((1./12.)*0.0+(-2./3.)*0.0+0.0+(2./3.)*k_psi[idx+ystride]+(-1./12.)*k_psi[idx+2*ystride])/dy; dPhiy = ((1./12.)*0.0+(-2./3.)*0.0+0.0+(2./3.)*k_Phiy[idx+ystride]+(-1./12.)*k_Phiy[idx+2*ystride])/ dy; lapU += ((-1./12.)*0.0+(4./3.)*0.0+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+ystride]+(-1./12.)*k_u[idx+2*ystride])/ dy2; } else if (j == 1) { //decentered derivative 1 rank on the right duy = ((1./12.)*0.0 +(-2./3.)*k_u[idx-ystride]+0.0+(2./3.)*k_u[idx+ystride]+(-1./12.)*k_u[idx+2*ystride])/ dy; dPsiy = ((1./12.)*0.0 
+(-2./3.)*k_psi[idx-ystride]+0.0+(2./3.)*k_psi[idx+ystride]+(-1./12.)*k_psi[idx+2*ystride])/ dy; dPhiy = ((1./12.)*0.0+(-2./3.)*k_Phiy[idx-ystride]+0.0+(2./3.)*k_Phiy[idx+ystride]+(-1./12.)*k_Phiy[idx+2*ystride])/ dy; lapU += ((-1./12.)*0.0+(4./3.)*k_u[idx-ystride]+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+ystride]+(-1./12.)*k_u[idx+2*ystride])/ dy2; // Right side } else if (j== nx-1) { //decentered derivative 2 ranks on the left duy = ((1./12.)*k_u[idx-2*ystride]+(-2./3.)*k_u[idx-ystride]+0.0+(2./3.)*0.0 +(-1./12.)*0.0)/ dy; dPsiy = ((1./12.)*k_psi[idx-2*ystride]+(-2./3.)*k_psi[idx-ystride]+0.0+(2./3.)*0.0 +(-1./12.)*0.0)/ dy; dPhiy = ((1./12.)*k_Phiy[idx-2*ystride]+(-2./3.)*k_Phiy[idx-ystride]+0.0+(2./3.)*0.0+(-1./12.)*0.0) / dy; lapU += ((-1./12.)*k_u[idx-2*ystride]+(4./3.)*k_u[idx-ystride]+(-5./2.)*k_u[idx]+(4./3.)*0.0+(-1./12.)*0.0)/ dy2; } else if (j == nx-2) { //decentered derivative 1 ranks on the left duy = ((1./12.)*k_u[idx-2*ystride]+(-2./3.)*k_u[idx-ystride]+0.0+(2./3.)*k_u[idx+ystride]+(-1./12.)*0.0)/ dy; dPsiy = ((1./12.)*k_psi[idx-2*ystride]+(-2./3.)*k_psi[idx-ystride]+0.0+(2./3.)*k_psi[idx+ystride]+(-1./12.)*0.0)/ dy; dPhiy = ((1./12.)*k_Phiy[idx-2*ystride]+(-2./3.)*k_Phiy[idx-ystride]+0.0+(2./3.)*k_Phiy[idx+ystride]+(-1./12.)*0.0)/ dy; lapU += ((-1./12.)*k_u[idx-2*ystride]+(4./3.)*k_u[idx-ystride]+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+ystride]+(-1./12.)*0.0)/ dy2; } else { //classic centered derivative duy = ((1./12.)*k_u[idx-2*ystride]+(-2./3.)*k_u[idx-ystride]+0.0+(2./3.)*k_u[idx+ystride]+(-1./12.)*k_u[idx+2*ystride])/ dy; dPsiy = ((1./12.)*k_psi[idx-2*ystride]+(-2./3.)*k_psi[idx-ystride]+0.0+(2./3.)*k_psi[idx+ystride]+(-1./12.)*k_psi[idx+2*ystride])/ dy; dPhiy = ((1./12.)*k_Phiy[idx-2*ystride]+(-2./3.)*k_Phiy[idx-ystride]+0.0+(2./3.)*k_Phiy[idx+ystride]+(-1./12.)*k_Phiy[idx+2*ystride])/ dy; lapU += ((-1./12.)*k_u[idx-2*ystride]+(4./3.)*k_u[idx-ystride]+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+ystride]+(-1./12.)*k_u[idx+2*ystride])/ dy2; } // Do the Z direction // Left side if (k==0) { //decentered derivative 2 ranks on the right duz = ((1./12.)*0.0+(-2./3.)*0.0+0.0+(2./3.)*k_u[idx+s]+(-1./12.)*k_u[idx+2*s])/dz; dPsiz = ((1./12.)*0.0+(-2./3.)*0.0+0.0+(2./3.)*k_psi[idx+s]+(-1./12.)*k_psi[idx+2*s])/dz; dPhiz = ((1./12.)*0.0+(-2./3.)*0.0+0.0+(2./3.)*k_Phiz[idx+s]+(-1./12.)*k_Phiz[idx+2*s])/ dz; lapU += ((-1./12.)*0.0+(4./3.)*0.0+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+s]+(-1./12.)*k_u[idx+2*s])/ dz2; } else if (k == 1) { //decentered derivative 1 rank on the right duz = ((1./12.)*0.0 +(-2./3.)*k_u[idx-s]+0.0+(2./3.)*k_u[idx+s]+(-1./12.)*k_u[idx+2*s])/ dz; dPsiz = ((1./12.)*0.0 +(-2./3.)*k_psi[idx-s]+0.0+(2./3.)*k_psi[idx+s]+(-1./12.)*k_psi[idx+2*s])/ dz; dPhiz = ((1./12.)*0.0+(-2./3.)*k_Phiz[idx-s]+0.0+(2./3.)*k_Phiz[idx+s]+(-1./12.)*k_Phiz[idx+2*s])/ dz; lapU += ((-1./12.)*0.0+(4./3.)*k_u[idx-s]+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+s]+(-1./12.)*k_u[idx+2*s])/ dz2; } else if (k == nz-1) { //decentered derivative 2 ranks on the left duz = ((1./12.)*k_u[idx-2*s]+(-2./3.)*k_u[idx-s]+0.0+(2./3.)*0.0 +(-1./12.)*0.0)/ dz; dPsiz = ((1./12.)*k_psi[idx-2*s]+(-2./3.)*k_psi[idx-s]+0.0+(2./3.)*0.0 +(-1./12.)*0.0)/ dz; dPhiz = ((1./12.)*k_Phiz[idx-2*s]+(-2./3.)*k_Phiz[idx-s]+0.0+(2./3.)*0.0+(-1./12.)*0.0) / dz; lapU += ((-1./12.)*k_u[idx-2*s]+(4./3.)*k_u[idx-s]+(-5./2.)*k_u[idx]+(4./3.)*0.0+(-1./12.)*0.0)/ dz2; } else if (k == nz-2) { //decentered derivative 1 ranks on the left duz = ((1./12.)*k_u[idx-2*s]+(-2./3.)*k_u[idx-s]+0.0+(2./3.)*k_u[idx+s]+(-1./12.)*0.0)/ dz; dPsiz = 
((1./12.)*k_psi[idx-2*s]+(-2./3.)*k_psi[idx-s]+0.0+(2./3.)*k_psi[idx+s]+(-1./12.)*0.0)/ dz; dPhiz = ((1./12.)*k_Phiz[idx-2*s]+(-2./3.)*k_Phiz[idx-s]+0.0+(2./3.)*k_Phiz[idx+s]+(-1./12.)*0.0)/ dz; lapU += ((-1./12.)*k_u[idx-2*s]+(4./3.)*k_u[idx-s]+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+s]+(-1./12.)*0.0)/ dz2; } else { //classic centered derivative duz = ((1./12.)*k_u[idx-2*s]+(-2./3.)*k_u[idx-s]+0.0+(2./3.)*k_u[idx+s]+(-1./12.)*k_u[idx+2*s])/ dz; dPsiz = ((1./12.)*k_psi[idx-2*s]+(-2./3.)*k_psi[idx-s]+0.0+(2./3.)*k_psi[idx+s]+(-1./12.)*k_psi[idx+2*s])/ dz; dPhiz = ((1./12.)*k_Phiz[idx-2*s]+(-2./3.)*k_Phiz[idx-s]+0.0+(2./3.)*k_Phiz[idx+s]+(-1./12.)*k_Phiz[idx+2*s])/ dz; lapU += ((-1./12.)*k_u[idx-2*s]+(4./3.)*k_u[idx-s]+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+s]+(-1./12.)*k_u[idx+2*s])/ dz2; } sigmax = 0.0; sigmay = 0.0; sigmaz = 0.0; // Check if in left PML-X if((n_xlpml>0) && (i < n_xlpml)) { sigmax = xlpml[i]; } // Check if in right PML-X else if((n_xrpml>0) && (i >= nx-n_xrpml)) { sigmax = xrpml[n_xrpml-((nx-1)-i)]; } // Check if in left PML-Y if((n_ylpml>0) && (j < n_ylpml)) { sigmay = ylpml[j]; } // Check if in right PML-Y else if((n_yrpml>0) && (j >= ny-n_yrpml)) { sigmay = yrpml[n_yrpml-((ny-1)-j)]; } // Check if in left PML-Z if((n_zlpml>0) && (k < n_zlpml)) { sigmaz = zlpml[k]; } // Check if in right PML-Z else if((n_zrpml>0) && (k >= nz-n_zrpml)) { sigmaz = zrpml[n_zrpml-((nz-1)-k)]; // 0th element of the right pml array corresponds to n_zrpml'th node from the right boundary. } if((sigmaz != 0.0) || (sigmay != 0.0) || (sigmax != 0.0)) { kp1_Phix[idx] = k_Phix[idx] - dt*sigmax*k_Phix[idx] + dt*(sigmay+sigmaz-sigmax)*dux + dt*(sigmay*sigmaz*dPsix); kp1_Phiy[idx] = k_Phiy[idx] - dt*sigmay*k_Phiy[idx] + dt*(sigmaz+sigmax-sigmay)*duy + dt*(sigmaz*sigmax*dPsiy); kp1_Phiz[idx] = k_Phiz[idx] - dt*sigmaz*k_Phiz[idx] + dt*(sigmax+sigmay-sigmaz)*duz + dt*(sigmax*sigmay*dPsiz); kp1_psi[idx] = k_psi[idx] + dt * k_u[idx]; fac1 = (2.0*dt2 / (2.0 + dt*(sigmax+sigmay+sigmaz))); fac2 = (C[idx]*C[idx]) * (rhs[idx] + lapU + dPhix + dPhiy + dPhiz - (sigmax*sigmay*sigmaz)*k_psi[idx]) - (km1_u[idx]-2.0*k_u[idx])/dt2 + (sigmax+sigmay+sigmaz)*km1_u[idx]/(2.0*dt) - (sigmax*sigmay + sigmay*sigmaz + sigmaz*sigmax)*k_u[idx]; kp1_u[idx] = fac1 * fac2; } else { kp1_Phix[idx] = k_Phix[idx]; kp1_Phiy[idx] = k_Phiy[idx]; kp1_Phiz[idx] = k_Phiz[idx]; kp1_psi[idx] = k_psi[idx] + dt * k_u[idx]; kp1_u[idx] = dt2*(C[idx]*C[idx])*(rhs[idx]+lapU+dPhix+dPhiy+dPhiz) - (km1_u[idx]-2.0*k_u[idx]); } } } } }; #endif
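The kernel above applies the same fourth-order stencils in every direction: (1/12, -2/3, 0, 2/3, -1/12)/dx for first derivatives and (-1/12, 4/3, -5/2, 4/3, -1/12)/dx^2 for the Laplacian terms. A minimal 1-D sketch of those two stencils for interior points (illustrative only; no PML, boundary, or one-sided variants):

// Fourth-order accurate first and second derivatives of u at an interior index i.
template <typename T>
inline T d1_o4(const T* u, int i, T dx)
{
    return ((1.0/12.0)*u[i-2] + (-2.0/3.0)*u[i-1]
            + (2.0/3.0)*u[i+1] + (-1.0/12.0)*u[i+2]) / dx;
}

template <typename T>
inline T d2_o4(const T* u, int i, T dx)
{
    return ((-1.0/12.0)*u[i-2] + (4.0/3.0)*u[i-1] + (-5.0/2.0)*u[i]
            + (4.0/3.0)*u[i+1] + (-1.0/12.0)*u[i+2]) / (dx*dx);
}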
Example_array_sections.2.c
/*
 * @@name:       array_sections.2c
 * @@type:       C
 * @@compilable: no
 * @@linkable:   no
 * @@expect:     failure
 * @@version:    omp_4.0
 */
void foo ()
{
  int A[30], *p;

  #pragma omp target data map( A[0:4] )
  {
    p = &A[0];
    /* invalid because p[3] and A[3] are the same
     * location on the host but the array section
     * specified via p[...] is not a subset of A[0:4] */
    #pragma omp target map( p[3:20] )
    {
      A[2] = 0;
      p[8] = 0;
    }
  }
}
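For contrast, a conforming variant (an illustrative sketch, not part of the examples document): if the enclosing target data construct maps all of A, then p[3:20] refers to a subset of already-mapped storage and the nested map is valid.

void foo_valid (void)
{
  int A[30], *p;

  /* Map the whole array so any section of p that stays inside A is a subset
     of mapped storage. */
  #pragma omp target data map( A[0:30] )
  {
    p = &A[0];
    #pragma omp target map( p[3:20] )
    {
      A[2] = 0;
      p[8] = 0;
    }
  }
}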
effect.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE FFFFF FFFFF EEEEE CCCC TTTTT % % E F F E C T % % EEE FFF FFF EEE C T % % E F F E C T % % EEEEE F F EEEEE CCCC T % % % % % % MagickCore Image Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/accelerate-private.h" #include "magick/blob.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/constitute.h" #include "magick/decorate.h" #include "magick/distort.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/effect.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/matrix.h" #include "magick/memory_.h" #include "magick/memory-private.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/montage.h" #include "magick/morphology.h" #include "magick/morphology-private.h" #include "magick/opencl-private.h" #include "magick/paint.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/random_.h" #include "magick/random-private.h" #include "magick/resample.h" #include "magick/resample-private.h" #include "magick/resize.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/shear.h" #include "magick/signature-private.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/transform.h" #include "magick/threshold.h" #ifdef MAGICKCORE_CLPERFMARKER #include "CLPerfMarker.h" #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveBlurImage() adaptively blurs the image by blurring less % intensely near image edges and more intensely far from edges. We blur the % image with a Gaussian operator of the given radius and standard deviation % (sigma). For reasonable results, radius should be larger than sigma. Use a % radius of 0 and AdaptiveBlurImage() selects a suitable radius for you. 
% % The format of the AdaptiveBlurImage method is: % % Image *AdaptiveBlurImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % Image *AdaptiveBlurImageChannel(const Image *image, % const ChannelType channel,double radius,const double sigma, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Laplacian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { Image *blur_image; blur_image=AdaptiveBlurImageChannel(image,DefaultChannels,radius,sigma, exception); return(blur_image); } MagickExport Image *AdaptiveBlurImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, ExceptionInfo *exception) { #define AdaptiveBlurImageTag "Convolve/Image" #define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma) CacheView *blur_view, *edge_view, *image_view; double **kernel, normalize; Image *blur_image, *edge_image, *gaussian_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket bias; register ssize_t i; size_t width; ssize_t j, k, u, v, y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (fabs(sigma) <= MagickEpsilon) return(blur_image); if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse) { InheritException(exception,&blur_image->exception); blur_image=DestroyImage(blur_image); return((Image *) NULL); } /* Edge detect the image brighness channel, level, blur, and level again. */ edge_image=EdgeImage(image,radius,exception); if (edge_image == (Image *) NULL) { blur_image=DestroyImage(blur_image); return((Image *) NULL); } (void) AutoLevelImage(edge_image); gaussian_image=BlurImage(edge_image,radius,sigma,exception); if (gaussian_image != (Image *) NULL) { edge_image=DestroyImage(edge_image); edge_image=gaussian_image; } (void) AutoLevelImage(edge_image); /* Create a set of kernels from maximum (radius,sigma) to minimum. 
*/ width=GetOptimalKernelWidth2D(radius,sigma); kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width, sizeof(*kernel))); if (kernel == (double **) NULL) { edge_image=DestroyImage(edge_image); blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) ResetMagickMemory(kernel,0,(size_t) width*sizeof(*kernel)); for (i=0; i < (ssize_t) width; i+=2) { kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t) (width-i),(width-i)*sizeof(**kernel))); if (kernel[i] == (double *) NULL) break; normalize=0.0; j=(ssize_t) (width-i-1)/2; k=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) { kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); normalize+=kernel[i][k]; k++; } } kernel[i][(k-1)/2]+=(1.0-normalize); if (sigma < MagickEpsilon) kernel[i][(k-1)/2]=1.0; } if (i < (ssize_t) width) { for (i-=2; i >= 0; i-=2) kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]); kernel=(double **) RelinquishAlignedMemory(kernel); edge_image=DestroyImage(edge_image); blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Adaptively blur image. */ status=MagickTrue; progress=0; GetMagickPixelPacket(image,&bias); SetMagickPixelPacketBias(image,&bias); image_view=AcquireVirtualCacheView(image,exception); edge_view=AcquireVirtualCacheView(edge_image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,blur_image,blur_image->rows,1) #endif for (y=0; y < (ssize_t) blur_image->rows; y++) { register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p, *magick_restrict r; register IndexPacket *magick_restrict blur_indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((r == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view); for (x=0; x < (ssize_t) blur_image->columns; x++) { double alpha, gamma; DoublePixelPacket pixel; register const double *magick_restrict k; register ssize_t i, u, v; gamma=0.0; i=(ssize_t) ceil((double) width*QuantumScale* GetPixelIntensity(edge_image,r)-0.5); if (i < 0) i=0; else if (i > (ssize_t) width) i=(ssize_t) width; if ((i & 0x01) != 0) i--; p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-i)/2L),y- (ssize_t) ((width-i)/2L),width-i,width-i,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); pixel.red=bias.red; pixel.green=bias.green; pixel.blue=bias.blue; pixel.opacity=bias.opacity; pixel.index=bias.index; k=kernel[i]; for (v=0; v < (ssize_t) (width-i); v++) { for (u=0; u < (ssize_t) (width-i); u++) { alpha=1.0; if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p)); if ((channel & RedChannel) != 0) pixel.red+=(*k)*alpha*GetPixelRed(p); if ((channel & GreenChannel) != 0) pixel.green+=(*k)*alpha*GetPixelGreen(p); if ((channel & BlueChannel) != 0) pixel.blue+=(*k)*alpha*GetPixelBlue(p); if ((channel & OpacityChannel) != 0) 
pixel.opacity+=(*k)*GetPixelOpacity(p); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) pixel.index+=(*k)*alpha*GetPixelIndex(indexes+x+(width-i)*v+u); gamma+=(*k)*alpha; k++; p++; } } gamma=PerceptibleReciprocal(gamma); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(gamma*pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(gamma*pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*pixel.index)); q++; r++; } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_AdaptiveBlurImageChannel) #endif proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_image->type=image->type; blur_view=DestroyCacheView(blur_view); edge_view=DestroyCacheView(edge_view); image_view=DestroyCacheView(image_view); edge_image=DestroyImage(edge_image); for (i=0; i < (ssize_t) width; i+=2) kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]); kernel=(double **) RelinquishAlignedMemory(kernel); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e S h a r p e n I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveSharpenImage() adaptively sharpens the image by sharpening more % intensely near image edges and less intensely far from edges. We sharpen the % image with a Gaussian operator of the given radius and standard deviation % (sigma). For reasonable results, radius should be larger than sigma. Use a % radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you. % % The format of the AdaptiveSharpenImage method is: % % Image *AdaptiveSharpenImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % Image *AdaptiveSharpenImageChannel(const Image *image, % const ChannelType channel,double radius,const double sigma, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Laplacian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { Image *sharp_image; sharp_image=AdaptiveSharpenImageChannel(image,DefaultChannels,radius,sigma, exception); return(sharp_image); } MagickExport Image *AdaptiveSharpenImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, ExceptionInfo *exception) { #define AdaptiveSharpenImageTag "Convolve/Image" #define MagickSigma (fabs(sigma) < MagickEpsilon ? 
MagickEpsilon : sigma) CacheView *sharp_view, *edge_view, *image_view; double **kernel, normalize; Image *sharp_image, *edge_image, *gaussian_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket bias; register ssize_t i; size_t width; ssize_t j, k, u, v, y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); sharp_image=CloneImage(image,0,0,MagickTrue,exception); if (sharp_image == (Image *) NULL) return((Image *) NULL); if (fabs(sigma) <= MagickEpsilon) return(sharp_image); if (SetImageStorageClass(sharp_image,DirectClass) == MagickFalse) { InheritException(exception,&sharp_image->exception); sharp_image=DestroyImage(sharp_image); return((Image *) NULL); } /* Edge detect the image brighness channel, level, sharp, and level again. */ edge_image=EdgeImage(image,radius,exception); if (edge_image == (Image *) NULL) { sharp_image=DestroyImage(sharp_image); return((Image *) NULL); } (void) AutoLevelImage(edge_image); gaussian_image=BlurImage(edge_image,radius,sigma,exception); if (gaussian_image != (Image *) NULL) { edge_image=DestroyImage(edge_image); edge_image=gaussian_image; } (void) AutoLevelImage(edge_image); /* Create a set of kernels from maximum (radius,sigma) to minimum. */ width=GetOptimalKernelWidth2D(radius,sigma); kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width, sizeof(*kernel))); if (kernel == (double **) NULL) { edge_image=DestroyImage(edge_image); sharp_image=DestroyImage(sharp_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) ResetMagickMemory(kernel,0,(size_t) width*sizeof(*kernel)); for (i=0; i < (ssize_t) width; i+=2) { kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t) (width-i),(width-i)*sizeof(**kernel))); if (kernel[i] == (double *) NULL) break; normalize=0.0; j=(ssize_t) (width-i-1)/2; k=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) { kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); normalize+=kernel[i][k]; k++; } } kernel[i][(k-1)/2]=(double) ((-2.0)*normalize); if (sigma < MagickEpsilon) kernel[i][(k-1)/2]=1.0; } if (i < (ssize_t) width) { for (i-=2; i >= 0; i-=2) kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]); kernel=(double **) RelinquishAlignedMemory(kernel); edge_image=DestroyImage(edge_image); sharp_image=DestroyImage(sharp_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Adaptively sharpen image. 
*/ status=MagickTrue; progress=0; GetMagickPixelPacket(image,&bias); SetMagickPixelPacketBias(image,&bias); image_view=AcquireVirtualCacheView(image,exception); edge_view=AcquireVirtualCacheView(edge_image,exception); sharp_view=AcquireAuthenticCacheView(sharp_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,sharp_image,sharp_image->rows,1) #endif for (y=0; y < (ssize_t) sharp_image->rows; y++) { register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p, *magick_restrict r; register IndexPacket *magick_restrict sharp_indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1, exception); if ((r == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } sharp_indexes=GetCacheViewAuthenticIndexQueue(sharp_view); for (x=0; x < (ssize_t) sharp_image->columns; x++) { double alpha, gamma; DoublePixelPacket pixel; register const double *magick_restrict k; register ssize_t i, u, v; gamma=0.0; i=(ssize_t) ceil((double) width*(1.0-QuantumScale* GetPixelIntensity(edge_image,r))-0.5); if (i < 0) i=0; else if (i > (ssize_t) width) i=(ssize_t) width; if ((i & 0x01) != 0) i--; p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-i)/2L),y- (ssize_t) ((width-i)/2L),width-i,width-i,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); k=kernel[i]; pixel.red=bias.red; pixel.green=bias.green; pixel.blue=bias.blue; pixel.opacity=bias.opacity; pixel.index=bias.index; for (v=0; v < (ssize_t) (width-i); v++) { for (u=0; u < (ssize_t) (width-i); u++) { alpha=1.0; if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p)); if ((channel & RedChannel) != 0) pixel.red+=(*k)*alpha*GetPixelRed(p); if ((channel & GreenChannel) != 0) pixel.green+=(*k)*alpha*GetPixelGreen(p); if ((channel & BlueChannel) != 0) pixel.blue+=(*k)*alpha*GetPixelBlue(p); if ((channel & OpacityChannel) != 0) pixel.opacity+=(*k)*GetPixelOpacity(p); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) pixel.index+=(*k)*alpha*GetPixelIndex(indexes+x+(width-i)*v+u); gamma+=(*k)*alpha; k++; p++; } } gamma=PerceptibleReciprocal(gamma); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(gamma*pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(gamma*pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(sharp_indexes+x,ClampToQuantum(gamma*pixel.index)); q++; r++; } if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_AdaptiveSharpenImageChannel) #endif proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } sharp_image->type=image->type; sharp_view=DestroyCacheView(sharp_view); 
edge_view=DestroyCacheView(edge_view); image_view=DestroyCacheView(image_view); edge_image=DestroyImage(edge_image); for (i=0; i < (ssize_t) width; i+=2) kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]); kernel=(double **) RelinquishAlignedMemory(kernel); if (status == MagickFalse) sharp_image=DestroyImage(sharp_image); return(sharp_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BlurImage() blurs an image. We convolve the image with a Gaussian operator % of the given radius and standard deviation (sigma). For reasonable results, % the radius should be larger than sigma. Use a radius of 0 and BlurImage() % selects a suitable radius for you. % % The format of the BlurImage method is: % % Image *BlurImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % Image *BlurImageChannel(const Image *image,const ChannelType channel, % const double radius,const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *BlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { Image *blur_image; blur_image=BlurImageChannel(image,DefaultChannels,radius,sigma,exception); return(blur_image); } MagickExport Image *BlurImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, ExceptionInfo *exception) { char geometry[MaxTextExtent]; KernelInfo *kernel_info; Image *blur_image = NULL; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateBlurImage(image,channel,radius,sigma,exception); if (blur_image != (Image *) NULL) return(blur_image); #endif (void) FormatLocaleString(geometry,MaxTextExtent, "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma); kernel_info=AcquireKernelInfo(geometry); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); blur_image=MorphologyImageChannel(image,channel,ConvolveMorphology,1, kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n v o l v e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvolveImage() applies a custom convolution kernel to the image. % % The format of the ConvolveImage method is: % % Image *ConvolveImage(const Image *image,const size_t order, % const double *kernel,ExceptionInfo *exception) % Image *ConvolveImageChannel(const Image *image,const ChannelType channel, % const size_t order,const double *kernel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o order: the number of columns and rows in the filter kernel. 
% % o kernel: An array of double representing the convolution kernel. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ConvolveImage(const Image *image,const size_t order, const double *kernel,ExceptionInfo *exception) { Image *convolve_image; #ifdef MAGICKCORE_CLPERFMARKER clBeginPerfMarkerAMD(__FUNCTION__,""); #endif convolve_image=ConvolveImageChannel(image,DefaultChannels,order,kernel, exception); #ifdef MAGICKCORE_CLPERFMARKER clEndPerfMarkerAMD(); #endif return(convolve_image); } MagickExport Image *ConvolveImageChannel(const Image *image, const ChannelType channel,const size_t order,const double *kernel, ExceptionInfo *exception) { Image *convolve_image; KernelInfo *kernel_info; register ssize_t i; kernel_info=AcquireKernelInfo((const char *) NULL); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); kernel_info->width=order; kernel_info->height=order; kernel_info->x=(ssize_t) (order-1)/2; kernel_info->y=(ssize_t) (order-1)/2; kernel_info->signature=MagickCoreSignature; kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory( kernel_info->width,kernel_info->width*sizeof(*kernel_info->values))); if (kernel_info->values == (double *) NULL) { kernel_info=DestroyKernelInfo(kernel_info); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (i=0; i < (ssize_t) (order*order); i++) kernel_info->values[i]=kernel[i]; convolve_image=(Image *) NULL; #if defined(MAGICKCORE_OPENCL_SUPPORT) convolve_image=AccelerateConvolveImageChannel(image,channel,kernel_info, exception); #endif if (convolve_image == (Image *) NULL) convolve_image=MorphologyImageChannel(image,channel,ConvolveMorphology,1, kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); return(convolve_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s p e c k l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DespeckleImage() reduces the speckle noise in an image while perserving the % edges of the original image. A speckle removing filter uses a complementary % hulling technique (raising pixels that are darker than their surrounding % neighbors, then complementarily lowering pixels that are brighter than their % surrounding neighbors) to reduce the speckle index of that image (reference % Crimmins speckle removal). % % The format of the DespeckleImage method is: % % Image *DespeckleImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
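%
%  A minimal usage sketch follows; "image" and "exception" are assumed to
%  have been acquired elsewhere (for example with ReadImage() and
%  AcquireExceptionInfo()):
%
%      Image
%        *despeckle_image;
%
%      despeckle_image=DespeckleImage(image,exception);
%      if (despeckle_image == (Image *) NULL)
%        CatchException(exception);
%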
% */ static void Hull(const Image *image,const ssize_t x_offset, const ssize_t y_offset,const size_t columns,const size_t rows, const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g) { register Quantum *p, *q, *r, *s; ssize_t y; assert(f != (Quantum *) NULL); assert(g != (Quantum *) NULL); p=f+(columns+2); q=g+(columns+2); r=p+(y_offset*(columns+2)+x_offset); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_number_threads(image,image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { register ssize_t i, x; SignedQuantum v; i=(2*y+1)+y*columns; if (polarity > 0) for (x=0; x < (ssize_t) columns; x++) { v=(SignedQuantum) p[i]; if ((SignedQuantum) r[i] >= (v+ScaleCharToQuantum(2))) v+=ScaleCharToQuantum(1); q[i]=(Quantum) v; i++; } else for (x=0; x < (ssize_t) columns; x++) { v=(SignedQuantum) p[i]; if ((SignedQuantum) r[i] <= (v-ScaleCharToQuantum(2))) v-=ScaleCharToQuantum(1); q[i]=(Quantum) v; i++; } } p=f+(columns+2); q=g+(columns+2); r=q+(y_offset*(columns+2)+x_offset); s=q-(y_offset*(columns+2)+x_offset); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_number_threads(image,image,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { register ssize_t i, x; SignedQuantum v; i=(2*y+1)+y*columns; if (polarity > 0) for (x=0; x < (ssize_t) columns; x++) { v=(SignedQuantum) q[i]; if (((SignedQuantum) s[i] >= (v+ScaleCharToQuantum(2))) && ((SignedQuantum) r[i] > v)) v+=ScaleCharToQuantum(1); p[i]=(Quantum) v; i++; } else for (x=0; x < (ssize_t) columns; x++) { v=(SignedQuantum) q[i]; if (((SignedQuantum) s[i] <= (v-ScaleCharToQuantum(2))) && ((SignedQuantum) r[i] < v)) v-=ScaleCharToQuantum(1); p[i]=(Quantum) v; i++; } } } MagickExport Image *DespeckleImage(const Image *image,ExceptionInfo *exception) { #define DespeckleImageTag "Despeckle/Image" CacheView *despeckle_view, *image_view; Image *despeckle_image; MagickBooleanType status; MemoryInfo *buffer_info, *pixel_info; register ssize_t i; Quantum *magick_restrict buffer, *magick_restrict pixels; size_t length, number_channels; static const ssize_t X[4] = {0, 1, 1,-1}, Y[4] = {1, 0, 1, 1}; /* Allocate despeckled image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) despeckle_image=AccelerateDespeckleImage(image, exception); if (despeckle_image != (Image *) NULL) return(despeckle_image); #endif despeckle_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (despeckle_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(despeckle_image,DirectClass) == MagickFalse) { InheritException(exception,&despeckle_image->exception); despeckle_image=DestroyImage(despeckle_image); return((Image *) NULL); } /* Allocate image buffer. 
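    The pixel and scratch buffers are padded by one pixel on each side
    ((columns+2) x (rows+2)) so Hull() can address its offset neighbors
    without bounds checks.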
*/ length=(size_t) ((image->columns+2)*(image->rows+2)); pixel_info=AcquireVirtualMemory(length,sizeof(*pixels)); buffer_info=AcquireVirtualMemory(length,sizeof(*buffer)); if ((pixel_info == (MemoryInfo *) NULL) || (buffer_info == (MemoryInfo *) NULL)) { if (buffer_info != (MemoryInfo *) NULL) buffer_info=RelinquishVirtualMemory(buffer_info); if (pixel_info != (MemoryInfo *) NULL) pixel_info=RelinquishVirtualMemory(pixel_info); despeckle_image=DestroyImage(despeckle_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info); buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info); /* Reduce speckle in the image. */ status=MagickTrue; number_channels=(size_t) (image->colorspace == CMYKColorspace ? 5 : 4); image_view=AcquireVirtualCacheView(image,exception); despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception); for (i=0; i < (ssize_t) number_channels; i++) { register ssize_t k, x; ssize_t j, y; if (status == MagickFalse) continue; if ((image->matte == MagickFalse) && (i == 3)) continue; (void) ResetMagickMemory(pixels,0,length*sizeof(*pixels)); j=(ssize_t) image->columns+2; for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); j++; for (x=0; x < (ssize_t) image->columns; x++) { switch (i) { case 0: pixels[j]=GetPixelRed(p); break; case 1: pixels[j]=GetPixelGreen(p); break; case 2: pixels[j]=GetPixelBlue(p); break; case 3: pixels[j]=GetPixelOpacity(p); break; case 4: pixels[j]=GetPixelBlack(indexes+x); break; default: break; } p++; j++; } j++; } (void) ResetMagickMemory(buffer,0,length*sizeof(*buffer)); for (k=0; k < 4; k++) { Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer); Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer); Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer); Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer); } j=(ssize_t) image->columns+2; for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns, 1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(despeckle_view); j++; for (x=0; x < (ssize_t) image->columns; x++) { switch (i) { case 0: SetPixelRed(q,pixels[j]); break; case 1: SetPixelGreen(q,pixels[j]); break; case 2: SetPixelBlue(q,pixels[j]); break; case 3: SetPixelOpacity(q,pixels[j]); break; case 4: SetPixelIndex(indexes+x,pixels[j]); break; default: break; } q++; j++; } sync=SyncCacheViewAuthenticPixels(despeckle_view,exception); if (sync == MagickFalse) { status=MagickFalse; break; } j++; } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i, number_channels); if (proceed == MagickFalse) status=MagickFalse; } } despeckle_view=DestroyCacheView(despeckle_view); image_view=DestroyCacheView(image_view); buffer_info=RelinquishVirtualMemory(buffer_info); pixel_info=RelinquishVirtualMemory(pixel_info); despeckle_image->type=image->type; if (status == MagickFalse) despeckle_image=DestroyImage(despeckle_image); return(despeckle_image); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E d g e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EdgeImage() finds edges in an image. Radius defines the radius of the % convolution filter. Use a radius of 0 and EdgeImage() selects a suitable % radius for you. % % The format of the EdgeImage method is: % % Image *EdgeImage(const Image *image,const double radius, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the pixel neighborhood. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *EdgeImage(const Image *image,const double radius, ExceptionInfo *exception) { Image *edge_image; KernelInfo *kernel_info; register ssize_t i; size_t width; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth1D(radius,0.5); kernel_info=AcquireKernelInfo((const char *) NULL); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); (void) ResetMagickMemory(kernel_info,0,sizeof(*kernel_info)); kernel_info->width=width; kernel_info->height=width; kernel_info->x=(ssize_t) (kernel_info->width-1)/2; kernel_info->y=(ssize_t) (kernel_info->height-1)/2; kernel_info->signature=MagickCoreSignature; kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory( kernel_info->width,kernel_info->height*sizeof(*kernel_info->values))); if (kernel_info->values == (double *) NULL) { kernel_info=DestroyKernelInfo(kernel_info); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++) kernel_info->values[i]=(-1.0); kernel_info->values[i/2]=(double) kernel_info->width*kernel_info->height-1.0; edge_image=(Image *) NULL; #if defined(MAGICKCORE_OPENCL_SUPPORT) edge_image=AccelerateConvolveImageChannel(image,DefaultChannels,kernel_info, exception); #endif if (edge_image == (Image *) NULL) edge_image=MorphologyImageChannel(image,DefaultChannels,ConvolveMorphology, 1,kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); return(edge_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E m b o s s I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EmbossImage() returns a grayscale image with a three-dimensional effect. % We convolve the image with a Gaussian operator of the given radius and % standard deviation (sigma). For reasonable results, radius should be % larger than sigma. Use a radius of 0 and Emboss() selects a suitable % radius for you. % % The format of the EmbossImage method is: % % Image *EmbossImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the pixel neighborhood. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. 
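%
%  For example (a sketch only; "image" and "exception" are assumed to be
%  valid), a radius of 0 lets the method choose a radius to suit sigma:
%
%      Image
%        *emboss_image;
%
%      emboss_image=EmbossImage(image,0.0,1.0,exception);
%      if (emboss_image == (Image *) NULL)
%        CatchException(exception);
%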
% */ MagickExport Image *EmbossImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { double gamma, normalize; Image *emboss_image; KernelInfo *kernel_info; register ssize_t i; size_t width; ssize_t j, k, u, v; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth1D(radius,sigma); kernel_info=AcquireKernelInfo((const char *) NULL); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); kernel_info->width=width; kernel_info->height=width; kernel_info->x=(ssize_t) (width-1)/2; kernel_info->y=(ssize_t) (width-1)/2; kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory( kernel_info->width,kernel_info->width*sizeof(*kernel_info->values))); if (kernel_info->values == (double *) NULL) { kernel_info=DestroyKernelInfo(kernel_info); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } j=(ssize_t) (kernel_info->width-1)/2; k=j; i=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) { kernel_info->values[i]=(double) (((u < 0) || (v < 0) ? -8.0 : 8.0)*exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/ (2.0*MagickPI*MagickSigma*MagickSigma)); if (u != k) kernel_info->values[i]=0.0; i++; } k--; } normalize=0.0; for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++) normalize+=kernel_info->values[i]; gamma=PerceptibleReciprocal(normalize); for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++) kernel_info->values[i]*=gamma; emboss_image=(Image *) NULL; #if defined(MAGICKCORE_OPENCL_SUPPORT) emboss_image=AccelerateConvolveImageChannel(image,DefaultChannels,kernel_info, exception); #endif if (emboss_image == (Image *) NULL) emboss_image=MorphologyImageChannel(image,DefaultChannels, ConvolveMorphology,1,kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); if (emboss_image != (Image *) NULL) (void) EqualizeImageChannel(emboss_image,(ChannelType) (AllChannels &~ SyncChannels)); return(emboss_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F i l t e r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FilterImage() applies a custom convolution kernel to the image. % % The format of the FilterImage method is: % % Image *FilterImage(const Image *image,const KernelInfo *kernel, % ExceptionInfo *exception) % Image *FilterImageChannel(const Image *image,const ChannelType channel, % const KernelInfo *kernel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o kernel: the filtering kernel. % % o exception: return any errors or warnings in this structure. 
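%
%  A hedged example follows ("image" and "exception" are assumed to exist;
%  the kernel string is illustrative only), building a 3x3 sharpening
%  kernel with AcquireKernelInfo() and filtering all channels:
%
%      Image
%        *filter_image;
%
%      KernelInfo
%        *kernel_info;
%
%      kernel_info=AcquireKernelInfo("3x3: -1,-1,-1 -1,9,-1 -1,-1,-1");
%      if (kernel_info != (KernelInfo *) NULL)
%        {
%          filter_image=FilterImage(image,kernel_info,exception);
%          kernel_info=DestroyKernelInfo(kernel_info);
%        }
%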
% */ MagickExport Image *FilterImage(const Image *image,const KernelInfo *kernel, ExceptionInfo *exception) { Image *filter_image; filter_image=FilterImageChannel(image,DefaultChannels,kernel,exception); return(filter_image); } MagickExport Image *FilterImageChannel(const Image *image, const ChannelType channel,const KernelInfo *kernel,ExceptionInfo *exception) { #define FilterImageTag "Filter/Image" CacheView *filter_view, *image_view; Image *filter_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket bias; MagickRealType *filter_kernel; register ssize_t i; ssize_t y; #ifdef MAGICKCORE_CLPERFMARKER clBeginPerfMarkerAMD(__FUNCTION__,""); #endif /* Initialize filter image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((kernel->width % 2) == 0) ThrowImageException(OptionError,"KernelWidthMustBeAnOddNumber"); if (image->debug != MagickFalse) { char format[MaxTextExtent], *message; register const double *k; ssize_t u, v; (void) LogMagickEvent(TransformEvent,GetMagickModule(), " FilterImage with %.20gx%.20g kernel:",(double) kernel->width,(double) kernel->height); message=AcquireString(""); k=kernel->values; for (v=0; v < (ssize_t) kernel->height; v++) { *message='\0'; (void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v); (void) ConcatenateString(&message,format); for (u=0; u < (ssize_t) kernel->width; u++) { (void) FormatLocaleString(format,MaxTextExtent,"%g ",*k++); (void) ConcatenateString(&message,format); } (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message); } message=DestroyString(message); } #if defined(MAGICKCORE_OPENCL_SUPPORT) filter_image=AccelerateConvolveImageChannel(image,channel,kernel,exception); if (filter_image != (Image *) NULL) { #ifdef MAGICKCORE_CLPERFMARKER clEndPerfMarkerAMD(); #endif return(filter_image); } #endif filter_image=CloneImage(image,0,0,MagickTrue,exception); if (filter_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(filter_image,DirectClass) == MagickFalse) { InheritException(exception,&filter_image->exception); filter_image=DestroyImage(filter_image); return((Image *) NULL); } /* Normalize kernel. */ filter_kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory( kernel->width,kernel->height*sizeof(*filter_kernel))); if (filter_kernel == (MagickRealType *) NULL) { filter_image=DestroyImage(filter_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++) filter_kernel[i]=(MagickRealType) kernel->values[i]; /* Filter image. 
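    Each output pixel is the convolution of its neighborhood with the
    kernel; when the opacity channel is requested and the image has a
    matte channel, the color contributions are weighted by alpha and the
    sum is renormalized by the accumulated weight (gamma) before clamping.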
*/ status=MagickTrue; progress=0; GetMagickPixelPacket(image,&bias); SetMagickPixelPacketBias(image,&bias); image_view=AcquireVirtualCacheView(image,exception); filter_view=AcquireAuthenticCacheView(filter_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,filter_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict filter_indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (kernel->width-1)/2L),y- (ssize_t) ((kernel->height-1)/2L),image->columns+kernel->width, kernel->height,exception); q=GetCacheViewAuthenticPixels(filter_view,0,y,filter_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); filter_indexes=GetCacheViewAuthenticIndexQueue(filter_view); for (x=0; x < (ssize_t) image->columns; x++) { DoublePixelPacket pixel; register const MagickRealType *magick_restrict k; register const PixelPacket *magick_restrict kernel_pixels; register ssize_t u; ssize_t v; pixel.red=bias.red; pixel.green=bias.green; pixel.blue=bias.blue; pixel.opacity=bias.opacity; pixel.index=bias.index; k=filter_kernel; kernel_pixels=p; if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse)) { for (v=0; v < (ssize_t) kernel->width; v++) { for (u=0; u < (ssize_t) kernel->height; u++) { pixel.red+=(*k)*kernel_pixels[u].red; pixel.green+=(*k)*kernel_pixels[u].green; pixel.blue+=(*k)*kernel_pixels[u].blue; k++; } kernel_pixels+=image->columns+kernel->width; } if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(pixel.blue)); if ((channel & OpacityChannel) != 0) { k=filter_kernel; kernel_pixels=p; for (v=0; v < (ssize_t) kernel->width; v++) { for (u=0; u < (ssize_t) kernel->height; u++) { pixel.opacity+=(*k)*kernel_pixels[u].opacity; k++; } kernel_pixels+=image->columns+kernel->width; } SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { register const IndexPacket *magick_restrict kernel_indexes; k=filter_kernel; kernel_indexes=indexes; for (v=0; v < (ssize_t) kernel->width; v++) { for (u=0; u < (ssize_t) kernel->height; u++) { pixel.index+=(*k)*GetPixelIndex(kernel_indexes+u); k++; } kernel_indexes+=image->columns+kernel->width; } SetPixelIndex(filter_indexes+x,ClampToQuantum(pixel.index)); } } else { double alpha, gamma; gamma=0.0; for (v=0; v < (ssize_t) kernel->width; v++) { for (u=0; u < (ssize_t) kernel->height; u++) { alpha=(MagickRealType) (QuantumScale*(QuantumRange- GetPixelOpacity(kernel_pixels+u))); pixel.red+=(*k)*alpha*GetPixelRed(kernel_pixels+u); pixel.green+=(*k)*alpha*GetPixelGreen(kernel_pixels+u); pixel.blue+=(*k)*alpha*GetPixelBlue(kernel_pixels+u); gamma+=(*k)*alpha; k++; } kernel_pixels+=image->columns+kernel->width; } gamma=PerceptibleReciprocal(gamma); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(gamma*pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(gamma*pixel.green)); if 
((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue)); if ((channel & OpacityChannel) != 0) { k=filter_kernel; kernel_pixels=p; for (v=0; v < (ssize_t) kernel->width; v++) { for (u=0; u < (ssize_t) kernel->height; u++) { pixel.opacity+=(*k)*GetPixelOpacity(kernel_pixels+u); k++; } kernel_pixels+=image->columns+kernel->width; } SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { register const IndexPacket *magick_restrict kernel_indexes; k=filter_kernel; kernel_pixels=p; kernel_indexes=indexes; for (v=0; v < (ssize_t) kernel->width; v++) { for (u=0; u < (ssize_t) kernel->height; u++) { alpha=(MagickRealType) (QuantumScale*(QuantumRange- kernel_pixels[u].opacity)); pixel.index+=(*k)*alpha*GetPixelIndex(kernel_indexes+u); k++; } kernel_pixels+=image->columns+kernel->width; kernel_indexes+=image->columns+kernel->width; } SetPixelIndex(filter_indexes+x,ClampToQuantum(gamma*pixel.index)); } } indexes++; p++; q++; } sync=SyncCacheViewAuthenticPixels(filter_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FilterImageChannel) #endif proceed=SetImageProgress(image,FilterImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } filter_image->type=image->type; filter_view=DestroyCacheView(filter_view); image_view=DestroyCacheView(image_view); filter_kernel=(MagickRealType *) RelinquishAlignedMemory(filter_kernel); if (status == MagickFalse) filter_image=DestroyImage(filter_image); #ifdef MAGICKCORE_CLPERFMARKER clEndPerfMarkerAMD(); #endif return(filter_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G a u s s i a n B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GaussianBlurImage() blurs an image. We convolve the image with a % Gaussian operator of the given radius and standard deviation (sigma). % For reasonable results, the radius should be larger than sigma. Use a % radius of 0 and GaussianBlurImage() selects a suitable radius for you % % The format of the GaussianBlurImage method is: % % Image *GaussianBlurImage(const Image *image,onst double radius, % const double sigma,ExceptionInfo *exception) % Image *GaussianBlurImageChannel(const Image *image, % const ChannelType channel,const double radius,const double sigma, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. 
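%
%  For example (a sketch; "image" and "exception" are assumed to be valid),
%  a radius of 0 lets the method choose a radius suited to sigma:
%
%      Image
%        *blur_image;
%
%      blur_image=GaussianBlurImage(image,0.0,2.0,exception);
%      if (blur_image == (Image *) NULL)
%        CatchException(exception);
%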
% */ MagickExport Image *GaussianBlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { Image *blur_image; blur_image=GaussianBlurImageChannel(image,DefaultChannels,radius,sigma, exception); return(blur_image); } MagickExport Image *GaussianBlurImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, ExceptionInfo *exception) { char geometry[MaxTextExtent]; KernelInfo *kernel_info; Image *blur_image; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); (void) FormatLocaleString(geometry,MaxTextExtent,"gaussian:%.20gx%.20g", radius,sigma); kernel_info=AcquireKernelInfo(geometry); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); blur_image=(Image *) NULL; #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateConvolveImageChannel(image,channel,kernel_info, exception); #endif if (blur_image == (Image *) NULL) blur_image=MorphologyImageChannel(image,channel,ConvolveMorphology,1, kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o t i o n B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MotionBlurImage() simulates motion blur. We convolve the image with a % Gaussian operator of the given radius and standard deviation (sigma). % For reasonable results, radius should be larger than sigma. Use a % radius of 0 and MotionBlurImage() selects a suitable radius for you. % Angle gives the angle of the blurring motion. % % Andrew Protano contributed this effect. % % The format of the MotionBlurImage method is: % % Image *MotionBlurImage(const Image *image,const double radius, % const double sigma,const double angle,ExceptionInfo *exception) % Image *MotionBlurImageChannel(const Image *image,const ChannelType channel, % const double radius,const double sigma,const double angle, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o angle: Apply the effect along this angle. % % o exception: return any errors or warnings in this structure. % */ static double *GetMotionBlurKernel(const size_t width,const double sigma) { double *kernel, normalize; register ssize_t i; /* Generate a 1-D convolution kernel. 
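    Each tap samples the Gaussian
      kernel[i]=exp(-(i*i)/(2.0*MagickSigma*MagickSigma))/(MagickSQ2PI*MagickSigma),
    after which the taps are scaled so they sum to one.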
*/ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); kernel=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width, sizeof(*kernel))); if (kernel == (double *) NULL) return(kernel); normalize=0.0; for (i=0; i < (ssize_t) width; i++) { kernel[i]=(double) (exp((-((double) i*i)/(double) (2.0*MagickSigma* MagickSigma)))/(MagickSQ2PI*MagickSigma)); normalize+=kernel[i]; } for (i=0; i < (ssize_t) width; i++) kernel[i]/=normalize; return(kernel); } MagickExport Image *MotionBlurImage(const Image *image,const double radius, const double sigma,const double angle,ExceptionInfo *exception) { Image *motion_blur; motion_blur=MotionBlurImageChannel(image,DefaultChannels,radius,sigma,angle, exception); return(motion_blur); } MagickExport Image *MotionBlurImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, const double angle,ExceptionInfo *exception) { #define BlurImageTag "Blur/Image" CacheView *blur_view, *image_view; double *kernel; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket bias; OffsetInfo *offset; PointInfo point; register ssize_t i; size_t width; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); width=GetOptimalKernelWidth1D(radius,sigma); kernel=GetMotionBlurKernel(width,sigma); if (kernel == (double *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset)); if (offset == (OffsetInfo *) NULL) { kernel=(double *) RelinquishAlignedMemory(kernel); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } point.x=(double) width*sin(DegreesToRadians(angle)); point.y=(double) width*cos(DegreesToRadians(angle)); for (i=0; i < (ssize_t) width; i++) { offset[i].x=(ssize_t) ceil((double) (i*point.y)/hypot(point.x,point.y)-0.5); offset[i].y=(ssize_t) ceil((double) (i*point.x)/hypot(point.x,point.y)-0.5); } /* Motion blur image. 
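    The 1-D kernel is applied along a straight line of 'width' samples in
    the direction given by 'angle'; offset[i] holds the integer x/y step
    of the i-th sample along that line.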
*/ #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateMotionBlurImage(image,channel,kernel,width,offset, exception); if (blur_image != (Image *) NULL) return blur_image; #endif blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) { kernel=(double *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); return((Image *) NULL); } if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse) { kernel=(double *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); InheritException(exception,&blur_image->exception); blur_image=DestroyImage(blur_image); return((Image *) NULL); } status=MagickTrue; progress=0; GetMagickPixelPacket(image,&bias); image_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict blur_indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view); for (x=0; x < (ssize_t) image->columns; x++) { MagickPixelPacket qixel; PixelPacket pixel; register const IndexPacket *magick_restrict indexes; register double *magick_restrict k; register ssize_t i; k=kernel; qixel=bias; if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse)) { for (i=0; i < (ssize_t) width; i++) { (void) GetOneCacheViewVirtualPixel(image_view,x+offset[i].x,y+ offset[i].y,&pixel,exception); qixel.red+=(*k)*pixel.red; qixel.green+=(*k)*pixel.green; qixel.blue+=(*k)*pixel.blue; qixel.opacity+=(*k)*pixel.opacity; if (image->colorspace == CMYKColorspace) { indexes=GetCacheViewVirtualIndexQueue(image_view); qixel.index+=(*k)*(*indexes); } k++; } if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(qixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(qixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(qixel.blue)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(qixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(blur_indexes+x,ClampToQuantum(qixel.index)); } else { double alpha, gamma; alpha=0.0; gamma=0.0; for (i=0; i < (ssize_t) width; i++) { (void) GetOneCacheViewVirtualPixel(image_view,x+offset[i].x,y+ offset[i].y,&pixel,exception); alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(&pixel)); qixel.red+=(*k)*alpha*pixel.red; qixel.green+=(*k)*alpha*pixel.green; qixel.blue+=(*k)*alpha*pixel.blue; qixel.opacity+=(*k)*pixel.opacity; if (image->colorspace == CMYKColorspace) { indexes=GetCacheViewVirtualIndexQueue(image_view); qixel.index+=(*k)*alpha*GetPixelIndex(indexes); } gamma+=(*k)*alpha; k++; } gamma=PerceptibleReciprocal(gamma); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(gamma*qixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(gamma*qixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(gamma*qixel.blue)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(qixel.opacity)); if (((channel & IndexChannel) != 0) && 
(image->colorspace == CMYKColorspace)) SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*qixel.index)); } q++; } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_MotionBlurImageChannel) #endif proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_view=DestroyCacheView(blur_view); image_view=DestroyCacheView(image_view); kernel=(double *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % K u w a h a r a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % KuwaharaImage() is an edge preserving noise reduction filter. % % The format of the KuwaharaImage method is: % % Image *KuwaharaImage(const Image *image,const double width, % const double sigma,ExceptionInfo *exception) % Image *KuwaharaImageChannel(const Image *image,const ChannelType channel, % const double width,const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o radius: the square window radius. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *KuwaharaImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { Image *kuwahara_image; kuwahara_image=KuwaharaImageChannel(image,DefaultChannels,radius,sigma, exception); return(kuwahara_image); } MagickExport Image *KuwaharaImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, ExceptionInfo *exception) { #define KuwaharaImageTag "Kiwahara/Image" CacheView *image_view, *kuwahara_view; Image *gaussian_image, *kuwahara_image; MagickBooleanType status; MagickOffsetType progress; size_t width; ssize_t y; /* Initialize Kuwahara image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); (void) channel; width=(size_t) radius+1; gaussian_image=BlurImage(image,radius,sigma,exception); if (gaussian_image == (Image *) NULL) return((Image *) NULL); kuwahara_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (kuwahara_image == (Image *) NULL) { gaussian_image=DestroyImage(gaussian_image); return((Image *) NULL); } if (SetImageStorageClass(kuwahara_image,DirectClass) == MagickFalse) { InheritException(exception,&kuwahara_image->exception); gaussian_image=DestroyImage(gaussian_image); kuwahara_image=DestroyImage(kuwahara_image); return((Image *) NULL); } /* Edge preserving noise reduction filter. 
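    For every pixel, the four (width x width) windows meeting at that
    pixel are examined on the pre-blurred image; the window with the
    smallest luma variance wins and the pixel is replaced by the color
    interpolated at the center of that window.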
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(gaussian_image,exception); kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,kuwahara_image,kuwahara_image->rows,1) #endif for (y=0; y < (ssize_t) kuwahara_image->rows; y++) { register IndexPacket *magick_restrict kuwahara_indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } kuwahara_indexes=GetCacheViewAuthenticIndexQueue(kuwahara_view); for (x=0; x < (ssize_t) kuwahara_image->columns; x++) { double min_variance; MagickPixelPacket pixel; RectangleInfo quadrant, target; register ssize_t i; min_variance=MagickMaximumValue; SetGeometry(gaussian_image,&target); quadrant.width=width; quadrant.height=width; for (i=0; i < 4; i++) { const PixelPacket *magick_restrict p; double variance; MagickPixelPacket mean; register const PixelPacket *magick_restrict k; register ssize_t n; quadrant.x=x; quadrant.y=y; switch (i) { case 0: { quadrant.x=x-(ssize_t) (width-1); quadrant.y=y-(ssize_t) (width-1); break; } case 1: { quadrant.y=y-(ssize_t) (width-1); break; } case 2: { quadrant.x=x-(ssize_t) (width-1); break; } default: break; } p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y, quadrant.width,quadrant.height,exception); if (p == (const PixelPacket *) NULL) break; GetMagickPixelPacket(image,&mean); k=p; for (n=0; n < (ssize_t) (width*width); n++) { mean.red+=(double) k->red; mean.green+=(double) k->green; mean.blue+=(double) k->blue; k++; } mean.red/=(double) (width*width); mean.green/=(double) (width*width); mean.blue/=(double) (width*width); k=p; variance=0.0; for (n=0; n < (ssize_t) (width*width); n++) { double luma; luma=GetPixelLuma(image,k); variance+=(luma-MagickPixelLuma(&mean))*(luma-MagickPixelLuma(&mean)); k++; } if (variance < min_variance) { min_variance=variance; target=quadrant; } } if (i < 4) { status=MagickFalse; break; } (void) InterpolateMagickPixelPacket(gaussian_image,image_view, UndefinedInterpolatePixel,(double) target.x+target.width/2.0, (double) target.y+target.height/2.0,&pixel,exception); SetPixelPacket(kuwahara_image,&pixel,q,kuwahara_indexes+x); q++; } if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_KuwaharaImage) #endif proceed=SetImageProgress(image,KuwaharaImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } kuwahara_view=DestroyCacheView(kuwahara_view); image_view=DestroyCacheView(image_view); gaussian_image=DestroyImage(gaussian_image); if (status == MagickFalse) kuwahara_image=DestroyImage(kuwahara_image); return(kuwahara_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L o c a l C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LocalContrastImage() attempts to increase the appearance of large-scale % light-dark transitions. 
Local contrast enhancement works similarly to % sharpening with an unsharp mask, however the mask is instead created using % an image with a greater blur distance. % % The format of the LocalContrastImage method is: % % Image *LocalContrastImage(const Image *image, const double radius, % const double strength, ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian blur, in percentage with 100% % resulting in a blur radius of 20% of largest dimension. % % o strength: the strength of the blur mask in percentage. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *LocalContrastImage(const Image *image,const double radius, const double strength,ExceptionInfo *exception) { #define LocalContrastImageTag "LocalContrast/Image" CacheView *image_view, *contrast_view; float *interImage, *scanLinePixels, totalWeight; Image *contrast_image; MagickBooleanType status; MemoryInfo *scanLinePixels_info, *interImage_info; ssize_t scanLineSize, width; /* Initialize contrast image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception); if (contrast_image != (Image *) NULL) return(contrast_image); #endif contrast_image=CloneImage(image,0,0,MagickTrue,exception); if (contrast_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(contrast_image,DirectClass) == MagickFalse) { InheritException(exception,&contrast_image->exception); contrast_image=DestroyImage(contrast_image); return((Image *) NULL); } image_view=AcquireVirtualCacheView(image,exception); contrast_view=AcquireAuthenticCacheView(contrast_image,exception); scanLineSize=(ssize_t) MagickMax(image->columns,image->rows); width=(ssize_t) scanLineSize*0.002f*fabs(radius); scanLineSize+=(2*width); scanLinePixels_info=AcquireVirtualMemory(GetOpenMPMaximumThreads()* scanLineSize,sizeof(*scanLinePixels)); if (scanLinePixels_info == (MemoryInfo *) NULL) { contrast_view=DestroyCacheView(contrast_view); image_view=DestroyCacheView(image_view); contrast_image=DestroyImage(contrast_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } scanLinePixels=(float *) GetVirtualMemoryBlob(scanLinePixels_info); /* Create intermediate buffer. */ interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)), sizeof(*interImage)); if (interImage_info == (MemoryInfo *) NULL) { scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info); contrast_view=DestroyCacheView(contrast_view); image_view=DestroyCacheView(image_view); contrast_image=DestroyImage(contrast_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } interImage=(float *) GetVirtualMemoryBlob(interImage_info); totalWeight=(width+1)*(width+1); /* Vertical pass. 
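    First pass: each column's luma is smoothed with a simple tent-shaped
    weighting over a window of roughly 2*width samples (an inexpensive
    stand-in for a wide Gaussian blur); the result is written to
    interImage and mirrored into the left/right padding.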
*/ status=MagickTrue; { ssize_t x; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_number_threads(image,image,image->columns,1) #endif for (x=0; x < (ssize_t) image->columns; x++) { const int id = GetOpenMPThreadId(); const PixelPacket *magick_restrict p; float *out, *pix, *pixels; register ssize_t y; ssize_t i; if (status == MagickFalse) continue; pixels=scanLinePixels; pixels+=id*scanLineSize; pix=pixels; p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width), exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } for (y=0; y < (ssize_t) image->rows+(2*width); y++) { *pix++=(float)GetPixelLuma(image,p); p++; } out=interImage+x+width; for (y=0; y < (ssize_t) image->rows; y++) { float sum, weight; weight=1.0f; sum=0; pix=pixels+y; for (i=0; i < width; i++) { sum+=weight*(*pix++); weight+=1.0f; } for (i=width+1; i < (2*width); i++) { sum+=weight*(*pix++); weight-=1.0f; } /* write to output */ *out=sum/totalWeight; /* mirror into padding */ if (x <= width && x != 0) *(out-(x*2))=*out; if ((x > (ssize_t) image->columns-width-2) && (x != (ssize_t) image->columns-1)) *(out+((image->columns-x-1)*2))=*out; out+=image->columns+(width*2); } } } /* Horizontal pass. */ { ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); const PixelPacket *magick_restrict p; float *pix, *pixels; register PixelPacket *magick_restrict q; register ssize_t x; ssize_t i; if (status == MagickFalse) continue; pixels=scanLinePixels; pixels+=id*scanLineSize; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1, exception); q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+ (2*width))*sizeof(float)); for (x=0; x < (ssize_t) image->columns; x++) { float mult, srcVal, sum, weight; weight=1.0f; sum=0; pix=pixels+x; for (i=0; i < width; i++) { sum+=weight*(*pix++); weight+=1.0f; } for (i=width+1; i < (2*width); i++) { sum+=weight*(*pix++); weight-=1.0f; } /* Apply and write */ srcVal=(float) GetPixelLuma(image,p); mult=(srcVal-(sum/totalWeight))*(strength/100.0f); mult=(srcVal+mult)/srcVal; SetPixelRed(q,ClampToQuantum(GetPixelRed(p)*mult)); SetPixelGreen(q,ClampToQuantum(GetPixelGreen(p)*mult)); SetPixelBlue(q,ClampToQuantum(GetPixelBlue(p)*mult)); p++; q++; } if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse) status=MagickFalse; } } scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info); interImage_info=RelinquishVirtualMemory(interImage_info); contrast_view=DestroyCacheView(contrast_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) contrast_image=DestroyImage(contrast_image); return(contrast_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P r e v i e w I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PreviewImage() tiles 9 thumbnails of the specified image with an image % processing operation applied with varying parameters. This may be helpful % pin-pointing an appropriate parameter for a particular image processing % operation. 
% % The format of the PreviewImages method is: % % Image *PreviewImages(const Image *image,const PreviewType preview, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o preview: the image processing operation. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *PreviewImage(const Image *image,const PreviewType preview, ExceptionInfo *exception) { #define NumberTiles 9 #define PreviewImageTag "Preview/Image" #define DefaultPreviewGeometry "204x204+10+10" char factor[MaxTextExtent], label[MaxTextExtent]; double degrees, gamma, percentage, radius, sigma, threshold; Image *images, *montage_image, *preview_image, *thumbnail; ImageInfo *preview_info; MagickBooleanType proceed; MontageInfo *montage_info; QuantizeInfo quantize_info; RectangleInfo geometry; register ssize_t i, x; size_t colors; ssize_t y; /* Open output image file. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); colors=2; degrees=0.0; gamma=(-0.2f); preview_info=AcquireImageInfo(); SetGeometry(image,&geometry); (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y, &geometry.width,&geometry.height); images=NewImageList(); percentage=12.5; GetQuantizeInfo(&quantize_info); radius=0.0; sigma=1.0; threshold=0.0; x=0; y=0; for (i=0; i < NumberTiles; i++) { thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception); if (thumbnail == (Image *) NULL) break; (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL, (void *) NULL); (void) SetImageProperty(thumbnail,"label",DefaultTileLabel); if (i == (NumberTiles/2)) { (void) QueryColorDatabase("#dfdfdf",&thumbnail->matte_color,exception); AppendImageToList(&images,thumbnail); continue; } switch (preview) { case RotatePreview: { degrees+=45.0; preview_image=RotateImage(thumbnail,degrees,exception); (void) FormatLocaleString(label,MaxTextExtent,"rotate %g",degrees); break; } case ShearPreview: { degrees+=5.0; preview_image=ShearImage(thumbnail,degrees,degrees,exception); (void) FormatLocaleString(label,MaxTextExtent,"shear %gx%g", degrees,2.0*degrees); break; } case RollPreview: { x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles; y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles; preview_image=RollImage(thumbnail,x,y,exception); (void) FormatLocaleString(label,MaxTextExtent,"roll %+.20gx%+.20g", (double) x,(double) y); break; } case HuePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MaxTextExtent,"100,100,%g", 2.0*percentage); (void) ModulateImage(preview_image,factor); (void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor); break; } case SaturationPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MaxTextExtent,"100,%g",2.0*percentage); (void) ModulateImage(preview_image,factor); (void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor); break; } case BrightnessPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MaxTextExtent,"%g",2.0*percentage); (void) ModulateImage(preview_image,factor); (void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor); break; } case GammaPreview: default: { 
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; gamma+=0.4f; (void) GammaImageChannel(preview_image,DefaultChannels,gamma); (void) FormatLocaleString(label,MaxTextExtent,"gamma %g",gamma); break; } case SpiffPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image != (Image *) NULL) for (x=0; x < i; x++) (void) ContrastImage(preview_image,MagickTrue); (void) FormatLocaleString(label,MaxTextExtent,"contrast (%.20g)", (double) i+1); break; } case DullPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; for (x=0; x < i; x++) (void) ContrastImage(preview_image,MagickFalse); (void) FormatLocaleString(label,MaxTextExtent,"+contrast (%.20g)", (double) i+1); break; } case GrayscalePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; colors<<=1; quantize_info.number_colors=colors; quantize_info.colorspace=GRAYColorspace; (void) QuantizeImage(&quantize_info,preview_image); (void) FormatLocaleString(label,MaxTextExtent, "-colorspace gray -colors %.20g",(double) colors); break; } case QuantizePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; colors<<=1; quantize_info.number_colors=colors; (void) QuantizeImage(&quantize_info,preview_image); (void) FormatLocaleString(label,MaxTextExtent,"colors %.20g",(double) colors); break; } case DespecklePreview: { for (x=0; x < (i-1); x++) { preview_image=DespeckleImage(thumbnail,exception); if (preview_image == (Image *) NULL) break; thumbnail=DestroyImage(thumbnail); thumbnail=preview_image; } preview_image=DespeckleImage(thumbnail,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(label,MaxTextExtent,"despeckle (%.20g)", (double) i+1); break; } case ReduceNoisePreview: { preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) radius, (size_t) radius,exception); (void) FormatLocaleString(label,MaxTextExtent,"noise %g",radius); break; } case AddNoisePreview: { switch ((int) i) { case 0: { (void) CopyMagickString(factor,"uniform",MaxTextExtent); break; } case 1: { (void) CopyMagickString(factor,"gaussian",MaxTextExtent); break; } case 2: { (void) CopyMagickString(factor,"multiplicative",MaxTextExtent); break; } case 3: { (void) CopyMagickString(factor,"impulse",MaxTextExtent); break; } case 5: { (void) CopyMagickString(factor,"laplacian",MaxTextExtent); break; } case 6: { (void) CopyMagickString(factor,"poisson",MaxTextExtent); break; } default: { (void) CopyMagickString(thumbnail->magick,"NULL",MaxTextExtent); break; } } preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i, (size_t) i,exception); (void) FormatLocaleString(label,MaxTextExtent,"+noise %s",factor); break; } case SharpenPreview: { preview_image=SharpenImage(thumbnail,radius,sigma,exception); (void) FormatLocaleString(label,MaxTextExtent,"sharpen %gx%g", radius,sigma); break; } case BlurPreview: { preview_image=BlurImage(thumbnail,radius,sigma,exception); (void) FormatLocaleString(label,MaxTextExtent,"blur %gx%g",radius, sigma); break; } case ThresholdPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) BilevelImage(thumbnail, (double) (percentage*((MagickRealType) QuantumRange+1.0))/100.0); (void) FormatLocaleString(label,MaxTextExtent,"threshold %g", (double) (percentage*((MagickRealType) 
QuantumRange+1.0))/100.0); break; } case EdgeDetectPreview: { preview_image=EdgeImage(thumbnail,radius,exception); (void) FormatLocaleString(label,MaxTextExtent,"edge %g",radius); break; } case SpreadPreview: { preview_image=SpreadImage(thumbnail,radius,exception); (void) FormatLocaleString(label,MaxTextExtent,"spread %g", radius+0.5); break; } case SolarizePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) SolarizeImage(preview_image,(double) QuantumRange* percentage/100.0); (void) FormatLocaleString(label,MaxTextExtent,"solarize %g", (QuantumRange*percentage)/100.0); break; } case ShadePreview: { degrees+=10.0; preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees, exception); (void) FormatLocaleString(label,MaxTextExtent,"shade %gx%g", degrees,degrees); break; } case RaisePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; geometry.width=(size_t) (2*i+2); geometry.height=(size_t) (2*i+2); geometry.x=(i-1)/2; geometry.y=(i-1)/2; (void) RaiseImage(preview_image,&geometry,MagickTrue); (void) FormatLocaleString(label,MaxTextExtent, "raise %.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double) geometry.height,(double) geometry.x,(double) geometry.y); break; } case SegmentPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; threshold+=0.4f; (void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold, threshold); (void) FormatLocaleString(label,MaxTextExtent,"segment %gx%g", threshold,threshold); break; } case SwirlPreview: { preview_image=SwirlImage(thumbnail,degrees,exception); (void) FormatLocaleString(label,MaxTextExtent,"swirl %g",degrees); degrees+=45.0; break; } case ImplodePreview: { degrees+=0.1f; preview_image=ImplodeImage(thumbnail,degrees,exception); (void) FormatLocaleString(label,MaxTextExtent,"implode %g",degrees); break; } case WavePreview: { degrees+=5.0f; preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees,exception); (void) FormatLocaleString(label,MaxTextExtent,"wave %gx%g", 0.5*degrees,2.0*degrees); break; } case OilPaintPreview: { preview_image=OilPaintImage(thumbnail,(double) radius,exception); (void) FormatLocaleString(label,MaxTextExtent,"paint %g",radius); break; } case CharcoalDrawingPreview: { preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma, exception); (void) FormatLocaleString(label,MaxTextExtent,"charcoal %gx%g", radius,sigma); break; } case JPEGPreview: { char filename[MaxTextExtent]; int file; MagickBooleanType status; preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; preview_info->quality=(size_t) percentage; (void) FormatLocaleString(factor,MaxTextExtent,"%.20g",(double) preview_info->quality); file=AcquireUniqueFileResource(filename); if (file != -1) file=close(file)-1; (void) FormatLocaleString(preview_image->filename,MaxTextExtent, "jpeg:%s",filename); status=WriteImage(preview_info,preview_image); if (status != MagickFalse) { Image *quality_image; (void) CopyMagickString(preview_info->filename, preview_image->filename,MaxTextExtent); quality_image=ReadImage(preview_info,exception); if (quality_image != (Image *) NULL) { preview_image=DestroyImage(preview_image); preview_image=quality_image; } } (void) RelinquishUniqueFileResource(preview_image->filename); if ((GetBlobSize(preview_image)/1024) >= 1024) (void) 
FormatLocaleString(label,MaxTextExtent,"quality %s\n%gmb ", factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/ 1024.0/1024.0); else if (GetBlobSize(preview_image) >= 1024) (void) FormatLocaleString(label,MaxTextExtent, "quality %s\n%gkb ",factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/1024.0); else (void) FormatLocaleString(label,MaxTextExtent,"quality %s\n%.20gb ", factor,(double) ((MagickOffsetType) GetBlobSize(thumbnail))); break; } } thumbnail=DestroyImage(thumbnail); percentage+=12.5; radius+=0.5; sigma+=0.25; if (preview_image == (Image *) NULL) break; (void) DeleteImageProperty(preview_image,"label"); (void) SetImageProperty(preview_image,"label",label); AppendImageToList(&images,preview_image); proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i, NumberTiles); if (proceed == MagickFalse) break; } if (images == (Image *) NULL) { preview_info=DestroyImageInfo(preview_info); return((Image *) NULL); } /* Create the montage. */ montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL); (void) CopyMagickString(montage_info->filename,image->filename,MaxTextExtent); montage_info->shadow=MagickTrue; (void) CloneString(&montage_info->tile,"3x3"); (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry); (void) CloneString(&montage_info->frame,DefaultTileFrame); montage_image=MontageImages(images,montage_info,exception); montage_info=DestroyMontageInfo(montage_info); images=DestroyImageList(images); if (montage_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); if (montage_image->montage != (char *) NULL) { /* Free image directory. */ montage_image->montage=(char *) RelinquishMagickMemory( montage_image->montage); if (image->directory != (char *) NULL) montage_image->directory=(char *) RelinquishMagickMemory( montage_image->directory); } preview_info=DestroyImageInfo(preview_info); return(montage_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R o t a t i o n a l B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RotationalBlurImage() applies a rotational blur to the image. % % Andrew Protano contributed this effect. % % The format of the RotationalBlurImage method is: % % Image *RotationalBlurImage(const Image *image,const double angle, % ExceptionInfo *exception) % Image *RotationalBlurImageChannel(const Image *image, % const ChannelType channel,const double angle,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o angle: the angle of the rotational blur. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *RotationalBlurImage(const Image *image,const double angle, ExceptionInfo *exception) { Image *blur_image; blur_image=RotationalBlurImageChannel(image,DefaultChannels,angle,exception); return(blur_image); } MagickExport Image *RotationalBlurImageChannel(const Image *image, const ChannelType channel,const double angle,ExceptionInfo *exception) { CacheView *blur_view, *image_view; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket bias; MagickRealType blur_radius, *cos_theta, offset, *sin_theta, theta; PointInfo blur_center; register ssize_t i; size_t n; ssize_t y; /* Allocate blur image. 
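    The blur averages, for every destination pixel, source samples taken
    along a circular arc about the image centre: the cos_theta/sin_theta
    tables built below hold the n precomputed sample angles (n grows with
    the blur angle and the image half-diagonal), and the per-pixel step
    later skips table entries for pixels near the centre, whose arcs are
    short.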
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateRadialBlurImage(image,channel,angle,exception); if (blur_image != (Image *) NULL) return(blur_image); #endif blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse) { InheritException(exception,&blur_image->exception); blur_image=DestroyImage(blur_image); return((Image *) NULL); } blur_center.x=(double) (image->columns-1)/2.0; blur_center.y=(double) (image->rows-1)/2.0; blur_radius=hypot(blur_center.x,blur_center.y); n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL); theta=DegreesToRadians(angle)/(MagickRealType) (n-1); cos_theta=(MagickRealType *) AcquireQuantumMemory((size_t) n, sizeof(*cos_theta)); sin_theta=(MagickRealType *) AcquireQuantumMemory((size_t) n, sizeof(*sin_theta)); if ((cos_theta == (MagickRealType *) NULL) || (sin_theta == (MagickRealType *) NULL)) { if (cos_theta != (double *) NULL) cos_theta=(double *) RelinquishMagickMemory(cos_theta); if (sin_theta != (double *) NULL) sin_theta=(double *) RelinquishMagickMemory(sin_theta); blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } offset=theta*(MagickRealType) (n-1)/2.0; for (i=0; i < (ssize_t) n; i++) { cos_theta[i]=cos((double) (theta*i-offset)); sin_theta[i]=sin((double) (theta*i-offset)); } /* Radial blur image. */ status=MagickTrue; progress=0; GetMagickPixelPacket(image,&bias); image_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,blur_image,blur_image->rows,1) #endif for (y=0; y < (ssize_t) blur_image->rows; y++) { register const IndexPacket *magick_restrict indexes; register IndexPacket *magick_restrict blur_indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view); for (x=0; x < (ssize_t) blur_image->columns; x++) { MagickPixelPacket qixel; MagickRealType normalize, radius; PixelPacket pixel; PointInfo center; register ssize_t i; size_t step; center.x=(double) x-blur_center.x; center.y=(double) y-blur_center.y; radius=hypot((double) center.x,center.y); if (radius == 0) step=1; else { step=(size_t) (blur_radius/radius); if (step == 0) step=1; else if (step >= n) step=n-1; } normalize=0.0; qixel=bias; if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse)) { for (i=0; i < (ssize_t) n; i+=(ssize_t) step) { (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t) (blur_center.x+center.x*cos_theta[i]-center.y*sin_theta[i]+0.5), (ssize_t) (blur_center.y+center.x*sin_theta[i]+center.y* cos_theta[i]+0.5),&pixel,exception); qixel.red+=pixel.red; qixel.green+=pixel.green; qixel.blue+=pixel.blue; qixel.opacity+=pixel.opacity; if (image->colorspace == CMYKColorspace) { indexes=GetCacheViewVirtualIndexQueue(image_view); 
qixel.index+=(*indexes); } normalize+=1.0; } normalize=PerceptibleReciprocal(normalize); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(normalize*qixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(normalize*qixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(normalize*qixel.blue)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(normalize*qixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(blur_indexes+x,ClampToQuantum(normalize*qixel.index)); } else { double alpha, gamma; alpha=1.0; gamma=0.0; for (i=0; i < (ssize_t) n; i+=(ssize_t) step) { (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t) (blur_center.x+center.x*cos_theta[i]-center.y*sin_theta[i]+0.5), (ssize_t) (blur_center.y+center.x*sin_theta[i]+center.y* cos_theta[i]+0.5),&pixel,exception); alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(&pixel)); qixel.red+=alpha*pixel.red; qixel.green+=alpha*pixel.green; qixel.blue+=alpha*pixel.blue; qixel.opacity+=pixel.opacity; if (image->colorspace == CMYKColorspace) { indexes=GetCacheViewVirtualIndexQueue(image_view); qixel.index+=alpha*(*indexes); } gamma+=alpha; normalize+=1.0; } gamma=PerceptibleReciprocal(gamma); normalize=PerceptibleReciprocal(normalize); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(gamma*qixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(gamma*qixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(gamma*qixel.blue)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(normalize*qixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*qixel.index)); } q++; } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RotationalBlurImageChannel) #endif proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_view=DestroyCacheView(blur_view); image_view=DestroyCacheView(image_view); cos_theta=(MagickRealType *) RelinquishMagickMemory(cos_theta); sin_theta=(MagickRealType *) RelinquishMagickMemory(sin_theta); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e l e c t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SelectiveBlurImage() selectively blur pixels within a contrast threshold. % It is similar to the unsharpen mask that sharpens everything with contrast % above a certain threshold. % % The format of the SelectiveBlurImage method is: % % Image *SelectiveBlurImage(const Image *image,const double radius, % const double sigma,const double threshold,ExceptionInfo *exception) % Image *SelectiveBlurImageChannel(const Image *image, % const ChannelType channel,const double radius,const double sigma, % const double threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. 
% % o sigma: the standard deviation of the Gaussian, in pixels. % % o threshold: only pixels within this contrast threshold are included % in the blur operation. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SelectiveBlurImage(const Image *image,const double radius, const double sigma,const double threshold,ExceptionInfo *exception) { Image *blur_image; blur_image=SelectiveBlurImageChannel(image,DefaultChannels,radius,sigma, threshold,exception); return(blur_image); } MagickExport Image *SelectiveBlurImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, const double threshold,ExceptionInfo *exception) { #define SelectiveBlurImageTag "SelectiveBlur/Image" CacheView *blur_view, *image_view, *luminance_view; double *kernel; Image *blur_image, *luminance_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket bias; register ssize_t i; size_t width; ssize_t center, j, u, v, y; /* Initialize blur image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth1D(radius,sigma); kernel=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width, width*sizeof(*kernel))); if (kernel == (double *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); j=(ssize_t) (width-1)/2; i=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) kernel[i++]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); } if (image->debug != MagickFalse) { char format[MaxTextExtent], *message; register const double *k; ssize_t u, v; (void) LogMagickEvent(TransformEvent,GetMagickModule(), " SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double) width); message=AcquireString(""); k=kernel; for (v=0; v < (ssize_t) width; v++) { *message='\0'; (void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v); (void) ConcatenateString(&message,format); for (u=0; u < (ssize_t) width; u++) { (void) FormatLocaleString(format,MaxTextExtent,"%+f ",*k++); (void) ConcatenateString(&message,format); } (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message); } message=DestroyString(message); } blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) { kernel=(double *) RelinquishAlignedMemory(kernel); return((Image *) NULL); } if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse) { kernel=(double *) RelinquishAlignedMemory(kernel); InheritException(exception,&blur_image->exception); blur_image=DestroyImage(blur_image); return((Image *) NULL); } luminance_image=CloneImage(image,0,0,MagickTrue,exception); if (luminance_image == (Image *) NULL) { kernel=(double *) RelinquishAlignedMemory(kernel); blur_image=DestroyImage(blur_image); return((Image *) NULL); } status=TransformImageColorspace(luminance_image,GRAYColorspace); if (status == MagickFalse) { InheritException(exception,&luminance_image->exception); kernel=(double *) RelinquishAlignedMemory(kernel); blur_image=DestroyImage(blur_image); luminance_image=DestroyImage(luminance_image); return((Image *) NULL); } /* Threshold blur image. 
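    Each output pixel becomes a Gaussian-weighted average of only those
    kernel neighbours whose luminance differs from the centre pixel by less
    than the threshold; the accumulated weight (gamma) renormalizes the sum
    so partially populated kernels still average correctly, and the colour
    channels keep their original values when no neighbour qualifies.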
*/ status=MagickTrue; progress=0; center=(ssize_t) ((image->columns+width)*((width-1)/2L)+((width-1)/2L)); GetMagickPixelPacket(image,&bias); SetMagickPixelPacketBias(image,&bias); image_view=AcquireVirtualCacheView(image,exception); luminance_view=AcquireVirtualCacheView(luminance_image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double gamma; MagickBooleanType sync; register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict l, *magick_restrict p; register IndexPacket *magick_restrict blur_indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t) ((width-1)/2L),image->columns+width,width,exception); l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y- (ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception); q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (l == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view); for (x=0; x < (ssize_t) image->columns; x++) { double contrast; DoublePixelPacket pixel; MagickRealType intensity; register const double *magick_restrict k; register ssize_t u; ssize_t j, v; pixel.red=bias.red; pixel.green=bias.green; pixel.blue=bias.blue; pixel.opacity=bias.opacity; pixel.index=bias.index; k=kernel; intensity=GetPixelIntensity(image,p+center); gamma=0.0; j=0; if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse)) { for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity; if (fabs(contrast) < threshold) { pixel.red+=(*k)*GetPixelRed(p+u+j); pixel.green+=(*k)*GetPixelGreen(p+u+j); pixel.blue+=(*k)*GetPixelBlue(p+u+j); gamma+=(*k); } k++; } j+=(ssize_t) (image->columns+width); } if (gamma != 0.0) { gamma=PerceptibleReciprocal(gamma); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(gamma*pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(gamma*pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue)); } if ((channel & OpacityChannel) != 0) { gamma=0.0; j=0; for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity; if (fabs(contrast) < threshold) { pixel.opacity+=(*k)*(p+u+j)->opacity; gamma+=(*k); } k++; } j+=(ssize_t) (image->columns+width); } gamma=PerceptibleReciprocal(gamma); SetPixelOpacity(q,ClampToQuantum(gamma*pixel.opacity)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { gamma=0.0; j=0; for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity; if (fabs(contrast) < threshold) { pixel.index+=(*k)*GetPixelIndex(indexes+x+u+j); gamma+=(*k); } k++; } j+=(ssize_t) (image->columns+width); } gamma=PerceptibleReciprocal(gamma); SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*pixel.index)); } } else { MagickRealType alpha; for (v=0; v < (ssize_t) 
width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity; if (fabs(contrast) < threshold) { alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p+u+j)); pixel.red+=(*k)*alpha*GetPixelRed(p+u+j); pixel.green+=(*k)*alpha*GetPixelGreen(p+u+j); pixel.blue+=(*k)*alpha*GetPixelBlue(p+u+j); pixel.opacity+=(*k)*GetPixelOpacity(p+u+j); gamma+=(*k)*alpha; } k++; } j+=(ssize_t) (image->columns+width); } if (gamma != 0.0) { gamma=PerceptibleReciprocal(gamma); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(gamma*pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(gamma*pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue)); } if ((channel & OpacityChannel) != 0) { j=0; for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity; if (fabs(contrast) < threshold) pixel.opacity+=(*k)*GetPixelOpacity(p+u+j); k++; } j+=(ssize_t) (image->columns+width); } SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { gamma=0.0; j=0; for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity; if (fabs(contrast) < threshold) { alpha=(MagickRealType) (QuantumScale* GetPixelAlpha(p+u+j)); pixel.index+=(*k)*alpha*GetPixelIndex(indexes+x+u+j); gamma+=(*k); } k++; } j+=(ssize_t) (image->columns+width); } gamma=PerceptibleReciprocal(gamma); SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*pixel.index)); } } p++; l++; q++; } sync=SyncCacheViewAuthenticPixels(blur_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SelectiveBlurImageChannel) #endif proceed=SetImageProgress(image,SelectiveBlurImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_image->type=image->type; blur_view=DestroyCacheView(blur_view); luminance_view=DestroyCacheView(luminance_view); image_view=DestroyCacheView(image_view); luminance_image=DestroyImage(luminance_image); kernel=(double *) RelinquishAlignedMemory(kernel); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h a d e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShadeImage() shines a distant light on an image to create a % three-dimensional effect. You control the positioning of the light with % azimuth and elevation; azimuth is measured in degrees off the x axis % and elevation is measured in pixels above the Z axis. % % The format of the ShadeImage method is: % % Image *ShadeImage(const Image *image,const MagickBooleanType gray, % const double azimuth,const double elevation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o gray: A value other than zero shades the intensity of each pixel. % % o azimuth, elevation: Define the light source direction. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray, const double azimuth,const double elevation,ExceptionInfo *exception) { #define ShadeImageTag "Shade/Image" CacheView *image_view, *shade_view; Image *linear_image, *shade_image; MagickBooleanType status; MagickOffsetType progress; PrimaryInfo light; ssize_t y; /* Initialize shaded image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); linear_image=CloneImage(image,0,0,MagickTrue,exception); shade_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL)) { if (linear_image != (Image *) NULL) linear_image=DestroyImage(linear_image); if (shade_image != (Image *) NULL) shade_image=DestroyImage(shade_image); return((Image *) NULL); } if (SetImageStorageClass(shade_image,DirectClass) == MagickFalse) { InheritException(exception,&shade_image->exception); linear_image=DestroyImage(linear_image); shade_image=DestroyImage(shade_image); return((Image *) NULL); } /* Compute the light vector. */ light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))* cos(DegreesToRadians(elevation)); light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))* cos(DegreesToRadians(elevation)); light.z=(double) QuantumRange*sin(DegreesToRadians(elevation)); /* Shade image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(linear_image,exception); shade_view=AcquireAuthenticCacheView(shade_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(linear_image,shade_image,linear_image->rows,1) #endif for (y=0; y < (ssize_t) linear_image->rows; y++) { MagickRealType distance, normal_distance, shade; PrimaryInfo normal; register const PixelPacket *magick_restrict p, *magick_restrict s0, *magick_restrict s1, *magick_restrict s2; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3, exception); q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1, exception); if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } /* Shade this row of pixels. */ normal.z=2.0*(double) QuantumRange; /* constant Z of surface normal */ for (x=0; x < (ssize_t) linear_image->columns; x++) { /* Determine the surface normal and compute shading. 
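      The x and y components of the normal are intensity differences taken
      over the 3x3 neighbourhood spanned by rows s0, s1 and s2 (a
      Prewitt-style operator); the constant z component fixes the relief.
      The shade value is the Lambertian term, the dot product of the light
      vector with the normal divided by the normal's length, with light.z
      used as a fallback on flat regions.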
*/ s0=p+1; s1=s0+image->columns+2; s2=s1+image->columns+2; normal.x=(double) (GetPixelIntensity(linear_image,s0-1)+ GetPixelIntensity(linear_image,s1-1)+ GetPixelIntensity(linear_image,s2-1)- GetPixelIntensity(linear_image,s0+1)- GetPixelIntensity(linear_image,s1+1)- GetPixelIntensity(linear_image,s2+1)); normal.y=(double) (GetPixelIntensity(linear_image,s2-1)+ GetPixelIntensity(linear_image,s2)+ GetPixelIntensity(linear_image,s2+1)- GetPixelIntensity(linear_image,s0-1)- GetPixelIntensity(linear_image,s0)- GetPixelIntensity(linear_image,s0+1)); if ((fabs(normal.x) <= MagickEpsilon) && (fabs(normal.y) <= MagickEpsilon)) shade=light.z; else { shade=0.0; distance=normal.x*light.x+normal.y*light.y+normal.z*light.z; if (distance > MagickEpsilon) { normal_distance=normal.x*normal.x+normal.y*normal.y+normal.z* normal.z; if (normal_distance > (MagickEpsilon*MagickEpsilon)) shade=distance/sqrt((double) normal_distance); } } if (gray != MagickFalse) { SetPixelRed(q,shade); SetPixelGreen(q,shade); SetPixelBlue(q,shade); } else { SetPixelRed(q,ClampToQuantum(QuantumScale*shade*GetPixelRed(s1))); SetPixelGreen(q,ClampToQuantum(QuantumScale*shade*GetPixelGreen(s1))); SetPixelBlue(q,ClampToQuantum(QuantumScale*shade*GetPixelBlue(s1))); } q->opacity=s1->opacity; p++; q++; } if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ShadeImage) #endif proceed=SetImageProgress(image,ShadeImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } shade_view=DestroyCacheView(shade_view); image_view=DestroyCacheView(image_view); linear_image=DestroyImage(linear_image); if (status == MagickFalse) shade_image=DestroyImage(shade_image); return(shade_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h a r p e n I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SharpenImage() sharpens the image. We convolve the image with a Gaussian % operator of the given radius and standard deviation (sigma). For % reasonable results, radius should be larger than sigma. Use a radius of 0 % and SharpenImage() selects a suitable radius for you. % % Using a separable kernel would be faster, but the negative weights cancel % out on the corners of the kernel producing often undesirable ringing in the % filtered result; this can be avoided by using a 2D gaussian shaped image % sharpening kernel instead. % % The format of the SharpenImage method is: % % Image *SharpenImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % Image *SharpenImageChannel(const Image *image,const ChannelType channel, % const double radius,const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Laplacian, in pixels. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *SharpenImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { Image *sharp_image; sharp_image=SharpenImageChannel(image,DefaultChannels,radius,sigma,exception); return(sharp_image); } MagickExport Image *SharpenImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, ExceptionInfo *exception) { double gamma, normalize; Image *sharp_image; KernelInfo *kernel_info; register ssize_t i; size_t width; ssize_t j, u, v; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth2D(radius,sigma); kernel_info=AcquireKernelInfo((const char *) NULL); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); (void) ResetMagickMemory(kernel_info,0,sizeof(*kernel_info)); kernel_info->width=width; kernel_info->height=width; kernel_info->x=(ssize_t) (width-1)/2; kernel_info->y=(ssize_t) (width-1)/2; kernel_info->signature=MagickCoreSignature; kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory( kernel_info->width,kernel_info->height*sizeof(*kernel_info->values))); if (kernel_info->values == (double *) NULL) { kernel_info=DestroyKernelInfo(kernel_info); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } normalize=0.0; j=(ssize_t) (kernel_info->width-1)/2; i=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) { kernel_info->values[i]=(double) (-exp(-((double) u*u+v*v)/(2.0* MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); normalize+=kernel_info->values[i]; i++; } } kernel_info->values[i/2]=(double) ((-2.0)*normalize); normalize=0.0; for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++) normalize+=kernel_info->values[i]; gamma=PerceptibleReciprocal(normalize); for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++) kernel_info->values[i]*=gamma; sharp_image=MorphologyImageChannel(image,channel,ConvolveMorphology,1, kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); return(sharp_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S p r e a d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SpreadImage() is a special effects method that randomly displaces each % pixel in a block defined by the radius parameter. % % The format of the SpreadImage method is: % % Image *SpreadImage(const Image *image,const double radius, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: Choose a random pixel in a neighborhood of this extent. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SpreadImage(const Image *image,const double radius, ExceptionInfo *exception) { #define SpreadImageTag "Spread/Image" CacheView *image_view, *spread_view; Image *spread_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket bias; RandomInfo **magick_restrict random_info; size_t width; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif /* Initialize spread image attributes. 
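    Each destination pixel is interpolated from a pseudo-randomly displaced
    source position: two uniform variates in [0,1) are scaled by the kernel
    width derived from the radius, so every sample lands within roughly a
    width x width window centred on the original pixel.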
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); spread_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (spread_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(spread_image,DirectClass) == MagickFalse) { InheritException(exception,&spread_image->exception); spread_image=DestroyImage(spread_image); return((Image *) NULL); } /* Spread image. */ status=MagickTrue; progress=0; GetMagickPixelPacket(spread_image,&bias); width=GetOptimalKernelWidth1D(radius,0.5); random_info=AcquireRandomInfoThreadSet(); image_view=AcquireVirtualCacheView(image,exception); spread_view=AcquireAuthenticCacheView(spread_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,spread_image,spread_image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) spread_image->rows; y++) { const int id = GetOpenMPThreadId(); MagickPixelPacket pixel; register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(spread_view); pixel=bias; for (x=0; x < (ssize_t) spread_image->columns; x++) { PointInfo point; point.x=GetPseudoRandomValue(random_info[id]); point.y=GetPseudoRandomValue(random_info[id]); (void) InterpolateMagickPixelPacket(image,image_view,image->interpolate, (double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),&pixel, exception); SetPixelPacket(spread_image,&pixel,q,indexes+x); q++; } if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SpreadImage) #endif proceed=SetImageProgress(image,SpreadImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } spread_view=DestroyCacheView(spread_view); image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); if (status == MagickFalse) spread_image=DestroyImage(spread_image); return(spread_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n s h a r p M a s k I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnsharpMaskImage() sharpens one or more image channels. We convolve the % image with a Gaussian operator of the given radius and standard deviation % (sigma). For reasonable results, radius should be larger than sigma. Use a % radius of 0 and UnsharpMaskImage() selects a suitable radius for you. 
% % The format of the UnsharpMaskImage method is: % % Image *UnsharpMaskImage(const Image *image,const double radius, % const double sigma,const double amount,const double threshold, % ExceptionInfo *exception) % Image *UnsharpMaskImageChannel(const Image *image, % const ChannelType channel,const double radius,const double sigma, % const double gain,const double threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o gain: the percentage of the difference between the original and the % blur image that is added back into the original. % % o threshold: the threshold in pixels needed to apply the diffence gain. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *UnsharpMaskImage(const Image *image,const double radius, const double sigma,const double gain,const double threshold, ExceptionInfo *exception) { Image *sharp_image; sharp_image=UnsharpMaskImageChannel(image,DefaultChannels,radius,sigma,gain, threshold,exception); return(sharp_image); } MagickExport Image *UnsharpMaskImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, const double gain,const double threshold,ExceptionInfo *exception) { #define SharpenImageTag "Sharpen/Image" CacheView *image_view, *unsharp_view; Image *unsharp_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket bias; MagickRealType quantum_threshold; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); #if defined(MAGICKCORE_OPENCL_SUPPORT) unsharp_image=AccelerateUnsharpMaskImage(image,channel,radius,sigma,gain, threshold,exception); if (unsharp_image != (Image *) NULL) return(unsharp_image); #endif unsharp_image=BlurImageChannel(image,(ChannelType) (channel &~ SyncChannels), radius,sigma,exception); if (unsharp_image == (Image *) NULL) return((Image *) NULL); quantum_threshold=(MagickRealType) QuantumRange*threshold; /* Unsharp-mask image. 
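    For each selected channel the difference between the original pixel and
    its Gaussian-blurred counterpart is formed; if twice the magnitude of
    that difference is below the quantum-scaled threshold the original value
    is kept, otherwise the difference is multiplied by the gain and added
    back to the original before clamping.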
*/ status=MagickTrue; progress=0; GetMagickPixelPacket(image,&bias); image_view=AcquireVirtualCacheView(image,exception); unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,unsharp_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { DoublePixelPacket pixel; register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict unsharp_indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); unsharp_indexes=GetCacheViewAuthenticIndexQueue(unsharp_view); pixel.red=bias.red; pixel.green=bias.green; pixel.blue=bias.blue; pixel.opacity=bias.opacity; pixel.index=bias.index; for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) { pixel.red=GetPixelRed(p)-(MagickRealType) GetPixelRed(q); if (fabs(2.0*pixel.red) < quantum_threshold) pixel.red=(MagickRealType) GetPixelRed(p); else pixel.red=(MagickRealType) GetPixelRed(p)+(pixel.red*gain); SetPixelRed(q,ClampToQuantum(pixel.red)); } if ((channel & GreenChannel) != 0) { pixel.green=GetPixelGreen(p)-(MagickRealType) q->green; if (fabs(2.0*pixel.green) < quantum_threshold) pixel.green=(MagickRealType) GetPixelGreen(p); else pixel.green=(MagickRealType) GetPixelGreen(p)+(pixel.green*gain); SetPixelGreen(q,ClampToQuantum(pixel.green)); } if ((channel & BlueChannel) != 0) { pixel.blue=GetPixelBlue(p)-(MagickRealType) q->blue; if (fabs(2.0*pixel.blue) < quantum_threshold) pixel.blue=(MagickRealType) GetPixelBlue(p); else pixel.blue=(MagickRealType) GetPixelBlue(p)+(pixel.blue*gain); SetPixelBlue(q,ClampToQuantum(pixel.blue)); } if ((channel & OpacityChannel) != 0) { pixel.opacity=GetPixelOpacity(p)-(MagickRealType) q->opacity; if (fabs(2.0*pixel.opacity) < quantum_threshold) pixel.opacity=(MagickRealType) GetPixelOpacity(p); else pixel.opacity=GetPixelOpacity(p)+(pixel.opacity*gain); SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { pixel.index=GetPixelIndex(indexes+x)-(MagickRealType) GetPixelIndex(unsharp_indexes+x); if (fabs(2.0*pixel.index) < quantum_threshold) pixel.index=(MagickRealType) GetPixelIndex(indexes+x); else pixel.index=(MagickRealType) GetPixelIndex(indexes+x)+ (pixel.index*gain); SetPixelIndex(unsharp_indexes+x,ClampToQuantum(pixel.index)); } p++; q++; } if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_UnsharpMaskImageChannel) #endif proceed=SetImageProgress(image,SharpenImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } unsharp_image->type=image->type; unsharp_view=DestroyCacheView(unsharp_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) unsharp_image=DestroyImage(unsharp_image); return(unsharp_image); }
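/*
  A minimal usage sketch of the effect filters defined above, driven through
  the public MagickCore API and intended to be compiled as its own small
  program.  The input/output file names and the filter parameters are
  placeholders; error handling is reduced to null checks.
*/
#include <magick/MagickCore.h>

int main(int argc,char **argv)
{
  ExceptionInfo
    *exception;

  Image
    *blurred,
    *image,
    *montage,
    *sharpened;

  ImageInfo
    *image_info;

  (void) argc;
  MagickCoreGenesis(*argv,MagickTrue);
  exception=AcquireExceptionInfo();
  image_info=CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(image_info->filename,"input.png",MaxTextExtent);
  image=ReadImage(image_info,exception);
  if (image != (Image *) NULL)
    {
      /* radius 0.0 lets the filter choose a suitable radius from sigma */
      sharpened=UnsharpMaskImage(image,0.0,1.0,1.0,0.05,exception);
      if (sharpened != (Image *) NULL)
        {
          (void) CopyMagickString(sharpened->filename,"sharpened.png",
            MaxTextExtent);
          (void) WriteImage(image_info,sharpened);
          sharpened=DestroyImage(sharpened);
        }
      /* rotational blur of 15 degrees about the image centre */
      blurred=RotationalBlurImage(image,15.0,exception);
      if (blurred != (Image *) NULL)
        {
          (void) CopyMagickString(blurred->filename,"blurred.png",
            MaxTextExtent);
          (void) WriteImage(image_info,blurred);
          blurred=DestroyImage(blurred);
        }
      /* 3x3 montage of gamma variations, as built by PreviewImage() */
      montage=PreviewImage(image,GammaPreview,exception);
      if (montage != (Image *) NULL)
        {
          (void) CopyMagickString(montage->filename,"preview.png",
            MaxTextExtent);
          (void) WriteImage(image_info,montage);
          montage=DestroyImage(montage);
        }
      image=DestroyImage(image);
    }
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}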
matrix.h
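// Dense linear-algebra helpers: Jacobi eigensolvers for real-symmetric
// (long double) and complex (Hermitian) matrices, a similarity-rotation
// helper, in-place left/right matrix multiplication, Gauss-Jordan inversion
// with partial pivoting, naive O(n^2) discrete Fourier transforms, and
// determinants via elimination with pivoting.  The complex routines assume
// a `complex` type, the constants I and Pi, norm2(), new_complex2() and the
// for1/for2 macros are provided by headers included before this one.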
void EigenJacobi (long double ** h, long double ** a, long double * e, int n, long double accuracy=1e-10, int max_sweep=100) { int flag; long double ** v=new long double * [n]; {for (int i=0; i<n; i++) v[i]=new long double [n];} {for (int i=0; i<n; i++) for (int j=0; j<n; j++) {a[i][j]=h[i][j]; if (i==j) v[i][j]=1; else v[i][j]=0;};} do { max_sweep--; flag=0; for (int pp=n-1; pp>=1; pp--) for (int q=0; q<n-pp; q++) { int p=pp+q; if (fabs(a[p][q])>accuracy) { flag=1; long double theta=(a[q][q]-a[p][p])/(2*a[p][q]); long double t=1/(abs(theta)+sqrt(theta*theta+1));if (theta<0) t=-t; long double c=1/sqrt(t*t+1), s=t*c, tau=s/(1+c); for (int r=0; r<n; r++) { if ((r!=p) && (r!=q)) { long double a1=a[r][p]-s*(a[r][q]+tau*a[r][p]); long double a2=a[r][q]+s*(a[r][p]-tau*a[r][q]); a[r][p]=a1; a[p][r]=a1; a[r][q]=a2; a[q][r]=a2; ;} long double v1=c*v[p][r]-s*v[q][r]; long double v2=s*v[p][r]+c*v[q][r]; v[p][r]=v1; v[q][r]=v2; ;} long double ap=a[p][p]-t*a[p][q], aq=a[q][q]+t*a[p][q]; a[p][p]=ap; a[q][q]=aq; a[p][q]=0; a[q][p]=0; ;} ;} ;} while( (flag>0) && (max_sweep>0) ); for (int i=0; i<n; i++) e[i]=a[i][i]; /* for (int i=0; i<n-1; i++) for (int j=i+1; j<n; j++) if (e[j]<e[i]) { long double a=e[j]; e[j]=e[i]; e[i]=a; long double * b=v[j]; v[j]=v[i];v[i]=b; ;} */ {for (int i=0; i<n; i++) for (int j=0; j<n; j++) a[i][j]=v[i][j];} for (int i=0; i<n; i++) //normalization { long double s=0; for (int j=0; j<n; j++) s+=a[i][j]*a[i][j]; s=sqrt(s); if (fabs(s)>accuracy*accuracy) {for (int j=0; j<n; j++) a[i][j]/=s;} else {for (int j=0; j<n; j++) a[i][j]=1/sqrt(n);} ;} {for (int i=0; i<n; i++) delete [] v[i]; delete [] v;} ;} void EigenJacobi (complex ** h, complex ** a, long double * e, int n, long double accuracy=1e-10, int max_sweep=100) { static complex ** v=new_complex2(n,n); static int nn=n; if (n>nn) {delete_complex2(v,nn,nn); v=new_complex2(n,n); nn=n;} {for (int i=0; i<n; i++) for (int j=0; j<n; j++) {a[i][j]=h[i][j]; if (i==j) v[i][j]=1; else v[i][j]=0;};} for1(sweep, max_sweep) { int f=0; for1(p,n) for1(q,p) // for (int p=n-1; p>0; p--) // for (int q=0; q<n-p; q++) if (abs(a[p][q])>accuracy) { complex z=sqrt(I*conj(a[p][q])/abs(a[p][q])), x1,x2; for1(j,n) {x1=a[p][j]*z/sqrt(2.l); x2=a[q][j]*conj(z)/sqrt(2.l);a[p][j]=x1-x2; a[q][j]=x1+x2;} // for1(j,n) {x1=v[p][j]*z/sqrt(2); x2=v[q][j]*conj(z)/sqrt(2);v[p][j]=x1-x2; v[q][j]=x1+x2;} for1(j,n) {x1=a[j][p]*conj(z)/sqrt(2.l); x2=a[j][q]*z/sqrt(2.l);a[j][p]=x1-x2; a[j][q]=x1+x2;} for1(j,n) {x1=v[j][p]*conj(z)/sqrt(2.l); x2=v[j][q]*z/sqrt(2.l);v[j][p]=x1-x2; v[j][q]=x1+x2;} z=sqrt(conj(a[p][q])/abs(a[p][q])); for1(j,n) {x1=a[p][j]*z/sqrt(2.l); x2=a[q][j]*conj(z)/sqrt(2.l);a[p][j]=x1-x2; a[q][j]=x1+x2;} // for1(j,n) {x1=v[p][j]*z/sqrt(2); x2=v[q][j]*conj(z)/sqrt(2);v[p][j]=x1-x2; v[q][j]=x1+x2;} for1(j,n) {x1=a[j][p]*conj(z)/sqrt(2.l); x2=a[j][q]*z/sqrt(2.l);a[j][p]=x1-x2; a[j][q]=x1+x2;} for1(j,n) {x1=v[j][p]*conj(z)/sqrt(2.l); x2=v[j][q]*z/sqrt(2.l);v[j][p]=x1-x2; v[j][q]=x1+x2;} f=1; } if (f==0) break; } for1 (i,n) e[i]=real(a[i][i]); for2(i,j,n) a[j][i]=v[i][j]; for (int i=0; i<n; i++) //normalization { complex s=0; for (int j=0; j<n; j++) s+=norm2(a[i][j]); s=sqrt(s); //if (abs(a[i][0])!=0) s*=(a[i][0])/abs(a[i][0]); if (abs(s)>accuracy*accuracy) {for (int j=0; j<n; j++) a[i][j]/=s;} else {for (int j=0; j<n; j++) a[i][j]=1.l/sqrt(n);} ;} } void rotate(long double ** &a, long double ** & v, int size) { long double ** b=new long double * [size]; {for (int i=0; i<size; i++) b[i]=new long double [size];} { for (int i=0;i<size;i++) for (int 
j=0;j<size;j++) { b[i][j]=0; for (int k=0; k<size; k++) b[i][j]+=a[i][k]*v[j][k]; //check index order!!! ;} ;} { for (int i=0;i<size;i++) for (int j=0;j<size;j++) { a[i][j]=0; for (int k=0; k<size; k++) a[i][j]+=v[i][k]*b[k][j]; //check index order!!! ;} ;} {for (int i=0; i<size; i++) delete [] b[i];} delete [] b; ;} void mult_left(complex ** a, complex ** r, int size) //a=r*a { static int old_size=0; static complex ** y=NULL; if (size>old_size) { for (int i=0; i<old_size; i++) delete [] y[i]; delete [] y; y=new complex * [size]; for (int i=0; i<size; i++) y[i]=new complex [size]; old_size=size; } for (int i=0; i<size; i++) for (int j=0; j<size; j++) y[i][j]=0; #pragma omp parallel for for (int i=0; i<size; i++) for (int k=0; k<size; k++) if (abs(r[i][k])>1e-15) for (int j=0; j<size; j++) y[i][j]+=r[i][k]*a[k][j]; for (int i=0; i<size; i++) for (int j=0; j<size; j++) a[i][j]=y[i][j]; } void mult_right(complex ** a, complex ** r, int size) //a=a*r { static int old_size=0; static complex ** y=NULL; if (size>old_size) { for (int i=0; i<old_size; i++) delete [] y[i]; delete [] y; y=new complex * [size]; for (int i=0; i<size; i++) y[i]=new complex [size]; old_size=size; } for (int i=0; i<size; i++) for (int j=0; j<size; j++) y[i][j]=0; #pragma omp parallel for for (int j=0; j<size; j++) for (int k=0; k<size; k++) if (abs(r[k][j])>1e-15) for (int i=0; i<size; i++) y[i][j]+=a[i][k]*r[k][j]; for (int i=0; i<size; i++) for (int j=0; j<size; j++) a[i][j]=y[i][j]; } #define c_number long double void Inverse(c_number ** &a, int size) //Gauss with partial pivoting { c_number ** r=new c_number * [size]; { for (int i=0;i<size;i++) { r[i]=new c_number [size]; for (int j=0; j<size; j++) r[i][j]=0.; r[i][i]=1.; ;} ;} for (int i=0; i<size; i++) { //pivoting { long double fmax=-1.; int jmax; for (int j=i; j<size; j++) if ( fmax<norm2(a[j][i]) ) {jmax=j; fmax=norm2(a[j][i]);} c_number * aux; aux=a[i]; a[i]=a[jmax]; a[jmax]=aux; aux=r[i]; r[i]=r[jmax]; r[jmax]=aux; if (norm2(a[i][i])<=0.) {cout<<"LInverse!"; return;} ;} //main body #pragma omp parallel for for (int j=0; j<i; j++) { c_number f=a[j][i]/a[i][i];a[j][i]=0; { for (int l=i+1; l<size; l++) a[j][l]-=a[i][l]*f; for (int l=0; l<size; l++) r[j][l]-=r[i][l]*f; } ;} { c_number f=1.l/a[i][i]; {for (int l=i; l<size; l++) a[i][l]*=f;} {for (int l=0; l<size; l++) r[i][l]*=f;} ;} #pragma omp parallel for for (int j=i+1; j<size; j++) { c_number f=a[j][i]/a[i][i]; a[j][i]=0; { for (int l=i+1; l<size; l++) a[j][l]-=a[i][l]*f; for (int l=0; l<size; l++) r[j][l]-=r[i][l]*f; } ;} ;} {for (int i=0; i<size; i++) for (int j=0; j<size; j++) a[i][j]=r[i][j];} {for (int i=0; i<size; i++) delete [] r[i]; delete [] r;} ;} void Inverse(complex ** &a, int size) //Gauss with partial pivoting { complex ** r=new complex * [size]; { for (int i=0;i<size;i++) { r[i]=new complex [size]; for (int j=0; j<size; j++) r[i][j]=0.; r[i][i]=1.; ;} ;} for (int i=0; i<size; i++) { //pivoting { long double fmax=-1.; int jmax; for (int j=i; j<size; j++) if ( fmax<norm2(a[j][i]) ) {jmax=j; fmax=norm2(a[j][i]);} complex * aux; aux=a[i]; a[i]=a[jmax]; a[jmax]=aux; aux=r[i]; r[i]=r[jmax]; r[jmax]=aux; if (norm2(a[i][i])<=0.) 
{cout<<"LInverse!"; return;} ;} //main body #pragma omp parallel for for (int j=0; j<i; j++) { complex f=a[j][i]/a[i][i];a[j][i]=0; { for (int l=i+1; l<size; l++) a[j][l]-=a[i][l]*f; for (int l=0; l<size; l++) r[j][l]-=r[i][l]*f; } ;} { complex f=1.l/a[i][i]; {for (int l=i; l<size; l++) a[i][l]*=f;} {for (int l=0; l<size; l++) r[i][l]*=f;} ;} #pragma omp parallel for for (int j=i+1; j<size; j++) { complex f=a[j][i]/a[i][i]; a[j][i]=0; { for (int l=i+1; l<size; l++) a[j][l]-=a[i][l]*f; for (int l=0; l<size; l++) r[j][l]-=r[i][l]*f; } ;} ;} {for (int i=0; i<size; i++) for (int j=0; j<size; j++) a[i][j]=r[i][j];} {for (int i=0; i<size; i++) delete [] r[i]; delete [] r;} ;} void Fourier(complex * x, complex * y, int size, int dir) //dir==-1 => r to k; dir==1 => k to r { long double r=dir*2*Pi/size; for (int j=0; j<size; j++) {y[j]=0; for (int l=0; l<size; l++) y[j]+=x[l]*exp(I*(r*j*l));} if (dir==1) for (int i=0; i<size; i++) y[i]/=1.l*size; ;} void Fourier(complex * x, int size, int dir) //dir==-1 => r to k; dir==1 => k to r { static complex * y=NULL; static int old_size=0; if (size>old_size) {delete[]y; y=new complex [size];old_size=size;} Fourier(x,y, size, dir); for (int i=0; i<size; i++) x[i]=y[i]; } void kk_elements (complex ** x, complex *y, int size) { long double r=2*Pi/size; for (int k=0; k<size; k++) { y[k]=0; for (int i=0; i<size; i++) for (int j=0; j<size; j++) y[k]+=x[i][j]*exp(I*(r*(i-j))); } for (int k=0; k<size; k++) y[k]/=1.l*size; } void Fourier (complex ** x, int size, int dir) { static int old_size=0; static complex ** y=NULL; if (size>old_size) { for (int i=0; i<old_size; i++) delete [] y[i]; delete [] y; y=new complex * [size]; for (int i=0; i<size; i++) y[i]=new complex [size]; old_size=size; } //long double s; for (int i=0; i<size; i++) for (int j=0; j<size; j++) s+=abs(sqr(x[i][j])); long double r=dir*2*Pi/size; for (int i=0; i<size; i++) for (int j=0; j<size; j++) y[i][j]=exp(I*(r*i*j)); mult_left(x,y,size); for (int i=0; i<size; i++) for (int j=0; j<size; j++) y[j][i]=exp(-I*(r*i*j)); mult_right(x,y,size); for (int i=0; i<size; i++) for (int j=0; j<size; j++) x[i][j]/=(1.l*size); // long double s2; for (int i=0; i<size; i++) for (int j=0; j<size; j++) s2+=abs(sqr(x[i][j])); cout<<"\n"<<s<<"="<<s2<<"\n"; } complex det(complex ** & b, int N) { complex s=1; for (int i=0; i<N-1; i++) { //pivoting { long double fmax=0; int jmax=i; for (int j=i; j<N; j++) if ( fmax<abs(b[j][i]) ) {jmax=j; fmax=abs(b[j][i]);} if (jmax!=i) { complex * aux; aux=b[i]; b[i]=b[jmax]; b[jmax]=aux; s=-s; ;} ;} //main body if (norm2(b[i][i])<=0) return 0; for (int j=i+1; j<N; j++) { complex f=b[j][i]/b[i][i]; for (int k=i; k<N; k++) b[j][k]-=f*b[i][k]; ;} ;} if (abs(b[N-1][N-1])<=0) return 0; {for (int i=0; i<N; i++) s*=b[i][i];} return s; ;} long double det(long double ** & b, int N) { long double s=1; for (int i=0; i<N-1; i++) { //pivoting { long double fmax=0; int jmax=i; for (int j=i; j<N; j++) if ( fmax<abs(b[j][i]) ) {jmax=j; fmax=abs(b[j][i]);} if (jmax!=i) { long double * aux; aux=b[i]; b[i]=b[jmax]; b[jmax]=aux; s=-s; ;} ;} //main body if (abs(b[i][i])<=0) return 0; for (int j=i+1; j<N; j++) { long double f=b[j][i]/b[i][i]; for (int k=i; k<N; k++) b[j][k]-=f*b[i][k]; ;} ;} if (abs(b[N-1][N-1])<=0) return 0; {for (int i=0; i<N; i++) s*=b[i][i];} return s; ;}
DCSCTile.h
/****************************************************************************** * ** Copyright (c) 2016, Intel Corporation ** * ** All rights reserved. ** * ** ** * ** Redistribution and use in source and binary forms, with or without ** * ** modification, are permitted provided that the following conditions ** * ** are met: ** * ** 1. Redistributions of source code must retain the above copyright ** * ** notice, this list of conditions and the following disclaimer. ** * ** 2. Redistributions in binary form must reproduce the above copyright ** * ** notice, this list of conditions and the following disclaimer in the ** * ** documentation and/or other materials provided with the distribution. ** * ** 3. Neither the name of the copyright holder nor the names of its ** * ** contributors may be used to endorse or promote products derived ** * ** from this software without specific prior written permission. ** * ** ** * ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** * ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** * ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** * ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** * ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** * ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** * ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** * ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** * ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** * ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** * ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * ******************************************************************************/ /* Michael Anderson (Intel Corp.) 
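 * DCSCTile<T> stores one tile of a sparse matrix in a doubly compressed
 * sparse-column layout: the rows are split into num_partitions bands
 * (row_pointers), each band's nonzeros occupy a contiguous range
 * (edge_pointers) sorted column-major, and only columns that actually hold
 * nonzeros appear in col_indices/col_ptrs, with col_starts giving each
 * band's offset into those arrays.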
* * ******************************************************************************/ #ifndef SRC_DCSCTILE_H_ #define SRC_DCSCTILE_H_ #include <string> #include <algorithm> #include <vector> template <typename T> bool compare_dcsc(const tedge_t<T> &a, const tedge_t<T> &b) { if (a.tile_id < b.tile_id) return true; else if (a.tile_id > b.tile_id) return false; if (a.dst < b.dst) return true; else if (a.dst > b.dst) return false; if (a.src < b.src) return true; else if (a.src > b.src) return false; return false; } template <typename T> class DCSCTile { public: int nnz; std::string name; int m; int n; int num_cols; int num_partitions; T *vals; // nnz int *row_inds; // nnz int *row_pointers; // num_partitions + 1 int *edge_pointers; // num_partitions + 1 int *col_starts; // num_partitions + 1 int *col_ptrs; // col_starts[num_partitions] int *col_indices; // col_starts[num_partitions] // Serialize friend boost::serialization::access; template<class Archive> void save(Archive& ar, const unsigned int version) const { ar & nnz; ar & name; ar & m; ar & n; ar & num_cols; ar & num_partitions; if(nnz > 0) { for(int i = 0 ; i < nnz ; i++) { ar & vals[i]; } for(int i = 0 ; i < nnz ; i++) { ar & row_inds[i]; } for(int i = 0 ; i < num_partitions+1 ; i++) { ar & row_pointers[i]; } for(int i = 0 ; i < num_partitions+1 ; i++) { ar & edge_pointers[i]; } for(int i = 0 ; i < num_partitions+1 ; i++) { ar & col_starts[i]; } for(int i = 0 ; i < col_starts[num_partitions] ; i++) { ar & col_ptrs[i]; } for(int i = 0 ; i < col_starts[num_partitions] ; i++) { ar & col_indices[i]; } } } template<class Archive> void load(Archive& ar, const unsigned int version) { ar & nnz; ar & name; ar & m; ar & n; ar & num_cols; ar & num_partitions; std::cout << "Got nnz: " << nnz << std::endl; if(nnz > 0) { vals = reinterpret_cast<T *>( _mm_malloc(nnz * sizeof(T), 64)); row_inds = reinterpret_cast<int *>( _mm_malloc(nnz * sizeof(int), 64)); row_pointers = reinterpret_cast<int *>( _mm_malloc((num_partitions+1) * sizeof(int), 64)); edge_pointers = reinterpret_cast<int *>( _mm_malloc((num_partitions+1) * sizeof(int), 64)); col_starts = reinterpret_cast<int *>( _mm_malloc((num_partitions+1) * sizeof(int), 64)); col_ptrs = reinterpret_cast<int *>( _mm_malloc(num_cols * sizeof(int), 64)); col_indices = reinterpret_cast<int *>( _mm_malloc(num_cols * sizeof(int), 64)); for(int i = 0 ; i < nnz ; i++) { ar & vals[i]; } for(int i = 0 ; i < nnz ; i++) { ar & row_inds[i]; } for(int i = 0 ; i < num_partitions+1 ; i++) { ar & row_pointers[i]; } for(int i = 0 ; i < num_partitions+1 ; i++) { ar & edge_pointers[i]; } for(int i = 0 ; i < num_partitions+1 ; i++) { ar & col_starts[i]; } for(int i = 0 ; i < num_cols ; i++) { ar & col_ptrs[i]; } for(int i = 0 ; i < num_cols ; i++) { ar & col_indices[i]; } } } BOOST_SERIALIZATION_SPLIT_MEMBER() DCSCTile() : name("TEMP"), m(0), n(0), nnz(0), num_partitions(0) {} DCSCTile(int _m, int _n) : name("TEMP"), m(_m), n(_n), nnz(0), num_partitions(0) {} static void static_partition(int *&row_pointers, int m, int num_partitions, int round) { row_pointers = reinterpret_cast<int *>( _mm_malloc((num_partitions + 1) * sizeof(int), 64)); if (round == 1) { int rows_per_partition = m / num_partitions; int rows_leftover = m % num_partitions; row_pointers[0] = 0; int current_row = row_pointers[0] + rows_per_partition; for (int p = 1; p < num_partitions + 1; p++) { if (rows_leftover > 0) { current_row += 1; row_pointers[p] = current_row; current_row += rows_per_partition; rows_leftover--; } else { row_pointers[p] = current_row; 
current_row += rows_per_partition; } } } else { int n512 = std::max((m / round) / num_partitions, 1); int n_round = std::max(0, m / round - n512 * num_partitions); assert(n_round < num_partitions); row_pointers[0] = 0; for (int p = 1; p < num_partitions; p++) { row_pointers[p] = row_pointers[p - 1] + ((n_round > 0) ? ((n512 + 1) * round) : (n512 * round)); row_pointers[p] = std::min(row_pointers[p], m); if (n_round > 0) n_round--; } row_pointers[num_partitions] = m; } } static void set_edge_pointers(tedge_t<T> *edges, int *row_pointers, int **edge_pointers, int nnz, int num_partitions) { // Figure out edge pointers (*edge_pointers) = reinterpret_cast<int *>( _mm_malloc((num_partitions + 1) * sizeof(int), 64)); int p = 0; for (int edge_id = 0; edge_id < nnz; edge_id++) { while (edges[edge_id].src >= row_pointers[p]) { (*edge_pointers)[p] = edge_id; p++; } } (*edge_pointers)[p] = nnz; for (p = p + 1; p < num_partitions + 1; p++) { (*edge_pointers)[p] = nnz; } } DCSCTile(edge_t<T> *edges, int _m, int _n, int _nnz, int row_start, int col_start) : name("TEMP"), m(_m), n(_n), nnz(_nnz) { double _start_time = MPI_Wtime(); if (nnz > 0) { num_partitions = omp_get_max_threads() * 16; // Partition DCSCTile<T>::static_partition(row_pointers, this->m, num_partitions, 32); // Set partition IDs for each edge tedge_t<T> *p_edges = reinterpret_cast<tedge_t<T> *>( _mm_malloc((uint64_t)nnz * (uint64_t)sizeof(tedge_t<T>), 64)); std::cout << "num partitions: " << num_partitions << std::endl; double _ep_start = MPI_Wtime(); #pragma omp parallel for for (int i = 0; i < nnz; i++) { p_edges[i].src = edges[i].src - 1 - row_start; p_edges[i].dst = edges[i].dst - 1 - col_start; p_edges[i].val = edges[i].val; p_edges[i].tile_id = -1; int p_start = 0; int p_end = num_partitions-1; while(1) { int p_half = p_start + (p_end - p_start); // Check p_half if (p_edges[i].src >= row_pointers[p_half] && p_edges[i].src < row_pointers[p_half + 1]) { p_edges[i].tile_id = p_half; break; } if(p_edges[i].src < row_pointers[p_half]) p_end = p_half - 1; if(p_edges[i].src >= row_pointers[p_half+1]) p_start = p_half + 1; } #ifdef CHECK_BINARY_SEARCH for (int p = 0; p < num_partitions; p++) { if (p_edges[i].src >= row_pointers[p] && p_edges[i].src < row_pointers[p + 1]) { //p_edges[i].tile_id = p; //break; assert(p_edges[i].tile_id == p); } #endif assert(p_edges[i].tile_id >= 0); } double _ep_end = MPI_Wtime(); std::cout << "set_edge_pointers time: " << _ep_end - _ep_start << std::endl; // Sort // std::cout << "Sorting: " << (uint64_t)nnz << std::endl; // std::cout << "allocated : " << (uint64_t)nnz * (uint64_t)sizeof(tedge_t<T>) << std::endl; #pragma omp parallel for for(int i =0 ; i < nnz ; i++) { assert(p_edges[i].src >= 0); assert(p_edges[i].dst >= 0); assert(p_edges[i].src < _m); assert(p_edges[i].dst < _n); } __gnu_parallel::sort(p_edges, p_edges + nnz, compare_dcsc<T>); // Find edge pointers DCSCTile<T>::set_edge_pointers(p_edges, row_pointers, &edge_pointers, nnz, num_partitions); // Count columns int *ncols = reinterpret_cast<int *>(_mm_malloc(num_partitions * sizeof(int), 64)); col_starts = reinterpret_cast<int *>( _mm_malloc((num_partitions + 1) * sizeof(int), 64)); #pragma omp parallel for for (int p = 0; p < num_partitions; p++) { int current_column = -1; int num_columns = 0; for (int edge_id = edge_pointers[p]; edge_id < edge_pointers[p + 1]; edge_id++) { if (current_column < p_edges[edge_id].dst) { num_columns++; current_column = p_edges[edge_id].dst; } } ncols[p] = num_columns; } int total_cols = 0; for (int p = 0; p < 
num_partitions; p++) { col_starts[p] = total_cols; total_cols += ncols[p] + 1; } col_starts[num_partitions] = total_cols; num_cols = total_cols; // Build DCSC std::cout << "Allocating nnz vals: " << nnz << std::endl; vals = reinterpret_cast<T *>( _mm_malloc((uint64_t)nnz * (uint64_t)sizeof(T), 64)); row_inds = reinterpret_cast<int *>( _mm_malloc((uint64_t)nnz * (uint64_t)sizeof(int), 64)); col_indices = reinterpret_cast<int *>( _mm_malloc(col_starts[num_partitions] * sizeof(int), 64)); col_ptrs = reinterpret_cast<int *>( _mm_malloc(col_starts[num_partitions] * sizeof(int), 64)); #pragma omp parallel for for (int p = 0; p < num_partitions; p++) { T *val = vals + edge_pointers[p]; int *row_ind = row_inds + edge_pointers[p]; int *col_index = col_indices + col_starts[p]; int *col_ptr = col_ptrs + col_starts[p]; int current_column = -1; int current_column_num = -1; for (int edge_id = edge_pointers[p]; edge_id < edge_pointers[p + 1]; edge_id++) { val[edge_id - edge_pointers[p]] = p_edges[edge_id].val; row_ind[edge_id - edge_pointers[p]] = p_edges[edge_id].src; if (current_column < p_edges[edge_id].dst) { current_column_num++; current_column = p_edges[edge_id].dst; col_index[current_column_num] = current_column; col_ptr[current_column_num] = edge_id - edge_pointers[p]; } } int num_columns = col_starts[p + 1] - col_starts[p] - 1; col_ptr[num_columns] = edge_pointers[p + 1] - edge_pointers[p]; col_index[num_columns] = n + 1; } _mm_free(p_edges); _mm_free(ncols); } else { num_partitions = 0; } double _end_time = MPI_Wtime(); std::cout << "fn time: " << _end_time - _start_time << std::endl; } void get_edges(edge_t<T> *edges, int row_start, int col_start) { int nnzcnt = 0; for (int p = 0; p < num_partitions; p++) { for (int j = 0; j < (col_starts[p + 1] - col_starts[p]) - 1; j++) { int col_index = col_indices[col_starts[p] + j]; for (int nz_idx = col_ptrs[col_starts[p] + j]; nz_idx < col_ptrs[col_starts[p] + j + 1]; nz_idx++) { int row_ind = row_inds[edge_pointers[p] + nz_idx]; edges[nnzcnt].src = row_start + row_ind + 1; edges[nnzcnt].dst = col_start + col_index + 1; edges[nnzcnt].val = vals[edge_pointers[p] + nz_idx]; nnzcnt++; } } } assert(nnzcnt == this->nnz); } bool isEmpty() const { return nnz <= 0; } ~DCSCTile() { if(nnz > 0) { _mm_free(row_inds); _mm_free(col_ptrs); _mm_free(col_indices); _mm_free(vals); _mm_free(row_pointers); _mm_free(edge_pointers); _mm_free(col_starts); } nnz = 0; } }; #endif // SRC_DCSCTILE_H_
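A note on the partitioning above: for round == 1, DCSCTile::static_partition splits the m rows into num_partitions contiguous blocks of m / num_partitions rows and hands the m % num_partitions leftover rows, one each, to the leading partitions. The following standalone sketch (illustration only; hypothetical test harness, caller-provided array instead of _mm_malloc) reproduces that round == 1 logic and checks that the blocks cover every row exactly once.

#include <stdio.h>
#include <assert.h>

/* Sketch of DCSCTile::static_partition, round == 1 case: contiguous row
   blocks with the remainder spread over the leading partitions. */
static void static_partition_sketch(int *row_pointers, int m, int num_partitions)
{
  int rows_per_partition = m / num_partitions;
  int rows_leftover = m % num_partitions;
  row_pointers[0] = 0;
  int current_row = rows_per_partition;
  for (int p = 1; p <= num_partitions; p++) {
    if (rows_leftover > 0) {
      current_row += 1;                 /* this partition absorbs one extra row */
      row_pointers[p] = current_row;
      current_row += rows_per_partition;
      rows_leftover--;
    } else {
      row_pointers[p] = current_row;
      current_row += rows_per_partition;
    }
  }
  assert(row_pointers[num_partitions] == m);   /* every row covered exactly once */
}

int main(void)
{
  int rp[5];
  static_partition_sketch(rp, 10, 4);   /* 10 rows over 4 partitions */
  for (int p = 0; p <= 4; p++)
    printf("%d ", rp[p]);               /* prints: 0 3 6 8 10 */
  printf("\n");
  return 0;
}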
ordered-1.c
/* { dg-do compile } */
/* { dg-options "-fopenmp -fdump-tree-ompexp" } */

extern void bar(int);

void foo (void)
{
  #pragma omp ordered
    bar(0);

  #pragma omp ordered
    {
      bar(1);
      bar(2);
    }
}

/* { dg-final { scan-tree-dump-times "GOMP_ordered_start" 2 "ompexp" } } */
/* { dg-final { scan-tree-dump-times "GOMP_ordered_end" 2 "ompexp" } } */
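The test above only checks that each standalone ordered region expands into a GOMP_ordered_start/GOMP_ordered_end pair in the ompexp dump. For context, the construct is more commonly used inside a loop declared with the ordered clause, where those runtime calls serialize the marked region in iteration order. A small illustrative example (not part of the testsuite):

#include <stdio.h>
#include <omp.h>

int main(void)
{
  /* The ordered clause on the loop permits an ordered region in its body;
     the prints come out in iteration order even though the surrounding
     iterations run in parallel. */
  #pragma omp parallel for ordered
  for (int i = 0; i < 8; i++) {
    int v = i * i;                       /* may execute out of order */
    #pragma omp ordered
    printf("iteration %d -> %d\n", i, v);
  }
  return 0;
}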
kernel.h
/* * kernel.h * * Created on: Oct 9, 2016 * Updated on: Oct 20, 2016 * Author: Yimin Zhong */ #ifndef FMM_KERNEL_H #define FMM_KERNEL_H #include "tree.h" #include <functional> #include "Eigen/Dense" #ifdef RUN_OMP #include "omp.h" #endif using Eigen::MatrixXd; using Eigen::VectorXd; class kernel { public: tree t; VectorXd chargeTree; std::function<double(point&, point&)> eval; int rank; MatrixXd R[8]; int nChebyshev; VectorXd chebyNode; MatrixXd tNode; kernel() { nChebyshev = 0; rank = 0; } ~kernel() {} void initialize(int _nChebyshev, vector<point>& _source, vector<point>& _target, VectorXd _charge, int _nSource, int _nTarget, int _rank, int _maxLevel) { // populate the kd-tree. t.populate(_source, _target, _nSource, _nTarget, _rank, _maxLevel); nChebyshev = _nChebyshev; chargeTree = _charge; // nChebyshev^3 nodes are used for interpolation. rank = nChebyshev * nChebyshev * nChebyshev; chebyNode = VectorXd::Zero(nChebyshev); getStandardChebyNodes(nChebyshev, chebyNode); tNode = MatrixXd::Zero(nChebyshev, nChebyshev); getStandardChebyPoly(nChebyshev, nChebyshev, chebyNode, tNode); getTransfer(nChebyshev, chebyNode, tNode, R); } void run(VectorXd& potentialMatrix) { #ifdef RUN_OMP #pragma omp parallel #endif { #ifdef RUN_OMP #pragma omp single #endif RUN("up-pass", upPass(0)); } #ifdef RUN_OMP #pragma omp taskwait #endif potentialMatrix = VectorXd::Zero(t.nTarget); #ifdef RUN_OMP #pragma omp parallel #endif { #ifdef RUN_OMP #pragma omp single #endif RUN("down-pass", downPass(0, potentialMatrix)); } #ifdef RUN_OMP #pragma omp taskwait #endif } protected: void getStandardChebyNodes(int _nChebyshev, VectorXd& _chebyNode) { _chebyNode = VectorXd::Zero(_nChebyshev); for (int i = 0; i < _nChebyshev; ++i) { _chebyNode(i) = -cos((i + 0.5) * M_PI/_nChebyshev); } } void getStandardChebyPoly(int _nChebyPoly, int _N, VectorXd& _x, MatrixXd& _T) { _T = MatrixXd::Zero(_N, _nChebyPoly); _T.col(0) = VectorXd::Ones(_N); if (_nChebyPoly > 1) { _T.col(1) = _x; for (int i = 2; i < _nChebyPoly; ++i) { _T.col(i) = 2.0 * _x.cwiseProduct(_T.col(i - 1)) - _T.col(i - 2); } } } void getTransferFromParentChebyshevToChildrenChebyshev(int _nChebyshev, VectorXd& _chebyNode, MatrixXd& _tNode, MatrixXd& _transfer) { VectorXd childChebyNode (2 * _nChebyshev); childChebyNode.segment(0, nChebyshev) = 0.5 *(_chebyNode - VectorXd::Ones(_nChebyshev)); childChebyNode.segment(nChebyshev, nChebyshev) = 0.5 *(_chebyNode + VectorXd::Ones(_nChebyshev)); getStandardChebyPoly(_nChebyshev, 2 * _nChebyshev, childChebyNode, _transfer); _transfer = (2.0 * _transfer * _tNode.transpose() - MatrixXd::Ones(2 * _nChebyshev, _nChebyshev))/_nChebyshev; } void getTransfer(int _nChebyshev, VectorXd& _chebyNode, MatrixXd& _tNode, MatrixXd* R) { MatrixXd S; S = MatrixXd::Zero(2 * _nChebyshev, _nChebyshev); getTransferFromParentChebyshevToChildrenChebyshev(_nChebyshev, _chebyNode, _tNode, S); MatrixXd Transfer[2]; Transfer[0] = S.block(0, 0, _nChebyshev, _nChebyshev); Transfer[1] = S.block(_nChebyshev, 0, _nChebyshev, _nChebyshev); int _rank = _nChebyshev * _nChebyshev * _nChebyshev; for (int i = 0; i < 8; ++i) { R[i] = MatrixXd::Zero(_rank, _rank); } // follow bit representaion. 
for (int i = 0; i < _nChebyshev; ++i) { for (int j =0; j < _nChebyshev; ++j) { for (int k = 0; k < _nChebyshev; ++k) { for (int l = 0; l < _nChebyshev; ++l) { for (int m = 0; m < _nChebyshev; ++m) { for (int n = 0; n < _nChebyshev; ++n) { for (int id = 0; id < 8; ++id) { int bit[3]; bit[0] = (id >> 0) & 1; bit[1] = (id >> 1) & 1; bit[2] = (id >> 2) & 1; R[id](i * _nChebyshev * _nChebyshev + j * _nChebyshev + k, l * _nChebyshev * _nChebyshev + m * _nChebyshev + n) = Transfer[bit[2]](i, l) * Transfer[bit[1]](j, m) * Transfer[bit[0]](k, n); } } } } } } } } void getScaledChebyNode(int _nChebyNode, VectorXd& _chebyNode, point& center, point& radius, vector<point>& _scaledCnode) { for (int i = 0; i < _nChebyNode; ++i) { _scaledCnode.push_back(point(center.x + radius.x * _chebyNode(i), center.y + radius.y * _chebyNode(i), center.z + radius.z * _chebyNode(i))); } } void getCharge(int rootId) { node& n = t.dict[rootId]; if(n.chargeComputed){ return; } else{ n.chargeComputed = true; n.charge = MatrixXd::Zero(n.nSource,1); for(int k=0;k<n.nSource; ++k){ n.charge.row(k) = chargeTree.row(n.sourceIndex[k]); } } } void getTransferParentToChildren(int _nChebyNode, vector<point>& _tree, vector<int>& _index, point& _center, point& _radius, VectorXd& _chebyNode, MatrixXd& _tNode, MatrixXd& R) { int N = (int) _index.size(); VectorXd standlocation[3]; standlocation[0].resize(N); standlocation[1].resize(N); standlocation[2].resize(N); for (int i = 0; i < N; ++i) { standlocation[0](i) = (_tree[_index[i]].x - _center.x)/_radius.x; standlocation[1](i) = (_tree[_index[i]].y - _center.y)/_radius.y; standlocation[2](i) = (_tree[_index[i]].z - _center.z)/_radius.z; } MatrixXd Transfer[3]; for (int k = 0; k < 3; ++k) { getStandardChebyPoly(_nChebyNode, N, standlocation[k], Transfer[k]); Transfer[k] = (2.0 * Transfer[k] * _tNode.transpose() - MatrixXd::Ones(N, _nChebyNode))/_nChebyNode; } int _rank = _nChebyNode * _nChebyNode * _nChebyNode; R = MatrixXd::Zero(N, _rank); for (int k = 0; k < N; ++k) { for (int i = 0; i < _nChebyNode; ++i) { for (int j = 0; j <_nChebyNode; ++j) { for (int l = 0; l< _nChebyNode; ++l) { R(k, l*_nChebyNode * _nChebyNode + j*_nChebyNode + i) = Transfer[0](k, i) * Transfer[1](k, j) * Transfer[2](k, l); } } } } } void kernelEval(vector<point>& _source, vector<point>& _target, MatrixXd& K) { K = MatrixXd::Zero(_target.size(), _source.size()); for (size_t _s = 0; _s < _source.size(); ++_s) { for (size_t _t = 0; _t < _target.size(); ++_t) { K(_t, _s) = this->eval(_source[_s], _target[_t]); } } } void kernelEvalIndex(vector<int>& _sourceIndex, vector<int>& _targetIndex, MatrixXd& K) { K = MatrixXd::Zero(_targetIndex.size(), _sourceIndex.size()); for (size_t _s = 0; _s < _sourceIndex.size(); ++_s) { for (size_t _t = 0; _t < _targetIndex.size(); ++_t) { K(_t, _s) = this->eval( this->t.sourceTree[_sourceIndex[_s]], this->t.targetTree[_targetIndex[_t]] ); } } } void kernelEvalChebyshev(int _M, vector<point>& _xv, int _N, vector<point>& _yv, MatrixXd& K) { vector<point> sourceVec; vector<point> targetVec; K = MatrixXd::Zero(_M * _M * _M, _N* _N * _N); for (int k = 0; k < _M; k++) { for (int j = 0; j < _M; j++) { for (int i = 0; i < _M; i++) { point np(_xv[i].x , _xv[j].y, _xv[k].z); sourceVec.push_back(np); } } } for (int k = 0; k < _N; k++) { for (int j = 0; j < _N; j++) { for (int i = 0; i < _N; i++) { point np(_yv[i].x , _yv[j].y, _yv[k].z); targetVec.push_back(np); } } } kernelEval(sourceVec, targetVec, K); } void upPass(int rootId) { node& n = t.dict[rootId]; n.scaledCnode.clear(); n.nodeCharge = 
MatrixXd::Zero(rank, 1); n.nodePotential = MatrixXd::Zero(rank, 1); getScaledChebyNode(nChebyshev, chebyNode, n.center, n.radius, n.scaledCnode); if (n.isLeaf) { // lazy getCharge(rootId); getTransferParentToChildren(nChebyshev, t.sourceTree, n.sourceIndex, n.center, n.radius, chebyNode, tNode, n.R); getTransferParentToChildren(nChebyshev, t.targetTree, n.targetIndex, n.center, n.radius, chebyNode, tNode, n.L); n.nodeCharge += n.R.transpose() * n.charge; } else { for (int i = 0; i < 8; ++i) { #ifdef RUN_OMP #pragma omp task shared(n) firstprivate(i) #endif upPass(n.child[i]); } #ifdef RUN_OMP #pragma omp taskwait #endif for (int i = 0; i < 8; ++i) { if (!t.dict[n.child[i]].isEmpty) { n.nodeCharge += R[i].transpose() * t.dict[n.child[i]].nodeCharge; } } } } void downPass(int rootId, VectorXd& potential) { node& n = t.dict[rootId]; MatrixXd K; VectorXd temp; if (n.parent != -1) { /* * V list */ for (int i : n.vList) { if (!t.dict[i].isEmpty) { kernelEvalChebyshev(nChebyshev, t.dict[i].scaledCnode, nChebyshev, n.scaledCnode, K); n.nodePotential += K * t.dict[i].nodeCharge; } } /* * X List */ for (int i : n.xList) { if (!t.dict[i].isEmpty) { kernelEvalChebyshev(nChebyshev, t.dict[i].scaledCnode, nChebyshev, n.scaledCnode, K); n.nodePotential += K * t.dict[i].nodeCharge; } } /* * L2L */ node& p = t.dict[n.parent]; n.nodePotential += this->R[n.nodeIndex] * p.nodePotential; } if (n.isLeaf && n.nTarget != 0) { n.potential = MatrixXd::Zero(n.nTarget, 1); /* * U List */ for (int i : n.uList) { if (!t.dict[i].isEmpty) { getCharge(i); kernelEvalIndex(t.dict[i].sourceIndex, n.targetIndex, K); n.potential += K * t.dict[i].charge; } } /* * W List */ for (int i : n.wList) { if (!t.dict[i].isEmpty) { getCharge(i); kernelEvalIndex(t.dict[i].sourceIndex, n.targetIndex, K); n.potential += K * t.dict[i].charge; } } /* * L2T */ n.potential += n.L * n.nodePotential; /* * Finalize, caution: * * omp should be fine here, because no two threads will write to the same place at the same time. */ for (int i = 0; i < n.nTarget; i++) { potential.row(n.targetIndex[i]) += n.potential.row(i); } } if (!n.isLeaf) { for (int i = 0; i < 8; ++i) { #ifdef RUN_OMP #pragma omp task shared(n, potential) firstprivate(i) #endif downPass(n.child[i], potential); } #ifdef RUN_OMP #pragma omp taskwait #endif } } }; #endif //FMM_KERNEL_H
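The Chebyshev machinery above reduces to two formulas: the nodes x_i = -cos((i + 0.5) * pi / n), which increase with i and lie strictly inside (-1, 1), and the recurrence T_0(x) = 1, T_1(x) = x, T_k(x) = 2 x T_{k-1}(x) - T_{k-2}(x) used by getStandardChebyPoly; rank = n^3 because interpolation is done on the 3-D tensor product of these nodes. A minimal sketch of the same computation with plain arrays instead of Eigen (illustration only):

#include <stdio.h>
#include <math.h>

#define N_CHEB 5

int main(void)
{
  double node[N_CHEB];
  double T[N_CHEB][N_CHEB];   /* T[i][k] = T_k evaluated at node i */

  /* nodes: -cos((i + 0.5) * pi / n), increasing and strictly inside (-1, 1) */
  for (int i = 0; i < N_CHEB; i++)
    node[i] = -cos((i + 0.5) * M_PI / N_CHEB);

  /* three-term recurrence for Chebyshev polynomials of the first kind */
  for (int i = 0; i < N_CHEB; i++) {
    T[i][0] = 1.0;
    T[i][1] = node[i];
    for (int k = 2; k < N_CHEB; k++)
      T[i][k] = 2.0 * node[i] * T[i][k - 1] - T[i][k - 2];
  }

  for (int i = 0; i < N_CHEB; i++)
    printf("x_%d = % .6f   T_%d(x_%d) = % .6f\n",
           i, node[i], N_CHEB - 1, i, T[i][N_CHEB - 1]);
  return 0;
}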
pi_spmd_final.c
/*
NAME:   PI SPMD final version without false sharing

This program numerically computes the integral of 4/(1+x*x) from 0 to 1.
The value of this integral is pi -- which is great since it gives us an
easy way to check the answer.

The program was parallelized using OpenMP and an SPMD algorithm. The
following OpenMP-specific lines were added:

(1) A line to include omp.h -- the include file that contains OpenMP's
    function prototypes and constants.
(2) A pragma that tells OpenMP to create a team of threads, with the
    loop index i private to each thread.
(3) Two function calls: one to get the thread ID (ranging from 0 to one
    less than the number of threads), and the other returning the total
    number of threads.
(4) A "single" construct so only one thread records the number of threads.
(5) A cyclic distribution of the loop by changing the loop control
    expressions to run from the thread ID, incremented by the number of
    threads. Local sums are accumulated into a private scalar.
(6) A critical construct so the threads add their local sums into the
    single global sum one at a time.

Note that this program avoids the false sharing problem of earlier SPMD
versions by storing each partial sum in a private scalar rather than in
a shared array indexed by thread ID.

History: Written by Tim Mattson, 11/99.
*/
#include <stdio.h>
#include <omp.h>

#define MAX_THREADS 4

static long num_steps = 100000000;
double step;

int main ()
{
  int i, j, nthreads;
  double pi, full_sum = 0.0;
  double start_time, run_time;

  step = 1.0 / (double) num_steps;

  for (j = 1; j <= MAX_THREADS; j++) {
    omp_set_num_threads(j);
    full_sum = 0.0;
    start_time = omp_get_wtime();

    #pragma omp parallel private(i)
    {
      int id = omp_get_thread_num();
      int numthreads = omp_get_num_threads();
      double x;
      double partial_sum = 0;

      #pragma omp single
      nthreads = numthreads;

      /* Cyclic distribution of iterations; each thread accumulates into
         its own private partial_sum, so there is no false sharing. */
      for (i = id; i < num_steps; i += numthreads) {
        x = (i + 0.5) * step;
        partial_sum += 4.0 / (1.0 + x*x);
      }

      /* Combine the partial sums one thread at a time. */
      #pragma omp critical
      full_sum += partial_sum;
    }

    pi = step * full_sum;
    run_time = omp_get_wtime() - start_time;
    printf("\n pi is %f in %f seconds with %d threads\n", pi, run_time, nthreads);
  }
  return 0;
}
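For comparison, the same integral can be written with a worksharing loop and a reduction clause, letting the OpenMP runtime distribute the iterations and combine the per-thread sums instead of doing it by hand with thread IDs and a critical section. A compact sketch (shown only for contrast with the SPMD version above):

#include <stdio.h>
#include <omp.h>

static long num_steps = 100000000;

int main(void)
{
  double step = 1.0 / (double) num_steps;
  double sum = 0.0;

  /* reduction(+:sum) gives every thread a private copy of sum and adds
     the copies together at the end of the loop. */
  #pragma omp parallel for reduction(+:sum)
  for (long i = 0; i < num_steps; i++) {
    double x = (i + 0.5) * step;
    sum += 4.0 / (1.0 + x * x);
  }

  printf("pi = %f\n", step * sum);
  return 0;
}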
dds.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD DDDD SSSSS % % D D D D SS % % D D D D SSS % % D D D D SS % % DDDD DDDD SSSSS % % % % % % Read/Write Microsoft Direct Draw Surface Image Format % % % % Software Design % % Bianca van Schaik % % March 2008 % % Dirk Lemstra % % September 2013 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/module.h" #include "MagickCore/transform.h" /* Definitions */ #define DDSD_CAPS 0x00000001 #define DDSD_HEIGHT 0x00000002 #define DDSD_WIDTH 0x00000004 #define DDSD_PITCH 0x00000008 #define DDSD_PIXELFORMAT 0x00001000 #define DDSD_MIPMAPCOUNT 0x00020000 #define DDSD_LINEARSIZE 0x00080000 #define DDSD_DEPTH 0x00800000 #define DDPF_ALPHAPIXELS 0x00000001 #define DDPF_FOURCC 0x00000004 #define DDPF_RGB 0x00000040 #define DDPF_LUMINANCE 0x00020000 #define FOURCC_DXT1 0x31545844 #define FOURCC_DXT3 0x33545844 #define FOURCC_DXT5 0x35545844 #define DDSCAPS_COMPLEX 0x00000008 #define DDSCAPS_TEXTURE 0x00001000 #define DDSCAPS_MIPMAP 0x00400000 #define DDSCAPS2_CUBEMAP 0x00000200 #define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400 #define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800 #define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000 #define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000 #define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000 #define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000 #define DDSCAPS2_VOLUME 0x00200000 #ifndef SIZE_MAX #define SIZE_MAX ((size_t) -1) #endif /* Structure declarations. 
*/ typedef struct _DDSPixelFormat { size_t flags, fourcc, rgb_bitcount, r_bitmask, g_bitmask, b_bitmask, alpha_bitmask; } DDSPixelFormat; typedef struct _DDSInfo { size_t flags, height, width, pitchOrLinearSize, depth, mipmapcount, ddscaps1, ddscaps2; DDSPixelFormat pixelformat; } DDSInfo; typedef struct _DDSColors { unsigned char r[4], g[4], b[4], a[4]; } DDSColors; typedef struct _DDSVector4 { float x, y, z, w; } DDSVector4; typedef struct _DDSVector3 { float x, y, z; } DDSVector3; typedef struct _DDSSourceBlock { unsigned char start, end, error; } DDSSourceBlock; typedef struct _DDSSingleColourLookup { DDSSourceBlock sources[2]; } DDSSingleColourLookup; typedef MagickBooleanType DDSDecoder(Image *, DDSInfo *, ExceptionInfo *); static const DDSSingleColourLookup DDSLookup_5_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 1 } } }, { { { 0, 0, 2 }, { 0, 1, 0 } } }, { { { 0, 0, 3 }, { 0, 1, 1 } } }, { { { 0, 0, 4 }, { 0, 2, 1 } } }, { { { 1, 0, 3 }, { 0, 2, 0 } } }, { { { 1, 0, 2 }, { 0, 2, 1 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 1, 2, 1 } } }, { { { 1, 0, 2 }, { 1, 2, 0 } } }, { { { 1, 0, 3 }, { 0, 4, 0 } } }, { { { 1, 0, 4 }, { 0, 5, 1 } } }, { { { 2, 0, 3 }, { 0, 5, 0 } } }, { { { 2, 0, 2 }, { 0, 5, 1 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 2, 3, 1 } } }, { { { 2, 0, 2 }, { 2, 3, 0 } } }, { { { 2, 0, 3 }, { 0, 7, 0 } } }, { { { 2, 0, 4 }, { 1, 6, 1 } } }, { { { 3, 0, 3 }, { 1, 6, 0 } } }, { { { 3, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 2 }, { 0, 10, 1 } } }, { { { 3, 0, 3 }, { 0, 10, 0 } } }, { { { 3, 0, 4 }, { 2, 7, 1 } } }, { { { 4, 0, 4 }, { 2, 7, 0 } } }, { { { 4, 0, 3 }, { 0, 11, 0 } } }, { { { 4, 0, 2 }, { 1, 10, 1 } } }, { { { 4, 0, 1 }, { 1, 10, 0 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 1 } } }, { { { 4, 0, 2 }, { 0, 13, 0 } } }, { { { 4, 0, 3 }, { 0, 13, 1 } } }, { { { 4, 0, 4 }, { 0, 14, 1 } } }, { { { 5, 0, 3 }, { 0, 14, 0 } } }, { { { 5, 0, 2 }, { 2, 11, 1 } } }, { { { 5, 0, 1 }, { 2, 11, 0 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 1, 14, 1 } } }, { { { 5, 0, 2 }, { 1, 14, 0 } } }, { { { 5, 0, 3 }, { 0, 16, 0 } } }, { { { 5, 0, 4 }, { 0, 17, 1 } } }, { { { 6, 0, 3 }, { 0, 17, 0 } } }, { { { 6, 0, 2 }, { 0, 17, 1 } } }, { { { 6, 0, 1 }, { 0, 18, 1 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 2, 15, 1 } } }, { { { 6, 0, 2 }, { 2, 15, 0 } } }, { { { 6, 0, 3 }, { 0, 19, 0 } } }, { { { 6, 0, 4 }, { 1, 18, 1 } } }, { { { 7, 0, 3 }, { 1, 18, 0 } } }, { { { 7, 0, 2 }, { 0, 20, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 2 }, { 0, 22, 1 } } }, { { { 7, 0, 3 }, { 0, 22, 0 } } }, { { { 7, 0, 4 }, { 2, 19, 1 } } }, { { { 8, 0, 4 }, { 2, 19, 0 } } }, { { { 8, 0, 3 }, { 0, 23, 0 } } }, { { { 8, 0, 2 }, { 1, 22, 1 } } }, { { { 8, 0, 1 }, { 1, 22, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 1 } } }, { { { 8, 0, 2 }, { 0, 25, 0 } } }, { { { 8, 0, 3 }, { 0, 25, 1 } } }, { { { 8, 0, 4 }, { 0, 26, 1 } } }, { { { 9, 0, 3 }, { 0, 26, 0 } } }, { { { 9, 0, 2 }, { 2, 23, 1 } } }, { { { 9, 0, 1 }, { 2, 23, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 1, 26, 1 } } }, { { { 9, 0, 2 }, { 1, 26, 0 } } }, { { { 9, 0, 3 }, { 0, 28, 0 } } }, { { { 9, 0, 4 }, { 0, 29, 1 } } }, { { { 10, 0, 3 }, { 0, 
29, 0 } } }, { { { 10, 0, 2 }, { 0, 29, 1 } } }, { { { 10, 0, 1 }, { 0, 30, 1 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 2, 27, 1 } } }, { { { 10, 0, 2 }, { 2, 27, 0 } } }, { { { 10, 0, 3 }, { 0, 31, 0 } } }, { { { 10, 0, 4 }, { 1, 30, 1 } } }, { { { 11, 0, 3 }, { 1, 30, 0 } } }, { { { 11, 0, 2 }, { 4, 24, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 0 }, { 1, 31, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 2 }, { 2, 30, 1 } } }, { { { 11, 0, 3 }, { 2, 30, 0 } } }, { { { 11, 0, 4 }, { 2, 31, 1 } } }, { { { 12, 0, 4 }, { 2, 31, 0 } } }, { { { 12, 0, 3 }, { 4, 27, 0 } } }, { { { 12, 0, 2 }, { 3, 30, 1 } } }, { { { 12, 0, 1 }, { 3, 30, 0 } } }, { { { 12, 0, 0 }, { 4, 28, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 1 } } }, { { { 12, 0, 2 }, { 3, 31, 0 } } }, { { { 12, 0, 3 }, { 3, 31, 1 } } }, { { { 12, 0, 4 }, { 4, 30, 1 } } }, { { { 13, 0, 3 }, { 4, 30, 0 } } }, { { { 13, 0, 2 }, { 6, 27, 1 } } }, { { { 13, 0, 1 }, { 6, 27, 0 } } }, { { { 13, 0, 0 }, { 4, 31, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 1 } } }, { { { 13, 0, 2 }, { 5, 30, 0 } } }, { { { 13, 0, 3 }, { 8, 24, 0 } } }, { { { 13, 0, 4 }, { 5, 31, 1 } } }, { { { 14, 0, 3 }, { 5, 31, 0 } } }, { { { 14, 0, 2 }, { 5, 31, 1 } } }, { { { 14, 0, 1 }, { 6, 30, 1 } } }, { { { 14, 0, 0 }, { 6, 30, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 1 } } }, { { { 14, 0, 2 }, { 6, 31, 0 } } }, { { { 14, 0, 3 }, { 8, 27, 0 } } }, { { { 14, 0, 4 }, { 7, 30, 1 } } }, { { { 15, 0, 3 }, { 7, 30, 0 } } }, { { { 15, 0, 2 }, { 8, 28, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 0 }, { 7, 31, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 2 }, { 8, 30, 1 } } }, { { { 15, 0, 3 }, { 8, 30, 0 } } }, { { { 15, 0, 4 }, { 10, 27, 1 } } }, { { { 16, 0, 4 }, { 10, 27, 0 } } }, { { { 16, 0, 3 }, { 8, 31, 0 } } }, { { { 16, 0, 2 }, { 9, 30, 1 } } }, { { { 16, 0, 1 }, { 9, 30, 0 } } }, { { { 16, 0, 0 }, { 12, 24, 0 } } }, { { { 16, 0, 1 }, { 9, 31, 1 } } }, { { { 16, 0, 2 }, { 9, 31, 0 } } }, { { { 16, 0, 3 }, { 9, 31, 1 } } }, { { { 16, 0, 4 }, { 10, 30, 1 } } }, { { { 17, 0, 3 }, { 10, 30, 0 } } }, { { { 17, 0, 2 }, { 10, 31, 1 } } }, { { { 17, 0, 1 }, { 10, 31, 0 } } }, { { { 17, 0, 0 }, { 12, 27, 0 } } }, { { { 17, 0, 1 }, { 11, 30, 1 } } }, { { { 17, 0, 2 }, { 11, 30, 0 } } }, { { { 17, 0, 3 }, { 12, 28, 0 } } }, { { { 17, 0, 4 }, { 11, 31, 1 } } }, { { { 18, 0, 3 }, { 11, 31, 0 } } }, { { { 18, 0, 2 }, { 11, 31, 1 } } }, { { { 18, 0, 1 }, { 12, 30, 1 } } }, { { { 18, 0, 0 }, { 12, 30, 0 } } }, { { { 18, 0, 1 }, { 14, 27, 1 } } }, { { { 18, 0, 2 }, { 14, 27, 0 } } }, { { { 18, 0, 3 }, { 12, 31, 0 } } }, { { { 18, 0, 4 }, { 13, 30, 1 } } }, { { { 19, 0, 3 }, { 13, 30, 0 } } }, { { { 19, 0, 2 }, { 16, 24, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 0 }, { 13, 31, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 2 }, { 14, 30, 1 } } }, { { { 19, 0, 3 }, { 14, 30, 0 } } }, { { { 19, 0, 4 }, { 14, 31, 1 } } }, { { { 20, 0, 4 }, { 14, 31, 0 } } }, { { { 20, 0, 3 }, { 16, 27, 0 } } }, { { { 20, 0, 2 }, { 15, 30, 1 } } }, { { { 20, 0, 1 }, { 15, 30, 0 } } }, { { { 20, 0, 0 }, { 16, 28, 0 } } }, { { { 20, 0, 1 }, { 15, 31, 1 } } }, { { { 20, 0, 2 }, { 15, 31, 0 } } }, { { { 20, 0, 3 }, { 15, 31, 1 } } }, { { { 20, 0, 4 }, { 16, 30, 1 } } }, { { { 21, 0, 3 }, { 16, 30, 0 } } }, { { { 21, 0, 2 }, { 18, 27, 1 } } }, { { { 21, 0, 1 }, { 18, 27, 0 } } }, { { { 21, 0, 0 }, { 16, 31, 0 } } }, { { { 21, 0, 1 }, { 17, 30, 1 } } }, { { { 21, 0, 2 }, { 17, 30, 0 } } }, { { { 21, 0, 3 }, { 20, 24, 0 } } }, { { { 
21, 0, 4 }, { 17, 31, 1 } } }, { { { 22, 0, 3 }, { 17, 31, 0 } } }, { { { 22, 0, 2 }, { 17, 31, 1 } } }, { { { 22, 0, 1 }, { 18, 30, 1 } } }, { { { 22, 0, 0 }, { 18, 30, 0 } } }, { { { 22, 0, 1 }, { 18, 31, 1 } } }, { { { 22, 0, 2 }, { 18, 31, 0 } } }, { { { 22, 0, 3 }, { 20, 27, 0 } } }, { { { 22, 0, 4 }, { 19, 30, 1 } } }, { { { 23, 0, 3 }, { 19, 30, 0 } } }, { { { 23, 0, 2 }, { 20, 28, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 0 }, { 19, 31, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 2 }, { 20, 30, 1 } } }, { { { 23, 0, 3 }, { 20, 30, 0 } } }, { { { 23, 0, 4 }, { 22, 27, 1 } } }, { { { 24, 0, 4 }, { 22, 27, 0 } } }, { { { 24, 0, 3 }, { 20, 31, 0 } } }, { { { 24, 0, 2 }, { 21, 30, 1 } } }, { { { 24, 0, 1 }, { 21, 30, 0 } } }, { { { 24, 0, 0 }, { 24, 24, 0 } } }, { { { 24, 0, 1 }, { 21, 31, 1 } } }, { { { 24, 0, 2 }, { 21, 31, 0 } } }, { { { 24, 0, 3 }, { 21, 31, 1 } } }, { { { 24, 0, 4 }, { 22, 30, 1 } } }, { { { 25, 0, 3 }, { 22, 30, 0 } } }, { { { 25, 0, 2 }, { 22, 31, 1 } } }, { { { 25, 0, 1 }, { 22, 31, 0 } } }, { { { 25, 0, 0 }, { 24, 27, 0 } } }, { { { 25, 0, 1 }, { 23, 30, 1 } } }, { { { 25, 0, 2 }, { 23, 30, 0 } } }, { { { 25, 0, 3 }, { 24, 28, 0 } } }, { { { 25, 0, 4 }, { 23, 31, 1 } } }, { { { 26, 0, 3 }, { 23, 31, 0 } } }, { { { 26, 0, 2 }, { 23, 31, 1 } } }, { { { 26, 0, 1 }, { 24, 30, 1 } } }, { { { 26, 0, 0 }, { 24, 30, 0 } } }, { { { 26, 0, 1 }, { 26, 27, 1 } } }, { { { 26, 0, 2 }, { 26, 27, 0 } } }, { { { 26, 0, 3 }, { 24, 31, 0 } } }, { { { 26, 0, 4 }, { 25, 30, 1 } } }, { { { 27, 0, 3 }, { 25, 30, 0 } } }, { { { 27, 0, 2 }, { 28, 24, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 0 }, { 25, 31, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 2 }, { 26, 30, 1 } } }, { { { 27, 0, 3 }, { 26, 30, 0 } } }, { { { 27, 0, 4 }, { 26, 31, 1 } } }, { { { 28, 0, 4 }, { 26, 31, 0 } } }, { { { 28, 0, 3 }, { 28, 27, 0 } } }, { { { 28, 0, 2 }, { 27, 30, 1 } } }, { { { 28, 0, 1 }, { 27, 30, 0 } } }, { { { 28, 0, 0 }, { 28, 28, 0 } } }, { { { 28, 0, 1 }, { 27, 31, 1 } } }, { { { 28, 0, 2 }, { 27, 31, 0 } } }, { { { 28, 0, 3 }, { 27, 31, 1 } } }, { { { 28, 0, 4 }, { 28, 30, 1 } } }, { { { 29, 0, 3 }, { 28, 30, 0 } } }, { { { 29, 0, 2 }, { 30, 27, 1 } } }, { { { 29, 0, 1 }, { 30, 27, 0 } } }, { { { 29, 0, 0 }, { 28, 31, 0 } } }, { { { 29, 0, 1 }, { 29, 30, 1 } } }, { { { 29, 0, 2 }, { 29, 30, 0 } } }, { { { 29, 0, 3 }, { 29, 30, 1 } } }, { { { 29, 0, 4 }, { 29, 31, 1 } } }, { { { 30, 0, 3 }, { 29, 31, 0 } } }, { { { 30, 0, 2 }, { 29, 31, 1 } } }, { { { 30, 0, 1 }, { 30, 30, 1 } } }, { { { 30, 0, 0 }, { 30, 30, 0 } } }, { { { 30, 0, 1 }, { 30, 31, 1 } } }, { { { 30, 0, 2 }, { 30, 31, 0 } } }, { { { 30, 0, 3 }, { 30, 31, 1 } } }, { { { 30, 0, 4 }, { 31, 30, 1 } } }, { { { 31, 0, 3 }, { 31, 30, 0 } } }, { { { 31, 0, 2 }, { 31, 30, 1 } } }, { { { 31, 0, 1 }, { 31, 31, 1 } } }, { { { 31, 0, 0 }, { 31, 31, 0 } } } }; static const DDSSingleColourLookup DDSLookup_6_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 0 } } }, { { { 0, 0, 2 }, { 0, 2, 0 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 0, 4, 0 } } }, { { { 1, 0, 2 }, { 0, 5, 0 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 0, 7, 0 } } }, { { { 2, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 10, 0 } } }, { { { 3, 0, 2 }, { 0, 11, 0 } } }, { { { 4, 0, 1 }, { 0, 12, 1 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } 
}, { { { 4, 0, 1 }, { 0, 13, 0 } } }, { { { 4, 0, 2 }, { 0, 14, 0 } } }, { { { 5, 0, 1 }, { 0, 15, 1 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 0, 16, 0 } } }, { { { 5, 0, 2 }, { 1, 15, 0 } } }, { { { 6, 0, 1 }, { 0, 17, 0 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 0, 19, 0 } } }, { { { 6, 0, 2 }, { 3, 14, 0 } } }, { { { 7, 0, 1 }, { 0, 20, 0 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 22, 0 } } }, { { { 7, 0, 2 }, { 4, 15, 0 } } }, { { { 8, 0, 1 }, { 0, 23, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 0 } } }, { { { 8, 0, 2 }, { 6, 14, 0 } } }, { { { 9, 0, 1 }, { 0, 26, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 0, 28, 0 } } }, { { { 9, 0, 2 }, { 7, 15, 0 } } }, { { { 10, 0, 1 }, { 0, 29, 0 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 0, 31, 0 } } }, { { { 10, 0, 2 }, { 9, 14, 0 } } }, { { { 11, 0, 1 }, { 0, 32, 0 } } }, { { { 11, 0, 0 }, { 0, 33, 0 } } }, { { { 11, 0, 1 }, { 2, 30, 0 } } }, { { { 11, 0, 2 }, { 0, 34, 0 } } }, { { { 12, 0, 1 }, { 0, 35, 0 } } }, { { { 12, 0, 0 }, { 0, 36, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 0 } } }, { { { 12, 0, 2 }, { 0, 37, 0 } } }, { { { 13, 0, 1 }, { 0, 38, 0 } } }, { { { 13, 0, 0 }, { 0, 39, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 0 } } }, { { { 13, 0, 2 }, { 0, 40, 0 } } }, { { { 14, 0, 1 }, { 0, 41, 0 } } }, { { { 14, 0, 0 }, { 0, 42, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 0 } } }, { { { 14, 0, 2 }, { 0, 43, 0 } } }, { { { 15, 0, 1 }, { 0, 44, 0 } } }, { { { 15, 0, 0 }, { 0, 45, 0 } } }, { { { 15, 0, 1 }, { 8, 30, 0 } } }, { { { 15, 0, 2 }, { 0, 46, 0 } } }, { { { 16, 0, 2 }, { 0, 47, 0 } } }, { { { 16, 0, 1 }, { 1, 46, 0 } } }, { { { 16, 0, 0 }, { 0, 48, 0 } } }, { { { 16, 0, 1 }, { 0, 49, 0 } } }, { { { 16, 0, 2 }, { 0, 50, 0 } } }, { { { 17, 0, 1 }, { 2, 47, 0 } } }, { { { 17, 0, 0 }, { 0, 51, 0 } } }, { { { 17, 0, 1 }, { 0, 52, 0 } } }, { { { 17, 0, 2 }, { 0, 53, 0 } } }, { { { 18, 0, 1 }, { 4, 46, 0 } } }, { { { 18, 0, 0 }, { 0, 54, 0 } } }, { { { 18, 0, 1 }, { 0, 55, 0 } } }, { { { 18, 0, 2 }, { 0, 56, 0 } } }, { { { 19, 0, 1 }, { 5, 47, 0 } } }, { { { 19, 0, 0 }, { 0, 57, 0 } } }, { { { 19, 0, 1 }, { 0, 58, 0 } } }, { { { 19, 0, 2 }, { 0, 59, 0 } } }, { { { 20, 0, 1 }, { 7, 46, 0 } } }, { { { 20, 0, 0 }, { 0, 60, 0 } } }, { { { 20, 0, 1 }, { 0, 61, 0 } } }, { { { 20, 0, 2 }, { 0, 62, 0 } } }, { { { 21, 0, 1 }, { 8, 47, 0 } } }, { { { 21, 0, 0 }, { 0, 63, 0 } } }, { { { 21, 0, 1 }, { 1, 62, 0 } } }, { { { 21, 0, 2 }, { 1, 63, 0 } } }, { { { 22, 0, 1 }, { 10, 46, 0 } } }, { { { 22, 0, 0 }, { 2, 62, 0 } } }, { { { 22, 0, 1 }, { 2, 63, 0 } } }, { { { 22, 0, 2 }, { 3, 62, 0 } } }, { { { 23, 0, 1 }, { 11, 47, 0 } } }, { { { 23, 0, 0 }, { 3, 63, 0 } } }, { { { 23, 0, 1 }, { 4, 62, 0 } } }, { { { 23, 0, 2 }, { 4, 63, 0 } } }, { { { 24, 0, 1 }, { 13, 46, 0 } } }, { { { 24, 0, 0 }, { 5, 62, 0 } } }, { { { 24, 0, 1 }, { 5, 63, 0 } } }, { { { 24, 0, 2 }, { 6, 62, 0 } } }, { { { 25, 0, 1 }, { 14, 47, 0 } } }, { { { 25, 0, 0 }, { 6, 63, 0 } } }, { { { 25, 0, 1 }, { 7, 62, 0 } } }, { { { 25, 0, 2 }, { 7, 63, 0 } } }, { { { 26, 0, 1 }, { 16, 45, 0 } } }, { { { 26, 0, 0 }, { 8, 62, 0 } } }, { { { 26, 0, 1 }, { 8, 63, 0 } } }, { { { 26, 0, 2 }, { 9, 62, 0 } } }, { { { 27, 0, 1 }, { 16, 48, 0 } } }, { { { 27, 0, 0 }, { 9, 63, 0 } } }, { { { 27, 0, 1 }, { 10, 62, 0 } } }, { { { 27, 0, 2 }, { 10, 63, 0 } } }, { { { 28, 0, 1 }, { 16, 51, 0 } } }, { { { 28, 0, 0 }, { 11, 62, 0 } } }, { { { 28, 0, 1 }, { 11, 63, 0 } } }, { { { 28, 0, 2 }, { 12, 62, 0 } } 
}, { { { 29, 0, 1 }, { 16, 54, 0 } } }, { { { 29, 0, 0 }, { 12, 63, 0 } } }, { { { 29, 0, 1 }, { 13, 62, 0 } } }, { { { 29, 0, 2 }, { 13, 63, 0 } } }, { { { 30, 0, 1 }, { 16, 57, 0 } } }, { { { 30, 0, 0 }, { 14, 62, 0 } } }, { { { 30, 0, 1 }, { 14, 63, 0 } } }, { { { 30, 0, 2 }, { 15, 62, 0 } } }, { { { 31, 0, 1 }, { 16, 60, 0 } } }, { { { 31, 0, 0 }, { 15, 63, 0 } } }, { { { 31, 0, 1 }, { 24, 46, 0 } } }, { { { 31, 0, 2 }, { 16, 62, 0 } } }, { { { 32, 0, 2 }, { 16, 63, 0 } } }, { { { 32, 0, 1 }, { 17, 62, 0 } } }, { { { 32, 0, 0 }, { 25, 47, 0 } } }, { { { 32, 0, 1 }, { 17, 63, 0 } } }, { { { 32, 0, 2 }, { 18, 62, 0 } } }, { { { 33, 0, 1 }, { 18, 63, 0 } } }, { { { 33, 0, 0 }, { 27, 46, 0 } } }, { { { 33, 0, 1 }, { 19, 62, 0 } } }, { { { 33, 0, 2 }, { 19, 63, 0 } } }, { { { 34, 0, 1 }, { 20, 62, 0 } } }, { { { 34, 0, 0 }, { 28, 47, 0 } } }, { { { 34, 0, 1 }, { 20, 63, 0 } } }, { { { 34, 0, 2 }, { 21, 62, 0 } } }, { { { 35, 0, 1 }, { 21, 63, 0 } } }, { { { 35, 0, 0 }, { 30, 46, 0 } } }, { { { 35, 0, 1 }, { 22, 62, 0 } } }, { { { 35, 0, 2 }, { 22, 63, 0 } } }, { { { 36, 0, 1 }, { 23, 62, 0 } } }, { { { 36, 0, 0 }, { 31, 47, 0 } } }, { { { 36, 0, 1 }, { 23, 63, 0 } } }, { { { 36, 0, 2 }, { 24, 62, 0 } } }, { { { 37, 0, 1 }, { 24, 63, 0 } } }, { { { 37, 0, 0 }, { 32, 47, 0 } } }, { { { 37, 0, 1 }, { 25, 62, 0 } } }, { { { 37, 0, 2 }, { 25, 63, 0 } } }, { { { 38, 0, 1 }, { 26, 62, 0 } } }, { { { 38, 0, 0 }, { 32, 50, 0 } } }, { { { 38, 0, 1 }, { 26, 63, 0 } } }, { { { 38, 0, 2 }, { 27, 62, 0 } } }, { { { 39, 0, 1 }, { 27, 63, 0 } } }, { { { 39, 0, 0 }, { 32, 53, 0 } } }, { { { 39, 0, 1 }, { 28, 62, 0 } } }, { { { 39, 0, 2 }, { 28, 63, 0 } } }, { { { 40, 0, 1 }, { 29, 62, 0 } } }, { { { 40, 0, 0 }, { 32, 56, 0 } } }, { { { 40, 0, 1 }, { 29, 63, 0 } } }, { { { 40, 0, 2 }, { 30, 62, 0 } } }, { { { 41, 0, 1 }, { 30, 63, 0 } } }, { { { 41, 0, 0 }, { 32, 59, 0 } } }, { { { 41, 0, 1 }, { 31, 62, 0 } } }, { { { 41, 0, 2 }, { 31, 63, 0 } } }, { { { 42, 0, 1 }, { 32, 61, 0 } } }, { { { 42, 0, 0 }, { 32, 62, 0 } } }, { { { 42, 0, 1 }, { 32, 63, 0 } } }, { { { 42, 0, 2 }, { 41, 46, 0 } } }, { { { 43, 0, 1 }, { 33, 62, 0 } } }, { { { 43, 0, 0 }, { 33, 63, 0 } } }, { { { 43, 0, 1 }, { 34, 62, 0 } } }, { { { 43, 0, 2 }, { 42, 47, 0 } } }, { { { 44, 0, 1 }, { 34, 63, 0 } } }, { { { 44, 0, 0 }, { 35, 62, 0 } } }, { { { 44, 0, 1 }, { 35, 63, 0 } } }, { { { 44, 0, 2 }, { 44, 46, 0 } } }, { { { 45, 0, 1 }, { 36, 62, 0 } } }, { { { 45, 0, 0 }, { 36, 63, 0 } } }, { { { 45, 0, 1 }, { 37, 62, 0 } } }, { { { 45, 0, 2 }, { 45, 47, 0 } } }, { { { 46, 0, 1 }, { 37, 63, 0 } } }, { { { 46, 0, 0 }, { 38, 62, 0 } } }, { { { 46, 0, 1 }, { 38, 63, 0 } } }, { { { 46, 0, 2 }, { 47, 46, 0 } } }, { { { 47, 0, 1 }, { 39, 62, 0 } } }, { { { 47, 0, 0 }, { 39, 63, 0 } } }, { { { 47, 0, 1 }, { 40, 62, 0 } } }, { { { 47, 0, 2 }, { 48, 46, 0 } } }, { { { 48, 0, 2 }, { 40, 63, 0 } } }, { { { 48, 0, 1 }, { 41, 62, 0 } } }, { { { 48, 0, 0 }, { 41, 63, 0 } } }, { { { 48, 0, 1 }, { 48, 49, 0 } } }, { { { 48, 0, 2 }, { 42, 62, 0 } } }, { { { 49, 0, 1 }, { 42, 63, 0 } } }, { { { 49, 0, 0 }, { 43, 62, 0 } } }, { { { 49, 0, 1 }, { 48, 52, 0 } } }, { { { 49, 0, 2 }, { 43, 63, 0 } } }, { { { 50, 0, 1 }, { 44, 62, 0 } } }, { { { 50, 0, 0 }, { 44, 63, 0 } } }, { { { 50, 0, 1 }, { 48, 55, 0 } } }, { { { 50, 0, 2 }, { 45, 62, 0 } } }, { { { 51, 0, 1 }, { 45, 63, 0 } } }, { { { 51, 0, 0 }, { 46, 62, 0 } } }, { { { 51, 0, 1 }, { 48, 58, 0 } } }, { { { 51, 0, 2 }, { 46, 63, 0 } } }, { { { 52, 0, 1 }, { 47, 62, 0 } } }, { { { 52, 0, 0 }, { 47, 63, 0 } } }, 
{ { { 52, 0, 1 }, { 48, 61, 0 } } }, { { { 52, 0, 2 }, { 48, 62, 0 } } }, { { { 53, 0, 1 }, { 56, 47, 0 } } }, { { { 53, 0, 0 }, { 48, 63, 0 } } }, { { { 53, 0, 1 }, { 49, 62, 0 } } }, { { { 53, 0, 2 }, { 49, 63, 0 } } }, { { { 54, 0, 1 }, { 58, 46, 0 } } }, { { { 54, 0, 0 }, { 50, 62, 0 } } }, { { { 54, 0, 1 }, { 50, 63, 0 } } }, { { { 54, 0, 2 }, { 51, 62, 0 } } }, { { { 55, 0, 1 }, { 59, 47, 0 } } }, { { { 55, 0, 0 }, { 51, 63, 0 } } }, { { { 55, 0, 1 }, { 52, 62, 0 } } }, { { { 55, 0, 2 }, { 52, 63, 0 } } }, { { { 56, 0, 1 }, { 61, 46, 0 } } }, { { { 56, 0, 0 }, { 53, 62, 0 } } }, { { { 56, 0, 1 }, { 53, 63, 0 } } }, { { { 56, 0, 2 }, { 54, 62, 0 } } }, { { { 57, 0, 1 }, { 62, 47, 0 } } }, { { { 57, 0, 0 }, { 54, 63, 0 } } }, { { { 57, 0, 1 }, { 55, 62, 0 } } }, { { { 57, 0, 2 }, { 55, 63, 0 } } }, { { { 58, 0, 1 }, { 56, 62, 1 } } }, { { { 58, 0, 0 }, { 56, 62, 0 } } }, { { { 58, 0, 1 }, { 56, 63, 0 } } }, { { { 58, 0, 2 }, { 57, 62, 0 } } }, { { { 59, 0, 1 }, { 57, 63, 1 } } }, { { { 59, 0, 0 }, { 57, 63, 0 } } }, { { { 59, 0, 1 }, { 58, 62, 0 } } }, { { { 59, 0, 2 }, { 58, 63, 0 } } }, { { { 60, 0, 1 }, { 59, 62, 1 } } }, { { { 60, 0, 0 }, { 59, 62, 0 } } }, { { { 60, 0, 1 }, { 59, 63, 0 } } }, { { { 60, 0, 2 }, { 60, 62, 0 } } }, { { { 61, 0, 1 }, { 60, 63, 1 } } }, { { { 61, 0, 0 }, { 60, 63, 0 } } }, { { { 61, 0, 1 }, { 61, 62, 0 } } }, { { { 61, 0, 2 }, { 61, 63, 0 } } }, { { { 62, 0, 1 }, { 62, 62, 1 } } }, { { { 62, 0, 0 }, { 62, 62, 0 } } }, { { { 62, 0, 1 }, { 62, 63, 0 } } }, { { { 62, 0, 2 }, { 63, 62, 0 } } }, { { { 63, 0, 1 }, { 63, 63, 1 } } }, { { { 63, 0, 0 }, { 63, 63, 0 } } } }; static const DDSSingleColourLookup* DDS_LOOKUP[] = { DDSLookup_5_4, DDSLookup_6_4, DDSLookup_5_4 }; /* Macros */ #define C565_r(x) (((x) & 0xF800) >> 11) #define C565_g(x) (((x) & 0x07E0) >> 5) #define C565_b(x) ((x) & 0x001F) #define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2)) #define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4)) #define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2)) #define DIV2(x) ((x) > 1 ? 
((x) >> 1) : 1) #define FixRange(min, max, steps) \ if (min > max) \ min = max; \ if ((ssize_t) max - min < steps) \ max = MagickMin(min + steps, 255); \ if ((ssize_t) max - min < steps) \ min = MagickMax(0, (ssize_t) max - steps) #define Dot(left, right) (left.x*right.x) + (left.y*right.y) + (left.z*right.z) #define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \ = value #define VectorInit3(vector, value) vector.x = vector.y = vector.z = value #define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \ g && mask.b_bitmask == b && mask.alpha_bitmask == a) /* Forward declarations */ /* Forward declarations */ static MagickBooleanType ConstructOrdering(const size_t,const DDSVector4 *,const DDSVector3, DDSVector4 *, DDSVector4 *, unsigned char *, size_t), ReadDDSInfo(Image *,DDSInfo *), ReadDXT1(Image *,DDSInfo *,ExceptionInfo *), ReadDXT3(Image *,DDSInfo *,ExceptionInfo *), ReadDXT5(Image *,DDSInfo *,ExceptionInfo *), ReadUncompressedRGB(Image *,DDSInfo *,ExceptionInfo *), ReadUncompressedRGBA(Image *,DDSInfo *,ExceptionInfo *), SkipDXTMipmaps(Image *,DDSInfo *,int,ExceptionInfo *), SkipRGBMipmaps(Image *,DDSInfo *,int,ExceptionInfo *), WriteDDSImage(const ImageInfo *,Image *,ExceptionInfo *), WriteMipmaps(Image *,const size_t,const size_t,const size_t, const MagickBooleanType,const MagickBooleanType,ExceptionInfo *); static void RemapIndices(const ssize_t *,const unsigned char *,unsigned char *), WriteDDSInfo(Image *,const size_t,const size_t,const size_t), WriteFourCC(Image *,const size_t,const MagickBooleanType, const MagickBooleanType,ExceptionInfo *), WriteImageData(Image *,const size_t,const size_t,const MagickBooleanType, const MagickBooleanType,ExceptionInfo *), WriteIndices(Image *,const DDSVector3,const DDSVector3,unsigned char *), WriteSingleColorFit(Image *,const DDSVector4 *,const ssize_t *), WriteUncompressed(Image *,ExceptionInfo *); static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x + right.x; destination->y = left.y + right.y; destination->z = left.z + right.z; destination->w = left.w + right.w; } static inline void VectorClamp(DDSVector4 *value) { value->x = MagickMin(1.0f,MagickMax(0.0f,value->x)); value->y = MagickMin(1.0f,MagickMax(0.0f,value->y)); value->z = MagickMin(1.0f,MagickMax(0.0f,value->z)); value->w = MagickMin(1.0f,MagickMax(0.0f,value->w)); } static inline void VectorClamp3(DDSVector3 *value) { value->x = MagickMin(1.0f,MagickMax(0.0f,value->x)); value->y = MagickMin(1.0f,MagickMax(0.0f,value->y)); value->z = MagickMin(1.0f,MagickMax(0.0f,value->z)); } static inline void VectorCopy43(const DDSVector4 source, DDSVector3 *destination) { destination->x = source.x; destination->y = source.y; destination->z = source.z; } static inline void VectorCopy44(const DDSVector4 source, DDSVector4 *destination) { destination->x = source.x; destination->y = source.y; destination->z = source.z; destination->w = source.w; } static inline void VectorNegativeMultiplySubtract(const DDSVector4 a, const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination) { destination->x = c.x - (a.x * b.x); destination->y = c.y - (a.y * b.y); destination->z = c.z - (a.z * b.z); destination->w = c.w - (a.w * b.w); } static inline void VectorMultiply(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x * right.x; destination->y = left.y * right.y; destination->z = left.z * right.z; destination->w = left.w * right.w; } static inline 
void VectorMultiply3(const DDSVector3 left, const DDSVector3 right, DDSVector3 *destination) { destination->x = left.x * right.x; destination->y = left.y * right.y; destination->z = left.z * right.z; } static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination) { destination->x = (a.x * b.x) + c.x; destination->y = (a.y * b.y) + c.y; destination->z = (a.z * b.z) + c.z; destination->w = (a.w * b.w) + c.w; } static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b, const DDSVector3 c, DDSVector3 *destination) { destination->x = (a.x * b.x) + c.x; destination->y = (a.y * b.y) + c.y; destination->z = (a.z * b.z) + c.z; } static inline void VectorReciprocal(const DDSVector4 value, DDSVector4 *destination) { destination->x = 1.0f / value.x; destination->y = 1.0f / value.y; destination->z = 1.0f / value.z; destination->w = 1.0f / value.w; } static inline void VectorSubtract(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x - right.x; destination->y = left.y - right.y; destination->z = left.z - right.z; destination->w = left.w - right.w; } static inline void VectorSubtract3(const DDSVector3 left, const DDSVector3 right, DDSVector3 *destination) { destination->x = left.x - right.x; destination->y = left.y - right.y; destination->z = left.z - right.z; } static inline void VectorTruncate(DDSVector4 *value) { value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x); value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y); value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z); value->w = value->w > 0.0f ? floor(value->w) : ceil(value->w); } static inline void VectorTruncate3(DDSVector3 *value) { value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x); value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y); value->z = value->z > 0.0f ? 
floor(value->z) : ceil(value->z); } static void CalculateColors(unsigned short c0, unsigned short c1, DDSColors *c, MagickBooleanType ignoreAlpha) { c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0; c->r[0] = (unsigned char) C565_red(c0); c->g[0] = (unsigned char) C565_green(c0); c->b[0] = (unsigned char) C565_blue(c0); c->r[1] = (unsigned char) C565_red(c1); c->g[1] = (unsigned char) C565_green(c1); c->b[1] = (unsigned char) C565_blue(c1); if (ignoreAlpha != MagickFalse || c0 > c1) { c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3); c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3); c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3); c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3); c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3); c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3); } else { c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2); c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2); c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2); c->r[3] = c->g[3] = c->b[3] = 0; c->a[3] = 255; } } static size_t CompressAlpha(const size_t min, const size_t max, const size_t steps, const ssize_t *alphas, unsigned char* indices) { unsigned char codes[8]; register ssize_t i; size_t error, index, j, least, value; codes[0] = (unsigned char) min; codes[1] = (unsigned char) max; codes[6] = 0; codes[7] = 255; for (i=1; i < (ssize_t) steps; i++) codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps); error = 0; for (i=0; i<16; i++) { if (alphas[i] == -1) { indices[i] = 0; continue; } value = alphas[i]; least = SIZE_MAX; index = 0; for (j=0; j<8; j++) { size_t dist; dist = value - (size_t)codes[j]; dist *= dist; if (dist < least) { least = dist; index = j; } } indices[i] = (unsigned char)index; error += least; } return error; } static void CompressClusterFit(const size_t count, const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle, const DDSVector4 metric, DDSVector3 *start, DDSVector3* end, unsigned char *indices) { DDSVector3 axis; DDSVector4 grid, gridrcp, half, onethird_onethird2, pointsWeights[16], two, twonineths, twothirds_twothirds2, xSumwSum; float bestError = 1e+37f; size_t bestIteration = 0, besti = 0, bestj = 0, bestk = 0, iterationIndex; ssize_t i; unsigned char *o, order[128], unordered[16]; VectorInit(half,0.5f); VectorInit(two,2.0f); VectorInit(onethird_onethird2,1.0f/3.0f); onethird_onethird2.w = 1.0f/9.0f; VectorInit(twothirds_twothirds2,2.0f/3.0f); twothirds_twothirds2.w = 4.0f/9.0f; VectorInit(twonineths,2.0f/9.0f); grid.x = 31.0f; grid.y = 63.0f; grid.z = 31.0f; grid.w = 0.0f; gridrcp.x = 1.0f/31.0f; gridrcp.y = 1.0f/63.0f; gridrcp.z = 1.0f/31.0f; gridrcp.w = 0.0f; xSumwSum.x = 0.0f; xSumwSum.y = 0.0f; xSumwSum.z = 0.0f; xSumwSum.w = 0.0f; ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0); for (iterationIndex = 0;;) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,1) \ num_threads(GetMagickResourceLimit(ThreadResource)) #endif for (i=0; i < (ssize_t) count; i++) { DDSVector4 part0, part1, part2; size_t ii, j, k, kmin; VectorInit(part0,0.0f); for(ii=0; ii < (size_t) i; ii++) VectorAdd(pointsWeights[ii],part0,&part0); VectorInit(part1,0.0f); for (j=(size_t) i;;) { if (j == 0) { VectorCopy44(pointsWeights[0],&part2); kmin = 1; } else { VectorInit(part2,0.0f); kmin = j; } for (k=kmin;;) { DDSVector4 a, alpha2_sum, alphax_sum, alphabeta_sum, b, beta2_sum, betax_sum, e1, e2, factor, part3; float error; VectorSubtract(xSumwSum,part2,&part3); 
VectorSubtract(part3,part1,&part3); VectorSubtract(part3,part0,&part3); VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum); VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum); VectorInit(alpha2_sum,alphax_sum.w); VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum); VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum); VectorInit(beta2_sum,betax_sum.w); VectorAdd(part1,part2,&alphabeta_sum); VectorInit(alphabeta_sum,alphabeta_sum.w); VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum); VectorMultiply(alpha2_sum,beta2_sum,&factor); VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor, &factor); VectorReciprocal(factor,&factor); VectorMultiply(alphax_sum,beta2_sum,&a); VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a); VectorMultiply(a,factor,&a); VectorMultiply(betax_sum,alpha2_sum,&b); VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b); VectorMultiply(b,factor,&b); VectorClamp(&a); VectorMultiplyAdd(grid,a,half,&a); VectorTruncate(&a); VectorMultiply(a,gridrcp,&a); VectorClamp(&b); VectorMultiplyAdd(grid,b,half,&b); VectorTruncate(&b); VectorMultiply(b,gridrcp,&b); VectorMultiply(b,b,&e1); VectorMultiply(e1,beta2_sum,&e1); VectorMultiply(a,a,&e2); VectorMultiplyAdd(e2,alpha2_sum,e1,&e1); VectorMultiply(a,b,&e2); VectorMultiply(e2,alphabeta_sum,&e2); VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2); VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2); VectorMultiplyAdd(two,e2,e1,&e2); VectorMultiply(e2,metric,&e2); error = e2.x + e2.y + e2.z; if (error < bestError) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (DDS_CompressClusterFit) #endif { if (error < bestError) { VectorCopy43(a,start); VectorCopy43(b,end); bestError = error; besti = i; bestj = j; bestk = k; bestIteration = iterationIndex; } } } if (k == count) break; VectorAdd(pointsWeights[k],part2,&part2); k++; } if (j == count) break; VectorAdd(pointsWeights[j],part1,&part1); j++; } } if (bestIteration != iterationIndex) break; iterationIndex++; if (iterationIndex == 8) break; VectorSubtract3(*end,*start,&axis); if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order, iterationIndex) == MagickFalse) break; } o = order + (16*bestIteration); for (i=0; i < (ssize_t) besti; i++) unordered[o[i]] = 0; for (i=besti; i < (ssize_t) bestj; i++) unordered[o[i]] = 2; for (i=bestj; i < (ssize_t) bestk; i++) unordered[o[i]] = 3; for (i=bestk; i < (ssize_t) count; i++) unordered[o[i]] = 1; RemapIndices(map,unordered,indices); } static void CompressRangeFit(const size_t count, const DDSVector4* points, const ssize_t *map, const DDSVector3 principle, const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end, unsigned char *indices) { float d, bestDist, max, min, val; DDSVector3 codes[4], grid, gridrcp, half, dist; register ssize_t i; size_t bestj, j; unsigned char closest[16]; VectorInit3(half,0.5f); grid.x = 31.0f; grid.y = 63.0f; grid.z = 31.0f; gridrcp.x = 1.0f/31.0f; gridrcp.y = 1.0f/63.0f; gridrcp.z = 1.0f/31.0f; if (count > 0) { VectorCopy43(points[0],start); VectorCopy43(points[0],end); min = max = Dot(points[0],principle); for (i=1; i < (ssize_t) count; i++) { val = Dot(points[i],principle); if (val < min) { VectorCopy43(points[i],start); min = val; } else if (val > max) { VectorCopy43(points[i],end); max = val; } } } VectorClamp3(start); VectorMultiplyAdd3(grid,*start,half,start); VectorTruncate3(start); VectorMultiply3(*start,gridrcp,start); VectorClamp3(end); VectorMultiplyAdd3(grid,*end,half,end); 
VectorTruncate3(end); VectorMultiply3(*end,gridrcp,end); codes[0] = *start; codes[1] = *end; codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f)); codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f)); codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f)); codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f)); codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f)); codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f)); for (i=0; i < (ssize_t) count; i++) { bestDist = 1e+37f; bestj = 0; for (j=0; j < 4; j++) { dist.x = (points[i].x - codes[j].x) * metric.x; dist.y = (points[i].y - codes[j].y) * metric.y; dist.z = (points[i].z - codes[j].z) * metric.z; d = Dot(dist,dist); if (d < bestDist) { bestDist = d; bestj = j; } } closest[i] = (unsigned char) bestj; } RemapIndices(map, closest, indices); } static void ComputeEndPoints(const DDSSingleColourLookup *lookup[], const unsigned char *color, DDSVector3 *start, DDSVector3 *end, unsigned char *index) { register ssize_t i; size_t c, maxError = SIZE_MAX; for (i=0; i < 2; i++) { const DDSSourceBlock* sources[3]; size_t error = 0; for (c=0; c < 3; c++) { sources[c] = &lookup[c][color[c]].sources[i]; error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error); } if (error > maxError) continue; start->x = (float) sources[0]->start / 31.0f; start->y = (float) sources[1]->start / 63.0f; start->z = (float) sources[2]->start / 31.0f; end->x = (float) sources[0]->end / 31.0f; end->y = (float) sources[1]->end / 63.0f; end->z = (float) sources[2]->end / 31.0f; *index = (unsigned char) (2*i); maxError = error; } } static void ComputePrincipleComponent(const float *covariance, DDSVector3 *principle) { DDSVector4 row0, row1, row2, v; register ssize_t i; row0.x = covariance[0]; row0.y = covariance[1]; row0.z = covariance[2]; row0.w = 0.0f; row1.x = covariance[1]; row1.y = covariance[3]; row1.z = covariance[4]; row1.w = 0.0f; row2.x = covariance[2]; row2.y = covariance[4]; row2.z = covariance[5]; row2.w = 0.0f; VectorInit(v,1.0f); for (i=0; i < 8; i++) { DDSVector4 w; float a; w.x = row0.x * v.x; w.y = row0.y * v.x; w.z = row0.z * v.x; w.w = row0.w * v.x; w.x = (row1.x * v.y) + w.x; w.y = (row1.y * v.y) + w.y; w.z = (row1.z * v.y) + w.z; w.w = (row1.w * v.y) + w.w; w.x = (row2.x * v.z) + w.x; w.y = (row2.y * v.z) + w.y; w.z = (row2.z * v.z) + w.z; w.w = (row2.w * v.z) + w.w; a = 1.0f / MagickMax(w.x,MagickMax(w.y,w.z)); v.x = w.x * a; v.y = w.y * a; v.z = w.z * a; v.w = w.w * a; } VectorCopy43(v,principle); } static void ComputeWeightedCovariance(const size_t count, const DDSVector4 *points, float *covariance) { DDSVector3 centroid; float total; size_t i; total = 0.0f; VectorInit3(centroid,0.0f); for (i=0; i < count; i++) { total += points[i].w; centroid.x += (points[i].x * points[i].w); centroid.y += (points[i].y * points[i].w); centroid.z += (points[i].z * points[i].w); } if( total > 1.192092896e-07F) { centroid.x /= total; centroid.y /= total; centroid.z /= total; } for (i=0; i < 6; i++) covariance[i] = 0.0f; for (i = 0; i < count; i++) { DDSVector3 a, b; a.x = points[i].x - centroid.x; a.y = points[i].y - centroid.y; a.z = points[i].z - centroid.z; b.x = points[i].w * a.x; b.y = points[i].w * a.y; b.z = points[i].w * a.z; covariance[0] += a.x*b.x; covariance[1] += a.x*b.y; covariance[2] += a.x*b.z; covariance[3] += a.y*b.y; covariance[4] += a.y*b.z; covariance[5] += a.z*b.z; } } static MagickBooleanType ConstructOrdering(const size_t count, const DDSVector4 *points, const DDSVector3 axis, 
DDSVector4 *pointsWeights, DDSVector4 *xSumwSum, unsigned char *order, size_t iteration) { float dps[16], f; register ssize_t i; size_t j; unsigned char c, *o, *p; o = order + (16*iteration); for (i=0; i < (ssize_t) count; i++) { dps[i] = Dot(points[i],axis); o[i] = (unsigned char)i; } for (i=0; i < (ssize_t) count; i++) { for (j=i; j > 0 && dps[j] < dps[j - 1]; j--) { f = dps[j]; dps[j] = dps[j - 1]; dps[j - 1] = f; c = o[j]; o[j] = o[j - 1]; o[j - 1] = c; } } for (i=0; i < (ssize_t) iteration; i++) { MagickBooleanType same; p = order + (16*i); same = MagickTrue; for (j=0; j < count; j++) { if (o[j] != p[j]) { same = MagickFalse; break; } } if (same != MagickFalse) return MagickFalse; } xSumwSum->x = 0; xSumwSum->y = 0; xSumwSum->z = 0; xSumwSum->w = 0; for (i=0; i < (ssize_t) count; i++) { DDSVector4 v; j = (size_t) o[i]; v.x = points[j].w * points[j].x; v.y = points[j].w * points[j].y; v.z = points[j].w * points[j].z; v.w = points[j].w * 1.0f; VectorCopy44(v,&pointsWeights[i]); VectorAdd(*xSumwSum,v,xSumwSum); } return MagickTrue; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s D D S % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsDDS() returns MagickTrue if the image format type, identified by the % magick string, is DDS. % % The format of the IsDDS method is: % % MagickBooleanType IsDDS(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((char *) magick,"DDS ", 4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadDDSImage() reads a DirectDraw Surface image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadDDSImage method is: % % Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: The image info. % % o exception: return any errors or warnings in this structure. % */ static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType status, cubemap = MagickFalse, volume = MagickFalse; CompressionType compression; DDSInfo dds_info; DDSDecoder *decoder; PixelTrait alpha_trait; size_t n, num_images; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Initialize image structure. 
*/ if (ReadDDSInfo(image, &dds_info) != MagickTrue) { ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP) cubemap = MagickTrue; if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0) volume = MagickTrue; (void) SeekBlob(image, 128, SEEK_SET); /* Determine pixel format */ if (dds_info.pixelformat.flags & DDPF_RGB) { compression = NoCompression; if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS) { alpha_trait = BlendPixelTrait; decoder = ReadUncompressedRGBA; } else { alpha_trait = UndefinedPixelTrait; decoder = ReadUncompressedRGB; } } else if (dds_info.pixelformat.flags & DDPF_LUMINANCE) { compression = NoCompression; if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS) { /* Not sure how to handle this */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } else { alpha_trait = UndefinedPixelTrait; decoder = ReadUncompressedRGB; } } else if (dds_info.pixelformat.flags & DDPF_FOURCC) { switch (dds_info.pixelformat.fourcc) { case FOURCC_DXT1: { alpha_trait = UndefinedPixelTrait; compression = DXT1Compression; decoder = ReadDXT1; break; } case FOURCC_DXT3: { alpha_trait = BlendPixelTrait; compression = DXT3Compression; decoder = ReadDXT3; break; } case FOURCC_DXT5: { alpha_trait = BlendPixelTrait; compression = DXT5Compression; decoder = ReadDXT5; break; } default: { /* Unknown FOURCC */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } } } else { /* Neither compressed nor uncompressed... thus unsupported */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } num_images = 1; if (cubemap) { /* Determine number of faces defined in the cubemap */ num_images = 0; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++; } if (volume) num_images = dds_info.depth; for (n = 0; n < num_images; n++) { if (n != 0) { /* Start a new image */ AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } image->alpha_trait=alpha_trait; image->compression = compression; image->columns = dds_info.width; image->rows = dds_info.height; image->storage_class = DirectClass; image->endian = LSBEndian; image->depth = 8; if (image_info->ping != MagickFalse) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); if ((decoder)(image, &dds_info, exception) != MagickTrue) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } } (void) CloseBlob(image); return(GetFirstImageInList(image)); } static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info) { size_t hdr_size, required; /* Seek to start of header */ (void) SeekBlob(image, 4, SEEK_SET); /* Check header field */ hdr_size = ReadBlobLSBLong(image); if (hdr_size != 124) return MagickFalse; /* Fill in DDS info struct */ dds_info->flags = ReadBlobLSBLong(image); /* Check required flags */ required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT); if ((dds_info->flags & required) != required) return MagickFalse; dds_info->height = ReadBlobLSBLong(image); dds_info->width = 
ReadBlobLSBLong(image); dds_info->pitchOrLinearSize = ReadBlobLSBLong(image); dds_info->depth = ReadBlobLSBLong(image); dds_info->mipmapcount = ReadBlobLSBLong(image); (void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */ /* Read pixel format structure */ hdr_size = ReadBlobLSBLong(image); if (hdr_size != 32) return MagickFalse; dds_info->pixelformat.flags = ReadBlobLSBLong(image); dds_info->pixelformat.fourcc = ReadBlobLSBLong(image); dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image); dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image); dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image); dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image); dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image); dds_info->ddscaps1 = ReadBlobLSBLong(image); dds_info->ddscaps2 = ReadBlobLSBLong(image); (void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */ return MagickTrue; } static MagickBooleanType SetDXT1Pixels(Image *image,ssize_t x,ssize_t y, DDSColors colors,size_t bits,Quantum *q) { register ssize_t i; ssize_t j; unsigned char code; for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if ((x + i) < (ssize_t) image->columns && (y + j) < (ssize_t) image->rows) { code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3); SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q); SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q); SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q); SetPixelOpacity(image,ScaleCharToQuantum(colors.a[code]),q); if ((colors.a[code] != 0) && (image->alpha_trait == UndefinedPixelTrait)) return(MagickFalse); q+=GetPixelChannels(image); } } } return(MagickTrue); } static MagickBooleanType ReadDXT1(Image *image,DDSInfo *dds_info, ExceptionInfo *exception) { DDSColors colors; register Quantum *q; register ssize_t x; size_t bits; ssize_t y; unsigned short c0, c1; for (y = 0; y < (ssize_t) image->rows; y += 4) { for (x = 0; x < (ssize_t) image->columns; x += 4) { /* Get 4x4 patch of pixels to write on */ q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x), MagickMin(4,image->rows-y),exception); if (q == (Quantum *) NULL) return MagickFalse; /* Read 8 bytes of data from the image */ c0=ReadBlobLSBShort(image); c1=ReadBlobLSBShort(image); bits=ReadBlobLSBLong(image); CalculateColors(c0,c1,&colors,MagickFalse); /* Write the pixels */ if (SetDXT1Pixels(image,x,y,colors,bits,q) == MagickFalse) { /* Correct alpha */ SetImageAlpha(image,QuantumRange,exception); q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x), MagickMin(4,image->rows-y),exception); if (q != (Quantum *) NULL) SetDXT1Pixels(image,x,y,colors,bits,q); } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } } return(SkipDXTMipmaps(image,dds_info,8,exception)); } static MagickBooleanType ReadDXT3(Image *image, DDSInfo *dds_info, ExceptionInfo *exception) { DDSColors colors; register Quantum *q; register ssize_t i, x; unsigned char alpha; size_t a0, a1, bits, code; ssize_t j, y; unsigned short c0, c1; for (y = 0; y < (ssize_t) dds_info->height; y += 4) { for (x = 0; x < (ssize_t) dds_info->width; x += 4) { /* Get 4x4 patch of pixels to write on */ q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x), MagickMin(4, dds_info->height - y),exception); if (q == (Quantum *) NULL) return MagickFalse; /* Read alpha values (8 bytes) */ a0 = ReadBlobLSBLong(image); a1 = ReadBlobLSBLong(image); /* Read 8 bytes of data from the image */ c0 = ReadBlobLSBShort(image); c1 = ReadBlobLSBShort(image); bits = 
ReadBlobLSBLong(image); CalculateColors(c0, c1, &colors, MagickTrue); /* Write the pixels */ for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height) { code = (bits >> ((4*j+i)*2)) & 0x3; SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q); SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q); SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q); /* Extract alpha value: multiply 0..15 by 17 to get range 0..255 */ if (j < 2) alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf); else alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf); SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q); q+=GetPixelChannels(image); } } } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } } return(SkipDXTMipmaps(image,dds_info,16,exception)); } static MagickBooleanType ReadDXT5(Image *image, DDSInfo *dds_info, ExceptionInfo *exception) { DDSColors colors; MagickSizeType alpha_bits; register Quantum *q; register ssize_t i, x; unsigned char a0, a1; size_t alpha, bits, code, alpha_code; ssize_t j, y; unsigned short c0, c1; for (y = 0; y < (ssize_t) dds_info->height; y += 4) { for (x = 0; x < (ssize_t) dds_info->width; x += 4) { /* Get 4x4 patch of pixels to write on */ q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x), MagickMin(4, dds_info->height - y),exception); if (q == (Quantum *) NULL) return MagickFalse; /* Read alpha values (8 bytes) */ a0 = (unsigned char) ReadBlobByte(image); a1 = (unsigned char) ReadBlobByte(image); alpha_bits = (MagickSizeType)ReadBlobLSBLong(image); alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32); /* Read 8 bytes of data from the image */ c0 = ReadBlobLSBShort(image); c1 = ReadBlobLSBShort(image); bits = ReadBlobLSBLong(image); CalculateColors(c0, c1, &colors, MagickTrue); /* Write the pixels */ for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height) { code = (bits >> ((4*j+i)*2)) & 0x3; SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q); SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q); SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q); /* Extract alpha value */ alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7; if (alpha_code == 0) alpha = a0; else if (alpha_code == 1) alpha = a1; else if (a0 > a1) alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7; else if (alpha_code == 6) alpha = 0; else if (alpha_code == 7) alpha = 255; else alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5); SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q); q+=GetPixelChannels(image); } } } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } } return(SkipDXTMipmaps(image,dds_info,16,exception)); } static MagickBooleanType ReadUncompressedRGB(Image *image, DDSInfo *dds_info, ExceptionInfo *exception) { register Quantum *q; ssize_t x, y; unsigned short color; if (dds_info->pixelformat.rgb_bitcount == 8) (void) SetImageType(image,GrayscaleType,exception); else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask( dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000)) ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported", image->filename); for (y = 0; y < (ssize_t) dds_info->height; y++) { q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception); if (q == (Quantum *) NULL) return MagickFalse; for (x = 0; x < (ssize_t) dds_info->width; x++) { if 
(dds_info->pixelformat.rgb_bitcount == 8) SetPixelGray(image,ScaleCharToQuantum(ReadBlobByte(image)),q); else if (dds_info->pixelformat.rgb_bitcount == 16) { color=ReadBlobShort(image); SetPixelRed(image,ScaleCharToQuantum((unsigned char) (((color >> 11)/31.0)*255)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 5) >> 10)/63.0)*255)),q); SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q); } else { SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelRed(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); if (dds_info->pixelformat.rgb_bitcount == 32) (void) ReadBlobByte(image); } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } return(SkipRGBMipmaps(image,dds_info,3,exception)); } static MagickBooleanType ReadUncompressedRGBA(Image *image, DDSInfo *dds_info, ExceptionInfo *exception) { register Quantum *q; ssize_t alphaBits, x, y; unsigned short color; alphaBits=0; if (dds_info->pixelformat.rgb_bitcount == 16) { if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000)) alphaBits=1; else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00)) { alphaBits=2; (void) SetImageType(image,GrayscaleAlphaType,exception); } else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000)) alphaBits=4; else ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported", image->filename); } for (y = 0; y < (ssize_t) dds_info->height; y++) { q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception); if (q == (Quantum *) NULL) return MagickFalse; for (x = 0; x < (ssize_t) dds_info->width; x++) { if (dds_info->pixelformat.rgb_bitcount == 16) { color=ReadBlobShort(image); if (alphaBits == 1) { SetPixelAlpha(image,(color & (1 << 15)) ? 
QuantumRange : 0,q); SetPixelRed(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 1) >> 11)/31.0)*255)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 6) >> 11)/31.0)*255)),q); SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q); } else if (alphaBits == 2) { SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) (color >> 8)),q); SetPixelGray(image,ScaleCharToQuantum((unsigned char)color),q); } else { SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) (((color >> 12)/15.0)*255)),q); SetPixelRed(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 4) >> 12)/15.0)*255)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 8) >> 12)/15.0)*255)),q); SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 12) >> 12)/15.0)*255)),q); } } else { SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelRed(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)),q); } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } return(SkipRGBMipmaps(image,dds_info,4,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterDDSImage() adds attributes for the DDS image format to % the list of supported formats. The attributes include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. 
% % The format of the RegisterDDSImage method is: % % RegisterDDSImage(void) % */ ModuleExport size_t RegisterDDSImage(void) { MagickInfo *entry; entry = AcquireMagickInfo("DDS","DDS","Microsoft DirectDraw Surface"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->flags|=CoderDecoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); entry = AcquireMagickInfo("DDS","DXT1","Microsoft DirectDraw Surface"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->flags|=CoderDecoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); entry = AcquireMagickInfo("DDS","DXT5","Microsoft DirectDraw Surface"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->flags|=CoderDecoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } static void RemapIndices(const ssize_t *map, const unsigned char *source, unsigned char *target) { register ssize_t i; for (i = 0; i < 16; i++) { if (map[i] == -1) target[i] = 3; else target[i] = source[map[i]]; } } /* Skip the mipmap images for compressed (DXTn) dds files */ static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info, int texel_size,ExceptionInfo *exception) { MagickOffsetType offset; register ssize_t i; size_t h, w; /* Only skip mipmaps for textures and cube maps */ if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && (dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { w = DIV2(dds_info->width); h = DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { offset = (MagickOffsetType) ((w + 3) / 4) * ((h + 3) / 4) * texel_size; if (SeekBlob(image, offset, SEEK_CUR) < 0) break; w = DIV2(w); h = DIV2(h); } } return(MagickTrue); } /* Skip the mipmap images for uncompressed (RGB or RGBA) dds files */ static MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info, int pixel_size,ExceptionInfo *exception) { MagickOffsetType offset; register ssize_t i; size_t h, w; /* Only skip mipmaps for textures and cube maps */ if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && (dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { w = DIV2(dds_info->width); h = DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { offset = (MagickOffsetType) w * h * pixel_size; if (SeekBlob(image, offset, SEEK_CUR) < 0) break; w = DIV2(w); h = DIV2(h); } } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterDDSImage() removes format registrations made by the % DDS module from the list of supported formats. 
% % The format of the UnregisterDDSImage method is: % % UnregisterDDSImage(void) % */ ModuleExport void UnregisterDDSImage(void) { (void) UnregisterMagickInfo("DDS"); (void) UnregisterMagickInfo("DXT1"); (void) UnregisterMagickInfo("DXT5"); } static void WriteAlphas(Image *image, const ssize_t *alphas, size_t min5, size_t max5, size_t min7, size_t max7) { register ssize_t i; size_t err5, err7, j; unsigned char indices5[16], indices7[16]; FixRange(min5,max5,5); err5 = CompressAlpha(min5,max5,5,alphas,indices5); FixRange(min7,max7,7); err7 = CompressAlpha(min7,max7,7,alphas,indices7); if (err7 < err5) { for (i=0; i < 16; i++) { unsigned char index; index = indices7[i]; if( index == 0 ) indices5[i] = 1; else if (index == 1) indices5[i] = 0; else indices5[i] = 9 - index; } min5 = max7; max5 = min7; } (void) WriteBlobByte(image,(unsigned char) min5); (void) WriteBlobByte(image,(unsigned char) max5); for(i=0; i < 2; i++) { size_t value = 0; for (j=0; j < 8; j++) { size_t index = (size_t) indices5[j + i*8]; value |= ( index << 3*j ); } for (j=0; j < 3; j++) { size_t byte = (value >> 8*j) & 0xff; (void) WriteBlobByte(image,(unsigned char) byte); } } } static void WriteCompressed(Image *image, const size_t count, DDSVector4 *points, const ssize_t *map, const MagickBooleanType clusterFit) { float covariance[16]; DDSVector3 end, principle, start; DDSVector4 metric; unsigned char indices[16]; VectorInit(metric,1.0f); VectorInit3(start,0.0f); VectorInit3(end,0.0f); ComputeWeightedCovariance(count,points,covariance); ComputePrincipleComponent(covariance,&principle); if ((clusterFit == MagickFalse) || (count == 0)) CompressRangeFit(count,points,map,principle,metric,&start,&end,indices); else CompressClusterFit(count,points,map,principle,metric,&start,&end,indices); WriteIndices(image,start,end,indices); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format. % % The format of the WriteBMPImage method is: % % MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. 
% */ static MagickBooleanType WriteDDSImage(const ImageInfo *image_info, Image *image, ExceptionInfo *exception) { const char *option; size_t compression, columns, maxMipmaps, mipmaps, pixelFormat, rows; MagickBooleanType clusterFit, status, weightByAlpha; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); (void) TransformImageColorspace(image,sRGBColorspace,exception); pixelFormat=DDPF_FOURCC; compression=FOURCC_DXT5; if (image->alpha_trait == UndefinedPixelTrait) compression=FOURCC_DXT1; if (LocaleCompare(image_info->magick,"dxt1") == 0) compression=FOURCC_DXT1; option=GetImageOption(image_info,"dds:compression"); if (option != (char *) NULL) { if (LocaleCompare(option,"dxt1") == 0) compression=FOURCC_DXT1; if (LocaleCompare(option,"none") == 0) pixelFormat=DDPF_RGB; } clusterFit=MagickFalse; weightByAlpha=MagickFalse; if (pixelFormat == DDPF_FOURCC) { option=GetImageOption(image_info,"dds:cluster-fit"); if (IsStringTrue(option) != MagickFalse) { clusterFit=MagickTrue; if (compression != FOURCC_DXT1) { option=GetImageOption(image_info,"dds:weight-by-alpha"); if (IsStringTrue(option) != MagickFalse) weightByAlpha=MagickTrue; } } } maxMipmaps=SIZE_MAX; mipmaps=0; if ((image->columns & (image->columns - 1)) == 0 && (image->rows & (image->rows - 1)) == 0) { option=GetImageOption(image_info,"dds:mipmaps"); if (option != (char *) NULL) maxMipmaps=StringToUnsignedLong(option); if (maxMipmaps != 0) { columns=image->columns; rows=image->rows; while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps) { columns=DIV2(columns); rows=DIV2(rows); mipmaps++; } } } WriteDDSInfo(image,pixelFormat,compression,mipmaps); WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha, exception); if (mipmaps > 0 && WriteMipmaps(image,pixelFormat,compression,mipmaps, clusterFit,weightByAlpha,exception) == MagickFalse) return(MagickFalse); (void) CloseBlob(image); return(MagickTrue); } static void WriteDDSInfo(Image *image, const size_t pixelFormat, const size_t compression, const size_t mipmaps) { char software[MagickPathExtent]; register ssize_t i; unsigned int format, caps, flags; flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT); caps=(unsigned int) DDSCAPS_TEXTURE; format=(unsigned int) pixelFormat; if (format == DDPF_FOURCC) flags=flags | DDSD_LINEARSIZE; else flags=flags | DDSD_PITCH; if (mipmaps > 0) { flags=flags | (unsigned int) DDSD_MIPMAPCOUNT; caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX); } if (format != DDPF_FOURCC && image->alpha_trait != UndefinedPixelTrait) format=format | DDPF_ALPHAPIXELS; (void) WriteBlob(image,4,(unsigned char *) "DDS "); (void) WriteBlobLSBLong(image,124); (void) WriteBlobLSBLong(image,flags); (void) WriteBlobLSBLong(image,(unsigned int) image->rows); (void) WriteBlobLSBLong(image,(unsigned int) image->columns); if (pixelFormat == DDPF_FOURCC) { /* Compressed DDS requires linear compressed size of first image */ if (compression == FOURCC_DXT1) (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1, (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8)); else /* DXT5 */ (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1, 
(image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16)); } else { /* Uncompressed DDS requires byte pitch of first image */ if (image->alpha_trait != UndefinedPixelTrait) (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4)); else (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3)); } (void) WriteBlobLSBLong(image,0x00); (void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1); (void) ResetMagickMemory(software,0,sizeof(software)); (void) CopyMagickString(software,"IMAGEMAGICK",MagickPathExtent); (void) WriteBlob(image,44,(unsigned char *) software); (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,format); if (pixelFormat == DDPF_FOURCC) { (void) WriteBlobLSBLong(image,(unsigned int) compression); for(i=0;i < 5;i++) // bitcount / masks (void) WriteBlobLSBLong(image,0x00); } else { (void) WriteBlobLSBLong(image,0x00); if (image->alpha_trait != UndefinedPixelTrait) { (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0xff000000); } else { (void) WriteBlobLSBLong(image,24); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0x00); } } (void) WriteBlobLSBLong(image,caps); for(i=0;i < 4;i++) // ddscaps2 + reserved region (void) WriteBlobLSBLong(image,0x00); } static void WriteFourCC(Image *image, const size_t compression, const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { register ssize_t x; ssize_t i, y, bx, by; register const Quantum *p; for (y=0; y < (ssize_t) image->rows; y+=4) { for (x=0; x < (ssize_t) image->columns; x+=4) { MagickBooleanType match; DDSVector4 point, points[16]; size_t count = 0, max5 = 0, max7 = 0, min5 = 255, min7 = 255, columns = 4, rows = 4; ssize_t alphas[16], map[16]; unsigned char alpha; if (x + columns >= image->columns) columns = image->columns - x; if (y + rows >= image->rows) rows = image->rows - y; p=GetVirtualPixels(image,x,y,columns,rows,exception); if (p == (const Quantum *) NULL) break; for (i=0; i<16; i++) { map[i] = -1; alphas[i] = -1; } for (by=0; by < (ssize_t) rows; by++) { for (bx=0; bx < (ssize_t) columns; bx++) { if (compression == FOURCC_DXT5) alpha = ScaleQuantumToChar(GetPixelAlpha(image,p)); else alpha = 255; if (compression == FOURCC_DXT5) { if (alpha < min7) min7 = alpha; if (alpha > max7) max7 = alpha; if (alpha != 0 && alpha < min5) min5 = alpha; if (alpha != 255 && alpha > max5) max5 = alpha; } alphas[4*by + bx] = (size_t)alpha; point.x = (float)ScaleQuantumToChar(GetPixelRed(image,p)) / 255.0f; point.y = (float)ScaleQuantumToChar(GetPixelGreen(image,p)) / 255.0f; point.z = (float)ScaleQuantumToChar(GetPixelBlue(image,p)) / 255.0f; point.w = weightByAlpha ? 
(float)(alpha + 1) / 256.0f : 1.0f; p+=GetPixelChannels(image); match = MagickFalse; for (i=0; i < (ssize_t) count; i++) { if ((points[i].x == point.x) && (points[i].y == point.y) && (points[i].z == point.z) && (alpha >= 128 || compression == FOURCC_DXT5)) { points[i].w += point.w; map[4*by + bx] = i; match = MagickTrue; break; } } if (match != MagickFalse) continue; points[count].x = point.x; points[count].y = point.y; points[count].z = point.z; points[count].w = point.w; map[4*by + bx] = count; count++; } } for (i=0; i < (ssize_t) count; i++) points[i].w = sqrt(points[i].w); if (compression == FOURCC_DXT5) WriteAlphas(image,alphas,min5,max5,min7,max7); if (count == 1) WriteSingleColorFit(image,points,map); else WriteCompressed(image,count,points,map,clusterFit); } } } static void WriteImageData(Image *image, const size_t pixelFormat, const size_t compression,const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { if (pixelFormat == DDPF_FOURCC) WriteFourCC(image,compression,clusterFit,weightByAlpha,exception); else WriteUncompressed(image,exception); } static inline size_t ClampToLimit(const float value, const size_t limit) { size_t result = (int) (value + 0.5f); if (result < 0.0f) return(0); if (result > limit) return(limit); return result; } static inline size_t ColorTo565(const DDSVector3 point) { size_t r = ClampToLimit(31.0f*point.x,31); size_t g = ClampToLimit(63.0f*point.y,63); size_t b = ClampToLimit(31.0f*point.z,31); return (r << 11) | (g << 5) | b; } static void WriteIndices(Image *image, const DDSVector3 start, const DDSVector3 end, unsigned char *indices) { register ssize_t i; size_t a, b; unsigned char remapped[16]; const unsigned char *ind; a = ColorTo565(start); b = ColorTo565(end); for (i=0; i<16; i++) { if( a < b ) remapped[i] = (indices[i] ^ 0x1) & 0x3; else if( a == b ) remapped[i] = 0; else remapped[i] = indices[i]; } if( a < b ) Swap(a,b); (void) WriteBlobByte(image,(unsigned char) (a & 0xff)); (void) WriteBlobByte(image,(unsigned char) (a >> 8)); (void) WriteBlobByte(image,(unsigned char) (b & 0xff)); (void) WriteBlobByte(image,(unsigned char) (b >> 8)); for (i=0; i<4; i++) { ind = remapped + 4*i; (void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) | (ind[3] << 6)); } } static MagickBooleanType WriteMipmaps(Image *image, const size_t pixelFormat, const size_t compression, const size_t mipmaps, const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { Image* resize_image; register ssize_t i; size_t columns, rows; columns = image->columns; rows = image->rows; for (i=0; i< (ssize_t) mipmaps; i++) { resize_image = ResizeImage(image,DIV2(columns),DIV2(rows),TriangleFilter, exception); if (resize_image == (Image *) NULL) return(MagickFalse); DestroyBlob(resize_image); resize_image->blob=ReferenceBlob(image->blob); WriteImageData(resize_image,pixelFormat,compression,weightByAlpha, clusterFit,exception); resize_image=DestroyImage(resize_image); columns = DIV2(columns); rows = DIV2(rows); } return(MagickTrue); } static void WriteSingleColorFit(Image *image, const DDSVector4 *points, const ssize_t *map) { DDSVector3 start, end; register ssize_t i; unsigned char color[3], index, indexes[16], indices[16]; color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255); color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255); color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255); index=0; ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index); for (i=0; i< 16; 
i++) indexes[i]=index; RemapIndices(map,indexes,indices); WriteIndices(image,start,end,indices); } static void WriteUncompressed(Image *image, ExceptionInfo *exception) { register const Quantum *p; register ssize_t x; ssize_t y; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(image,p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(image,p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(image,p))); if (image->alpha_trait != UndefinedPixelTrait) (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(image,p))); p+=GetPixelChannels(image); } } }
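Editor's note: coders/dds.c above decodes DXT1/DXT3/DXT5 blocks but never spells out the block layout in one place. Below is a minimal, self-contained sketch of the BC1/DXT1 block decode that CalculateColors() and SetDXT1Pixels() implement together. It is not part of the coder; the names Expand565 and DecodeDXT1Block are illustrative only. The 1/3 and 2/3 palette weights match the codes[2]/codes[3] interpolation used by the range-fit encoder above, and the c0 <= c1 "transparent black" case follows the standard BC1 rule.

/* Illustrative sketch only -- not part of coders/dds.c. */
#include <stdint.h>

/* Expand a packed 5-6-5 color to 8-bit channels, using the same truncating
   value*255/31 (value*255/63 for green) scaling as the 16-bit uncompressed
   reader above. */
static void Expand565(uint16_t c,uint8_t *r,uint8_t *g,uint8_t *b)
{
  *r=(uint8_t) (((c >> 11) & 0x1f)*255/31);
  *g=(uint8_t) (((c >> 5) & 0x3f)*255/63);
  *b=(uint8_t) ((c & 0x1f)*255/31);
}

/* Decode one 4x4 DXT1 block: two little-endian 16-bit endpoint colors c0 and
   c1 followed by 32 bits of 2-bit palette indices (pixel n uses
   bits >> (2*n), exactly as SetDXT1Pixels does).  If c0 > c1 the two derived
   palette entries are the 2/3-1/3 interpolants written by the encoder;
   otherwise entry 2 is the midpoint and entry 3 is transparent black. */
static void DecodeDXT1Block(uint16_t c0,uint16_t c1,uint32_t bits,
  uint8_t rgba[16][4])
{
  uint8_t palette[4][4];
  int i;

  Expand565(c0,&palette[0][0],&palette[0][1],&palette[0][2]);
  Expand565(c1,&palette[1][0],&palette[1][1],&palette[1][2]);
  palette[0][3]=255;
  palette[1][3]=255;
  palette[2][3]=255;
  if (c0 > c1)
    {
      for (i=0; i < 3; i++)
      {
        palette[2][i]=(uint8_t) ((2*palette[0][i]+palette[1][i])/3);
        palette[3][i]=(uint8_t) ((palette[0][i]+2*palette[1][i])/3);
      }
      palette[3][3]=255;
    }
  else
    {
      for (i=0; i < 3; i++)
      {
        palette[2][i]=(uint8_t) ((palette[0][i]+palette[1][i])/2);
        palette[3][i]=0;
      }
      palette[3][3]=0;  /* transparent black */
    }
  for (i=0; i < 16; i++)
    {
      int code=(int) ((bits >> (2*i)) & 0x3);
      rgba[i][0]=palette[code][0];
      rgba[i][1]=palette[code][1];
      rgba[i][2]=palette[code][2];
      rgba[i][3]=palette[code][3];
    }
}

Calling DecodeDXT1Block() with the c0, c1 and bits values that ReadDXT1() reads per block yields the same 4x4 patch of RGBA bytes that the coder then scales into Quantums.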
gbdt.h
#ifndef LIGHTGBM_BOOSTING_GBDT_H_ #define LIGHTGBM_BOOSTING_GBDT_H_ #include <LightGBM/boosting.h> #include <LightGBM/objective_function.h> #include <LightGBM/prediction_early_stop.h> #include <LightGBM/json11.hpp> #include "score_updater.hpp" #include <cstdio> #include <vector> #include <string> #include <fstream> #include <memory> #include <mutex> #include <map> using namespace json11; namespace LightGBM { /*! * \brief GBDT algorithm implementation. including Training, prediction, bagging. */ class GBDT : public GBDTBase { public: /*! * \brief Constructor */ GBDT(); /*! * \brief Destructor */ ~GBDT(); /*! * \brief Initialization logic * \param gbdt_config Config for boosting * \param train_data Training data * \param objective_function Training objective function * \param training_metrics Training metrics */ void Init(const Config* gbdt_config, const Dataset* train_data, const ObjectiveFunction* objective_function, const std::vector<const Metric*>& training_metrics) override; /*! * \brief Merge model from other boosting object. Will insert to the front of current boosting object * \param other */ void MergeFrom(const Boosting* other) override { auto other_gbdt = reinterpret_cast<const GBDT*>(other); // tmp move to other vector auto original_models = std::move(models_); models_ = std::vector<std::unique_ptr<Tree>>(); // push model from other first for (const auto& tree : other_gbdt->models_) { auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get()))); models_.push_back(std::move(new_tree)); } num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; // push model in current object for (const auto& tree : original_models) { auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get()))); models_.push_back(std::move(new_tree)); } num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; } void ShuffleModels(int start_iter, int end_iter) override { int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_; start_iter = std::max(0, start_iter); if (end_iter <= 0) { end_iter = total_iter; } end_iter = std::min(total_iter, end_iter); auto original_models = std::move(models_); std::vector<int> indices(total_iter); for (int i = 0; i < total_iter; ++i) { indices[i] = i; } Random tmp_rand(17); for (int i = start_iter; i < end_iter - 1; ++i) { int j = tmp_rand.NextShort(i + 1, end_iter); std::swap(indices[i], indices[j]); } models_ = std::vector<std::unique_ptr<Tree>>(); for (int i = 0; i < total_iter; ++i) { for (int j = 0; j < num_tree_per_iteration_; ++j) { int tree_idx = indices[i] * num_tree_per_iteration_ + j; auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get()))); models_.push_back(std::move(new_tree)); } } } /*! * \brief Reset the training data * \param train_data New Training data * \param objective_function Training objective function * \param training_metrics Training metrics */ void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function, const std::vector<const Metric*>& training_metrics) override; /*! * \brief Reset Boosting Config * \param gbdt_config Config for boosting */ void ResetConfig(const Config* gbdt_config) override; /*! * \brief Adding a validation dataset * \param valid_data Validation dataset * \param valid_metrics Metrics for validation dataset */ void AddValidDataset(const Dataset* valid_data, const std::vector<const Metric*>& valid_metrics) override; /*! 
* \brief Perform a full training procedure * \param snapshot_freq frequence of snapshot * \param model_output_path path of model file */ void Train(int snapshot_freq, const std::string& model_output_path) override; void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override; /*! * \brief Training logic * \param gradients nullptr for using default objective, otherwise use self-defined boosting * \param hessians nullptr for using default objective, otherwise use self-defined boosting * \return True if cannot train any more */ virtual bool TrainOneIter(const score_t* gradients, const score_t* hessians) override; /*! * \brief Rollback one iteration */ void RollbackOneIter() override; /*! * \brief Get current iteration */ int GetCurrentIteration() const override { return static_cast<int>(models_.size()) / num_tree_per_iteration_; } /*! * \brief Can use early stopping for prediction or not * \return True if cannot use early stopping for prediction */ bool NeedAccuratePrediction() const override { if (objective_function_ == nullptr) { return true; } else { return objective_function_->NeedAccuratePrediction(); } } /*! * \brief Get evaluation result at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \return evaluation result */ std::vector<double> GetEvalAt(int data_idx) const override; /*! * \brief Get current training score * \param out_len length of returned score * \return training score */ virtual const double* GetTrainingScore(int64_t* out_len) override; /*! * \brief Get size of prediction at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \return The size of prediction */ virtual int64_t GetNumPredictAt(int data_idx) const override { CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size())); data_size_t num_data = train_data_->num_data(); if (data_idx > 0) { num_data = valid_score_updater_[data_idx - 1]->num_data(); } return num_data * num_class_; } /*! * \brief Get prediction result at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \param result used to store prediction result, should allocate memory before call this function * \param out_len length of returned score */ void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override; /*! 
* \brief Get number of prediction for one data * \param num_iteration number of used iterations * \param is_pred_leaf True if predicting leaf index * \param is_pred_contrib True if predicting feature contribution * \return number of prediction */ inline int NumPredictOneRow(int num_iteration, bool is_pred_leaf, bool is_pred_contrib) const override { int num_preb_in_one_row = num_class_; if (is_pred_leaf) { int max_iteration = GetCurrentIteration(); if (num_iteration > 0) { num_preb_in_one_row *= static_cast<int>(std::min(max_iteration, num_iteration)); } else { num_preb_in_one_row *= max_iteration; } } else if (is_pred_contrib) { num_preb_in_one_row = num_tree_per_iteration_ * (max_feature_idx_ + 2); // +1 for 0-based indexing, +1 for baseline } return num_preb_in_one_row; } void PredictRaw(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; void PredictRawByMap(const std::unordered_map<int, double>& features, double* output, const PredictionEarlyStopInstance* early_stop) const override; void Predict(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; void PredictByMap(const std::unordered_map<int, double>& features, double* output, const PredictionEarlyStopInstance* early_stop) const override; void PredictLeafIndex(const double* features, double* output) const override; void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override; void PredictContrib(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; /*! * \brief Dump model to json format string * \param start_iteration The model will be saved start from * \param num_iteration Number of iterations that want to dump, -1 means dump all * \return Json format string of model */ std::string DumpModel(int start_iteration, int num_iteration) const override; /*! * \brief Translate model to if-else statement * \param num_iteration Number of iterations that want to translate, -1 means translate all * \return if-else format codes of model */ std::string ModelToIfElse(int num_iteration) const override; /*! * \brief Translate model to if-else statement * \param num_iteration Number of iterations that want to translate, -1 means translate all * \param filename Filename that want to save to * \return is_finish Is training finished or not */ bool SaveModelToIfElse(int num_iteration, const char* filename) const override; /*! * \brief Save model to file * \param start_iteration The model will be saved start from * \param num_iterations Number of model that want to save, -1 means save all * \param filename Filename that want to save to * \return is_finish Is training finished or not */ virtual bool SaveModelToFile(int start_iteration, int num_iterations, const char* filename) const override; /*! * \brief Save model to string * \param start_iteration The model will be saved start from * \param num_iterations Number of model that want to save, -1 means save all * \return Non-empty string if succeeded */ virtual std::string SaveModelToString(int start_iteration, int num_iterations) const override; /*! * \brief Restore from a serialized buffer */ bool LoadModelFromString(const char* buffer, size_t len) override; /*! 
* \brief Calculate feature importances * \param num_iteration Number of model that want to use for feature importance, -1 means use all * \param importance_type: 0 for split, 1 for gain * \return vector of feature_importance */ std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override; /*! * \brief Get max feature index of this model * \return Max feature index of this model */ inline int MaxFeatureIdx() const override { return max_feature_idx_; } /*! * \brief Get feature names of this model * \return Feature names of this model */ inline std::vector<std::string> FeatureNames() const override { return feature_names_; } /*! * \brief Get index of label column * \return index of label column */ inline int LabelIdx() const override { return label_idx_; } /*! * \brief Get number of weak sub-models * \return Number of weak sub-models */ inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); } /*! * \brief Get number of tree per iteration * \return number of tree per iteration */ inline int NumModelPerIteration() const override { return num_tree_per_iteration_; } /*! * \brief Get number of classes * \return Number of classes */ inline int NumberOfClasses() const override { return num_class_; } inline void InitPredict(int num_iteration, bool is_pred_contrib) override { num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; if (num_iteration > 0) { num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_); } if (is_pred_contrib) { #pragma omp parallel for schedule(static) for (int i = 0; i < static_cast<int>(models_.size()); ++i) { models_[i]->RecomputeMaxDepth(); } } } inline double GetLeafValue(int tree_idx, int leaf_idx) const override { CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size()); CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves()); return models_[tree_idx]->LeafOutput(leaf_idx); } inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override { CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size()); CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves()); models_[tree_idx]->SetLeafOutput(leaf_idx, val); } /*! * \brief Get Type name of this boosting object */ virtual const char* SubModelName() const override { return "tree"; } /*! * \brief Get the trees contained in this boosting class. Used for MOJO writing. */ inline const std::vector<std::unique_ptr<Tree>>& GetTrees() const override { return models_; } protected: /*! * \brief Print eval result and check early stopping */ bool EvalAndCheckEarlyStopping(); /*! * \brief reset config for bagging */ void ResetBaggingConfig(const Config* config, bool is_change_dataset); /*! * \brief Implement bagging logic * \param iter Current interation */ virtual void Bagging(int iter); /*! * \brief Helper function for bagging, used for multi-threading optimization * \param start start indice of bagging * \param cnt count * \param buffer output buffer * \return count of left size */ data_size_t BaggingHelper(Random& cur_rand, data_size_t start, data_size_t cnt, data_size_t* buffer); /*! * \brief calculate the object function */ virtual void Boosting(); /*! * \brief updating score after tree was trained * \param tree Trained tree of this iteration * \param cur_tree_id Current tree for multiclass training */ virtual void UpdateScore(const Tree* tree, const int cur_tree_id); /*! 
* \brief eval results for one metric */ virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const; /*! * \brief Print metric result of current iteration * \param iter Current interation * \return best_msg if met early_stopping */ std::string OutputMetric(int iter); double BoostFromAverage(int class_id); /*! \brief current iteration */ int iter_; /*! \brief Pointer to training data */ const Dataset* train_data_; /*! \brief Config of gbdt */ std::unique_ptr<Config> config_; /*! \brief Tree learner, will use this class to learn trees */ std::unique_ptr<TreeLearner> tree_learner_; /*! \brief Objective function */ const ObjectiveFunction* objective_function_; /*! \brief Store and update training data's score */ std::unique_ptr<ScoreUpdater> train_score_updater_; /*! \brief Metrics for training data */ std::vector<const Metric*> training_metrics_; /*! \brief Store and update validation data's scores */ std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_; /*! \brief Metric for validation data */ std::vector<std::vector<const Metric*>> valid_metrics_; /*! \brief Number of rounds for early stopping */ int early_stopping_round_; /*! \brief Best iteration(s) for early stopping */ std::vector<std::vector<int>> best_iter_; /*! \brief Best score(s) for early stopping */ std::vector<std::vector<double>> best_score_; /*! \brief output message of best iteration */ std::vector<std::vector<std::string>> best_msg_; /*! \brief Trained models(trees) */ std::vector<std::unique_ptr<Tree>> models_; /*! \brief Max feature index of training data*/ int max_feature_idx_; /*! \brief First order derivative of training data */ std::vector<score_t> gradients_; /*! \brief Secend order derivative of training data */ std::vector<score_t> hessians_; /*! \brief Store the indices of in-bag data */ std::vector<data_size_t> bag_data_indices_; /*! \brief Number of in-bag data */ data_size_t bag_data_cnt_; /*! \brief Store the indices of in-bag data */ std::vector<data_size_t> tmp_indices_; /*! \brief Number of training data */ data_size_t num_data_; /*! \brief Number of trees per iterations */ int num_tree_per_iteration_; /*! \brief Number of class */ int num_class_; /*! \brief Index of label column */ data_size_t label_idx_; /*! \brief number of used model */ int num_iteration_for_pred_; /*! \brief Shrinkage rate for one iteration */ double shrinkage_rate_; /*! \brief Number of loaded initial models */ int num_init_iteration_; /*! \brief Feature names */ std::vector<std::string> feature_names_; std::vector<std::string> feature_infos_; /*! \brief number of threads */ int num_threads_; /*! \brief Buffer for multi-threading bagging */ std::vector<data_size_t> offsets_buf_; /*! \brief Buffer for multi-threading bagging */ std::vector<data_size_t> left_cnts_buf_; /*! \brief Buffer for multi-threading bagging */ std::vector<data_size_t> right_cnts_buf_; /*! \brief Buffer for multi-threading bagging */ std::vector<data_size_t> left_write_pos_buf_; /*! \brief Buffer for multi-threading bagging */ std::vector<data_size_t> right_write_pos_buf_; std::unique_ptr<Dataset> tmp_subset_; bool is_use_subset_; std::vector<bool> class_need_train_; bool is_constant_hessian_; std::unique_ptr<ObjectiveFunction> loaded_objective_; bool average_output_; bool need_re_bagging_; std::string loaded_parameter_; Json forced_splits_json_; }; } // namespace LightGBM #endif // LightGBM_BOOSTING_GBDT_H_
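Editor's note: the NumPredictOneRow() contract above packs three different output layouts into one integer. The stand-alone C sketch below restates that rule so the cases are easy to check; it is not part of LightGBM, and the name num_predict_one_row is illustrative only.

/* Illustrative restatement of GBDT::NumPredictOneRow(); not LightGBM code. */
static int num_predict_one_row(int num_class,int num_tree_per_iteration,
  int trained_iterations,int max_feature_idx,int num_iteration,
  int is_pred_leaf,int is_pred_contrib)
{
  int n = num_class;             /* normal prediction: one score per class */
  if (is_pred_leaf) {
    /* leaf-index prediction: num_class values per used iteration
       (one per tree in the usual multiclass setup) */
    int used = trained_iterations;
    if (num_iteration > 0 && num_iteration < used) used = num_iteration;
    n *= used;
  } else if (is_pred_contrib) {
    /* contribution prediction: one value per feature plus the baseline,
       per tree of an iteration (+1 for 0-based indexing, +1 for the
       baseline, as the header comment notes) */
    n = num_tree_per_iteration * (max_feature_idx + 2);
  }
  return n;
}

For example, a 3-class model trained for 100 iterations and queried with num_iteration = 20 and leaf-index prediction enabled produces 3 * 20 = 60 values per input row.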
GxB_Scalar_wait.c
//------------------------------------------------------------------------------ // GxB_Scalar_wait: wait for a scalar to complete //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Finishes all work on a scalar, followed by an OpenMP flush. #include "GB.h" #define GB_FREE_ALL ; GrB_Info GxB_Scalar_wait // finish all work on a scalar ( GxB_Scalar *s ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- #pragma omp flush GB_WHERE ((*s), "GxB_Scalar_wait (&s)") ; GB_RETURN_IF_NULL (s) ; GB_RETURN_IF_NULL_OR_FAULTY (*s) ; //-------------------------------------------------------------------------- // finish all pending work on the scalar //-------------------------------------------------------------------------- if (GB_ANY_PENDING_WORK (*s)) { GrB_Info info ; GB_BURBLE_START ("GxB_Scalar_wait") ; GB_OK (GB_Matrix_wait ((GrB_Matrix) (*s), Context)) ; GB_BURBLE_END ; } //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- #pragma omp flush return (GrB_SUCCESS) ; }
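Editor's note: a minimal caller for the function above, assuming the v4-era SuiteSparse:GraphBLAS C API that GxB_Scalar belongs to (GxB_Scalar_new, GxB_Scalar_setElement_FP64, GxB_Scalar_extractElement_FP64, GxB_Scalar_free). It is only a sketch, error checking is omitted, and it is not part of the library sources.

/* Sketch only -- not part of SuiteSparse:GraphBLAS.  Error checks omitted. */
#include "GraphBLAS.h"
#include <stdio.h>

int main (void)
{
    GxB_Scalar s = NULL ;
    double x = 0 ;

    GrB_init (GrB_NONBLOCKING) ;            /* work may be left pending      */
    GxB_Scalar_new (&s, GrB_FP64) ;
    GxB_Scalar_setElement_FP64 (s, 3.5) ;   /* may be queued, not yet applied */
    GxB_Scalar_wait (&s) ;                  /* force completion, then flush  */
    GxB_Scalar_extractElement_FP64 (&x, s) ;
    printf ("scalar = %g\n", x) ;
    GxB_Scalar_free (&s) ;
    GrB_finalize () ;
    return (0) ;
}

In blocking mode the call finds no pending work and reduces to the two OpenMP flushes shown in the implementation above.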
threshold.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD % % T H H R R E SS H H O O L D D % % T HHHHH RRRR EEE SSS HHHHH O O L D D % % T H H R R E SS H H O O L D D % % T H H R R EEEEE SSSSS H H OOO LLLLL DDDD % % % % % % MagickCore Image Threshold Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/configure.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/shear.h" #include "MagickCore/signature-private.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/xml-tree.h" #include "MagickCore/xml-tree-private.h" /* Define declarations. */ #define ThresholdsFilename "thresholds.xml" /* Typedef declarations. */ struct _ThresholdMap { char *map_id, *description; size_t width, height; ssize_t divisor, *levels; }; /* Static declarations. 
*/ #if MAGICKCORE_ZERO_CONFIGURATION_SUPPORT #include "MagickCore/threshold-map.h" #else static const char *const BuiltinMap= "<?xml version=\"1.0\"?>" "<thresholds>" " <threshold map=\"threshold\" alias=\"1x1\">" " <description>Threshold 1x1 (non-dither)</description>" " <levels width=\"1\" height=\"1\" divisor=\"2\">" " 1" " </levels>" " </threshold>" " <threshold map=\"checks\" alias=\"2x1\">" " <description>Checkerboard 2x1 (dither)</description>" " <levels width=\"2\" height=\"2\" divisor=\"3\">" " 1 2" " 2 1" " </levels>" " </threshold>" "</thresholds>"; #endif /* Forward declarations. */ static ThresholdMap *GetThresholdMapFile(const char *,const char *,const char *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveThresholdImage() selects an individual threshold for each pixel % based on the range of intensity values in its local neighborhood. This % allows for thresholding of an image whose global intensity histogram % doesn't contain distinctive peaks. % % The format of the AdaptiveThresholdImage method is: % % Image *AdaptiveThresholdImage(const Image *image,const size_t width, % const size_t height,const double bias,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width: the width of the local neighborhood. % % o height: the height of the local neighborhood. % % o bias: the mean bias. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveThresholdImage(const Image *image, const size_t width,const size_t height,const double bias, ExceptionInfo *exception) { #define AdaptiveThresholdImageTag "AdaptiveThreshold/Image" CacheView *image_view, *threshold_view; Image *threshold_image; MagickBooleanType status; MagickOffsetType progress; MagickSizeType number_pixels; ssize_t y; /* Initialize threshold image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); threshold_image=CloneImage(image,0,0,MagickTrue,exception); if (threshold_image == (Image *) NULL) return((Image *) NULL); if ((width == 0) || (height == 0)) return(threshold_image); status=SetImageStorageClass(threshold_image,DirectClass,exception); if (status == MagickFalse) { threshold_image=DestroyImage(threshold_image); return((Image *) NULL); } /* Threshold image. 
*/ status=MagickTrue; progress=0; number_pixels=(MagickSizeType) width*height; image_view=AcquireVirtualCacheView(image,exception); threshold_view=AcquireAuthenticCacheView(threshold_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,threshold_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double channel_bias[MaxPixelChannels], channel_sum[MaxPixelChannels]; register const Quantum *magick_restrict p, *magick_restrict pixels; register Quantum *magick_restrict q; register ssize_t i, x; ssize_t center, u, v; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) (height/2L),image->columns+width,height,exception); q=QueueCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } center=(ssize_t) GetPixelChannels(image)*(image->columns+width)*(height/2L)+ GetPixelChannels(image)*(width/2); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image, channel); if ((traits == UndefinedPixelTrait) || (threshold_traits == UndefinedPixelTrait)) continue; if ((threshold_traits & CopyPixelTrait) != 0) { SetPixelChannel(threshold_image,channel,p[center+i],q); continue; } pixels=p; channel_bias[channel]=0.0; channel_sum[channel]=0.0; for (v=0; v < (ssize_t) height; v++) { for (u=0; u < (ssize_t) width; u++) { if (u == (ssize_t) (width-1)) channel_bias[channel]+=pixels[i]; channel_sum[channel]+=pixels[i]; pixels+=GetPixelChannels(image); } pixels+=GetPixelChannels(image)*image->columns; } } for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double mean; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image, channel); if ((traits == UndefinedPixelTrait) || (threshold_traits == UndefinedPixelTrait)) continue; if ((threshold_traits & CopyPixelTrait) != 0) { SetPixelChannel(threshold_image,channel,p[center+i],q); continue; } channel_sum[channel]-=channel_bias[channel]; channel_bias[channel]=0.0; pixels=p; for (v=0; v < (ssize_t) height; v++) { channel_bias[channel]+=pixels[i]; pixels+=(width-1)*GetPixelChannels(image); channel_sum[channel]+=pixels[i]; pixels+=GetPixelChannels(image)*(image->columns+1); } mean=(double) (channel_sum[channel]/number_pixels+bias); SetPixelChannel(threshold_image,channel,(Quantum) ((double) p[center+i] <= mean ? 
0 : QuantumRange),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(threshold_image); } if (SyncCacheViewAuthenticPixels(threshold_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,AdaptiveThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } threshold_image->type=image->type; threshold_view=DestroyCacheView(threshold_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) threshold_image=DestroyImage(threshold_image); return(threshold_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoThresholdImage() automatically performs image thresholding % dependent on which method you specify. % % The format of the AutoThresholdImage method is: % % MagickBooleanType AutoThresholdImage(Image *image, % const AutoThresholdMethod method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image to auto-threshold. % % o method: choose from Kapur, OTSU, or Triangle. % % o exception: return any errors or warnings in this structure. % */ static double KapurThreshold(const Image *image,const double *histogram, ExceptionInfo *exception) { #define MaxIntensity 255 double *black_entropy, *cumulative_histogram, entropy, epsilon, maximum_entropy, *white_entropy; register ssize_t i, j; size_t threshold; /* Compute optimal threshold from the entopy of the histogram. */ cumulative_histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*cumulative_histogram)); black_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*black_entropy)); white_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*white_entropy)); if ((cumulative_histogram == (double *) NULL) || (black_entropy == (double *) NULL) || (white_entropy == (double *) NULL)) { if (white_entropy != (double *) NULL) white_entropy=(double *) RelinquishMagickMemory(white_entropy); if (black_entropy != (double *) NULL) black_entropy=(double *) RelinquishMagickMemory(black_entropy); if (cumulative_histogram != (double *) NULL) cumulative_histogram=(double *) RelinquishMagickMemory(cumulative_histogram); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(-1.0); } /* Entropy for black and white parts of the histogram. */ cumulative_histogram[0]=histogram[0]; for (i=1; i <= MaxIntensity; i++) cumulative_histogram[i]=cumulative_histogram[i-1]+histogram[i]; epsilon=MagickMinimumValue; for (j=0; j <= MaxIntensity; j++) { /* Black entropy. */ black_entropy[j]=0.0; if (cumulative_histogram[j] > epsilon) { entropy=0.0; for (i=0; i <= j; i++) if (histogram[i] > epsilon) entropy-=histogram[i]/cumulative_histogram[j]* log(histogram[i]/cumulative_histogram[j]); black_entropy[j]=entropy; } /* White entropy. */ white_entropy[j]=0.0; if ((1.0-cumulative_histogram[j]) > epsilon) { entropy=0.0; for (i=j+1; i <= MaxIntensity; i++) if (histogram[i] > epsilon) entropy-=histogram[i]/(1.0-cumulative_histogram[j])* log(histogram[i]/(1.0-cumulative_histogram[j])); white_entropy[j]=entropy; } } /* Find histogram bin with maximum entropy. 
*/ maximum_entropy=black_entropy[0]+white_entropy[0]; threshold=0; for (j=1; j <= MaxIntensity; j++) if ((black_entropy[j]+white_entropy[j]) > maximum_entropy) { maximum_entropy=black_entropy[j]+white_entropy[j]; threshold=(size_t) j; } /* Free resources. */ white_entropy=(double *) RelinquishMagickMemory(white_entropy); black_entropy=(double *) RelinquishMagickMemory(black_entropy); cumulative_histogram=(double *) RelinquishMagickMemory(cumulative_histogram); return(100.0*threshold/MaxIntensity); } static double OTSUThreshold(const Image *image,const double *histogram, ExceptionInfo *exception) { double max_sigma, *myu, *omega, *probability, *sigma, threshold; register ssize_t i; /* Compute optimal threshold from maximization of inter-class variance. */ myu=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*myu)); omega=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*omega)); probability=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*probability)); sigma=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*sigma)); if ((myu == (double *) NULL) || (omega == (double *) NULL) || (probability == (double *) NULL) || (sigma == (double *) NULL)) { if (sigma != (double *) NULL) sigma=(double *) RelinquishMagickMemory(sigma); if (probability != (double *) NULL) probability=(double *) RelinquishMagickMemory(probability); if (omega != (double *) NULL) omega=(double *) RelinquishMagickMemory(omega); if (myu != (double *) NULL) myu=(double *) RelinquishMagickMemory(myu); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(-1.0); } /* Calculate probability density. */ for (i=0; i <= (ssize_t) MaxIntensity; i++) probability[i]=histogram[i]; /* Generate probability of graylevels and mean value for separation. */ omega[0]=probability[0]; myu[0]=0.0; for (i=1; i <= (ssize_t) MaxIntensity; i++) { omega[i]=omega[i-1]+probability[i]; myu[i]=myu[i-1]+i*probability[i]; } /* Sigma maximization: inter-class variance and compute optimal threshold. */ threshold=0; max_sigma=0.0; for (i=0; i < (ssize_t) MaxIntensity; i++) { sigma[i]=0.0; if ((omega[i] != 0.0) && (omega[i] != 1.0)) sigma[i]=pow(myu[MaxIntensity]*omega[i]-myu[i],2.0)/(omega[i]*(1.0- omega[i])); if (sigma[i] > max_sigma) { max_sigma=sigma[i]; threshold=(double) i; } } /* Free resources. */ myu=(double *) RelinquishMagickMemory(myu); omega=(double *) RelinquishMagickMemory(omega); probability=(double *) RelinquishMagickMemory(probability); sigma=(double *) RelinquishMagickMemory(sigma); return(100.0*threshold/MaxIntensity); } static double TriangleThreshold(const double *histogram) { double a, b, c, count, distance, inverse_ratio, max_distance, segment, x1, x2, y1, y2; register ssize_t i; ssize_t end, max, start, threshold; /* Compute optimal threshold with triangle algorithm. */ start=0; /* find start bin, first bin not zero count */ for (i=0; i <= (ssize_t) MaxIntensity; i++) if (histogram[i] > 0.0) { start=i; break; } end=0; /* find end bin, last bin not zero count */ for (i=(ssize_t) MaxIntensity; i >= 0; i--) if (histogram[i] > 0.0) { end=i; break; } max=0; /* find max bin, bin with largest count */ count=0.0; for (i=0; i <= (ssize_t) MaxIntensity; i++) if (histogram[i] > count) { max=i; count=histogram[i]; } /* Compute threshold at split point. 
*/ x1=(double) max; y1=histogram[max]; x2=(double) end; if ((max-start) >= (end-max)) x2=(double) start; y2=0.0; a=y1-y2; b=x2-x1; c=(-1.0)*(a*x1+b*y1); inverse_ratio=1.0/sqrt(a*a+b*b+c*c); threshold=0; max_distance=0.0; if (x2 == (double) start) for (i=start; i < max; i++) { segment=inverse_ratio*(a*i+b*histogram[i]+c); distance=sqrt(segment*segment); if ((distance > max_distance) && (segment > 0.0)) { threshold=i; max_distance=distance; } } else for (i=end; i > max; i--) { segment=inverse_ratio*(a*i+b*histogram[i]+c); distance=sqrt(segment*segment); if ((distance > max_distance) && (segment < 0.0)) { threshold=i; max_distance=distance; } } return(100.0*threshold/MaxIntensity); } MagickExport MagickBooleanType AutoThresholdImage(Image *image, const AutoThresholdMethod method,ExceptionInfo *exception) { CacheView *image_view; char property[MagickPathExtent]; double gamma, *histogram, sum, threshold; MagickBooleanType status; register ssize_t i; ssize_t y; /* Form histogram. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*histogram)); if (histogram == (double *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=MagickTrue; (void) memset(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { double intensity = GetPixelIntensity(image,p); histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++; p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); /* Normalize histogram. */ sum=0.0; for (i=0; i <= (ssize_t) MaxIntensity; i++) sum+=histogram[i]; gamma=PerceptibleReciprocal(sum); for (i=0; i <= (ssize_t) MaxIntensity; i++) histogram[i]=gamma*histogram[i]; /* Discover threshold from histogram. */ switch (method) { case KapurThresholdMethod: { threshold=KapurThreshold(image,histogram,exception); break; } case OTSUThresholdMethod: default: { threshold=OTSUThreshold(image,histogram,exception); break; } case TriangleThresholdMethod: { threshold=TriangleThreshold(histogram); break; } } histogram=(double *) RelinquishMagickMemory(histogram); if (threshold < 0.0) status=MagickFalse; if (status == MagickFalse) return(MagickFalse); /* Threshold image. */ (void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold); (void) SetImageProperty(image,"auto-threshold:threshold",property,exception); if (IsStringTrue(GetImageArtifact(image,"auto-threshold:verbose")) != MagickFalse) (void) FormatLocaleFile(stdout,"%.*g%%\n",GetMagickPrecision(),threshold); return(BilevelImage(image,QuantumRange*threshold/100.0,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B i l e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BilevelImage() changes the value of individual pixels based on the % intensity of each pixel channel. The result is a high-contrast image. 
% % More precisely each channel value of the image is 'thresholded' so that if % it is equal to or less than the given value it is set to zero, while any % value greater than that give is set to it maximum or QuantumRange. % % This function is what is used to implement the "-threshold" operator for % the command line API. % % If the default channel setting is given the image is thresholded using just % the gray 'intensity' of the image, rather than the individual channels. % % The format of the BilevelImage method is: % % MagickBooleanType BilevelImage(Image *image,const double threshold, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: define the threshold values. % % o exception: return any errors or warnings in this structure. % % Aside: You can get the same results as operator using LevelImages() % with the 'threshold' value for both the black_point and the white_point. % */ MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold, ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) == MagickFalse) (void) SetImageColorspace(image,sRGBColorspace,exception); /* Bilevel threshold image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; register ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; q[i]=(Quantum) (pixel <= threshold ? 0 : QuantumRange); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B l a c k T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BlackThresholdImage() is like ThresholdImage() but forces all pixels below % the threshold into black while leaving all pixels at or above the threshold % unchanged. 
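%
% For example, a call of the form (the percentage here is only illustrative)
%
%   (void) BlackThresholdImage(image,"45%",exception);
%
% forces all pixels below 45% of QuantumRange into black while leaving the
% remaining pixels unchanged.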
% % The format of the BlackThresholdImage method is: % % MagickBooleanType BlackThresholdImage(Image *image, % const char *threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: define the threshold value. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType BlackThresholdImage(Image *image, const char *thresholds,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; PixelInfo threshold; MagickStatusType flags; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (thresholds == (const char *) NULL) return(MagickTrue); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace,exception); GetPixelInfo(image,&threshold); flags=ParseGeometry(thresholds,&geometry_info); threshold.red=geometry_info.rho; threshold.green=geometry_info.rho; threshold.blue=geometry_info.rho; threshold.black=geometry_info.rho; threshold.alpha=100.0; if ((flags & SigmaValue) != 0) threshold.green=geometry_info.sigma; if ((flags & XiValue) != 0) threshold.blue=geometry_info.xi; if ((flags & PsiValue) != 0) threshold.alpha=geometry_info.psi; if (threshold.colorspace == CMYKColorspace) { if ((flags & PsiValue) != 0) threshold.black=geometry_info.psi; if ((flags & ChiValue) != 0) threshold.alpha=geometry_info.chi; } if ((flags & PercentValue) != 0) { threshold.red*=(MagickRealType) (QuantumRange/100.0); threshold.green*=(MagickRealType) (QuantumRange/100.0); threshold.blue*=(MagickRealType) (QuantumRange/100.0); threshold.black*=(MagickRealType) (QuantumRange/100.0); threshold.alpha*=(MagickRealType) (QuantumRange/100.0); } /* White threshold image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; register ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; if (pixel < GetPixelInfoChannel(&threshold,channel)) q[i]=(Quantum) 0; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l a m p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClampImage() set each pixel whose value is below zero to zero and any the % pixel whose value is above the quantum range to the quantum range (e.g. % 65535) otherwise the pixel value remains unchanged. % % The format of the ClampImage method is: % % MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception) { #define ClampImageTag "Clamp/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { register ssize_t i; register PixelInfo *magick_restrict q; q=image->colormap; for (i=0; i < (ssize_t) image->colors; i++) { q->red=(double) ClampPixel(q->red); q->green=(double) ClampPixel(q->green); q->blue=(double) ClampPixel(q->blue); q->alpha=(double) ClampPixel(q->alpha); q++; } return(SyncImage(image,exception)); } /* Clamp image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampPixel((MagickRealType) q[i]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ClampImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o l o r T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ColorThresholdImage() forces all pixels in the color range to white % otherwise black. % % The format of the ColorThresholdImage method is: % % MagickBooleanType ColorThresholdImage(Image *image, % const PixelInfo *start_color,const PixelInfo *stop_color, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o start_color, stop_color: define the start and stop color range. Any % pixel within the range returns white otherwise black. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ColorThresholdImage(Image *image, const PixelInfo *start_color,const PixelInfo *stop_color, ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo start, stop; ssize_t y; /* Color threshold image. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=AcquireImageColormap(image,2,exception); if (status == MagickFalse) return(status); start=(*start_color); stop=(*stop_color); switch (image->colorspace) { case HCLColorspace: { ConvertRGBToHCL(start_color->red,start_color->green,start_color->blue, &start.red,&start.green,&start.blue); ConvertRGBToHCL(stop_color->red,stop_color->green,stop_color->blue, &stop.red,&stop.green,&stop.blue); break; } case HSBColorspace: { ConvertRGBToHSB(start_color->red,start_color->green,start_color->blue, &start.red,&start.green,&start.blue); ConvertRGBToHSB(stop_color->red,stop_color->green,stop_color->blue, &stop.red,&stop.green,&stop.blue); break; } case HSLColorspace: { ConvertRGBToHSL(start_color->red,start_color->green,start_color->blue, &start.red,&start.green,&start.blue); ConvertRGBToHSL(stop_color->red,stop_color->green,stop_color->blue, &stop.red,&stop.green,&stop.blue); break; } case HSVColorspace: { ConvertRGBToHSV(start_color->red,start_color->green,start_color->blue, &start.red,&start.green,&start.blue); ConvertRGBToHSV(stop_color->red,stop_color->green,stop_color->blue, &stop.red,&stop.green,&stop.blue); break; } case HWBColorspace: { ConvertRGBToHWB(start_color->red,start_color->green,start_color->blue, &start.red,&start.green,&start.blue); ConvertRGBToHWB(stop_color->red,stop_color->green,stop_color->blue, &stop.red,&stop.green,&stop.blue); break; } case LabColorspace: { ConvertRGBToLab(start_color->red,start_color->green,start_color->blue, &start.red,&start.green,&start.blue); ConvertRGBToLab(stop_color->red,stop_color->green,stop_color->blue, &stop.red,&stop.green,&stop.blue); break; } default: { start.red*=QuantumScale; start.green*=QuantumScale; start.blue*=QuantumScale; stop.red*=QuantumScale; stop.green*=QuantumScale; stop.blue*=QuantumScale; break; } } start.red*=QuantumRange; start.green*=QuantumRange; start.blue*=QuantumRange; stop.red*=QuantumRange; stop.green*=QuantumRange; stop.blue*=QuantumRange; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickBooleanType foreground = MagickTrue; register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if ((q[i] < GetPixelInfoChannel(&start,channel)) || (q[i] > GetPixelInfoChannel(&stop,channel))) foreground=MagickFalse; } SetPixelIndex(image,(Quantum) (foreground != MagickFalse ? 
1 : 0),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); image->colorspace=sRGBColorspace; return(SyncImage(image,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y T h r e s h o l d M a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyThresholdMap() de-allocate the given ThresholdMap % % The format of the ListThresholdMaps method is: % % ThresholdMap *DestroyThresholdMap(Threshold *map) % % A description of each parameter follows. % % o map: Pointer to the Threshold map to destroy % */ MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map) { assert(map != (ThresholdMap *) NULL); if (map->map_id != (char *) NULL) map->map_id=DestroyString(map->map_id); if (map->description != (char *) NULL) map->description=DestroyString(map->description); if (map->levels != (ssize_t *) NULL) map->levels=(ssize_t *) RelinquishMagickMemory(map->levels); map=(ThresholdMap *) RelinquishMagickMemory(map); return(map); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t T h r e s h o l d M a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetThresholdMap() loads and searches one or more threshold map files for the % map matching the given name or alias. % % The format of the GetThresholdMap method is: % % ThresholdMap *GetThresholdMap(const char *map_id, % ExceptionInfo *exception) % % A description of each parameter follows. % % o map_id: ID of the map to look for. % % o exception: return any errors or warnings in this structure. % */ MagickExport ThresholdMap *GetThresholdMap(const char *map_id, ExceptionInfo *exception) { ThresholdMap *map; map=GetThresholdMapFile(BuiltinMap,"built-in",map_id,exception); if (map != (ThresholdMap *) NULL) return(map); #if !MAGICKCORE_ZERO_CONFIGURATION_SUPPORT { const StringInfo *option; LinkedListInfo *options; options=GetConfigureOptions(ThresholdsFilename,exception); option=(const StringInfo *) GetNextValueInLinkedList(options); while (option != (const StringInfo *) NULL) { map=GetThresholdMapFile((const char *) GetStringInfoDatum(option), GetStringInfoPath(option),map_id,exception); if (map != (ThresholdMap *) NULL) break; option=(const StringInfo *) GetNextValueInLinkedList(options); } options=DestroyConfigureOptions(options); } #endif return(map); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t T h r e s h o l d M a p F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetThresholdMapFile() look for a given threshold map name or alias in the % given XML file data, and return the allocated the map when found. % % The format of the ListThresholdMaps method is: % % ThresholdMap *GetThresholdMap(const char *xml,const char *filename, % const char *map_id,ExceptionInfo *exception) % % A description of each parameter follows. % % o xml: The threshold map list in XML format. % % o filename: The threshold map XML filename. 
% % o map_id: ID of the map to look for in XML list. % % o exception: return any errors or warnings in this structure. % */ static ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename, const char *map_id,ExceptionInfo *exception) { char *p; const char *attribute, *content; double value; register ssize_t i; ThresholdMap *map; XMLTreeInfo *description, *levels, *threshold, *thresholds; (void) LogMagickEvent(ConfigureEvent,GetMagickModule(), "Loading threshold map file \"%s\" ...",filename); map=(ThresholdMap *) NULL; thresholds=NewXMLTree(xml,exception); if (thresholds == (XMLTreeInfo *) NULL) return(map); for (threshold=GetXMLTreeChild(thresholds,"threshold"); threshold != (XMLTreeInfo *) NULL; threshold=GetNextXMLTreeTag(threshold)) { attribute=GetXMLTreeAttribute(threshold,"map"); if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0)) break; attribute=GetXMLTreeAttribute(threshold,"alias"); if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0)) break; } if (threshold == (XMLTreeInfo *) NULL) { thresholds=DestroyXMLTree(thresholds); return(map); } description=GetXMLTreeChild(threshold,"description"); if (description == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement", "<description>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); return(map); } levels=GetXMLTreeChild(threshold,"levels"); if (levels == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement", "<levels>, map \"%s\"", map_id); thresholds=DestroyXMLTree(thresholds); return(map); } map=(ThresholdMap *) AcquireCriticalMemory(sizeof(*map)); map->map_id=(char *) NULL; map->description=(char *) NULL; map->levels=(ssize_t *) NULL; attribute=GetXMLTreeAttribute(threshold,"map"); if (attribute != (char *) NULL) map->map_id=ConstantString(attribute); content=GetXMLTreeContent(description); if (content != (char *) NULL) map->description=ConstantString(content); attribute=GetXMLTreeAttribute(levels,"width"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->width=StringToUnsignedLong(attribute); if (map->width == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute", "<levels width>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } attribute=GetXMLTreeAttribute(levels,"height"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<levels height>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->height=StringToUnsignedLong(attribute); if (map->height == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute", "<levels height>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } attribute=GetXMLTreeAttribute(levels,"divisor"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<levels divisor>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->divisor=(ssize_t) StringToLong(attribute); if 
(map->divisor < 2) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute", "<levels divisor>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } content=GetXMLTreeContent(levels); if (content == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingContent", "<levels>, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height* sizeof(*map->levels)); if (map->levels == (ssize_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap"); for (i=0; i < (ssize_t) (map->width*map->height); i++) { map->levels[i]=(ssize_t) strtol(content,&p,10); if (p == content) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent", "<level> too few values, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } if ((map->levels[i] < 0) || (map->levels[i] > map->divisor)) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"", (double) map->levels[i],map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } content=p; } value=(double) strtol(content,&p,10); (void) value; if (p != content) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent", "<level> too many values, map \"%s\"",map_id); thresholds=DestroyXMLTree(thresholds); map=DestroyThresholdMap(map); return(map); } thresholds=DestroyXMLTree(thresholds); return(map); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + L i s t T h r e s h o l d M a p F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ListThresholdMapFile() lists the threshold maps and their descriptions % in the given XML file data. % % The format of the ListThresholdMaps method is: % % MagickBooleanType ListThresholdMaps(FILE *file,const char*xml, % const char *filename,ExceptionInfo *exception) % % A description of each parameter follows. % % o file: An pointer to the output FILE. % % o xml: The threshold map list in XML format. % % o filename: The threshold map XML filename. % % o exception: return any errors or warnings in this structure. 
% */ MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml, const char *filename,ExceptionInfo *exception) { const char *alias, *content, *map; XMLTreeInfo *description, *threshold, *thresholds; assert( xml != (char *) NULL ); assert( file != (FILE *) NULL ); (void) LogMagickEvent(ConfigureEvent,GetMagickModule(), "Loading threshold map file \"%s\" ...",filename); thresholds=NewXMLTree(xml,exception); if ( thresholds == (XMLTreeInfo *) NULL ) return(MagickFalse); (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description"); (void) FormatLocaleFile(file, "----------------------------------------------------\n"); threshold=GetXMLTreeChild(thresholds,"threshold"); for ( ; threshold != (XMLTreeInfo *) NULL; threshold=GetNextXMLTreeTag(threshold)) { map=GetXMLTreeAttribute(threshold,"map"); if (map == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute", "<map>"); thresholds=DestroyXMLTree(thresholds); return(MagickFalse); } alias=GetXMLTreeAttribute(threshold,"alias"); description=GetXMLTreeChild(threshold,"description"); if (description == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement", "<description>, map \"%s\"",map); thresholds=DestroyXMLTree(thresholds); return(MagickFalse); } content=GetXMLTreeContent(description); if (content == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingContent", "<description>, map \"%s\"", map); thresholds=DestroyXMLTree(thresholds); return(MagickFalse); } (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ? alias : "", content); } thresholds=DestroyXMLTree(thresholds); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i s t T h r e s h o l d M a p s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ListThresholdMaps() lists the threshold maps and their descriptions % as defined by "threshold.xml" to a file. % % The format of the ListThresholdMaps method is: % % MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception) % % A description of each parameter follows. % % o file: An pointer to the output FILE. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ListThresholdMaps(FILE *file, ExceptionInfo *exception) { const StringInfo *option; LinkedListInfo *options; MagickStatusType status; status=MagickTrue; if (file == (FILE *) NULL) file=stdout; options=GetConfigureOptions(ThresholdsFilename,exception); (void) FormatLocaleFile(file, "\n Threshold Maps for Ordered Dither Operations\n"); option=(const StringInfo *) GetNextValueInLinkedList(options); while (option != (const StringInfo *) NULL) { (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option)); status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option), GetStringInfoPath(option),exception); option=(const StringInfo *) GetNextValueInLinkedList(options); } options=DestroyConfigureOptions(options); return(status != 0 ? 
MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O r d e r e d D i t h e r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OrderedDitherImage() will perform a ordered dither based on a number % of pre-defined dithering threshold maps, but over multiple intensity % levels, which can be different for different channels, according to the % input argument. % % The format of the OrderedDitherImage method is: % % MagickBooleanType OrderedDitherImage(Image *image, % const char *threshold_map,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold_map: A string containing the name of the threshold dither % map to use, followed by zero or more numbers representing the number % of color levels to dither between. % % Any level number less than 2 will be equivalent to 2, and means only % binary dithering will be applied to each color channel. % % No numbers also means a 2 level (bitmap) dither will be applied to all % channels, while a single number is the number of levels applied to each % channel in sequence. More numbers will be applied in turn to each of % the color channels. % % For example: "o3x3,6" will generate a 6 level posterization of the % image with an ordered 3x3 diffused pixel dither being applied between % each level. While checker,8,8,4 will produce a 332 colormaped image % with only a single checkerboard hash pattern (50% grey) between each % color level, to basically double the number of color levels with % a bare minimim of dithering. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType OrderedDitherImage(Image *image, const char *threshold_map,ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CacheView *image_view; char token[MagickPathExtent]; const char *p; double levels[CompositePixelChannel]; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; ThresholdMap *map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (threshold_map == (const char *) NULL) return(MagickTrue); p=(char *) threshold_map; while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) && (*p != '\0')) p++; threshold_map=p; while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) && (*p != '\0')) { if ((p-threshold_map) >= (MagickPathExtent-1)) break; token[p-threshold_map]=(*p); p++; } token[p-threshold_map]='\0'; map=GetThresholdMap(token,exception); if (map == (ThresholdMap *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","ordered-dither",threshold_map); return(MagickFalse); } for (i=0; i < MaxPixelChannels; i++) levels[i]=2.0; p=strchr((char *) threshold_map,','); if ((p != (char *) NULL) && (isdigit((int) ((unsigned char) *(++p))) != 0)) { (void) GetNextToken(p,&p,MagickPathExtent,token); for (i=0; (i < MaxPixelChannels); i++) levels[i]=StringToDouble(token,(char **) NULL); for (i=0; (*p != '\0') && (i < MaxPixelChannels); i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); levels[i]=StringToDouble(token,(char **) NULL); } } for (i=0; i 
< MaxPixelChannels; i++) if (fabs(levels[i]) >= 1) levels[i]-=1.0; if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; ssize_t n; n=0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { ssize_t level, threshold; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (fabs(levels[n]) < MagickEpsilon) { n++; continue; } threshold=(ssize_t) (QuantumScale*q[i]*(levels[n]*(map->divisor-1)+1)); level=threshold/(map->divisor-1); threshold-=level*(map->divisor-1); q[i]=ClampToQuantum((double) (level+(threshold >= map->levels[(x % map->width)+map->width*(y % map->height)]))* QuantumRange/levels[n]); n++; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,DitherImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); map=DestroyThresholdMap(map); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P e r c e p t i b l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PerceptibleImage() set each pixel whose value is less than |epsilon| to % epsilon or -epsilon (whichever is closer) otherwise the pixel value remains % unchanged. % % The format of the PerceptibleImage method is: % % MagickBooleanType PerceptibleImage(Image *image,const double epsilon, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o epsilon: the epsilon threshold (e.g. 1.0e-9). % % o exception: return any errors or warnings in this structure. % */ static inline Quantum PerceptibleThreshold(const Quantum quantum, const double epsilon) { double sign; sign=(double) quantum < 0.0 ? 
-1.0 : 1.0; if ((sign*quantum) >= epsilon) return(quantum); return((Quantum) (sign*epsilon)); } MagickExport MagickBooleanType PerceptibleImage(Image *image, const double epsilon,ExceptionInfo *exception) { #define PerceptibleImageTag "Perceptible/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { register ssize_t i; register PixelInfo *magick_restrict q; q=image->colormap; for (i=0; i < (ssize_t) image->colors; i++) { q->red=(double) PerceptibleThreshold(ClampToQuantum(q->red), epsilon); q->green=(double) PerceptibleThreshold(ClampToQuantum(q->green), epsilon); q->blue=(double) PerceptibleThreshold(ClampToQuantum(q->blue), epsilon); q->alpha=(double) PerceptibleThreshold(ClampToQuantum(q->alpha), epsilon); q++; } return(SyncImage(image,exception)); } /* Perceptible image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=PerceptibleThreshold(q[i],epsilon); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,PerceptibleImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R a n d o m T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RandomThresholdImage() changes the value of individual pixels based on the % intensity of each pixel compared to a random threshold. The result is a % low-contrast, two color image. % % The format of the RandomThresholdImage method is: % % MagickBooleanType RandomThresholdImage(Image *image, % const char *thresholds,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o low,high: Specify the high and low thresholds. These values range from % 0 to QuantumRange. % % o exception: return any errors or warnings in this structure. 
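%
% For example, a call of the form (the bounds here are only illustrative)
%
%   (void) RandomThresholdImage(image,0.2*QuantumRange,0.8*QuantumRange,
%     exception);
%
% forces values below the low threshold to black and values above the high
% threshold to white, while values in between are thresholded against a
% pseudo-random value.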
% */ MagickExport MagickBooleanType RandomThresholdImage(Image *image, const double min_threshold, const double max_threshold,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo threshold; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); GetPixelInfo(image,&threshold); /* Random threshold image. */ status=MagickTrue; progress=0; random_info=AcquireRandomInfoThreadSet(); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double threshold; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if ((double) q[i] < min_threshold) threshold=min_threshold; else if ((double) q[i] > max_threshold) threshold=max_threshold; else threshold=(double) (QuantumRange* GetPseudoRandomValue(random_info[id])); q[i]=(double) q[i] <= threshold ? 0 : QuantumRange; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R a n g e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RangeThresholdImage() applies soft and hard thresholding. % % The format of the RangeThresholdImage method is: % % MagickBooleanType RangeThresholdImage(Image *image, % const double low_black,const double low_white,const double high_white, % const double high_black,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o low_black: Define the minimum black threshold value. % % o low_white: Define the minimum white threshold value. % % o high_white: Define the maximum white threshold value. % % o high_black: Define the maximum black threshold value. % % o exception: return any errors or warnings in this structure. 
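%
% For example, a call of the form (the bounds here are only illustrative)
%
%   (void) RangeThresholdImage(image,0.2*QuantumRange,0.4*QuantumRange,
%     0.6*QuantumRange,0.8*QuantumRange,exception);
%
% maps values below low_black to black, ramps values between low_black and
% low_white up to white, keeps values between low_white and high_white at
% white, ramps values between high_white and high_black back down to black,
% and maps values above high_black to black.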
% */ MagickExport MagickBooleanType RangeThresholdImage(Image *image, const double low_black,const double low_white,const double high_white, const double high_black,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace,exception); /* Range threshold image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; register ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; if (pixel < low_black) q[i]=(Quantum) 0; else if ((pixel >= low_black) && (pixel < low_white)) q[i]=ClampToQuantum(QuantumRange* PerceptibleReciprocal(low_white-low_black)*(pixel-low_black)); else if ((pixel >= low_white) && (pixel <= high_white)) q[i]=QuantumRange; else if ((pixel > high_white) && (pixel <= high_black)) q[i]=ClampToQuantum(QuantumRange*PerceptibleReciprocal( high_black-high_white)*(high_black-pixel)); else if (pixel > high_black) q[i]=(Quantum) 0; else q[i]=(Quantum) 0; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W h i t e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WhiteThresholdImage() is like ThresholdImage() but forces all pixels above % the threshold into white while leaving all pixels at or below the threshold % unchanged. % % The format of the WhiteThresholdImage method is: % % MagickBooleanType WhiteThresholdImage(Image *image, % const char *threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: Define the threshold value. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType WhiteThresholdImage(Image *image, const char *thresholds,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; PixelInfo threshold; MagickStatusType flags; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (thresholds == (const char *) NULL) return(MagickTrue); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace,exception); GetPixelInfo(image,&threshold); flags=ParseGeometry(thresholds,&geometry_info); threshold.red=geometry_info.rho; threshold.green=geometry_info.rho; threshold.blue=geometry_info.rho; threshold.black=geometry_info.rho; threshold.alpha=100.0; if ((flags & SigmaValue) != 0) threshold.green=geometry_info.sigma; if ((flags & XiValue) != 0) threshold.blue=geometry_info.xi; if ((flags & PsiValue) != 0) threshold.alpha=geometry_info.psi; if (threshold.colorspace == CMYKColorspace) { if ((flags & PsiValue) != 0) threshold.black=geometry_info.psi; if ((flags & ChiValue) != 0) threshold.alpha=geometry_info.chi; } if ((flags & PercentValue) != 0) { threshold.red*=(MagickRealType) (QuantumRange/100.0); threshold.green*=(MagickRealType) (QuantumRange/100.0); threshold.blue*=(MagickRealType) (QuantumRange/100.0); threshold.black*=(MagickRealType) (QuantumRange/100.0); threshold.alpha*=(MagickRealType) (QuantumRange/100.0); } /* White threshold image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; register ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; if (pixel > GetPixelInfoChannel(&threshold,channel)) q[i]=QuantumRange; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); }
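/*
  Usage sketch (an illustration, not part of the MagickCore sources): one way
  the thresholding entry points above might be driven from a small program.
  The header path, file names, and numeric thresholds are assumptions chosen
  for the example; only the function signatures documented above are relied
  upon.
*/
#include <stdio.h>
#include <MagickCore/MagickCore.h>

int main(int argc,char **argv)
{
  ExceptionInfo
    *exception;

  Image
    *image;

  ImageInfo
    *image_info;

  if (argc != 3)
    {
      (void) fprintf(stderr,"usage: %s input output\n",argv[0]);
      return(1);
    }
  MagickCoreGenesis(*argv,MagickFalse);
  exception=AcquireExceptionInfo();
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,argv[1],MagickPathExtent);
  image=ReadImage(image_info,exception);
  if (image == (Image *) NULL)
    {
      CatchException(exception);
      return(1);
    }
  /*
    Automatic global threshold (OTSU by default); alternatives would be a
    fixed 50% split with BilevelImage(image,0.5*QuantumRange,exception) or a
    local threshold with AdaptiveThresholdImage(image,11,11,0.0,exception).
  */
  (void) AutoThresholdImage(image,OTSUThresholdMethod,exception);
  (void) CopyMagickString(image->filename,argv[2],MagickPathExtent);
  (void) WriteImage(image_info,image,exception);
  image=DestroyImage(image);
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}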
omp_simd_simdlen.c
// Various examples of using simd directives
void foo (int n, double *a, double* b)
{
  for (int i=0; i<n; i++)
    a[i]=b[i];
}

void foo2 (int n, double *a, double* b)
{
  for (int i=0; i<n; i++)
    a[i]=b[i];
}

void foo3 (int n, double *a, double* b)
{
  int j=0;
#pragma omp simd simdlen(16)
  for (int i=0; i<n; i++,j++) {
    a[i]=b[i]+j;
  }
}

void foo32 (int n, double *a, double* b)
{
  int j=0, k=0;
  for (int i=0; i<n; i++,j++,k++) {
    a[i]=b[i]+j+k;
  }
}

void foo33 (int n, double *a, double* b)
{
  int j=0, k=0;
  for (int i=0; i<n; i++,j++,k++) {
    a[i]=b[i]+j+k;
  }
}

void fooAligned (int n, double *a, double* b)
{
  int j=0, k=0;
  for (int i=0; i<n; i++,j++,k++) {
    a[i]=b[i]+j+k;
  }
}

void fooAligned2 (int n, double *a, double* b)
{
  int j=0, k=0;
  for (int i=0; i<n; i++,j++,k++) {
    a[i]=b[i]+j+k;
  }
}

double work( double *a, double *b, int n )
{
  int i;
  double tmp, sum;
  sum = 0.0;
  for (i = 0; i < n; i++) {
    tmp = a[i] + b[i];
    sum += tmp;
  }
  return sum;
}

#define N 45
int a[N], b[N], c[N];

void foo4(int i, double* P)
{
  int j;
  for (i = 0; i < 999; ++i) {
    j = P[i];
  }
}

void work2( double **a, double **b, double **c, int n )
{
  int i, j;
  double tmp;
  for (i = 0; i < n; i++) {
    for (j = 0; j < n; j++) {
      tmp = a[i][j] + b[i][j];
      c[i][j] = tmp;
    }
  }
}

void work3( double **a, double **b, double **c, int n )
{
  int i, j;
  double tmp;
  for (i = 0; i < n; i++) {
    for (j = 0; j < n; j++) {
      tmp = a[i][j] + b[i][j];
      c[i][j] = tmp;
    }
  }
}

// declare simd can show up several times!
float bar(int * p)
{
  *p = *p +10;
  return *p;
}

// declare simd can show up several times!
float bar2(int * p)
{
  *p = *p +10;
  return *p;
}
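// A minimal sketch (assumed, not part of the original test file): the simdlen
// clause can also be combined with a reduction, here applied to a loop shaped
// like work() above. The width of 8 is an arbitrary illustrative hint; the
// compiler is free to choose a narrower vector length.
double work_simd( double *a, double *b, int n )
{
  double sum = 0.0;
#pragma omp simd simdlen(8) reduction(+:sum)
  for (int i = 0; i < n; i++) {
    sum += a[i] + b[i];
  }
  return sum;
}

// Likewise, the declare simd directive referred to by the comments above would
// normally precede the function it applies to, for example:
#pragma omp declare simd simdlen(4)
float bar_simd(int * p)
{
  *p = *p + 10;
  return *p;
}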
AliveDetector.h
// // Created by 庾金科 on 11/02/2018. // #ifndef ACTIVEDETECTION_ALIVEDETECTOR_H #define ACTIVEDETECTION_ALIVEDETECTOR_H #include "opencv2/dnn.hpp" #include "opencv2/opencv.hpp" using namespace cv; using namespace std; const float pnet_stride = 2; const float pnet_cell_size = 12; const int pnet_max_detect_num = 5000; //mean & std const float mean_val = 127.5f; const float std_val = 0.0078125f; //minibatch size const int step_size = 128; typedef struct FaceBox { float xmin; float ymin; float xmax; float ymax; float score; } FaceBox; typedef struct FaceInfo { float bbox_reg[4]; float landmark_reg[10]; float landmark[10]; FaceBox bbox; } FaceInfo; typedef struct Line{ float A; float B; float C; } Line; class MTCNN { public: MTCNN(const string& proto_model_dir); vector<FaceInfo> Detect_mtcnn(const cv::Mat& img, const int min_size, const float* threshold, const float factor, const int stage); //protected: vector<FaceInfo> ProposalNet(const cv::Mat& img, int min_size, float threshold, float factor); vector<FaceInfo> NextStage(const cv::Mat& image, vector<FaceInfo> &pre_stage_res, int input_w, int input_h, int stage_num, const float threshold); void BBoxRegression(vector<FaceInfo>& bboxes); void BBoxPadSquare(vector<FaceInfo>& bboxes, int width, int height); void BBoxPad(vector<FaceInfo>& bboxes, int width, int height); void GenerateBBox(Mat* confidence, Mat* reg_box, float scale, float thresh); std::vector<FaceInfo> NMS(std::vector<FaceInfo>& bboxes, float thresh, char methodType); float IoU(float xmin, float ymin, float xmax, float ymax, float xmin_, float ymin_, float xmax_, float ymax_, bool is_iom = false); // std::shared_ptr<dnn::Net> PNet_; // std::shared_ptr<dnn::Net> ONet_; // std::shared_ptr<dnn::Net> RNet_; public: dnn::Net PNet_; dnn::Net RNet_; dnn::Net ONet_; std::vector<FaceInfo> candidate_boxes_; std::vector<FaceInfo> total_boxes_; }; MTCNN::MTCNN(const string& proto_model_dir) { PNet_ = cv::dnn::readNetFromCaffe(proto_model_dir + "/det1.prototxt", proto_model_dir + "/det1_half.caffemodel"); RNet_ = cv::dnn::readNetFromCaffe(proto_model_dir + "/det2.prototxt", proto_model_dir + "/det2_half.caffemodel"); ONet_ = cv::dnn::readNetFromCaffe(proto_model_dir + "/det3-half.prototxt", proto_model_dir + "/det3-half.caffemodel"); } bool CompareBBox(const FaceInfo & a, const FaceInfo & b) { return a.bbox.score > b.bbox.score; } float MTCNN::IoU(float xmin, float ymin, float xmax, float ymax, float xmin_, float ymin_, float xmax_, float ymax_, bool is_iom) { float iw = std::min(xmax, xmax_) - std::max(xmin, xmin_) + 1; float ih = std::min(ymax, ymax_) - std::max(ymin, ymin_) + 1; if (iw <= 0 || ih <= 0) return 0; float s = iw*ih; if (is_iom) { float ov = s / min((xmax - xmin + 1)*(ymax - ymin + 1), (xmax_ - xmin_ + 1)*(ymax_ - ymin_ + 1)); return ov; } else { float ov = s / ((xmax - xmin + 1)*(ymax - ymin + 1) + (xmax_ - xmin_ + 1)*(ymax_ - ymin_ + 1) - s); return ov; } } void MTCNN::BBoxRegression(vector<FaceInfo>& bboxes) { //#pragma omp parallel for num_threads(threads_num) for (int i = 0; i < bboxes.size(); ++i) { FaceBox &bbox = bboxes[i].bbox; float *bbox_reg = bboxes[i].bbox_reg; float w = bbox.xmax - bbox.xmin + 1; float h = bbox.ymax - bbox.ymin + 1; bbox.xmin += bbox_reg[0] * w; bbox.ymin += bbox_reg[1] * h; bbox.xmax += bbox_reg[2] * w; bbox.ymax += bbox_reg[3] * h; } } void MTCNN::BBoxPad(vector<FaceInfo>& bboxes, int width, int height) { //#pragma omp parallel for num_threads(threads_num) for (int i = 0; i < bboxes.size(); ++i) { FaceBox &bbox = bboxes[i].bbox; bbox.xmin = 
round(max(bbox.xmin, 0.f)); bbox.ymin = round(max(bbox.ymin, 0.f)); bbox.xmax = round(min(bbox.xmax, width - 1.f)); bbox.ymax = round(min(bbox.ymax, height - 1.f)); } } void MTCNN::BBoxPadSquare(vector<FaceInfo>& bboxes, int width, int height) { //#pragma omp parallel for num_threads(threads_num) for (int i = 0; i < bboxes.size(); ++i) { FaceBox &bbox = bboxes[i].bbox; float w = bbox.xmax - bbox.xmin + 1; float h = bbox.ymax - bbox.ymin + 1; float side = h>w ? h : w; bbox.xmin = round(max(bbox.xmin + (w - side)*0.5f, 0.f)); bbox.ymin = round(max(bbox.ymin + (h - side)*0.5f, 0.f)); bbox.xmax = round(min(bbox.xmin + side - 1, width - 1.f)); bbox.ymax = round(min(bbox.ymin + side - 1, height - 1.f)); } } void MTCNN::GenerateBBox(Mat* confidence, Mat* reg_box, float scale, float thresh) { int feature_map_w_ = confidence->size[3]; int feature_map_h_ = confidence->size[2]; int spatical_size = feature_map_w_*feature_map_h_; // const float* confidence_data = (float*)(confidence->data + spatical_size); const float* confidence_data = (float*)(confidence->data); confidence_data += spatical_size; // std::cout<<confidence_data[0]<<std::endl; const float* reg_data = (float*)(reg_box->data); candidate_boxes_.clear(); for (int i = 0; i<spatical_size; i++) { if (confidence_data[i] >= thresh) { int y = i / feature_map_w_; int x = i - feature_map_w_ * y; FaceInfo faceInfo; FaceBox &faceBox = faceInfo.bbox; faceBox.xmin = (float)(x * pnet_stride) / scale; faceBox.ymin = (float)(y * pnet_stride) / scale; faceBox.xmax = (float)(x * pnet_stride + pnet_cell_size - 1.f) / scale; faceBox.ymax = (float)(y * pnet_stride + pnet_cell_size - 1.f) / scale; faceInfo.bbox_reg[0] = reg_data[i]; faceInfo.bbox_reg[1] = reg_data[i + spatical_size]; faceInfo.bbox_reg[2] = reg_data[i + 2 * spatical_size]; faceInfo.bbox_reg[3] = reg_data[i + 3 * spatical_size]; faceBox.score = confidence_data[i]; candidate_boxes_.push_back(faceInfo); } } } std::vector<FaceInfo> MTCNN::NMS(std::vector<FaceInfo>& bboxes, float thresh, char methodType) { std::vector<FaceInfo> bboxes_nms; if (bboxes.size() == 0) { return bboxes_nms; } std::sort(bboxes.begin(), bboxes.end(), CompareBBox); int32_t select_idx = 0; int32_t num_bbox = static_cast<int32_t>(bboxes.size()); std::vector<int32_t> mask_merged(num_bbox, 0); bool all_merged = false; while (!all_merged) { while (select_idx < num_bbox && mask_merged[select_idx] == 1) select_idx++; if (select_idx == num_bbox) { all_merged = true; continue; } bboxes_nms.push_back(bboxes[select_idx]); mask_merged[select_idx] = 1; FaceBox select_bbox = bboxes[select_idx].bbox; float area1 = static_cast<float>((select_bbox.xmax - select_bbox.xmin + 1) * (select_bbox.ymax - select_bbox.ymin + 1)); float x1 = static_cast<float>(select_bbox.xmin); float y1 = static_cast<float>(select_bbox.ymin); float x2 = static_cast<float>(select_bbox.xmax); float y2 = static_cast<float>(select_bbox.ymax); select_idx++; //#pragma omp parallel for num_threads(threads_num) for (int32_t i = select_idx; i < num_bbox; i++) { if (mask_merged[i] == 1) continue; FaceBox & bbox_i = bboxes[i].bbox; float x = std::max<float>(x1, static_cast<float>(bbox_i.xmin)); float y = std::max<float>(y1, static_cast<float>(bbox_i.ymin)); float w = std::min<float>(x2, static_cast<float>(bbox_i.xmax)) - x + 1; float h = std::min<float>(y2, static_cast<float>(bbox_i.ymax)) - y + 1; if (w <= 0 || h <= 0) continue; float area2 = static_cast<float>((bbox_i.xmax - bbox_i.xmin + 1) * (bbox_i.ymax - bbox_i.ymin + 1)); float area_intersect = w * h; switch (methodType) { 
case 'u': if (static_cast<float>(area_intersect) / (area1 + area2 - area_intersect) > thresh) mask_merged[i] = 1; break; case 'm': if (static_cast<float>(area_intersect) / std::min(area1, area2) > thresh) mask_merged[i] = 1; break; default: break; } } } return bboxes_nms; } vector<FaceInfo> MTCNN::NextStage(const cv::Mat& image, vector<FaceInfo> &pre_stage_res, int input_w, int input_h, int stage_num, const float threshold) { vector<FaceInfo> res; int batch_size = (int)pre_stage_res.size(); if (batch_size == 0) return res; Mat* input_layer = nullptr; Mat* confidence = nullptr; Mat* reg_box = nullptr; Mat* reg_landmark = nullptr; std::vector< Mat > targets_blobs; switch (stage_num) { case 2: { // input_layer = RNet_->input_blobs()[0]; // input_layer->Reshape(batch_size, 3, input_h, input_w); // RNet_->Reshape(); }break; case 3: { // input_layer = ONet_->input_blobs()[0]; // input_layer->Reshape(batch_size, 3, input_h, input_w); // ONet_->Reshape(); }break; default: return res; break; } // float * input_data = input_layer->mutable_cpu_data(); int spatial_size = input_h*input_w; //#pragma omp parallel for num_threads(threads_num) std::vector<cv::Mat> inputs; for (int n = 0; n < batch_size; ++n) { FaceBox &box = pre_stage_res[n].bbox; Mat roi = image(Rect(Point((int)box.xmin, (int)box.ymin), Point((int)box.xmax, (int)box.ymax))).clone(); resize(roi, roi, Size(input_w, input_h)); inputs.push_back(roi); //resize好的face roi 里面 } // // cv::Mat inputBlob = cv::dnn::blobFromImage(resized, std_val,cv::Size(),mean_val); // cv::imshow("image",inputs[0]); // cv::waitKey(0); Mat blob_input = dnn::blobFromImages(inputs, std_val,cv::Size(),cv::Scalar(mean_val,mean_val,mean_val),false); // PNet_.setInput(inputBlob, "data"); // const std::vector< String > targets_node{"conv4-2","prob1"}; // std::vector< Mat > targets_blobs; // PNet_.forward(targets_blobs,targets_node); switch (stage_num) { case 2: { RNet_.setInput(blob_input, "data"); const std::vector< String > targets_node{"conv5-2","prob1"}; RNet_.forward(targets_blobs,targets_node); confidence = &targets_blobs[1]; reg_box = &targets_blobs[0]; float* confidence_data = (float*)confidence->data; }break; case 3: { ONet_.setInput(blob_input, "data"); const std::vector< String > targets_node{"conv6-2","conv6-3","prob1"}; ONet_.forward(targets_blobs,targets_node); reg_box = &targets_blobs[0]; reg_landmark = &targets_blobs[1]; confidence = &targets_blobs[2]; }break; } const float* confidence_data = (float*)confidence->data; // std::cout<<"confidence_data[0] "<<confidence_data[0]<<std::endl; const float* reg_data = (float*)reg_box->data; const float* landmark_data = nullptr; if (reg_landmark) { landmark_data = (float*)reg_landmark->data; } for (int k = 0; k < batch_size; ++k) { if (confidence_data[2 * k + 1] >= threshold) { FaceInfo info; info.bbox.score = confidence_data[2 * k + 1]; info.bbox.xmin = pre_stage_res[k].bbox.xmin; info.bbox.ymin = pre_stage_res[k].bbox.ymin; info.bbox.xmax = pre_stage_res[k].bbox.xmax; info.bbox.ymax = pre_stage_res[k].bbox.ymax; for (int i = 0; i < 4; ++i) { info.bbox_reg[i] = reg_data[4 * k + i]; } if (reg_landmark) { float w = info.bbox.xmax - info.bbox.xmin + 1.f; float h = info.bbox.ymax - info.bbox.ymin + 1.f; for (int i = 0; i < 5; ++i){ info.landmark[2 * i] = landmark_data[10 * k + 2 * i] * w + info.bbox.xmin; info.landmark[2 * i + 1] = landmark_data[10 * k + 2 * i + 1] * h + info.bbox.ymin; } } res.push_back(info); } } return res; } vector<FaceInfo> MTCNN::ProposalNet(const cv::Mat& img, int minSize, float threshold, float 
factor) { cv::Mat resized; int width = img.cols; int height = img.rows; float scale = 12.f / minSize; float minWH = std::min(height, width) *scale; std::vector<float> scales; while (minWH >= 12) { scales.push_back(scale); minWH *= factor; scale *= factor; } // Mat* input_layer = PNet_->input_blobs()[0]; total_boxes_.clear(); for (int i = 0; i < scales.size(); i++) { int ws = (int)std::ceil(width*scales[i]); int hs = (int)std::ceil(height*scales[i]); cv::resize(img, resized, cv::Size(ws, hs), 0, 0, cv::INTER_LINEAR); // // input_layer->Reshape(1, 3, hs, ws); // PNet_->Reshape(); // // float * input_data = input_layer->mutable_cpu_data(); // cv::Vec3b * img_data = (cv::Vec3b *)resized.data; // int spatial_size = ws* hs; // for (int k = 0; k < spatial_size; ++k) { // input_data[k] = float((img_data[k][0] - mean_val)* std_val); // input_data[k + spatial_size] = float((img_data[k][1] - mean_val) * std_val); // input_data[k + 2 * spatial_size] = float((img_data[k][2] - mean_val) * std_val); // } cv::Mat inputBlob = cv::dnn::blobFromImage(resized, std_val,cv::Size(),cv::Scalar(mean_val,mean_val,mean_val),false); float* c = (float*)inputBlob.data; PNet_.setInput(inputBlob, "data"); const std::vector< cv::String > targets_node{"conv4-2","prob1"}; std::vector< cv::Mat > targets_blobs; PNet_.forward(targets_blobs,targets_node); cv::Mat prob = targets_blobs[1]; cv::Mat reg = targets_blobs[0]; // std::cout<<prob.size<<std::endl; // int w = prob.size[3]; // int h = prob.size[2]; // // float *confidence = (float*)pnet.data; // std::cout<<"confidence"<<std::endl; // std::cout<<confidence[w*h+1]<<std::endl; // std::cout<<confidence[w*h+0]<<std::endl; // // std::cout<<"targets_blobs[1].data[0]:"<<((float*)targets_blobs[1].data)[299]<<std::endl; // // //// cv::Mat* reg = &targets_blobs[0]; GenerateBBox(&prob, &reg, scales[i], threshold); // std::vector<FaceInfo> bboxes_nms = NMS(candidate_boxes_, 0.5, 'u'); if (bboxes_nms.size()>0) { total_boxes_.insert(total_boxes_.end(), bboxes_nms.begin(), bboxes_nms.end()); } } int num_box = (int)total_boxes_.size(); // std::cout<<num_box<<std::endl; vector<FaceInfo> res_boxes; if (num_box != 0) { res_boxes = NMS(total_boxes_, 0.7f, 'u'); BBoxRegression(res_boxes); BBoxPadSquare(res_boxes, width, height); } return res_boxes; } vector<FaceInfo> MTCNN::Detect_mtcnn(const cv::Mat& image, const int minSize, const float* threshold, const float factor, const int stage) { vector<FaceInfo> pnet_res; vector<FaceInfo> rnet_res; vector<FaceInfo> onet_res; if (stage >= 1){ pnet_res = ProposalNet(image, minSize, threshold[0], factor); } if (stage >= 2 && pnet_res.size()>0){ if (pnet_max_detect_num < (int)pnet_res.size()){ pnet_res.resize(pnet_max_detect_num); } int num = (int)pnet_res.size(); int size = (int)ceil(1.f*num / step_size); for (int iter = 0; iter < size; ++iter){ int start = iter*step_size; int end = min(start + step_size, num); vector<FaceInfo> input(pnet_res.begin() + start, pnet_res.begin() + end); vector<FaceInfo> res = NextStage(image, input, 24, 24, 2, threshold[1]); rnet_res.insert(rnet_res.end(), res.begin(), res.end()); } rnet_res = NMS(rnet_res, 0.4f, 'm'); BBoxRegression(rnet_res); BBoxPadSquare(rnet_res, image.cols, image.rows); } if (stage >= 3 && rnet_res.size()>0){ int num = (int)rnet_res.size(); int size = (int)ceil(1.f*num / step_size); for (int iter = 0; iter < size; ++iter){ int start = iter*step_size; int end = min(start + step_size, num); vector<FaceInfo> input(rnet_res.begin() + start, rnet_res.begin() + end); vector<FaceInfo> res = NextStage(image, 
input, 48, 48, 3, threshold[2]); onet_res.insert(onet_res.end(), res.begin(), res.end()); } BBoxRegression(onet_res); onet_res = NMS(onet_res, 0.7f, 'm'); BBoxPad(onet_res, image.cols, image.rows); } if (stage == 1){ return pnet_res; } else if (stage == 2){ return rnet_res; } else if (stage == 3){ return onet_res; } else{ return onet_res; } } cv::Point getMidPoint(cv::Point p1,cv::Point p2){ return cv::Point((p1.x+p2.x)/2,(p1.y+p2.y)/2); } Line computeLine(cv::Point p1,cv::Point p2) { float A = p2.y - p1.y; float B = p1.x- p2.x; float C = p2.x*p1.y - p1.x*p2.y; Line line; line.A = A; line.B = B; line.C = C; return line; } float computeLineDistance(Line line,cv::Point p) { float A = line.A; float B = line.B; float C = line.C; float MOD = sqrt(A*A+B*B) ; return (p.x*A + p.y*B +C)/MOD; } #define CYCLE_ACTIVE 16 class ActiveDetector_Shake{ public: float frames[CYCLE_ACTIVE]; int idx; ActiveDetector_Shake(){ idx = 0 ; } void moveForward(float *frames) { for(int i = 1 ; i <CYCLE_ACTIVE;i++) { frames[i-1]=frames[i]; } } void addFrame(float frame) { if(idx>=CYCLE_ACTIVE) { moveForward(frames); frames[CYCLE_ACTIVE - 1] = frame; } else { frames[idx] = frame; } idx +=1; } bool getState() { if (idx < CYCLE_ACTIVE) return false; int sum = 0; bool flag = 0; for (int i = 0; i < CYCLE_ACTIVE; i++) { if (frames[i] > 11 || frames[i] < -11) { flag = 1; } if (frames[i] > 8) sum++; else if (frames[i] < -8) sum--; } if (abs(sum - 0) < 6 && flag == 1) return true; else return false; } }; class ActiveDetector_updown{ public: float frames[CYCLE_ACTIVE]; int idx; ActiveDetector_updown(){ idx = 0 ; } void moveForward(float *frames) { for(int i = 1 ; i <CYCLE_ACTIVE;i++) { frames[i-1]=frames[i]; } } void addFrame(float frame) { if(idx>=CYCLE_ACTIVE) { moveForward(frames); frames[CYCLE_ACTIVE - 1] = frame; } else { frames[idx] = frame; } idx +=1; } bool getState(bool up) { if (idx < CYCLE_ACTIVE) return false; int sum = 0; bool flag = 0; for (int i = 0; i < CYCLE_ACTIVE; i++) { if (frames[i] > 15 || frames[i] < -15) { flag = 1; } if(up) { if (frames[i] > 15) sum++; } else if(frames[i]<-15 ) sum++; } if (sum>7&&flag==1) return true; else return false; } }; class AliveDetector { public: ActiveDetector_Shake *activeDetector_shake; ActiveDetector_updown *activeDetector_updown; MTCNN *detector; const float factor = 0.709f; const float threshold[3] = {0.7f, 0.6f, 0.6f}; const int minSize = 150; AliveDetector(std::string folder_mtcnn) { activeDetector_shake = new ActiveDetector_Shake(); activeDetector_updown = new ActiveDetector_updown(); detector = new MTCNN(folder_mtcnn); } // State of the face // unsure -1 // normal 0 // shake 1 // up 2 // down 3 ~AliveDetector() { delete activeDetector_updown; delete activeDetector_shake; delete detector; } int detect(cv::Mat frame) { vector<FaceInfo> faceInfo = detector->Detect_mtcnn(frame, minSize, threshold, factor, 3); if (faceInfo.size() == 1) { for (int i = 0; i < faceInfo.size(); i++) { int x = (int) faceInfo[i].bbox.xmin; int y = (int) faceInfo[i].bbox.ymin; int w = (int) (faceInfo[i].bbox.xmax - faceInfo[i].bbox.xmin + 1); int h = (int) (faceInfo[i].bbox.ymax - faceInfo[i].bbox.ymin + 1); cv::rectangle(frame, cv::Rect(x, y, w, h), cv::Scalar(255, 0, 0), 2); } for (int i = 0; i < faceInfo.size(); i++) { float *landmark = faceInfo[i].landmark; cv::Point p1((int) landmark[2 * 0], (int) landmark[2 * 0 + 1]); cv::Point p2((int) landmark[2 * 1], (int) landmark[2 * 1 + 1]); cv::Point p3((int) landmark[2 * 2], (int) landmark[2 * 2 + 1]); cv::Point p4((int) landmark[2 * 3], (int) 
landmark[2 * 3 + 1]); cv::Point p5((int) landmark[2 * 4], (int) landmark[2 * 4 + 1]); cv::Point mid1 = getMidPoint(p1, p2); cv::Point mid2 = getMidPoint(p4, p5); cv::Point v_mid1 = getMidPoint(p1, p4); cv::Point v_mid2 = getMidPoint(p2, p5); Line line = computeLine(mid1, mid2); Line line1 = computeLine(v_mid1, v_mid2); activeDetector_shake->addFrame(computeLineDistance(line, p3)); activeDetector_updown->addFrame(computeLineDistance(line1, p3)); cv::line(frame, mid1, mid2, cv::Scalar(255, 255, 0), 1); cv::line(frame, v_mid1, v_mid2, cv::Scalar(0, 255, 255), 1); for (int j = 0; j < 5; j++) { cv::circle(frame, cv::Point((int) landmark[2 * j], (int) landmark[2 * j + 1]), 1, cv::Scalar(255, 50 * j, 50 * j), 2); } // std::cout<<"std::cout"<<computeLineDistance(line1,p3)<<std::endl; if (activeDetector_updown->getState(true)) return 2; // std::cout<<"state:"<<"head up"<<std::endl; if (activeDetector_updown->getState(false)) return 3; // std::cout<<"state:"<<"head down"<<std::endl; if (activeDetector_shake->getState()) return 1; // std::cout<<"state:"<<"head shake"<<std::endl; return 0; } } return -1; } }; #endif //ACTIVEDETECTION_ALIVEDETECTOR_H
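A hedged usage sketch for the AliveDetector class above, driving it from a webcam loop. The model directory "./models/mtcnn" is an assumption; it would need to contain the det1/det2/det3 Caffe files the MTCNN constructor loads.

// Hypothetical driver for AliveDetector; not part of the header above.
#include "AliveDetector.h"
#include <string>

int main()
{
    AliveDetector detector("./models/mtcnn");   // assumed model folder
    cv::VideoCapture cap(0);                    // default camera
    if (!cap.isOpened())
        return -1;
    cv::Mat frame;
    while (cap.read(frame))
    {
        // Return codes documented above: -1 unsure, 0 normal, 1 shake, 2 up, 3 down.
        int state = detector.detect(frame);
        cv::putText(frame, std::to_string(state), cv::Point(20, 40),
                    cv::FONT_HERSHEY_SIMPLEX, 1.0, cv::Scalar(0, 255, 0), 2);
        cv::imshow("alive", frame);
        if (cv::waitKey(1) == 27)               // Esc to quit
            break;
    }
    return 0;
}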
GB_binop__ge_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ge_int16) // A.*B function (eWiseMult): GB (_AemultB_08__ge_int16) // A.*B function (eWiseMult): GB (_AemultB_02__ge_int16) // A.*B function (eWiseMult): GB (_AemultB_04__ge_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_int16) // A*D function (colscale): GB (_AxD__ge_int16) // D*A function (rowscale): GB (_DxB__ge_int16) // C+=B function (dense accum): GB (_Cdense_accumB__ge_int16) // C+=b function (dense accum): GB (_Cdense_accumb__ge_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_int16) // C=scalar+B GB (_bind1st__ge_int16) // C=scalar+B' GB (_bind1st_tran__ge_int16) // C=A+scalar GB (_bind2nd__ge_int16) // C=A'+scalar GB (_bind2nd_tran__ge_int16) // C type: bool // A type: int16_t // A pattern? 0 // B type: int16_t // B pattern? 0 // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GE || GxB_NO_INT16 || GxB_NO_GE_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__ge_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ge_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ge_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__ge_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__ge_int16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__ge_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int16_t alpha_scalar ; int16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int16_t *) alpha_scalar_in)) ; 
beta_scalar = (*((int16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__ge_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__ge_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__ge_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__ge_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__ge_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__ge_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__ge_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // 
GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__ge_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_unop__signum_fc64_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__signum_fc64_fc64 // op(A') function: GB_unop_tran__signum_fc64_fc64 // C type: GxB_FC64_t // A type: GxB_FC64_t // cast: GxB_FC64_t cij = aij // unaryop: cij = GB_csignum (aij) #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_csignum (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = aij ; \ Cx [pC] = GB_csignum (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_SIGNUM || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__signum_fc64_fc64 ( GxB_FC64_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = GB_csignum (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = GB_csignum (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__signum_fc64_fc64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
Parallelizer.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_PARALLELIZER_H #define EIGEN_PARALLELIZER_H #if EIGEN_HAS_CXX11_ATOMIC #include <atomic> #endif #include "../InternalHeaderCheck.h" namespace Eigen { namespace internal { /** \internal */ inline void manage_multi_threading(Action action, int* v) { static int m_maxThreads = -1; EIGEN_UNUSED_VARIABLE(m_maxThreads) if(action==SetAction) { eigen_internal_assert(v!=0); m_maxThreads = *v; } else if(action==GetAction) { eigen_internal_assert(v!=0); #ifdef EIGEN_HAS_OPENMP if(m_maxThreads>0) *v = m_maxThreads; else *v = omp_get_max_threads(); #else *v = 1; #endif } else { eigen_internal_assert(false); } } } /** Must be call first when calling Eigen from multiple threads */ inline void initParallel() { int nbt; internal::manage_multi_threading(GetAction, &nbt); std::ptrdiff_t l1, l2, l3; internal::manage_caching_sizes(GetAction, &l1, &l2, &l3); } /** \returns the max number of threads reserved for Eigen * \sa setNbThreads */ inline int nbThreads() { int ret; internal::manage_multi_threading(GetAction, &ret); return ret; } /** Sets the max number of threads reserved for Eigen * \sa nbThreads */ inline void setNbThreads(int v) { internal::manage_multi_threading(SetAction, &v); } namespace internal { template<typename Index> struct GemmParallelInfo { GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {} // volatile is not enough on all architectures (see bug 1572) // to guarantee that when thread A says to thread B that it is // done with packing a block, then all writes have been really // carried out... C++11 memory model+atomic guarantees this. #if EIGEN_HAS_CXX11_ATOMIC std::atomic<Index> sync; std::atomic<int> users; #else Index volatile sync; int volatile users; #endif Index lhs_start; Index lhs_length; }; template<bool Condition, typename Functor, typename Index> void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose) { // TODO when EIGEN_USE_BLAS is defined, // we should still enable OMP for other scalar types // Without C++11, we have to disable GEMM's parallelization on // non x86 architectures because there volatile is not enough for our purpose. // See bug 1572. #if (! defined(EIGEN_HAS_OPENMP)) || defined(EIGEN_USE_BLAS) || ((!EIGEN_HAS_CXX11_ATOMIC) && !(EIGEN_ARCH_i386_OR_x86_64)) // FIXME the transpose variable is only needed to properly split // the matrix product when multithreading is enabled. This is a temporary // fix to support row-major destination matrices. This whole // parallelizer mechanism has to be redesigned anyway. EIGEN_UNUSED_VARIABLE(depth); EIGEN_UNUSED_VARIABLE(transpose); func(0,rows, 0,cols); #else // Dynamically check whether we should enable or disable OpenMP. // The conditions are: // - the max number of threads we can create is greater than 1 // - we are not already in a parallel code // - the sizes are large enough // compute the maximal number of threads from the size of the product: // This first heuristic takes into account that the product kernel is fully optimized when working with nr columns at once. Index size = transpose ? 
rows : cols; Index pb_max_threads = std::max<Index>(1,size / Functor::Traits::nr); // compute the maximal number of threads from the total amount of work: double work = static_cast<double>(rows) * static_cast<double>(cols) * static_cast<double>(depth); double kMinTaskSize = 50000; // FIXME improve this heuristic. pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, static_cast<Index>( work / kMinTaskSize ) )); // compute the number of threads we are going to use Index threads = std::min<Index>(nbThreads(), pb_max_threads); // if multi-threading is explicitly disabled, not useful, or if we already are in a parallel session, // then abort multi-threading // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp? if((!Condition) || (threads==1) || (omp_get_num_threads()>1)) return func(0,rows, 0,cols); Eigen::initParallel(); func.initParallelSession(threads); if(transpose) std::swap(rows,cols); ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0); #pragma omp parallel num_threads(threads) { Index i = omp_get_thread_num(); // Note that the actual number of threads might be lower than the number of request ones. Index actual_threads = omp_get_num_threads(); Index blockCols = (cols / actual_threads) & ~Index(0x3); Index blockRows = (rows / actual_threads); blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr; Index r0 = i*blockRows; Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows; Index c0 = i*blockCols; Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols; info[i].lhs_start = r0; info[i].lhs_length = actualBlockRows; if(transpose) func(c0, actualBlockCols, 0, rows, info); else func(0, rows, c0, actualBlockCols, info); } #endif } } // end namespace internal } // end namespace Eigen #endif // EIGEN_PARALLELIZER_H
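For reference, a hedged sketch of the user-facing side of this machinery: initParallel, setNbThreads and nbThreads defined above, used around a dense product that dispatches to parallelize_gemm when Eigen is built with OpenMP. The matrix sizes here are arbitrary.

// Usage sketch; requires Eigen compiled with OpenMP enabled for the product to run threaded.
#include <Eigen/Dense>
#include <iostream>

int main()
{
    Eigen::initParallel();        // per the comment above, call before using Eigen from threads
    Eigen::setNbThreads(4);       // cap the number of threads Eigen may use for GEMM
    std::cout << "Eigen will use up to " << Eigen::nbThreads() << " threads\n";

    Eigen::MatrixXd A = Eigen::MatrixXd::Random(1024, 1024);
    Eigen::MatrixXd B = Eigen::MatrixXd::Random(1024, 1024);
    Eigen::MatrixXd C = A * B;    // large enough that parallelize_gemm may split the work
    std::cout << C(0, 0) << "\n";
    return 0;
}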
quick_sort.h
// MIT License // // Copyright (c) 2020 xiw // \author wang xi // Parallel quick sort using OpenMP. #ifndef QUICK_SORT_H_ #define QUICK_SORT_H_ #include <vector> #include <memory> #include <omp.h> using std::vector; namespace para { // when array size is smaller than MIN_SIZE, no more tasks will be created const int MIN_SIZE = 100000000; template<typename T> void Partition(T* arr, size_t size, size_t* partition_pos) { if (size <= 0) { return; } // maybe better to randomly choose a pivot T pivot_value = arr[0]; size_t left = 0, right = size; while (left < right) { --right; while (left < right && arr[right] >= pivot_value) { --right; } if (left >= right) { break; } // swap arr[left] = arr[right]; ++left; while (left < right && arr[left] <= pivot_value) { ++left; } if (left >= right) { break; } // swap arr[right] = arr[left]; } arr[left] = pivot_value; *partition_pos = left; } template<typename T> void ParallelSort(T* arr, size_t size) { if (size <= 1) { return; } size_t partition_pos = 0; Partition(arr, size, &partition_pos); if (size > MIN_SIZE) { #pragma omp taskgroup { #pragma omp task mergeable untied if (partition_pos > 1){ ParallelSort(arr, partition_pos); } #pragma omp task mergeable untied if (partition_pos + 2 < size) { ParallelSort(arr + partition_pos + 1, size - partition_pos - 1); } } } else { #pragma omp task mergeable untied { if (partition_pos > 1){ ParallelSort(arr, partition_pos); } if (partition_pos + 2 < size) { ParallelSort(arr + partition_pos + 1, size - partition_pos - 1); } } } } // \brief Sort the data using parallel version of quicksort. // Parallel is realized through OpenMP. For parallelism, we // use omp task clause. // // For the 1st version, we only support data type that can use // < to compare. And the sort is in place. // // \param arr the arr to be sorted // \return void template<typename T> void ParallelQuickSort(T* arr, size_t size) { #pragma omp parallel #pragma omp single { ParallelSort(arr, size); } } } // namespace para #endif
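A hedged usage sketch for the header above; the data size and values are arbitrary, and the final loop only checks the expected postcondition.

// Usage sketch for para::ParallelQuickSort; compile with OpenMP enabled.
#include "quick_sort.h"
#include <cstdlib>
#include <cstdio>

int main()
{
    const size_t n = 1 << 20;
    std::vector<int> data(n);
    for (size_t i = 0; i < n; ++i)
        data[i] = std::rand();

    // The single construct inside ParallelQuickSort starts the recursion;
    // the enclosing parallel region supplies the threads that execute the tasks.
    para::ParallelQuickSort(data.data(), data.size());

    for (size_t i = 1; i < n; ++i)
        if (data[i - 1] > data[i]) { std::puts("not sorted"); return 1; }
    std::puts("sorted");
    return 0;
}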
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 32; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; 
i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
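The time-stepping loop above runs serially between the scop markers (the file only records omp_get_max_threads). As a sketch under that reading, here is one way the spatial loops could be parallelized with OpenMP while keeping the time dependence serial; it mirrors the loop in main() but is not part of the original benchmark.

/* Sketch: spatial loops of the 7-point stencil parallelized; the time loop stays
 * serial because step t+1 reads step t. Names mirror main(); not original code. */
#include <omp.h>

static void stencil_3d7pt_omp(double ****A, int Nt, int Nz, int Ny, int Nx,
                              double alpha, double beta)
{
  for (int t = 0; t < Nt - 1; t++) {
    #pragma omp parallel for collapse(2) schedule(static)
    for (int i = 1; i < Nz - 1; i++) {
      for (int j = 1; j < Ny - 1; j++) {
        for (int k = 1; k < Nx - 1; k++) {
          A[(t + 1) % 2][i][j][k] = alpha * A[t % 2][i][j][k]
            + beta * (A[t % 2][i - 1][j][k] + A[t % 2][i][j - 1][k]
                    + A[t % 2][i][j][k - 1] + A[t % 2][i + 1][j][k]
                    + A[t % 2][i][j + 1][k] + A[t % 2][i][j][k + 1]);
        }
      }
    }
  }
}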
life.c
/* DESCRIPTION: Conway's Game of Life project for UoA dit, September 2020. * * AUTHOR: Andrew Pappas, 1115201500201, UoA, dit. * DATE: September 2020. */ /*********************** * Libraries to import * ***********************/ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <unistd.h> #ifdef MPI #include <mpi.h> #endif #ifdef OPENMP #include <omp.h> #endif #define ALIVE 1 #define DEAD 0 /******************************************** * Need at least this many rows and columns * ********************************************/ const int MINIMUM_ROWS = 1; const int MINIMUM_COLUMNS = 1; const int MINIMUM_TIME_STEPS = 1; /***************************************************** * Add an "s" to the end of a value's name if needed * *****************************************************/ void pluralize_value_if_needed(int value) { if(value != 1) fprintf(stderr, "s"); return; } /******************************************************************************* * Make sure a value is >= another value, print error and return -1 if it isn't ******************************************************************************/ int assert_minimum_value(char which_value[16], int actual_value, int expected_value) { int retval; if(actual_value < expected_value) { fprintf(stderr, "ERROR: %d %s", actual_value, which_value); pluralize_value_if_needed(actual_value); fprintf(stderr, "; need at least %d %s", expected_value, which_value); pluralize_value_if_needed(expected_value); fprintf(stderr, "\n"); retval = -1; } else retval = 0; return retval; } /****************************************************************************** * Print a function name and exit if the specified boolean expression is true * ******************************************************************************/ void exit_if(int boolean_expression, char function_name[32], int OUR_RANK) { if(boolean_expression) { #ifdef MPI fprintf(stderr, "Rank %d ", OUR_RANK); #endif #ifdef OPENMP fprintf(stderr, "Thread %d ", omp_get_thread_num()); #endif fprintf(stderr, "ERROR in %s\n", function_name); exit(-1); } return; } /**************** * Main program * ****************/ int main(int argc, char **argv) { int NUMBER_OF_ROWS = 5, NUMBER_OF_COLUMNS = 5, NUMBER_OF_TIME_STEPS = 5, OUR_NUMBER_OF_ROWS = 5, OUR_RANK = 0, NUMBER_OF_PROCESSES = 1, our_current_row, my_current_column, my_neighbor_row, my_neighbor_column, my_number_of_alive_neighbors, c, return_value, next_lowest_rank, next_highest_rank; int **our_current_grid, **our_next_grid; int current_time_step; double startTime, endTime, localTotalElapsed, globalTotalElapsed; #ifdef SHOW_RESULTS int current_rank; #endif /* Initialize the distributed memory environment */ #ifdef MPI exit_if((MPI_Init(&argc, &argv) != MPI_SUCCESS), "MPI_Init", OUR_RANK); exit_if((MPI_Comm_rank(MPI_COMM_WORLD, &OUR_RANK) != MPI_SUCCESS), "MPI_Comm_rank", OUR_RANK); exit_if((MPI_Comm_size(MPI_COMM_WORLD, &NUMBER_OF_PROCESSES) != MPI_SUCCESS), "MPI_Comm_size", OUR_RANK); #endif /* Parse command line arguments */ while((c = getopt(argc, argv, "r:c:t:")) != -1) { switch(c) { case 'r': NUMBER_OF_ROWS = atoi(optarg); break; case 'c': NUMBER_OF_COLUMNS = atoi(optarg); break; case 't': NUMBER_OF_TIME_STEPS = atoi(optarg); break; case '?': default: #ifdef MPI fprintf(stderr, "Usage: mpirun -np NUMBER_OF_PROCESSES %s [-r NUMBER_OF_ROWS] [-c NUMBER_OF_COLUMNS] [-t NUMBER_OF_TIME_STEPS]\n", argv[0]); #else fprintf(stderr, "Usage: %s [-r NUMBER_OF_ROWS] [-c NUMBER_OF_COLUMNS] [-t NUMBER_OF_TIME_STEPS]\n", argv[0]); #endif exit(-1); } 
} argc -= optind; argv += optind; /* Make sure we have enough rows, columns, and time steps */ return_value = assert_minimum_value("row", NUMBER_OF_ROWS, MINIMUM_ROWS); return_value += assert_minimum_value("column", NUMBER_OF_COLUMNS, MINIMUM_COLUMNS); return_value += assert_minimum_value("time step", NUMBER_OF_TIME_STEPS, MINIMUM_TIME_STEPS); /* Exit if we don't */ if(return_value != 0) exit(-1); /* Determine our number of rows */ OUR_NUMBER_OF_ROWS = NUMBER_OF_ROWS / NUMBER_OF_PROCESSES; if(OUR_RANK == NUMBER_OF_PROCESSES - 1) { OUR_NUMBER_OF_ROWS += NUMBER_OF_ROWS % NUMBER_OF_PROCESSES; } /* Allocate enough space in our current grid and next grid for the * number of rows and the number of columns, plus the ghost rows * and columns */ exit_if(((our_current_grid = (int**)malloc((OUR_NUMBER_OF_ROWS + 2) * (NUMBER_OF_COLUMNS + 2) * sizeof(int))) == NULL), "malloc(our_current_grid)", OUR_RANK); exit_if(((our_next_grid = (int**)malloc((OUR_NUMBER_OF_ROWS + 2) * (NUMBER_OF_COLUMNS + 2) * sizeof(int))) == NULL), "malloc(our_next_grid)", OUR_RANK); for(our_current_row = 0; our_current_row <= OUR_NUMBER_OF_ROWS + 1; our_current_row++) { exit_if(((our_current_grid[our_current_row] = (int*)malloc((NUMBER_OF_COLUMNS + 2) * sizeof(int))) == NULL), "malloc(our_current_grid[some_row])", OUR_RANK); exit_if(((our_next_grid[our_current_row] = (int*)malloc((NUMBER_OF_COLUMNS + 2) * sizeof(int))) == NULL), "malloc(our_next_grid[some_row])", OUR_RANK); } /* Initialize the grid (each cell gets a random state) */ for(our_current_row = 1; our_current_row <= OUR_NUMBER_OF_ROWS; our_current_row++) { #pragma omp parallel for private(my_current_column) for(my_current_column = 1; my_current_column <= NUMBER_OF_COLUMNS; my_current_column++) { our_current_grid[our_current_row][my_current_column] = random() % (ALIVE + 1); } } /* Determine the process with the next-lowest rank */ if(OUR_RANK == 0) next_lowest_rank = NUMBER_OF_PROCESSES - 1; else next_lowest_rank = OUR_RANK - 1; /* Determine the process with the next-highest rank */ if(OUR_RANK == NUMBER_OF_PROCESSES - 1) next_highest_rank = 0; else next_highest_rank = OUR_RANK + 1; /* ========== MAIN SIMULATION LOOP ============= */ #ifdef MPI MPI_Barrier(MPI_COMM_WORLD); startTime = MPI_Wtime(); MPI_Pcontrol(1); #endif /* Run the simulation for the specified number of time steps */ for(current_time_step = 0; current_time_step <= NUMBER_OF_TIME_STEPS - 1; current_time_step++) { /* Set up the ghost rows */ #ifdef MPI /* Send our second-from-the-top row to the process with the * next-lowest rank */ exit_if((MPI_Send(our_current_grid[1], NUMBER_OF_COLUMNS + 2, MPI_INT, next_lowest_rank, 0, MPI_COMM_WORLD) != MPI_SUCCESS), "MPI_Send(top row)", OUR_RANK); /* Send our second-from-the-bottom row to the process * with the next-highest rank */ exit_if((MPI_Send(our_current_grid[OUR_NUMBER_OF_ROWS], NUMBER_OF_COLUMNS + 2, MPI_INT, next_highest_rank, 0, MPI_COMM_WORLD) != MPI_SUCCESS), "MPI_Send(bottom row)", OUR_RANK); /* Receive our bottom row from the process with the * next-highest rank */ exit_if((MPI_Recv(our_current_grid[OUR_NUMBER_OF_ROWS + 1], NUMBER_OF_COLUMNS + 2, MPI_INT, next_highest_rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE) != MPI_SUCCESS), "MPI_Recv(bottom row)", OUR_RANK); /* Receive our top row from the process with the * next-lowest rank */ exit_if((MPI_Recv(our_current_grid[0], NUMBER_OF_COLUMNS + 2, MPI_INT, next_lowest_rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE) != MPI_SUCCESS), "MPI_Recv(top row)", OUR_RANK); #else #pragma omp parallel 
private(my_current_column) for(my_current_column = 0; my_current_column <= NUMBER_OF_COLUMNS + 1; my_current_column++) { /* Set our top row to be the same as our second-to-last * row */ our_current_grid[0][my_current_column] = our_current_grid[OUR_NUMBER_OF_ROWS][my_current_column]; /* Set our bottom row to be the same as our * second-to-top row */ our_current_grid[OUR_NUMBER_OF_ROWS + 1][my_current_column] = our_current_grid[1][my_current_column]; } #endif /* Set up the ghost columns */ for(our_current_row = 0; our_current_row <= OUR_NUMBER_OF_ROWS + 1; our_current_row++) { /* The left ghost column is the same as the farthest-right, * non-ghost column */ our_current_grid[our_current_row][0] = our_current_grid[our_current_row][NUMBER_OF_COLUMNS]; /* The right ghost column is the same as the farthest-left, * non-ghost column */ our_current_grid[our_current_row][NUMBER_OF_COLUMNS + 1] = our_current_grid[our_current_row][1]; } /* Display our current grid */ #ifdef SHOW_RESULTS for(current_rank = 0; current_rank <= NUMBER_OF_PROCESSES - 1; current_rank++) { printf("\nTime Step %d, Rank %d:\n", current_time_step, OUR_RANK); printf("====================\n\n"); } for(our_current_row = 0; our_current_row <= OUR_NUMBER_OF_ROWS + 1; our_current_row++) { if(our_current_row == 1) { for(my_current_column = 0; my_current_column <= NUMBER_OF_COLUMNS + 1 + 2; my_current_column++) { printf("- "); } printf("\n"); } for(my_current_column = 0; my_current_column <= NUMBER_OF_COLUMNS + 1; my_current_column++) { if(my_current_column == 1) { printf("| "); } printf("%d ", our_current_grid[our_current_row] [my_current_column]); if(my_current_column == NUMBER_OF_COLUMNS) { printf("| "); } } printf("\n"); if(our_current_row == OUR_NUMBER_OF_ROWS) { for(my_current_column = 0; my_current_column <= NUMBER_OF_COLUMNS + 1 + 2; my_current_column++) { printf("- "); } printf("\n"); } } #endif /* Determine our next grid -- for each row, do the following: */ for(our_current_row = 1; our_current_row <= OUR_NUMBER_OF_ROWS; our_current_row++) { /* For each column, spawn threads to do the following: */ #pragma omp parallel for private(my_current_column, my_neighbor_row, my_neighbor_column, my_number_of_alive_neighbors) for(my_current_column = 1; my_current_column <= NUMBER_OF_COLUMNS; my_current_column++) { /* Initialize the count of ALIVE neighbors to 0 */ my_number_of_alive_neighbors = 0; /* For each row of the cell's neighbors, do the * following: */ for(my_neighbor_row = our_current_row - 1; my_neighbor_row <= our_current_row + 1; my_neighbor_row++) { /* For each column of the cell's neighbors, do * the following: */ for(my_neighbor_column = my_current_column - 1; my_neighbor_column <= my_current_column + 1; my_neighbor_column++) { /* If the neighbor is not the cell itself, * and the neighbor is ALIVE, do the following: */ if((my_neighbor_row != our_current_row || my_neighbor_column != my_current_column) && (our_current_grid[my_neighbor_row] [my_neighbor_column] == ALIVE)) { /* Add 1 to the count of the * number of ALIVE neighbors */ my_number_of_alive_neighbors++; } } } /* Apply Rule 1 of Conway's Game of Life */ if(my_number_of_alive_neighbors < 2) { our_next_grid[our_current_row][my_current_column] = DEAD; } /* Apply Rule 2 of Conway's Game of Life */ if(our_current_grid[our_current_row][my_current_column] == ALIVE && (my_number_of_alive_neighbors == 2 || my_number_of_alive_neighbors == 3)) { our_next_grid[our_current_row][my_current_column] = ALIVE; } /* Apply Rule 3 of Conway's Game of Life */ 
if(my_number_of_alive_neighbors > 3) { our_next_grid[our_current_row][my_current_column] = DEAD; } /* Apply Rule 4 of Conway's Game of Life */ if(our_current_grid[our_current_row][my_current_column] == DEAD && my_number_of_alive_neighbors == 3) { our_next_grid[our_current_row][my_current_column] = ALIVE; } } } /* Spawn threads to copy the next grid into the current grid */ for(our_current_row = 1; our_current_row <= OUR_NUMBER_OF_ROWS; our_current_row++) { #pragma omp parallel for private(my_current_column) for(my_current_column = 1; my_current_column <= NUMBER_OF_COLUMNS; my_current_column++) { our_current_grid[our_current_row][my_current_column] = our_next_grid[our_current_row][my_current_column]; } } } /* ============= END OF MAIN SIMULATION LOOP ============== */ #ifdef MPI endTime = MPI_Wtime(); MPI_Pcontrol(0); localTotalElapsed = endTime - startTime; //printf("Local time elapsed: %lf\n", localTotalElapsed); MPI_Reduce(&localTotalElapsed, &globalTotalElapsed, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); if(OUR_RANK == 0) { printf("Total time elapsed: %lf seconds.\n", globalTotalElapsed); } #endif /* Deallocate data structures */ for(our_current_row = OUR_NUMBER_OF_ROWS + 1; our_current_row >= 0; our_current_row--) { free(our_next_grid[our_current_row]); free(our_current_grid[our_current_row]); } free(our_next_grid); free(our_current_grid); /* Finalize the distributed memory environment */ #ifdef MPI exit_if((MPI_Finalize() != MPI_SUCCESS), "MPI_Finalize", OUR_RANK); #endif return 0; }
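/* Editor's sketch (not part of the original exercise code, assumes <mpi.h> is
 * included as in the program above): the ghost-row exchange above posts
 * MPI_Send on every rank before any MPI_Recv. Because MPI_Send may block once
 * messages exceed the implementation's eager limit, that ordering can deadlock
 * for wide grids. The same ring exchange can be written with MPI_Sendrecv,
 * which pairs each send with its matching receive. The helper name
 * exchange_ghost_rows and its parameters are made up for this illustration. */
#ifdef MPI
static void exchange_ghost_rows(int **grid, int our_number_of_rows,
        int number_of_columns, int next_lowest_rank, int next_highest_rank)
{
    /* Send our top interior row to the next-lowest rank while receiving
     * our bottom ghost row from the next-highest rank */
    MPI_Sendrecv(grid[1], number_of_columns + 2, MPI_INT,
        next_lowest_rank, 0,
        grid[our_number_of_rows + 1], number_of_columns + 2, MPI_INT,
        next_highest_rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

    /* Send our bottom interior row to the next-highest rank while receiving
     * our top ghost row from the next-lowest rank */
    MPI_Sendrecv(grid[our_number_of_rows], number_of_columns + 2, MPI_INT,
        next_highest_rank, 0,
        grid[0], number_of_columns + 2, MPI_INT,
        next_lowest_rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
#endif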
expressions.c
/* some simple expressions */
void foo()
{
  int i;

  #pragma omp task if (99)
  i++;

  #pragma omp task if (i)
  i++;

  #pragma omp task if (i==0)
  i++;

  #pragma omp task if (i+=0)
  i++;

  #pragma omp task if (i==0)
  i++;

  #pragma omp task if (i!=0)
  i++;

  #pragma omp task if (i<0)
  i++;

  #pragma omp task if (i>0)
  i++;
}
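/* Editor's note: each task construct above takes a scalar expression in its
 * if clause. When that expression evaluates to zero at run time, the task is
 * undeferred: the encountering thread suspends and executes the task body
 * immediately instead of queueing it for another thread. A minimal,
 * self-contained sketch of that behaviour (not part of the test file above): */
#include <stdio.h>
#include <omp.h>

int main(void)
{
    #pragma omp parallel num_threads(2)
    #pragma omp single
    {
        int n = 0;

        /* if(n) is false, so this task is undeferred and runs right away
         * on the thread that created it */
        #pragma omp task if(n)
        printf("undeferred task on thread %d\n", omp_get_thread_num());

        /* if(1) is true, so this task may be deferred and picked up by the
         * other thread in the team */
        #pragma omp task if(1)
        printf("deferred task on thread %d\n", omp_get_thread_num());

        #pragma omp taskwait
    }
    return 0;
}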
trmv_x_dia_n_lo_conj.c
#include "alphasparse/kernel.h" #include "alphasparse/opt.h" #include "alphasparse/util.h" #include <string.h> #ifdef _OPENMP #include <omp.h> #endif static alphasparse_status_t ONAME_omp(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA* A, const ALPHA_Number* x, const ALPHA_Number beta, ALPHA_Number* y) { #ifdef COMPLEX const ALPHA_INT m = A->rows; const ALPHA_INT n = A->cols; if(m != n) return ALPHA_SPARSE_STATUS_INVALID_VALUE; const ALPHA_INT thread_num = alpha_get_thread_num(); ALPHA_Number** tmp = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num); for(int i = 0; i < thread_num; ++i) { tmp[i] = malloc(sizeof(ALPHA_Number) * m); memset(tmp[i], 0, sizeof(ALPHA_Number) * m); } const ALPHA_INT diags = A->ndiag; #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for (ALPHA_INT i = 0; i < diags; ++i) { const ALPHA_INT threadId = alpha_get_thread_id(); const ALPHA_INT dis = A->distance[i]; if(dis == 0) { const ALPHA_INT start = i * A->lval; for(ALPHA_INT j = 0; j < m; ++j) { ALPHA_Number v; alpha_mul_3c(v, alpha, A->values[start + j]); alpha_madde(tmp[threadId][j], v, x[j]); } } else if(dis < 0) { const ALPHA_INT row_start = -dis; const ALPHA_INT col_start = 0; const ALPHA_INT nnz = m + dis; const ALPHA_INT start = i * A->lval; for(ALPHA_INT j = 0; j < nnz; ++j) { ALPHA_Number v; alpha_mul_3c(v, alpha, A->values[start + row_start + j]); alpha_madde(tmp[threadId][col_start + j], v, x[row_start + j]); } } } #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for(ALPHA_INT i = 0; i < m; ++i) { alpha_mul(y[i], beta, y[i]); for(ALPHA_INT j = 0; j < thread_num; ++j) { alpha_add(y[i], y[i], tmp[j][i]); } } #ifdef _OPENMP #pragma omp parallel for num_threads(thread_num) #endif for (ALPHA_INT i = 0; i < thread_num; ++i) { alpha_free(tmp[i]); } alpha_free(tmp); return ALPHA_SPARSE_STATUS_SUCCESS; #else return ALPHA_SPARSE_STATUS_INVALID_VALUE; #endif } alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA* A, const ALPHA_Number* x, const ALPHA_Number beta, ALPHA_Number* y) { #ifdef COMPLEX return ONAME_omp(alpha, A, x, beta, y); #else return ALPHA_SPARSE_STATUS_INVALID_VALUE; #endif }
psd.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP SSSSS DDDD % % P P SS D D % % PPPP SSS D D % % P SS D D % % P SSSSS DDDD % % % % % % Read/Write Adobe Photoshop Image Format % % % % Software Design % % Cristy % % Leonard Rosenthol % % July 1992 % % Dirk Lemstra % % December 2013 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/channel.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/registry.h" #include "MagickCore/quantum-private.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #ifdef MAGICKCORE_ZLIB_DELEGATE #include <zlib.h> #endif #include "psd-private.h" /* Define declaractions. */ #define MaxPSDChannels 56 #define PSDQuantum(x) (((ssize_t) (x)+1) & -2) /* Enumerated declaractions. */ typedef enum { Raw = 0, RLE = 1, ZipWithoutPrediction = 2, ZipWithPrediction = 3 } PSDCompressionType; typedef enum { BitmapMode = 0, GrayscaleMode = 1, IndexedMode = 2, RGBMode = 3, CMYKMode = 4, MultichannelMode = 7, DuotoneMode = 8, LabMode = 9 } PSDImageType; /* Typedef declaractions. */ typedef struct _ChannelInfo { short type; size_t size; } ChannelInfo; typedef struct _MaskInfo { Image *image; RectangleInfo page; unsigned char background, flags; } MaskInfo; typedef struct _LayerInfo { ChannelInfo channel_info[MaxPSDChannels]; char blendkey[4]; Image *image; MaskInfo mask; Quantum opacity; RectangleInfo page; size_t offset_x, offset_y; unsigned char clipping, flags, name[257], visible; unsigned short channels; StringInfo *info; } LayerInfo; /* Forward declarations. 
*/ static MagickBooleanType WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P S D % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPSD()() returns MagickTrue if the image format type, identified by the % magick string, is PSD. % % The format of the IsPSD method is: % % MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((const char *) magick,"8BPS",4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPSDImage() reads an Adobe Photoshop image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadPSDImage method is: % % Image *ReadPSDImage(image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static const char *CompositeOperatorToPSDBlendMode(Image *image) { switch (image->compose) { case ColorBurnCompositeOp: return(image->endian == LSBEndian ? "vidi" : "idiv"); case ColorDodgeCompositeOp: return(image->endian == LSBEndian ? " vid" : "div "); case ColorizeCompositeOp: return(image->endian == LSBEndian ? "rloc" : "colr"); case DarkenCompositeOp: return(image->endian == LSBEndian ? "krad" : "dark"); case DifferenceCompositeOp: return(image->endian == LSBEndian ? "ffid" : "diff"); case DissolveCompositeOp: return(image->endian == LSBEndian ? "ssid" : "diss"); case ExclusionCompositeOp: return(image->endian == LSBEndian ? "dums" : "smud"); case HardLightCompositeOp: return(image->endian == LSBEndian ? "tiLh" : "hLit"); case HardMixCompositeOp: return(image->endian == LSBEndian ? "xiMh" : "hMix"); case HueCompositeOp: return(image->endian == LSBEndian ? " euh" : "hue "); case LightenCompositeOp: return(image->endian == LSBEndian ? "etil" : "lite"); case LinearBurnCompositeOp: return(image->endian == LSBEndian ? "nrbl" : "lbrn"); case LinearDodgeCompositeOp: return(image->endian == LSBEndian ? "gddl" : "lddg"); case LinearLightCompositeOp: return(image->endian == LSBEndian ? "tiLl" : "lLit"); case LuminizeCompositeOp: return(image->endian == LSBEndian ? " mul" : "lum "); case MultiplyCompositeOp: return(image->endian == LSBEndian ? " lum" : "mul "); case OverlayCompositeOp: return(image->endian == LSBEndian ? "revo" : "over"); case PinLightCompositeOp: return(image->endian == LSBEndian ? "tiLp" : "pLit"); case SaturateCompositeOp: return(image->endian == LSBEndian ? " tas" : "sat "); case ScreenCompositeOp: return(image->endian == LSBEndian ? "nrcs" : "scrn"); case SoftLightCompositeOp: return(image->endian == LSBEndian ? "tiLs" : "sLit"); case VividLightCompositeOp: return(image->endian == LSBEndian ? "tiLv" : "vLit"); case OverCompositeOp: default: return(image->endian == LSBEndian ? 
"mron" : "norm"); } } /* For some reason Photoshop seems to blend semi-transparent pixels with white. This method reverts the blending. This can be disabled by setting the option 'psd:alpha-unblend' to off. */ static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info, Image *image,ExceptionInfo* exception) { const char *option; MagickBooleanType status; ssize_t y; if ((image->alpha_trait != BlendPixelTrait) || (image->colorspace != sRGBColorspace)) return(MagickTrue); option=GetImageOption(image_info,"psd:alpha-unblend"); if (IsStringFalse(option) != MagickFalse) return(MagickTrue); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma; register ssize_t i; gamma=QuantumScale*GetPixelAlpha(image, q); if (gamma != 0.0 && gamma != 1.0) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); if (channel != AlphaPixelChannel) q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma); } } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static inline CompressionType ConvertPSDCompression( PSDCompressionType compression) { switch (compression) { case RLE: return RLECompression; case ZipWithPrediction: case ZipWithoutPrediction: return ZipCompression; default: return NoCompression; } } static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity, MagickBooleanType revert,ExceptionInfo *exception) { MagickBooleanType status; ssize_t y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying layer opacity %.20g", (double) opacity); if (opacity == OpaqueAlpha) return(MagickTrue); if (image->alpha_trait != BlendPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (revert == MagickFalse) SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))* opacity),q); else if (opacity > 0) SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/ (MagickRealType) opacity)),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask, Quantum background,MagickBooleanType revert,ExceptionInfo *exception) { Image *complete_mask; MagickBooleanType status; PixelInfo color; ssize_t y; if (image->alpha_trait == UndefinedPixelTrait) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying opacity mask"); complete_mask=CloneImage(image,0,0,MagickTrue,exception); if 
(complete_mask == (Image *) NULL) return(MagickFalse); complete_mask->alpha_trait=BlendPixelTrait; GetPixelInfo(complete_mask,&color); color.red=(MagickRealType) background; (void) SetImageColor(complete_mask,&color,exception); status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue, mask->page.x-image->page.x,mask->page.y-image->page.y,exception); if (status == MagickFalse) { complete_mask=DestroyImage(complete_mask); return(status); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register Quantum *p; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception); if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType alpha, intensity; alpha=(MagickRealType) GetPixelAlpha(image,q); intensity=GetPixelIntensity(complete_mask,p); if (revert == MagickFalse) SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q); else if (intensity > 0) SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q); q+=GetPixelChannels(image); p+=GetPixelChannels(complete_mask); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } complete_mask=DestroyImage(complete_mask); return(status); } static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info, ExceptionInfo *exception) { char *key; RandomInfo *random_info; StringInfo *key_info; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " preserving opacity mask"); random_info=AcquireRandomInfo(); key_info=GetRandomKey(random_info,2+1); key=(char *) GetStringInfoDatum(key_info); key[8]=(char) layer_info->mask.background; key[9]='\0'; layer_info->mask.image->page.x+=layer_info->page.x; layer_info->mask.image->page.y+=layer_info->page.y; (void) SetImageRegistry(ImageRegistryType,(const char *) key, layer_info->mask.image,exception); (void) SetImageArtifact(layer_info->image,"psd:opacity-mask", (const char *) key); key_info=DestroyStringInfo(key_info); random_info=DestroyRandomInfo(random_info); } static ssize_t DecodePSDPixels(const size_t number_compact_pixels, const unsigned char *compact_pixels,const ssize_t depth, const size_t number_pixels,unsigned char *pixels) { #define CheckNumberCompactPixels \ if (packets == 0) \ return(i); \ packets-- #define CheckNumberPixels(count) \ if (((ssize_t) i + count) > (ssize_t) number_pixels) \ return(i); \ i+=count int pixel; register ssize_t i, j; size_t length; ssize_t packets; packets=(ssize_t) number_compact_pixels; for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); ) { packets--; length=(size_t) (*compact_pixels++); if (length == 128) continue; if (length > 128) { length=256-length+1; CheckNumberCompactPixels; pixel=(*compact_pixels++); for (j=0; j < (ssize_t) length; j++) { switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 0) & 0x01 ? 
0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(unsigned char) ((pixel >> 6) & 0x03); *pixels++=(unsigned char) ((pixel >> 4) & 0x03); *pixels++=(unsigned char) ((pixel >> 2) & 0x03); *pixels++=(unsigned char) ((pixel & 0x03) & 0x03); break; } case 4: { CheckNumberPixels(2); *pixels++=(unsigned char) ((pixel >> 4) & 0xff); *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff); break; } default: { CheckNumberPixels(1); *pixels++=(unsigned char) pixel; break; } } } continue; } length++; for (j=0; j < (ssize_t) length; j++) { CheckNumberCompactPixels; switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(*compact_pixels >> 6) & 0x03; *pixels++=(*compact_pixels >> 4) & 0x03; *pixels++=(*compact_pixels >> 2) & 0x03; *pixels++=(*compact_pixels & 0x03) & 0x03; break; } case 4: { CheckNumberPixels(2); *pixels++=(*compact_pixels >> 4) & 0xff; *pixels++=(*compact_pixels & 0x0f) & 0xff; break; } default: { CheckNumberPixels(1); *pixels++=(*compact_pixels); break; } } compact_pixels++; } } return(i); } static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info, const ssize_t number_layers) { ssize_t i; for (i=0; i<number_layers; i++) { if (layer_info[i].image != (Image *) NULL) layer_info[i].image=DestroyImage(layer_info[i].image); if (layer_info[i].mask.image != (Image *) NULL) layer_info[i].mask.image=DestroyImage(layer_info[i].mask.image); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); } return (LayerInfo *) RelinquishMagickMemory(layer_info); } static inline size_t GetPSDPacketSize(const Image *image) { if (image->storage_class == PseudoClass) { if (image->colors > 256) return(2); } if (image->depth > 16) return(4); if (image->depth > 8) return(2); return(1); } static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image) { if (psd_info->version == 1) return((MagickSizeType) ReadBlobLong(image)); return((MagickSizeType) ReadBlobLongLong(image)); } static inline size_t GetPSDRowSize(Image *image) { if (image->depth == 1) return(((image->columns+7)/8)*GetPSDPacketSize(image)); else return(image->columns*GetPSDPacketSize(image)); } static const char *ModeToString(PSDImageType type) { switch (type) { case BitmapMode: return "Bitmap"; case GrayscaleMode: return "Grayscale"; case IndexedMode: return "Indexed"; case RGBMode: return "RGB"; case CMYKMode: return "CMYK"; case MultichannelMode: return "Multichannel"; case DuotoneMode: return "Duotone"; case LabMode: return "L*A*B"; default: return "unknown"; } } static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception) { ChannelType channel_mask; MagickBooleanType status; channel_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~ AlphaChannel)); status=NegateImage(image,MagickFalse,exception); (void) SetImageChannelMask(image,channel_mask); return(status); } static StringInfo *ParseImageResourceBlocks(PSDInfo *psd_info,Image *image, const unsigned char *blocks,size_t length) { const unsigned char *p; ssize_t offset; StringInfo *profile; unsigned char name_length; unsigned int count; 
unsigned short id, short_sans; if (length < 16) return((StringInfo *) NULL); profile=BlobToStringInfo((const unsigned char *) NULL,length); SetStringInfoDatum(profile,blocks); SetStringInfoName(profile,"8bim"); for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); ) { if (LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p+=4; p=PushShortPixel(MSBEndian,p,&id); p=PushCharPixel(p,&name_length); if ((name_length % 2) == 0) name_length++; p+=name_length; if (p > (blocks+length-4)) break; p=PushLongPixel(MSBEndian,p,&count); offset=(ssize_t) count; if (((p+offset) < blocks) || ((p+offset) > (blocks+length))) break; switch (id) { case 0x03ed: { unsigned short resolution; /* Resolution info. */ if (offset < 16) break; p=PushShortPixel(MSBEndian,p,&resolution); image->resolution.x=(double) resolution; (void) FormatImageProperty(image,"tiff:XResolution","%*g", GetMagickPrecision(),image->resolution.x); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&resolution); image->resolution.y=(double) resolution; (void) FormatImageProperty(image,"tiff:YResolution","%*g", GetMagickPrecision(),image->resolution.y); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); image->units=PixelsPerInchResolution; break; } case 0x0421: { if ((offset > 4) && (*(p+4) == 0)) psd_info->has_merged_image=MagickFalse; p+=offset; break; } default: { p+=offset; break; } } if ((offset & 0x01) != 0) p++; } return(profile); } static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode) { if (mode == (const char *) NULL) return(OverCompositeOp); if (LocaleNCompare(mode,"norm",4) == 0) return(OverCompositeOp); if (LocaleNCompare(mode,"mul ",4) == 0) return(MultiplyCompositeOp); if (LocaleNCompare(mode,"diss",4) == 0) return(DissolveCompositeOp); if (LocaleNCompare(mode,"diff",4) == 0) return(DifferenceCompositeOp); if (LocaleNCompare(mode,"dark",4) == 0) return(DarkenCompositeOp); if (LocaleNCompare(mode,"lite",4) == 0) return(LightenCompositeOp); if (LocaleNCompare(mode,"hue ",4) == 0) return(HueCompositeOp); if (LocaleNCompare(mode,"sat ",4) == 0) return(SaturateCompositeOp); if (LocaleNCompare(mode,"colr",4) == 0) return(ColorizeCompositeOp); if (LocaleNCompare(mode,"lum ",4) == 0) return(LuminizeCompositeOp); if (LocaleNCompare(mode,"scrn",4) == 0) return(ScreenCompositeOp); if (LocaleNCompare(mode,"over",4) == 0) return(OverlayCompositeOp); if (LocaleNCompare(mode,"hLit",4) == 0) return(HardLightCompositeOp); if (LocaleNCompare(mode,"sLit",4) == 0) return(SoftLightCompositeOp); if (LocaleNCompare(mode,"smud",4) == 0) return(ExclusionCompositeOp); if (LocaleNCompare(mode,"div ",4) == 0) return(ColorDodgeCompositeOp); if (LocaleNCompare(mode,"idiv",4) == 0) return(ColorBurnCompositeOp); if (LocaleNCompare(mode,"lbrn",4) == 0) return(LinearBurnCompositeOp); if (LocaleNCompare(mode,"lddg",4) == 0) return(LinearDodgeCompositeOp); if (LocaleNCompare(mode,"lLit",4) == 0) return(LinearLightCompositeOp); if (LocaleNCompare(mode,"vLit",4) == 0) return(VividLightCompositeOp); if (LocaleNCompare(mode,"pLit",4) == 0) return(PinLightCompositeOp); if (LocaleNCompare(mode,"hMix",4) == 0) return(HardMixCompositeOp); return(OverCompositeOp); } static inline void ReversePSDString(Image *image,char *p,size_t length) { char *q; if (image->endian == MSBEndian) return; q=p+length; for(--q; p < q; ++p, --q) { *p = *p ^ *q, *q = *p ^ *q, *p = *p ^ 
*q; } } static inline void SetPSDPixel(Image *image,const size_t channels, const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q, ExceptionInfo *exception) { if (image->storage_class == PseudoClass) { PixelInfo *color; Quantum index; index=pixel; if (packet_size == 1) index=(Quantum) ScaleQuantumToChar(index); index=(Quantum) ConstrainColormapIndex(image,(ssize_t) index, exception); if (type == 0) SetPixelIndex(image,index,q); if ((type == 0) && (channels > 1)) return; color=image->colormap+(ssize_t) GetPixelIndex(image,q); if (type != 0) color->alpha=(MagickRealType) pixel; SetPixelViaPixelInfo(image,color,q); return; } switch (type) { case -1: { SetPixelAlpha(image,pixel,q); break; } case -2: case 0: { SetPixelRed(image,pixel,q); break; } case -3: case 1: { SetPixelGreen(image,pixel,q); break; } case -4: case 2: { SetPixelBlue(image,pixel,q); break; } case 3: { if (image->colorspace == CMYKColorspace) SetPixelBlack(image,pixel,q); else if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,pixel,q); break; } case 4: { if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) && (channels > 3)) break; if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,pixel,q); break; } } } static MagickBooleanType ReadPSDChannelPixels(Image *image, const size_t channels,const ssize_t row,const ssize_t type, const unsigned char *pixels,ExceptionInfo *exception) { Quantum pixel; register const unsigned char *p; register Quantum *q; register ssize_t x; size_t packet_size; p=pixels; q=GetAuthenticPixels(image,0,row,image->columns,1,exception); if (q == (Quantum *) NULL) return MagickFalse; packet_size=GetPSDPacketSize(image); for (x=0; x < (ssize_t) image->columns; x++) { if (packet_size == 1) pixel=ScaleCharToQuantum(*p++); else if (packet_size == 2) { unsigned short nibble; p=PushShortPixel(MSBEndian,p,&nibble); pixel=ScaleShortToQuantum(nibble); } else { MagickFloatType nibble; p=PushFloatPixel(MSBEndian,p,&nibble); pixel=ClampToQuantum((MagickRealType) (QuantumRange*nibble)); } if (image->depth > 1) { SetPSDPixel(image,channels,type,packet_size,pixel,q,exception); q+=GetPixelChannels(image); } else { ssize_t bit, number_bits; number_bits=(ssize_t) image->columns-x; if (number_bits > 8) number_bits=8; for (bit = 0; bit < (ssize_t) number_bits; bit++) { SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel) & (0x01 << (7-bit))) != 0 ? 
0 : QuantumRange,q,exception); q+=GetPixelChannels(image); x++; } if (x != (ssize_t) image->columns) x--; continue; } } return(SyncAuthenticPixels(image,exception)); } static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels, const ssize_t type,ExceptionInfo *exception) { MagickBooleanType status; size_t row_size; ssize_t count, y; unsigned char *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RAW"); row_size=GetPSDRowSize(image); pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) memset(pixels,0,row_size*sizeof(*pixels)); status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { status=MagickFalse; count=ReadBlob(image,row_size,pixels); if (count != (ssize_t) row_size) { status=MagickFalse; break; } status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception); if (status == MagickFalse) break; } pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } static inline MagickOffsetType *ReadPSDRLESizes(Image *image, const PSDInfo *psd_info,const size_t size) { MagickOffsetType *sizes; ssize_t y; sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes)); if(sizes != (MagickOffsetType *) NULL) { for (y=0; y < (ssize_t) size; y++) { if (psd_info->version == 1) sizes[y]=(MagickOffsetType) ReadBlobShort(image); else sizes[y]=(MagickOffsetType) ReadBlobLong(image); } } return sizes; } static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info, const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception) { MagickBooleanType status; size_t length, row_size; ssize_t count, y; unsigned char *compact_pixels, *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RLE compressed"); row_size=GetPSDRowSize(image); pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); length=0; for (y=0; y < (ssize_t) image->rows; y++) if ((MagickOffsetType) length < sizes[y]) length=(size_t) sizes[y]; if (length > (row_size+2048)) /* arbitrary number */ { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename); } compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels)); if (compact_pixels == (unsigned char *) NULL) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(compact_pixels,0,length*sizeof(*compact_pixels)); status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { status=MagickFalse; count=ReadBlob(image,(size_t) sizes[y],compact_pixels); if (count != (ssize_t) sizes[y]) break; count=DecodePSDPixels((size_t) sizes[y],compact_pixels, (ssize_t) (image->depth == 1 ? 
123456 : image->depth),row_size,pixels); if (count != (ssize_t) row_size) break; status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels, exception); if (status == MagickFalse) break; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #ifdef MAGICKCORE_ZLIB_DELEGATE static void Unpredict8Bit(unsigned char *pixels,const size_t count) { register unsigned char *p; size_t remaining; p=pixels; remaining=count; while (--remaining) { *(p+1)+=*p; p++; } } static void Unpredict16Bit(const Image *image,unsigned char *pixels, const size_t count, const size_t row_size) { register unsigned char *p; size_t length, remaining; p=pixels; remaining=count; while (remaining > 0) { length=image->columns; while (--length) { p[2]+=p[0]+((p[1]+p[3]) >> 8); p[3]+=p[1]; p+=2; } p+=2; remaining-=row_size; } } static void Unpredict32Bit(const Image *image,unsigned char *pixels, unsigned char *output_pixels,const size_t row_size) { register unsigned char *p, *q; register ssize_t y; size_t offset1, offset2, offset3, remaining; unsigned char *start; offset1=image->columns; offset2=2*offset1; offset3=3*offset1; p=pixels; q=output_pixels; for (y=0; y < (ssize_t) image->rows; y++) { start=p; remaining=row_size; while (--remaining) { *(p+1)+=*p; p++; } p=start; remaining=image->columns; while (remaining--) { *(q++)=*p; *(q++)=*(p+offset1); *(q++)=*(p+offset2); *(q++)=*(p+offset3); p++; } p=start+row_size; } } static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels, const ssize_t type,const PSDCompressionType compression, const size_t compact_size,ExceptionInfo *exception) { MagickBooleanType status; register unsigned char *p; size_t count, packet_size, row_size; register ssize_t y; unsigned char *compact_pixels, *pixels; z_stream stream; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is ZIP compressed"); if ((MagickSizeType) compact_size > GetBlobSize(image)) ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size, sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); packet_size=GetPSDPacketSize(image); row_size=image->columns*packet_size; count=image->rows*row_size; pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) { compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); } memset(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; stream.next_in=(Bytef *)compact_pixels; stream.avail_in=(uInt) compact_size; stream.next_out=(Bytef *)pixels; stream.avail_out=(uInt) count; if (inflateInit(&stream) == Z_OK) { int ret; while (stream.avail_out > 0) { ret=inflate(&stream,Z_SYNC_FLUSH); if ((ret != Z_OK) && (ret != Z_STREAM_END)) { (void) inflateEnd(&stream); compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(MagickFalse); } if (ret 
== Z_STREAM_END) break; } (void) inflateEnd(&stream); } if (compression == ZipWithPrediction) { if (packet_size == 1) Unpredict8Bit(pixels,count); else if (packet_size == 2) Unpredict16Bit(image,pixels,count,row_size); else if (packet_size == 4) { unsigned char *output_pixels; output_pixels=(unsigned char *) AcquireQuantumMemory(count, sizeof(*output_pixels)); if (pixels == (unsigned char *) NULL) { compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed",image->filename); } Unpredict32Bit(image,pixels,output_pixels,row_size); pixels=(unsigned char *) RelinquishMagickMemory(pixels); pixels=output_pixels; } } status=MagickTrue; p=pixels; for (y=0; y < (ssize_t) image->rows; y++) { status=ReadPSDChannelPixels(image,channels,y,type,p,exception); if (status == MagickFalse) break; p+=row_size; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #endif static MagickBooleanType ReadPSDChannel(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info, const size_t channel,const PSDCompressionType compression, ExceptionInfo *exception) { Image *channel_image, *mask; MagickOffsetType offset; MagickBooleanType status; channel_image=image; mask=(Image *) NULL; if ((layer_info->channel_info[channel].type < -1) && (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0)) { const char *option; /* Ignore mask that is not a user supplied layer mask, if the mask is disabled or if the flags have unsupported values. */ option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if ((layer_info->channel_info[channel].type != -2) || (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) && (IsStringTrue(option) == MagickFalse))) { (void) SeekBlob(image,(MagickOffsetType) layer_info->channel_info[channel].size-2,SEEK_CUR); return(MagickTrue); } mask=CloneImage(image,layer_info->mask.page.width, layer_info->mask.page.height,MagickFalse,exception); if (mask != (Image *) NULL) { (void) ResetImagePixels(mask,exception); (void) SetImageType(mask,GrayscaleType,exception); channel_image=mask; } } offset=TellBlob(image); status=MagickFalse; switch(compression) { case Raw: status=ReadPSDChannelRaw(channel_image,psd_info->channels, (ssize_t) layer_info->channel_info[channel].type,exception); break; case RLE: { MagickOffsetType *sizes; sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ReadPSDChannelRLE(channel_image,psd_info, (ssize_t) layer_info->channel_info[channel].type,sizes,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); } break; case ZipWithPrediction: case ZipWithoutPrediction: #ifdef MAGICKCORE_ZLIB_DELEGATE status=ReadPSDChannelZip(channel_image,layer_info->channels, (ssize_t) layer_info->channel_info[channel].type,compression, layer_info->channel_info[channel].size-2,exception); #else (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn", "'%s' (ZLIB)",image->filename); #endif break; default: (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning, "CompressionNotSupported","'%.20g'",(double) compression); break; } (void) 
SeekBlob(image,offset+layer_info->channel_info[channel].size-2, SEEK_SET); if (status == MagickFalse) { if (mask != (Image *) NULL) (void) DestroyImage(mask); ThrowBinaryException(CoderError,"UnableToDecompressImage", image->filename); } if (mask != (Image *) NULL) { if (layer_info->mask.image != (Image *) NULL) layer_info->mask.image=DestroyImage(layer_info->mask.image); layer_info->mask.image=mask; } return(status); } static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info, const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception) { char message[MagickPathExtent]; MagickBooleanType status; PSDCompressionType compression; ssize_t j; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " setting up new layer image"); if (psd_info->mode != IndexedMode) (void) SetImageBackgroundColor(layer_info->image,exception); layer_info->image->compose=PSDBlendModeToCompositeOperator( layer_info->blendkey); if (layer_info->visible == MagickFalse) layer_info->image->compose=NoCompositeOp; /* Set up some hidden attributes for folks that need them. */ (void) FormatLocaleString(message,MagickPathExtent,"%.20g", (double) layer_info->page.x); (void) SetImageArtifact(layer_info->image,"psd:layer.x",message); (void) FormatLocaleString(message,MagickPathExtent,"%.20g", (double) layer_info->page.y); (void) SetImageArtifact(layer_info->image,"psd:layer.y",message); (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double) layer_info->opacity); (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message); (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name, exception); status=MagickTrue; for (j=0; j < (ssize_t) layer_info->channels; j++) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for channel %.20g",(double) j); compression=(PSDCompressionType) ReadBlobShort(layer_info->image); layer_info->image->compression=ConvertPSDCompression(compression); if (layer_info->channel_info[j].type == -1) layer_info->image->alpha_trait=BlendPixelTrait; status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info, (size_t) j,compression,exception); if (status == MagickFalse) break; } if (status != MagickFalse) status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity, MagickFalse,exception); if ((status != MagickFalse) && (layer_info->image->colorspace == CMYKColorspace)) status=NegateCMYK(layer_info->image,exception); if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL)) { const char *option; layer_info->mask.image->page.x=layer_info->mask.page.x; layer_info->mask.image->page.y=layer_info->mask.page.y; /* Do not composite the mask when it is disabled */ if ((layer_info->mask.flags & 0x02) == 0x02) layer_info->mask.image->compose=NoCompositeOp; else status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image, layer_info->mask.background == 0 ? 
0 : QuantumRange,MagickFalse, exception); option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if (IsStringTrue(option) != MagickFalse) PreservePSDOpacityMask(image,layer_info,exception); layer_info->mask.image=DestroyImage(layer_info->mask.image); } return(status); } static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info, LayerInfo *layer_info) { int channel_type; register ssize_t i; if (layer_info->channels < psd_info->min_channels) return(MagickFalse); channel_type=RedChannel; if (psd_info->min_channels >= 3) channel_type|=(GreenChannel | BlueChannel); if (psd_info->min_channels >= 4) channel_type|=BlackChannel; for (i=0; i < (ssize_t) layer_info->channels; i++) { short type; type=layer_info->channel_info[i].type; if ((i == 0) && (psd_info->mode == IndexedMode) && (type != 0)) return(MagickFalse); if (type == -1) { channel_type|=AlphaChannel; continue; } if (type < -1) continue; if (type == 0) channel_type&=~RedChannel; else if (type == 1) channel_type&=~GreenChannel; else if (type == 2) channel_type&=~BlueChannel; else if (type == 3) channel_type&=~BlackChannel; } if (channel_type == 0) return(MagickTrue); if ((channel_type == AlphaChannel) && (layer_info->channels >= psd_info->min_channels + 1)) return(MagickTrue); return(MagickFalse); } static void AttachPSDLayers(Image *image,LayerInfo *layer_info, ssize_t number_layers) { register ssize_t i; ssize_t j; for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=i; j < number_layers - 1; j++) layer_info[j] = layer_info[j+1]; number_layers--; i--; } } if (number_layers == 0) { layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info); return; } for (i=0; i < number_layers; i++) { if (i > 0) layer_info[i].image->previous=layer_info[i-1].image; if (i < (number_layers-1)) layer_info[i].image->next=layer_info[i+1].image; layer_info[i].image->page=layer_info[i].page; } image->next=layer_info[0].image; layer_info[0].image->previous=image; layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info); } static inline MagickBooleanType PSDSkipImage(const PSDInfo *psd_info, const ImageInfo *image_info,const size_t index) { if (psd_info->has_merged_image == MagickFalse) return(MagickFalse); if (image_info->number_scenes == 0) return(MagickFalse); if (index < image_info->scene) return(MagickTrue); if (index > image_info->scene+image_info->number_scenes-1) return(MagickTrue); return(MagickFalse); } static void CheckMergedImageAlpha(const PSDInfo *psd_info,Image *image) { /* The number of layers cannot be used to determine if the merged image contains an alpha channel. So we enable it when we think we should. 
*/ if (((psd_info->mode == GrayscaleMode) && (psd_info->channels > 1)) || ((psd_info->mode == RGBMode) && (psd_info->channels > 3)) || ((psd_info->mode == CMYKMode) && (psd_info->channels > 4))) image->alpha_trait=BlendPixelTrait; } static void ParseAdditionalInfo(LayerInfo *layer_info) { char key[5]; size_t remaining_length; unsigned char *p; unsigned int size; p=GetStringInfoDatum(layer_info->info); remaining_length=GetStringInfoLength(layer_info->info); while (remaining_length >= 12) { /* skip over signature */ p+=4; key[0]=(char) (*p++); key[1]=(char) (*p++); key[2]=(char) (*p++); key[3]=(char) (*p++); key[4]='\0'; size=(unsigned int) (*p++) << 24; size|=(unsigned int) (*p++) << 16; size|=(unsigned int) (*p++) << 8; size|=(unsigned int) (*p++); size=size & 0xffffffff; remaining_length-=12; if ((size_t) size > remaining_length) break; if (LocaleNCompare(key,"luni",sizeof(key)) == 0) { unsigned char *name; unsigned int length; length=(unsigned int) (*p++) << 24; length|=(unsigned int) (*p++) << 16; length|=(unsigned int) (*p++) << 8; length|=(unsigned int) (*p++); if (length * 2 > size - 4) break; if (sizeof(layer_info->name) <= length) break; name=layer_info->name; while (length > 0) { /* Only ASCII strings are supported */ if (*p++ != '\0') break; *name++=*p++; length--; } if (length == 0) *name='\0'; break; } else p+=size; remaining_length-=(size_t) size; } } static MagickBooleanType ReadPSDLayersInternal(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info, const MagickBooleanType skip_layers,ExceptionInfo *exception) { char type[4]; LayerInfo *layer_info; MagickSizeType size; MagickBooleanType status; register ssize_t i; ssize_t count, index, j, number_layers; size=GetPSDSize(psd_info,image); if (size == 0) { /* Skip layers & masks. */ (void) ReadBlobLong(image); count=ReadBlob(image,4,(unsigned char *) type); if (count == 4) ReversePSDString(image,type,(size_t) count); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) { CheckMergedImageAlpha(psd_info,image); return(MagickTrue); } else { count=ReadBlob(image,4,(unsigned char *) type); if (count == 4) ReversePSDString(image,type,4); if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) || (LocaleNCompare(type,"Lr32",4) == 0))) size=GetPSDSize(psd_info,image); else { CheckMergedImageAlpha(psd_info,image); return(MagickTrue); } } } if (size == 0) return(MagickTrue); layer_info=(LayerInfo *) NULL; number_layers=(ssize_t) ReadBlobSignedShort(image); if (number_layers < 0) { /* The first alpha channel in the merged result contains the transparency data for the merged result. 
*/ number_layers=MagickAbsoluteValue(number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " negative layer count corrected for"); image->alpha_trait=BlendPixelTrait; } /* We only need to know if the image has an alpha channel */ if (skip_layers != MagickFalse) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image contains %.20g layers",(double) number_layers); if (number_layers == 0) ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers", image->filename); layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers, sizeof(*layer_info)); if (layer_info == (LayerInfo *) NULL) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of LayerInfo failed"); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info)); for (i=0; i < number_layers; i++) { ssize_t top, left, bottom, right; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading layer #%.20g",(double) i+1); top=(ssize_t) ReadBlobSignedLong(image); left=(ssize_t) ReadBlobSignedLong(image); bottom=(ssize_t) ReadBlobSignedLong(image); right=(ssize_t) ReadBlobSignedLong(image); if ((right < left) || (bottom < top)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } layer_info[i].page.y=top; layer_info[i].page.x=left; layer_info[i].page.width=(size_t) (right-left); layer_info[i].page.height=(size_t) (bottom-top); layer_info[i].channels=ReadBlobShort(image); if (layer_info[i].channels > MaxPSDChannels) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded", image->filename); } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g", (double) layer_info[i].page.x,(double) layer_info[i].page.y, (double) layer_info[i].page.height,(double) layer_info[i].page.width,(double) layer_info[i].channels); for (j=0; j < (ssize_t) layer_info[i].channels; j++) { layer_info[i].channel_info[j].type=(short) ReadBlobShort(image); if ((layer_info[i].channel_info[j].type < -4) || (layer_info[i].channel_info[j].type > 4)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"NoSuchImageChannel", image->filename); } layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info, image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " channel[%.20g]: type=%.20g, size=%.20g",(double) j, (double) layer_info[i].channel_info[j].type, (double) layer_info[i].channel_info[j].size); } if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadBlob(image,4,(unsigned char *) type); if (count == 4) ReversePSDString(image,type,4); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer type was %.4s instead of 8BIM", type); layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadBlob(image,4,(unsigned char *) 
layer_info[i].blendkey); if (count != 4) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } ReversePSDString(image,layer_info[i].blendkey,4); layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); layer_info[i].clipping=(unsigned char) ReadBlobByte(image); layer_info[i].flags=(unsigned char) ReadBlobByte(image); layer_info[i].visible=!(layer_info[i].flags & 0x02); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s", layer_info[i].blendkey,(double) layer_info[i].opacity, layer_info[i].clipping ? "true" : "false",layer_info[i].flags, layer_info[i].visible ? "true" : "false"); (void) ReadBlobByte(image); /* filler */ size=ReadBlobLong(image); if (size != 0) { MagickSizeType combined_length, length; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer contains additional info"); length=ReadBlobLong(image); combined_length=length+4; if (length != 0) { /* Layer mask info. */ layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image); layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image); layer_info[i].mask.page.height=(size_t) (ReadBlobSignedLong(image)-layer_info[i].mask.page.y); layer_info[i].mask.page.width=(size_t) ( ReadBlobSignedLong(image)-layer_info[i].mask.page.x); layer_info[i].mask.background=(unsigned char) ReadBlobByte( image); layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image); if (!(layer_info[i].mask.flags & 0x01)) { layer_info[i].mask.page.y=layer_info[i].mask.page.y- layer_info[i].page.y; layer_info[i].mask.page.x=layer_info[i].mask.page.x- layer_info[i].page.x; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g", (double) layer_info[i].mask.page.x,(double) layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width,(double) layer_info[i].mask.page.height,(double) ((MagickOffsetType) length)-18); /* Skip over the rest of the layer mask information. */ if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=ReadBlobLong(image); combined_length+=length+4; if (length != 0) { /* Layer blending ranges info. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer blending ranges: length=%.20g",(double) ((MagickOffsetType) length)); if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } /* Layer name. 
*/ length=(MagickSizeType) (unsigned char) ReadBlobByte(image); combined_length+=length+1; if (length > 0) (void) ReadBlob(image,(size_t) length++,layer_info[i].name); layer_info[i].name[length]='\0'; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer name: %s",layer_info[i].name); if ((length % 4) != 0) { length=4-(length % 4); combined_length+=length; /* Skip over the padding of the layer name */ if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=(MagickSizeType) size-combined_length; if (length > 0) { unsigned char *info; if (length > GetBlobSize(image)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "InsufficientImageDataInFile",image->filename); } layer_info[i].info=AcquireStringInfo((const size_t) length); info=GetStringInfoDatum(layer_info[i].info); (void) ReadBlob(image,(const size_t) length,info); ParseAdditionalInfo(&layer_info[i]); } } } for (i=0; i < number_layers; i++) { if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is empty"); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); continue; } /* Allocate layered image. */ layer_info[i].image=CloneImage(image,layer_info[i].page.width, layer_info[i].page.height,MagickFalse,exception); if (layer_info[i].image == (Image *) NULL) { layer_info=DestroyLayerInfo(layer_info,number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of image for layer %.20g failed",(double) i); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (layer_info[i].info != (StringInfo *) NULL) { (void) SetImageProfile(layer_info[i].image,"psd:additional-info", layer_info[i].info,exception); layer_info[i].info=DestroyStringInfo(layer_info[i].info); } } if (image_info->ping != MagickFalse) { AttachPSDLayers(image,layer_info,number_layers); return(MagickTrue); } status=MagickTrue; index=0; for (i=0; i < number_layers; i++) { if ((layer_info[i].image == (Image *) NULL) || (PSDSkipImage(psd_info, image_info,++index) != MagickFalse)) { for (j=0; j < (ssize_t) layer_info[i].channels; j++) { if (DiscardBlobBytes(image,(MagickSizeType) layer_info[i].channel_info[j].size) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } continue; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for layer %.20g",(double) i); status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i], exception); if (status == MagickFalse) break; status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i, (MagickSizeType) number_layers); if (status == MagickFalse) break; } if (status != MagickFalse) AttachPSDLayers(image,layer_info,number_layers); else layer_info=DestroyLayerInfo(layer_info,number_layers); return(status); } ModuleExport MagickBooleanType ReadPSDLayers(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception) { PolicyDomain domain; PolicyRights rights; domain=CoderPolicyDomain; rights=ReadPolicyRights; if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse) return(MagickTrue); 
return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse, exception)); } static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info, Image *image,const PSDInfo *psd_info,ExceptionInfo *exception) { MagickOffsetType *sizes; MagickBooleanType status; PSDCompressionType compression; register ssize_t i; if ((image_info->number_scenes != 0) && (image_info->scene != 0)) return(MagickTrue); compression=(PSDCompressionType) ReadBlobMSBShort(image); image->compression=ConvertPSDCompression(compression); if (compression != Raw && compression != RLE) { (void) ThrowMagickException(exception,GetMagickModule(), TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression); return(MagickFalse); } sizes=(MagickOffsetType *) NULL; if (compression == RLE) { sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } status=MagickTrue; for (i=0; i < (ssize_t) psd_info->channels; i++) { ssize_t type; type=i; if ((type == 1) && (psd_info->channels == 2)) type=-1; if (compression == RLE) status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows), exception); else status=ReadPSDChannelRaw(image,psd_info->channels,type,exception); if (status != MagickFalse) status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i, psd_info->channels); if (status == MagickFalse) break; } if ((status != MagickFalse) && (image->colorspace == CMYKColorspace)) status=NegateCMYK(image,exception); if (status != MagickFalse) status=CorrectPSDAlphaBlend(image_info,image,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); return(status); } static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType skip_layers; MagickOffsetType offset; MagickSizeType length; MagickBooleanType status; PSDInfo psd_info; register ssize_t i; size_t imageListLength; ssize_t count; StringInfo *profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read image header. 
*/ image->endian=MSBEndian; count=ReadBlob(image,4,(unsigned char *) psd_info.signature); psd_info.version=ReadBlobMSBShort(image); if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) || ((psd_info.version != 1) && (psd_info.version != 2))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); (void) ReadBlob(image,6,psd_info.reserved); psd_info.channels=ReadBlobMSBShort(image); if (psd_info.channels < 1) ThrowReaderException(CorruptImageError,"MissingImageChannel"); if (psd_info.channels > MaxPSDChannels) ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded"); psd_info.rows=ReadBlobMSBLong(image); psd_info.columns=ReadBlobMSBLong(image); if ((psd_info.version == 1) && ((psd_info.rows > 30000) || (psd_info.columns > 30000))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.depth=ReadBlobMSBShort(image); if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16) && (psd_info.depth != 32)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.mode=ReadBlobMSBShort(image); if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s", (double) psd_info.columns,(double) psd_info.rows,(double) psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType) psd_info.mode)); if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Initialize image. */ image->depth=psd_info.depth; image->columns=psd_info.columns; image->rows=psd_info.rows; status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); status=ResetImagePixels(image,exception); if (status == MagickFalse) return(DestroyImageList(image)); psd_info.min_channels=3; if (psd_info.mode == LabMode) (void) SetImageColorspace(image,LabColorspace,exception); if (psd_info.mode == CMYKMode) { psd_info.min_channels=4; (void) SetImageColorspace(image,CMYKColorspace,exception); } else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) || (psd_info.mode == DuotoneMode)) { if (psd_info.depth != 32) { status=AcquireImageColormap(image,MagickMin((size_t) (psd_info.depth < 16 ? 256 : 65536), MaxColormapSize),exception); if (status == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image colormap allocated"); } psd_info.min_channels=1; (void) SetImageColorspace(image,GRAYColorspace,exception); } else if (psd_info.mode == IndexedMode) psd_info.min_channels=1; if (psd_info.channels < psd_info.min_channels) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Read PSD raster colormap only present for indexed and duotone images. */ length=ReadBlobMSBLong(image); if ((psd_info.mode == IndexedMode) && (length < 3)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (length != 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading colormap"); if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32)) { /* Duotone image data; the format of this data is undocumented. 32 bits per pixel; the colormap is ignored. 
*/ (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR); } else { size_t number_colors; /* Read PSD raster colormap. */ number_colors=(size_t) length/3; if (number_colors > 65536) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (AcquireImageColormap(image,number_colors,exception) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].red=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].green=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); image->alpha_trait=UndefinedPixelTrait; } } if ((image->depth == 1) && (image->storage_class != PseudoClass)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); psd_info.has_merged_image=MagickTrue; profile=(StringInfo *) NULL; length=ReadBlobMSBLong(image); if (length != 0) { unsigned char *blocks; /* Image resources block. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading image resource blocks - %.20g bytes",(double) ((MagickOffsetType) length)); if (length > GetBlobSize(image)) ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); blocks=(unsigned char *) AcquireQuantumMemory((size_t) length, sizeof(*blocks)); if (blocks == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); count=ReadBlob(image,(size_t) length,blocks); if ((count != (ssize_t) length) || (length < 4) || (LocaleNCompare((char *) blocks,"8BIM",4) != 0)) { blocks=(unsigned char *) RelinquishMagickMemory(blocks); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } profile=ParseImageResourceBlocks(&psd_info,image,blocks,(size_t) length); blocks=(unsigned char *) RelinquishMagickMemory(blocks); } /* Layer and mask block. */ length=GetPSDSize(&psd_info,image); if (length == 8) { length=ReadBlobMSBLong(image); length=ReadBlobMSBLong(image); } offset=TellBlob(image); skip_layers=MagickFalse; if ((image_info->number_scenes == 1) && (image_info->scene == 0) && (psd_info.has_merged_image != MagickFalse)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " read composite only"); skip_layers=MagickTrue; } if (length == 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image has no layers"); } else { if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers, exception) != MagickTrue) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } /* Skip the rest of the layer and mask information. */ (void) SeekBlob(image,offset+length,SEEK_SET); } /* If we are only "pinging" the image, then we're done - so return. */ if (EOFBlob(image) != MagickFalse) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); } if (image_info->ping != MagickFalse) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* Read the precombined layer, present for PSD < 4 compatibility. 
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading the precombined layer"); imageListLength=GetImageListLength(image); if ((psd_info.has_merged_image != MagickFalse) || (imageListLength == 1)) psd_info.has_merged_image=(MagickBooleanType) ReadPSDMergedImage( image_info,image,&psd_info,exception); if ((psd_info.has_merged_image == MagickFalse) && (imageListLength == 1) && (length != 0)) { (void) SeekBlob(image,offset,SEEK_SET); status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse, exception); if (status != MagickTrue) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } } if (psd_info.has_merged_image == MagickFalse) { Image *merged; if (imageListLength == 1) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); } image->background_color.alpha=(MagickRealType) TransparentAlpha; image->background_color.alpha_trait=BlendPixelTrait; (void) SetImageBackgroundColor(image,exception); merged=MergeImageLayers(image,FlattenLayer,exception); ReplaceImageInList(&image,merged); } if (profile != (StringInfo *) NULL) { Image *next; i=0; next=image; while (next != (Image *) NULL) { if (PSDSkipImage(&psd_info,image_info,i++) == MagickFalse) (void) SetImageProfile(next,GetStringInfoName(profile),profile, exception); next=next->next; } profile=DestroyStringInfo(profile); } (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterPSDImage() adds properties for the PSD image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterPSDImage method is: % % size_t RegisterPSDImage(void) % */ ModuleExport size_t RegisterPSDImage(void) { MagickInfo *entry; entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format"); entry->decoder=(DecodeImageHandler *) ReadPSDImage; entry->encoder=(EncodeImageHandler *) WritePSDImage; entry->magick=(IsImageFormatHandler *) IsPSD; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap"); entry->decoder=(DecodeImageHandler *) ReadPSDImage; entry->encoder=(EncodeImageHandler *) WritePSDImage; entry->magick=(IsImageFormatHandler *) IsPSD; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterPSDImage() removes format registrations made by the % PSD module from the list of supported formats. 
% % The format of the UnregisterPSDImage method is: % % UnregisterPSDImage(void) % */ ModuleExport void UnregisterPSDImage(void) { (void) UnregisterMagickInfo("PSB"); (void) UnregisterMagickInfo("PSD"); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePSDImage() writes an image in the Adobe Photoshop encoded image format. % % The format of the WritePSDImage method is: % % MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. % % o exception: return any errors or warnings in this structure. % */ static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image, const size_t offset) { if (psd_info->version == 1) return(WriteBlobMSBShort(image,(unsigned short) offset)); return(WriteBlobMSBLong(image,(unsigned int) offset)); } static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image, const MagickSizeType size,const MagickOffsetType offset) { MagickOffsetType current_offset; ssize_t result; current_offset=TellBlob(image); (void) SeekBlob(image,offset,SEEK_SET); if (psd_info->version == 1) result=WriteBlobMSBShort(image,(unsigned short) size); else result=WriteBlobMSBLong(image,(unsigned int) size); (void) SeekBlob(image,current_offset,SEEK_SET); return(result); } static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image, const MagickSizeType size) { if (psd_info->version == 1) return(WriteBlobLong(image,(unsigned int) size)); return(WriteBlobLongLong(image,size)); } static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image, const MagickSizeType size,const MagickOffsetType offset) { MagickOffsetType current_offset; ssize_t result; current_offset=TellBlob(image); (void) SeekBlob(image,offset,SEEK_SET); result=SetPSDSize(psd_info,image,size); (void) SeekBlob(image,current_offset,SEEK_SET); return(result); } static size_t PSDPackbitsEncodeImage(Image *image,const size_t length, const unsigned char *pixels,unsigned char *compact_pixels, ExceptionInfo *exception) { int count; register ssize_t i, j; register unsigned char *q; unsigned char *packbits; /* Compress pixels with Packbits encoding. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(pixels != (unsigned char *) NULL); assert(compact_pixels != (unsigned char *) NULL); packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits)); if (packbits == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); q=compact_pixels; for (i=(ssize_t) length; i != 0; ) { switch (i) { case 1: { i--; *q++=(unsigned char) 0; *q++=(*pixels); break; } case 2: { i-=2; *q++=(unsigned char) 1; *q++=(*pixels); *q++=pixels[1]; break; } case 3: { i-=3; if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2))) { *q++=(unsigned char) ((256-3)+1); *q++=(*pixels); break; } *q++=(unsigned char) 2; *q++=(*pixels); *q++=pixels[1]; *q++=pixels[2]; break; } default: { if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2))) { /* Packed run. 
*/ count=3; while (((ssize_t) count < i) && (*pixels == *(pixels+count))) { count++; if (count >= 127) break; } i-=count; *q++=(unsigned char) ((256-count)+1); *q++=(*pixels); pixels+=count; break; } /* Literal run. */ count=0; while ((*(pixels+count) != *(pixels+count+1)) || (*(pixels+count+1) != *(pixels+count+2))) { packbits[count+1]=pixels[count]; count++; if (((ssize_t) count >= (i-3)) || (count >= 127)) break; } i-=count; *packbits=(unsigned char) (count-1); for (j=0; j <= (ssize_t) count; j++) *q++=packbits[j]; pixels+=count; break; } } } *q++=(unsigned char) 128; /* EOD marker */ packbits=(unsigned char *) RelinquishMagickMemory(packbits); return((size_t) (q-compact_pixels)); } static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image, const Image *next_image,const CompressionType compression, const ssize_t channels) { size_t length; ssize_t i, y; if (compression == RLECompression) { length=(size_t) WriteBlobShort(image,RLE); for (i=0; i < channels; i++) for (y=0; y < (ssize_t) next_image->rows; y++) length+=SetPSDOffset(psd_info,image,0); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (compression == ZipCompression) length=(size_t) WriteBlobShort(image,ZipWithoutPrediction); #endif else length=(size_t) WriteBlobShort(image,Raw); return(length); } static size_t WritePSDChannel(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, const QuantumType quantum_type, unsigned char *compact_pixels, MagickOffsetType size_offset,const MagickBooleanType separate, const CompressionType compression,ExceptionInfo *exception) { MagickBooleanType monochrome; QuantumInfo *quantum_info; register const Quantum *p; register ssize_t i; size_t count, length; ssize_t y; unsigned char *pixels; #ifdef MAGICKCORE_ZLIB_DELEGATE int flush, level; unsigned char *compressed_pixels; z_stream stream; compressed_pixels=(unsigned char *) NULL; flush=Z_NO_FLUSH; #endif count=0; if (separate != MagickFalse) { size_offset=TellBlob(image)+2; count+=WriteCompressionStart(psd_info,image,next_image,compression,1); } if (next_image->depth > 8) next_image->depth=16; monochrome=IsImageMonochrome(image) && (image->depth == 1) ? 
MagickTrue : MagickFalse; quantum_info=AcquireQuantumInfo(image_info,next_image); if (quantum_info == (QuantumInfo *) NULL) return(0); pixels=(unsigned char *) GetQuantumPixels(quantum_info); #ifdef MAGICKCORE_ZLIB_DELEGATE if (compression == ZipCompression) { compressed_pixels=(unsigned char *) AcquireQuantumMemory( MagickMinBufferExtent,sizeof(*compressed_pixels)); if (compressed_pixels == (unsigned char *) NULL) { quantum_info=DestroyQuantumInfo(quantum_info); return(0); } memset(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; level=Z_DEFAULT_COMPRESSION; if ((image_info->quality > 0 && image_info->quality < 10)) level=(int) image_info->quality; if (deflateInit(&stream,level) != Z_OK) { quantum_info=DestroyQuantumInfo(quantum_info); compressed_pixels=(unsigned char *) RelinquishMagickMemory( compressed_pixels); return(0); } } #endif for (y=0; y < (ssize_t) next_image->rows; y++) { p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); if (monochrome != MagickFalse) for (i=0; i < (ssize_t) length; i++) pixels[i]=(~pixels[i]); if (compression == RLECompression) { length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels, exception); count+=WriteBlob(image,length,compact_pixels); size_offset+=WritePSDOffset(psd_info,image,length,size_offset); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (compression == ZipCompression) { stream.avail_in=(uInt) length; stream.next_in=(Bytef *) pixels; if (y == (ssize_t) next_image->rows-1) flush=Z_FINISH; do { stream.avail_out=(uInt) MagickMinBufferExtent; stream.next_out=(Bytef *) compressed_pixels; if (deflate(&stream,flush) == Z_STREAM_ERROR) break; length=(size_t) MagickMinBufferExtent-stream.avail_out; if (length > 0) count+=WriteBlob(image,length,compressed_pixels); } while (stream.avail_out == 0); } #endif else count+=WriteBlob(image,length,pixels); } #ifdef MAGICKCORE_ZLIB_DELEGATE if (compression == ZipCompression) { (void) deflateEnd(&stream); compressed_pixels=(unsigned char *) RelinquishMagickMemory( compressed_pixels); } #endif quantum_info=DestroyQuantumInfo(quantum_info); return(count); } static unsigned char *AcquireCompactPixels(const Image *image, ExceptionInfo *exception) { size_t packet_size; unsigned char *compact_pixels; packet_size=image->depth > 8UL ? 
2UL : 1UL; compact_pixels=(unsigned char *) AcquireQuantumMemory((9* image->columns)+1,packet_size*sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); } return(compact_pixels); } static size_t WritePSDChannels(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, MagickOffsetType size_offset,const MagickBooleanType separate, ExceptionInfo *exception) { CompressionType compression; Image *mask; MagickOffsetType rows_offset; size_t channels, count, length, offset_length; unsigned char *compact_pixels; count=0; offset_length=0; rows_offset=0; compact_pixels=(unsigned char *) NULL; compression=next_image->compression; if (image_info->compression != UndefinedCompression) compression=image_info->compression; if (compression == RLECompression) { compact_pixels=AcquireCompactPixels(next_image,exception); if (compact_pixels == (unsigned char *) NULL) return(0); } channels=1; if (separate == MagickFalse) { if ((next_image->storage_class != PseudoClass) || (IsImageGray(next_image) != MagickFalse)) { if (IsImageGray(next_image) == MagickFalse) channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 : 3); if (next_image->alpha_trait != UndefinedPixelTrait) channels++; } rows_offset=TellBlob(image)+2; count+=WriteCompressionStart(psd_info,image,next_image,compression, (ssize_t) channels); offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4)); } size_offset+=2; if ((next_image->storage_class == PseudoClass) && (IsImageGray(next_image) == MagickFalse)) { length=WritePSDChannel(psd_info,image_info,image,next_image, IndexQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (IsImageGray(next_image) != MagickFalse) { length=WritePSDChannel(psd_info,image_info,image,next_image, GrayQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (next_image->colorspace == CMYKColorspace) (void) NegateCMYK(next_image,exception); length=WritePSDChannel(psd_info,image_info,image,next_image, RedQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, GreenQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, BlueQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; if (next_image->colorspace == CMYKColorspace) { length=WritePSDChannel(psd_info,image_info,image,next_image, BlackQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } if (next_image->alpha_trait != 
UndefinedPixelTrait) { length=WritePSDChannel(psd_info,image_info,image,next_image, AlphaQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); if (next_image->colorspace == CMYKColorspace) (void) NegateCMYK(next_image,exception); if (separate != MagickFalse) { const char *property; property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property, exception); if (mask != (Image *) NULL) { if (compression == RLECompression) { compact_pixels=AcquireCompactPixels(mask,exception); if (compact_pixels == (unsigned char *) NULL) return(0); } length=WritePSDChannel(psd_info,image_info,image,mask, RedQuantum,compact_pixels,rows_offset,MagickTrue,compression, exception); (void) WritePSDSize(psd_info,image,length,size_offset); count+=length; compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); } } } return(count); } static size_t WritePascalString(Image *image,const char *value,size_t padding) { size_t count, length; register ssize_t i; /* Max length is 255. */ count=0; length=(strlen(value) > 255UL ) ? 255UL : strlen(value); if (length == 0) count+=WriteBlobByte(image,0); else { count+=WriteBlobByte(image,(unsigned char) length); count+=WriteBlob(image,length,(const unsigned char *) value); } length++; if ((length % padding) == 0) return(count); for (i=0; i < (ssize_t) (padding-(length % padding)); i++) count+=WriteBlobByte(image,0); return(count); } static void WriteResolutionResourceBlock(Image *image) { double x_resolution, y_resolution; unsigned short units; if (image->units == PixelsPerCentimeterResolution) { x_resolution=2.54*65536.0*image->resolution.x+0.5; y_resolution=2.54*65536.0*image->resolution.y+0.5; units=2; } else { x_resolution=65536.0*image->resolution.x+0.5; y_resolution=65536.0*image->resolution.y+0.5; units=1; } (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x03ED); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,16); /* resource size */ (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5)); (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */ (void) WriteBlobMSBShort(image,units); /* width unit */ (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5)); (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */ (void) WriteBlobMSBShort(image,units); /* height unit */ } static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image, const signed short channel) { size_t count; count=(size_t) WriteBlobShort(image,(const unsigned short) channel); count+=SetPSDSize(psd_info,image,0); return(count); } static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length=GetStringInfoLength(bim_profile); if (length < 16) return; datum=GetStringInfoDatum(bim_profile); for (p=datum; (p >= datum) && (p < (datum+length-16)); ) { register unsigned char *q; q=(unsigned char *) p; if (LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); if 
(id == 0x0000040f) { ssize_t quantum; quantum=PSDQuantum(count)+12; if ((quantum >= 12) && (quantum < (ssize_t) length)) { if ((q+quantum < (datum+length-16))) (void) memmove(q,q+quantum,length-quantum-(q-datum)); SetStringInfoLength(bim_profile,length-quantum); } break; } p+=count; if ((count & 0x01) != 0) p++; } } static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length=GetStringInfoLength(bim_profile); if (length < 16) return; datum=GetStringInfoDatum(bim_profile); for (p=datum; (p >= datum) && (p < (datum+length-16)); ) { register unsigned char *q; ssize_t cnt; q=(unsigned char *) p; if (LocaleNCompare((const char *) p,"8BIM",4) != 0) return; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); cnt=PSDQuantum(count); if (cnt < 0) return; if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) && ((ssize_t) length-(cnt+12)-(q-datum)) > 0) { (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum)); SetStringInfoLength(bim_profile,length-(cnt+12)); break; } p+=count; if ((count & 0x01) != 0) p++; } } static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { #define PSDKeySize 5 #define PSDAllowedLength 36 char key[PSDKeySize]; /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */ const char allowed[PSDAllowedLength][PSDKeySize] = { "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk", "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr", "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl", "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA" }, *option; const StringInfo *info; MagickBooleanType found; register size_t i; size_t remaining_length, length; StringInfo *profile; unsigned char *p; unsigned int size; info=GetImageProfile(image,"psd:additional-info"); if (info == (const StringInfo *) NULL) return((const StringInfo *) NULL); option=GetImageOption(image_info,"psd:additional-info"); if (LocaleCompare(option,"all") == 0) return(info); if (LocaleCompare(option,"selective") != 0) { profile=RemoveImageProfile(image,"psd:additional-info"); return(DestroyStringInfo(profile)); } length=GetStringInfoLength(info); p=GetStringInfoDatum(info); remaining_length=length; length=0; while (remaining_length >= 12) { /* skip over signature */ p+=4; key[0]=(char) (*p++); key[1]=(char) (*p++); key[2]=(char) (*p++); key[3]=(char) (*p++); key[4]='\0'; size=(unsigned int) (*p++) << 24; size|=(unsigned int) (*p++) << 16; size|=(unsigned int) (*p++) << 8; size|=(unsigned int) (*p++); size=size & 0xffffffff; remaining_length-=12; if ((size_t) size > remaining_length) return((const StringInfo *) NULL); found=MagickFalse; for (i=0; i < PSDAllowedLength; i++) { if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0) continue; found=MagickTrue; break; } remaining_length-=(size_t) size; if (found == MagickFalse) { if (remaining_length > 0) p=(unsigned char *) memmove(p-12,p+size,remaining_length); continue; } length+=(size_t) size+12; p+=size; } profile=RemoveImageProfile(image,"psd:additional-info"); if (length == 0) return(DestroyStringInfo(profile)); SetStringInfoLength(profile,(const size_t) length); (void) SetImageProfile(image,"psd:additional-info",info,exception); return(profile); } static 
MagickBooleanType WritePSDLayersInternal(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size, ExceptionInfo *exception) { char layer_name[MagickPathExtent]; const char *property; const StringInfo *info; Image *base_image, *next_image; MagickBooleanType status; MagickOffsetType *layer_size_offsets, size_offset; register ssize_t i; size_t layer_count, layer_index, length, name_length, rounded_size, size; status=MagickTrue; base_image=GetNextImageInList(image); if (base_image == (Image *) NULL) base_image=image; size=0; size_offset=TellBlob(image); (void) SetPSDSize(psd_info,image,0); layer_count=0; for (next_image=base_image; next_image != NULL; ) { layer_count++; next_image=GetNextImageInList(next_image); } if (image->alpha_trait != UndefinedPixelTrait) size+=WriteBlobShort(image,-(unsigned short) layer_count); else size+=WriteBlobShort(image,(unsigned short) layer_count); layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory( (size_t) layer_count,sizeof(MagickOffsetType)); if (layer_size_offsets == (MagickOffsetType *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); layer_index=0; for (next_image=base_image; next_image != NULL; ) { Image *mask; unsigned char default_color; unsigned short channels, total_channels; mask=(Image *) NULL; property=GetImageArtifact(next_image,"psd:opacity-mask"); default_color=0; if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception); default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0); } size+=WriteBlobSignedLong(image,(signed int) next_image->page.y); size+=WriteBlobSignedLong(image,(signed int) next_image->page.x); size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+ next_image->rows)); size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+ next_image->columns)); channels=1; if ((next_image->storage_class != PseudoClass) && (IsImageGray(next_image) == MagickFalse)) channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 : 3); total_channels=channels; if (next_image->alpha_trait != UndefinedPixelTrait) total_channels++; if (mask != (Image *) NULL) total_channels++; size+=WriteBlobShort(image,total_channels); layer_size_offsets[layer_index++]=TellBlob(image); for (i=0; i < (ssize_t) channels; i++) size+=WriteChannelSize(psd_info,image,(signed short) i); if (next_image->alpha_trait != UndefinedPixelTrait) size+=WriteChannelSize(psd_info,image,-1); if (mask != (Image *) NULL) size+=WriteChannelSize(psd_info,image,-2); size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM"); size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image)); property=GetImageArtifact(next_image,"psd:layer.opacity"); if (property != (const char *) NULL) { Quantum opacity; opacity=(Quantum) StringToInteger(property); size+=WriteBlobByte(image,ScaleQuantumToChar(opacity)); (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception); } else size+=WriteBlobByte(image,255); size+=WriteBlobByte(image,0); size+=WriteBlobByte(image,(const unsigned char) (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. 
*/ size+=WriteBlobByte(image,0); info=GetAdditionalInformation(image_info,next_image,exception); property=(const char *) GetImageProperty(next_image,"label",exception); if (property == (const char *) NULL) { (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g", (double) layer_index); property=layer_name; } name_length=strlen(property)+1; if ((name_length % 4) != 0) name_length+=(4-(name_length % 4)); if (info != (const StringInfo *) NULL) name_length+=GetStringInfoLength(info); name_length+=8; if (mask != (Image *) NULL) name_length+=20; size+=WriteBlobLong(image,(unsigned int) name_length); if (mask == (Image *) NULL) size+=WriteBlobLong(image,0); else { if (mask->compose != NoCompositeOp) (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum( default_color),MagickTrue,exception); mask->page.y+=image->page.y; mask->page.x+=image->page.x; size+=WriteBlobLong(image,20); size+=WriteBlobSignedLong(image,(const signed int) mask->page.y); size+=WriteBlobSignedLong(image,(const signed int) mask->page.x); size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+ mask->page.y)); size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+ mask->page.x)); size+=WriteBlobByte(image,default_color); size+=WriteBlobByte(image,(const unsigned char) (mask->compose == NoCompositeOp ? 2 : 0)); size+=WriteBlobMSBShort(image,0); } size+=WriteBlobLong(image,0); size+=WritePascalString(image,property,4); if (info != (const StringInfo *) NULL) size+=WriteBlob(image,GetStringInfoLength(info), GetStringInfoDatum(info)); next_image=GetNextImageInList(next_image); } /* Now the image data! */ next_image=base_image; layer_index=0; while (next_image != NULL) { length=WritePSDChannels(psd_info,image_info,image,next_image, layer_size_offsets[layer_index++],MagickTrue,exception); if (length == 0) { status=MagickFalse; break; } size+=length; next_image=GetNextImageInList(next_image); } /* Write the total size */ if (layers_size != (size_t*) NULL) *layers_size=size; if ((size/2) != ((size+1)/2)) rounded_size=size+1; else rounded_size=size; (void) WritePSDSize(psd_info,image,rounded_size,size_offset); layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory( layer_size_offsets); /* Remove the opacity mask from the registry */ next_image=base_image; while (next_image != (Image *) NULL) { property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) (void) DeleteImageRegistry(property); next_image=GetNextImageInList(next_image); } return(status); } ModuleExport MagickBooleanType WritePSDLayers(Image * image, const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception) { PolicyDomain domain; PolicyRights rights; domain=CoderPolicyDomain; rights=WritePolicyRights; if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse) return(MagickTrue); return WritePSDLayersInternal(image,image_info,psd_info,(size_t*) NULL, exception); } static MagickBooleanType WritePSDImage(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { const StringInfo *icc_profile; MagickBooleanType status; PSDInfo psd_info; register ssize_t i; size_t length, num_channels, packet_size; StringInfo *bim_profile; /* Open image file. 
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); packet_size=(size_t) (image->depth > 8 ? 6 : 3); if (image->alpha_trait != UndefinedPixelTrait) packet_size+=image->depth > 8 ? 2 : 1; psd_info.version=1; if ((LocaleCompare(image_info->magick,"PSB") == 0) || (image->columns > 30000) || (image->rows > 30000)) psd_info.version=2; (void) WriteBlob(image,4,(const unsigned char *) "8BPS"); (void) WriteBlobMSBShort(image,psd_info.version); /* version */ for (i=1; i <= 6; i++) (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */ /* When the image has a color profile it won't be converted to gray scale */ if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) && (SetImageGray(image,exception) != MagickFalse)) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else if ((image_info->type != TrueColorType) && (image_info->type != TrueColorAlphaType) && (image->storage_class == PseudoClass)) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else { if (image->storage_class == PseudoClass) (void) SetImageStorageClass(image,DirectClass,exception); if (image->colorspace != CMYKColorspace) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL); else num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL); } (void) WriteBlobMSBShort(image,(unsigned short) num_channels); (void) WriteBlobMSBLong(image,(unsigned int) image->rows); (void) WriteBlobMSBLong(image,(unsigned int) image->columns); if (IsImageGray(image) != MagickFalse) { MagickBooleanType monochrome; /* Write depth & mode. */ monochrome=IsImageMonochrome(image) && (image->depth == 1) ? MagickTrue : MagickFalse; (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8)); (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? BitmapMode : GrayscaleMode)); } else { (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? 8 : image->depth > 8 ? 16 : 8)); if (((image_info->colorspace != UndefinedColorspace) || (image->colorspace != CMYKColorspace)) && (image_info->colorspace != CMYKColorspace)) { (void) TransformImageColorspace(image,sRGBColorspace,exception); (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? IndexedMode : RGBMode)); } else { if (image->colorspace != CMYKColorspace) (void) TransformImageColorspace(image,CMYKColorspace,exception); (void) WriteBlobMSBShort(image,CMYKMode); } } if ((IsImageGray(image) != MagickFalse) || (image->storage_class == DirectClass) || (image->colors > 256)) (void) WriteBlobMSBLong(image,0); else { /* Write PSD raster colormap. 
*/ (void) WriteBlobMSBLong(image,768); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].red))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].green))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].blue))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); } /* Image resource block. */ length=28; /* 0x03EB */ bim_profile=(StringInfo *) GetImageProfile(image,"8bim"); icc_profile=GetImageProfile(image,"icc"); if (bim_profile != (StringInfo *) NULL) { bim_profile=CloneStringInfo(bim_profile); if (icc_profile != (StringInfo *) NULL) RemoveICCProfileFromResourceBlock(bim_profile); RemoveResolutionFromResourceBlock(bim_profile); length+=PSDQuantum(GetStringInfoLength(bim_profile)); } if (icc_profile != (const StringInfo *) NULL) length+=PSDQuantum(GetStringInfoLength(icc_profile))+12; (void) WriteBlobMSBLong(image,(unsigned int) length); WriteResolutionResourceBlock(image); if (bim_profile != (StringInfo *) NULL) { (void) WriteBlob(image,GetStringInfoLength(bim_profile), GetStringInfoDatum(bim_profile)); bim_profile=DestroyStringInfo(bim_profile); } if (icc_profile != (StringInfo *) NULL) { (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x0000040F); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength( icc_profile)); (void) WriteBlob(image,GetStringInfoLength(icc_profile), GetStringInfoDatum(icc_profile)); if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile))) (void) WriteBlobByte(image,0); } if (status != MagickFalse) { MagickOffsetType size_offset; size_t size; size_offset=TellBlob(image); (void) SetPSDSize(&psd_info,image,0); status=WritePSDLayersInternal(image,image_info,&psd_info,&size, exception); size_offset+=WritePSDSize(&psd_info,image,size+ (psd_info.version == 1 ? 8 : 12),size_offset); } (void) WriteBlobMSBLong(image,0); /* user mask data */ /* Write composite image. */ if (status != MagickFalse) { CompressionType compression; compression=image->compression; if (image_info->compression != UndefinedCompression) image->compression=image_info->compression; if (image->compression == ZipCompression) image->compression=RLECompression; if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse, exception) == 0) status=MagickFalse; image->compression=compression; } (void) CloseBlob(image); return(status); }
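The PSD writer above run-length encodes each channel row with PSDPackbitsEncodeImage, using the usual PackBits control-byte convention: a control value n below 128 announces n+1 literal bytes, a value above 128 announces 257-n copies of the next byte, and 128 is emitted as an end-of-data marker. A minimal decoder for that byte stream is sketched below; it is not the coder's ReadPSDChannelRLE, and the helper name packbits_decode is made up for illustration.

#include <stddef.h>

/* Minimal PackBits decoder sketch (not ImageMagick's ReadPSDChannelRLE):
     control n in 0..127   -> copy the next n+1 literal bytes,
     control n in 129..255 -> repeat the next byte 257-n times,
     control 128           -> no operation (used above as an EOD marker).
   Returns the number of bytes written to dst, or (size_t) -1 on a
   truncated or oversized run. */
static size_t packbits_decode(const unsigned char *src,size_t src_length,
  unsigned char *dst,size_t dst_length)
{
  size_t di = 0, si = 0;

  while (si < src_length)
  {
    unsigned char control = src[si++];
    if (control == 128)
      continue;                              /* skip the EOD / no-op marker */
    if (control < 128)
      {
        size_t run = (size_t) control+1;     /* literal run */
        if ((si+run > src_length) || (di+run > dst_length))
          return((size_t) -1);
        while (run-- != 0)
          dst[di++]=src[si++];
      }
    else
      {
        size_t run = 257-(size_t) control;   /* repeated byte */
        if ((si >= src_length) || (di+run > dst_length))
          return((size_t) -1);
        while (run-- != 0)
          dst[di++]=src[si];
        si++;
      }
  }
  return(di);
}

Both buffers are bounds-checked so a corrupt run fails instead of overrunning the destination, mirroring the defensive length checks in the reader above.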
GB_unop__identity_int16_int32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_int16_int32) // op(A') function: GB (_unop_tran__identity_int16_int32) // C type: int16_t // A type: int32_t // cast: int16_t cij = (int16_t) aij // unaryop: cij = aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ int16_t z = (int16_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int16_t z = (int16_t) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_int16_int32) ( int16_t *Cx, // Cx and Ax may be aliased const int32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int32_t aij = Ax [p] ; int16_t z = (int16_t) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int32_t aij = Ax [p] ; int16_t z = (int16_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_int16_int32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
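Stripped of the GB_* macro layer and the OpenMP pragmas, the generated apply kernel above reduces to a cast-and-copy loop with an optional bitmap test. The plain-C sketch below only restates that pattern for readability; apply_identity_int16_int32 is a hypothetical helper, not part of the GraphBLAS API.

#include <stdint.h>
#include <stddef.h>

/* Plain-C restatement of the kernel the macros expand to: apply
   z = (int16_t) aij entry by entry, honoring an optional bitmap Ab
   (Ab[p] == 0 means entry p is not present).  Hypothetical helper. */
static void apply_identity_int16_int32(int16_t *Cx, const int32_t *Ax,
    const int8_t *Ab, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   /* skip holes in the bitmap case */
        int32_t aij = Ax [p] ;                  /* GB_GETA  */
        int16_t z = (int16_t) aij ;             /* GB_CAST  */
        Cx [p] = z ;                            /* GB_CX(p) */
    }
}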
convolution_winograd_dot_pack8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convolution_winograd_dot_pack8_avx(Mat& bottom_blob_tm, int outch, const Mat& kernel_tm, Mat& top_blob_tm, const Option& opt) { // Mat bottom_blob_tm(tiles, 16/36/64, inch, 32u, 4, opt.workspace_allocator); const int tiles = bottom_blob_tm.w; const int batch = bottom_blob_tm.h; const int inch = bottom_blob_tm.c; // permute Mat bottom_blob_tm2; if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, batch, 32u, 8, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, batch, 32u, 8, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, batch, 32u, 8, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, batch, 32u, 8, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, batch, 32u, 8, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < batch; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 11 < tiles; i += 12) { float* tmpptr = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x12 __m256 _r0 = _mm256_load_ps(r0); __m256 _r1 = _mm256_load_ps(r0 + 8); __m256 _r2 = _mm256_load_ps(r0 + 8 * 2); __m256 _r3 = _mm256_load_ps(r0 + 8 * 3); __m256 _r4 = _mm256_load_ps(r0 + 8 * 4); __m256 _r5 = _mm256_load_ps(r0 + 8 * 5); __m256 _r6 = _mm256_load_ps(r0 + 8 * 6); __m256 _r7 = _mm256_load_ps(r0 + 8 * 7); __m256 _r8 = _mm256_load_ps(r0 + 8 * 8); __m256 _r9 = _mm256_load_ps(r0 + 8 * 9); __m256 _ra = _mm256_load_ps(r0 + 8 * 10); __m256 _rb = _mm256_load_ps(r0 + 8 * 11); __m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1); __m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1); __m256 _tmp2 = _mm256_unpacklo_ps(_r2, _r3); __m256 _tmp3 = _mm256_unpackhi_ps(_r2, _r3); __m256 _tmp4 = _mm256_unpacklo_ps(_r4, _r5); __m256 _tmp5 = _mm256_unpackhi_ps(_r4, _r5); __m256 _tmp6 = _mm256_unpacklo_ps(_r6, _r7); __m256 _tmp7 = _mm256_unpackhi_ps(_r6, _r7); __m256 _tmp8 = _mm256_unpacklo_ps(_r8, _r9); __m256 _tmp9 = _mm256_unpackhi_ps(_r8, _r9); __m256 _tmpa = _mm256_unpacklo_ps(_ra, _rb); __m256 _tmpb = _mm256_unpackhi_ps(_ra, _rb); __m256 _tmpc = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmpd = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2)); __m256 _tmpe = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmpf = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2)); __m256 _tmpg = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmph = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2)); 
__m256 _tmpi = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmpj = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2)); __m256 _tmpk = _mm256_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmpl = _mm256_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(3, 2, 3, 2)); __m256 _tmpm = _mm256_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmpn = _mm256_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(3, 2, 3, 2)); _r0 = _mm256_permute2f128_ps(_tmpc, _tmpg, _MM_SHUFFLE(0, 2, 0, 0)); _r1 = _mm256_permute2f128_ps(_tmpk, _tmpd, _MM_SHUFFLE(0, 2, 0, 0)); _r2 = _mm256_permute2f128_ps(_tmph, _tmpl, _MM_SHUFFLE(0, 2, 0, 0)); _r3 = _mm256_permute2f128_ps(_tmpe, _tmpi, _MM_SHUFFLE(0, 2, 0, 0)); _r4 = _mm256_permute2f128_ps(_tmpm, _tmpf, _MM_SHUFFLE(0, 2, 0, 0)); _r5 = _mm256_permute2f128_ps(_tmpj, _tmpn, _MM_SHUFFLE(0, 2, 0, 0)); _r6 = _mm256_permute2f128_ps(_tmpc, _tmpg, _MM_SHUFFLE(0, 3, 0, 1)); _r7 = _mm256_permute2f128_ps(_tmpk, _tmpd, _MM_SHUFFLE(0, 3, 0, 1)); _r8 = _mm256_permute2f128_ps(_tmph, _tmpl, _MM_SHUFFLE(0, 3, 0, 1)); _r9 = _mm256_permute2f128_ps(_tmpe, _tmpi, _MM_SHUFFLE(0, 3, 0, 1)); _ra = _mm256_permute2f128_ps(_tmpm, _tmpf, _MM_SHUFFLE(0, 3, 0, 1)); _rb = _mm256_permute2f128_ps(_tmpj, _tmpn, _MM_SHUFFLE(0, 3, 0, 1)); _mm256_store_ps(tmpptr, _r0); _mm256_store_ps(tmpptr + 8, _r1); _mm256_store_ps(tmpptr + 8 * 2, _r2); _mm256_store_ps(tmpptr + 8 * 3, _r3); _mm256_store_ps(tmpptr + 8 * 4, _r4); _mm256_store_ps(tmpptr + 8 * 5, _r5); _mm256_store_ps(tmpptr + 8 * 6, _r6); _mm256_store_ps(tmpptr + 8 * 7, _r7); _mm256_store_ps(tmpptr + 8 * 8, _r8); _mm256_store_ps(tmpptr + 8 * 9, _r9); _mm256_store_ps(tmpptr + 8 * 10, _ra); _mm256_store_ps(tmpptr + 8 * 11, _rb); tmpptr += 96; r0 += bottom_blob_tm.cstep * 8; } } for (; i + 7 < tiles; i += 8) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x8 __m256 _r0 = _mm256_load_ps(r0); __m256 _r1 = _mm256_load_ps(r0 + 8); __m256 _r2 = _mm256_load_ps(r0 + 8 * 2); __m256 _r3 = _mm256_load_ps(r0 + 8 * 3); __m256 _r4 = _mm256_load_ps(r0 + 8 * 4); __m256 _r5 = _mm256_load_ps(r0 + 8 * 5); __m256 _r6 = _mm256_load_ps(r0 + 8 * 6); __m256 _r7 = _mm256_load_ps(r0 + 8 * 7); __m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1); __m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1); __m256 _tmp2 = _mm256_unpacklo_ps(_r2, _r3); __m256 _tmp3 = _mm256_unpackhi_ps(_r2, _r3); __m256 _tmp4 = _mm256_unpacklo_ps(_r4, _r5); __m256 _tmp5 = _mm256_unpackhi_ps(_r4, _r5); __m256 _tmp6 = _mm256_unpacklo_ps(_r6, _r7); __m256 _tmp7 = _mm256_unpackhi_ps(_r6, _r7); __m256 _tmp8 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmp9 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2)); __m256 _tmpa = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmpb = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2)); __m256 _tmpc = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmpd = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2)); __m256 _tmpe = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmpf = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2)); _r0 = _mm256_permute2f128_ps(_tmp8, _tmpc, _MM_SHUFFLE(0, 2, 0, 0)); _r1 = _mm256_permute2f128_ps(_tmp9, _tmpd, _MM_SHUFFLE(0, 2, 0, 0)); _r2 = _mm256_permute2f128_ps(_tmpa, _tmpe, _MM_SHUFFLE(0, 2, 0, 0)); _r3 = _mm256_permute2f128_ps(_tmpb, _tmpf, _MM_SHUFFLE(0, 2, 0, 0)); _r4 = _mm256_permute2f128_ps(_tmp8, _tmpc, _MM_SHUFFLE(0, 3, 0, 
1)); _r5 = _mm256_permute2f128_ps(_tmp9, _tmpd, _MM_SHUFFLE(0, 3, 0, 1)); _r6 = _mm256_permute2f128_ps(_tmpa, _tmpe, _MM_SHUFFLE(0, 3, 0, 1)); _r7 = _mm256_permute2f128_ps(_tmpb, _tmpf, _MM_SHUFFLE(0, 3, 0, 1)); _mm256_store_ps(tmpptr, _r0); _mm256_store_ps(tmpptr + 8, _r1); _mm256_store_ps(tmpptr + 8 * 2, _r2); _mm256_store_ps(tmpptr + 8 * 3, _r3); _mm256_store_ps(tmpptr + 8 * 4, _r4); _mm256_store_ps(tmpptr + 8 * 5, _r5); _mm256_store_ps(tmpptr + 8 * 6, _r6); _mm256_store_ps(tmpptr + 8 * 7, _r7); tmpptr += 64; r0 += bottom_blob_tm.cstep * 8; } } for (; i + 3 < tiles; i += 4) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x4 __m256 _r0 = _mm256_load_ps(r0); __m256 _r1 = _mm256_load_ps(r0 + 8); __m256 _r2 = _mm256_load_ps(r0 + 8 * 2); __m256 _r3 = _mm256_load_ps(r0 + 8 * 3); __m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1); __m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1); __m256 _tmp2 = _mm256_unpacklo_ps(_r2, _r3); __m256 _tmp3 = _mm256_unpackhi_ps(_r2, _r3); __m256 _tmp4 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmp5 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2)); __m256 _tmp6 = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmp7 = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2)); _r0 = _mm256_permute2f128_ps(_tmp4, _tmp5, _MM_SHUFFLE(0, 2, 0, 0)); _r1 = _mm256_permute2f128_ps(_tmp6, _tmp7, _MM_SHUFFLE(0, 2, 0, 0)); _r2 = _mm256_permute2f128_ps(_tmp4, _tmp5, _MM_SHUFFLE(0, 3, 0, 1)); _r3 = _mm256_permute2f128_ps(_tmp6, _tmp7, _MM_SHUFFLE(0, 3, 0, 1)); _mm256_store_ps(tmpptr, _r0); _mm256_store_ps(tmpptr + 8, _r1); _mm256_store_ps(tmpptr + 8 * 2, _r2); _mm256_store_ps(tmpptr + 8 * 3, _r3); tmpptr += 32; r0 += bottom_blob_tm.cstep * 8; } } for (; i + 1 < tiles; i += 2) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x2 __m256 _r0 = _mm256_load_ps(r0); __m256 _r1 = _mm256_load_ps(r0 + 8); __m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1); __m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1); _r0 = _mm256_permute2f128_ps(_tmp0, _tmp1, _MM_SHUFFLE(0, 2, 0, 0)); _r1 = _mm256_permute2f128_ps(_tmp0, _tmp1, _MM_SHUFFLE(0, 3, 0, 1)); _mm256_store_ps(tmpptr, _r0); _mm256_store_ps(tmpptr + 8, _r1); tmpptr += 16; r0 += bottom_blob_tm.cstep * 8; } } for (; i < tiles; i++) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m256 _val = _mm256_load_ps(r0); _mm256_store_ps(tmpptr, _val); tmpptr += 8; r0 += bottom_blob_tm.cstep * 8; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, batch, outch, 32u, 8, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r = 0; r < batch; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k0 = kernel0_tm.row(r); int nn = inch * 8; // inch always > 0 __m256 _sum0 = _mm256_setzero_ps(); __m256 _sum1 = _mm256_setzero_ps(); __m256 _sum2 = _mm256_setzero_ps(); __m256 _sum3 = _mm256_setzero_ps(); __m256 _sum4 = _mm256_setzero_ps(); __m256 _sum5 = 
_mm256_setzero_ps(); __m256 _sum6 = _mm256_setzero_ps(); __m256 _sum7 = _mm256_setzero_ps(); __m256 _sum8 = _mm256_setzero_ps(); __m256 _sum9 = _mm256_setzero_ps(); __m256 _suma = _mm256_setzero_ps(); __m256 _sumb = _mm256_setzero_ps(); for (int j = 0; j < nn; j++) { __m256 _w0 = _mm256_load_ps(k0); __m256 _val0 = _mm256_broadcast_ss(r0); __m256 _val1 = _mm256_broadcast_ss(r0 + 1); _sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1); __m256 _val2 = _mm256_broadcast_ss(r0 + 2); __m256 _val3 = _mm256_broadcast_ss(r0 + 3); _sum2 = _mm256_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm256_comp_fmadd_ps(_val3, _w0, _sum3); __m256 _val4 = _mm256_broadcast_ss(r0 + 4); __m256 _val5 = _mm256_broadcast_ss(r0 + 5); _sum4 = _mm256_comp_fmadd_ps(_val4, _w0, _sum4); _sum5 = _mm256_comp_fmadd_ps(_val5, _w0, _sum5); __m256 _val6 = _mm256_broadcast_ss(r0 + 6); __m256 _val7 = _mm256_broadcast_ss(r0 + 7); _sum6 = _mm256_comp_fmadd_ps(_val6, _w0, _sum6); _sum7 = _mm256_comp_fmadd_ps(_val7, _w0, _sum7); __m256 _val8 = _mm256_broadcast_ss(r0 + 8); __m256 _val9 = _mm256_broadcast_ss(r0 + 9); _sum8 = _mm256_comp_fmadd_ps(_val8, _w0, _sum8); _sum9 = _mm256_comp_fmadd_ps(_val9, _w0, _sum9); __m256 _vala = _mm256_broadcast_ss(r0 + 10); __m256 _valb = _mm256_broadcast_ss(r0 + 11); _suma = _mm256_comp_fmadd_ps(_vala, _w0, _suma); _sumb = _mm256_comp_fmadd_ps(_valb, _w0, _sumb); r0 += 12; k0 += 8; } _mm256_store_ps(output0_tm, _sum0); _mm256_store_ps(output0_tm + 8, _sum1); _mm256_store_ps(output0_tm + 8 * 2, _sum2); _mm256_store_ps(output0_tm + 8 * 3, _sum3); _mm256_store_ps(output0_tm + 8 * 4, _sum4); _mm256_store_ps(output0_tm + 8 * 5, _sum5); _mm256_store_ps(output0_tm + 8 * 6, _sum6); _mm256_store_ps(output0_tm + 8 * 7, _sum7); _mm256_store_ps(output0_tm + 8 * 8, _sum8); _mm256_store_ps(output0_tm + 8 * 9, _sum9); _mm256_store_ps(output0_tm + 8 * 10, _suma); _mm256_store_ps(output0_tm + 8 * 11, _sumb); output0_tm += 8 * 12; } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* k0 = kernel0_tm.row(r); int nn = inch * 8; // inch always > 0 __m256 _sum0 = _mm256_setzero_ps(); __m256 _sum1 = _mm256_setzero_ps(); __m256 _sum2 = _mm256_setzero_ps(); __m256 _sum3 = _mm256_setzero_ps(); __m256 _sum4 = _mm256_setzero_ps(); __m256 _sum5 = _mm256_setzero_ps(); __m256 _sum6 = _mm256_setzero_ps(); __m256 _sum7 = _mm256_setzero_ps(); for (int j = 0; j < nn; j++) { __m256 _w0 = _mm256_load_ps(k0); __m256 _val0 = _mm256_broadcast_ss(r0); __m256 _val1 = _mm256_broadcast_ss(r0 + 1); _sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1); __m256 _val2 = _mm256_broadcast_ss(r0 + 2); __m256 _val3 = _mm256_broadcast_ss(r0 + 3); _sum2 = _mm256_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm256_comp_fmadd_ps(_val3, _w0, _sum3); __m256 _val4 = _mm256_broadcast_ss(r0 + 4); __m256 _val5 = _mm256_broadcast_ss(r0 + 5); _sum4 = _mm256_comp_fmadd_ps(_val4, _w0, _sum4); _sum5 = _mm256_comp_fmadd_ps(_val5, _w0, _sum5); __m256 _val6 = _mm256_broadcast_ss(r0 + 6); __m256 _val7 = _mm256_broadcast_ss(r0 + 7); _sum6 = _mm256_comp_fmadd_ps(_val6, _w0, _sum6); _sum7 = _mm256_comp_fmadd_ps(_val7, _w0, _sum7); r0 += 8; k0 += 8; } _mm256_store_ps(output0_tm, _sum0); _mm256_store_ps(output0_tm + 8, _sum1); _mm256_store_ps(output0_tm + 8 * 2, _sum2); _mm256_store_ps(output0_tm + 8 * 3, _sum3); _mm256_store_ps(output0_tm + 8 * 4, _sum4); _mm256_store_ps(output0_tm + 8 * 5, _sum5); _mm256_store_ps(output0_tm + 8 * 6, _sum6); 
_mm256_store_ps(output0_tm + 8 * 7, _sum7); output0_tm += 8 * 8; } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* k0 = kernel0_tm.row(r); int nn = inch * 8; // inch always > 0 __m256 _sum0 = _mm256_setzero_ps(); __m256 _sum1 = _mm256_setzero_ps(); __m256 _sum2 = _mm256_setzero_ps(); __m256 _sum3 = _mm256_setzero_ps(); for (int j = 0; j < nn; j++) { __m256 _w0 = _mm256_load_ps(k0); __m256 _val0 = _mm256_broadcast_ss(r0); __m256 _val1 = _mm256_broadcast_ss(r0 + 1); _sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1); __m256 _val2 = _mm256_broadcast_ss(r0 + 2); __m256 _val3 = _mm256_broadcast_ss(r0 + 3); _sum2 = _mm256_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm256_comp_fmadd_ps(_val3, _w0, _sum3); r0 += 4; k0 += 8; } _mm256_store_ps(output0_tm, _sum0); _mm256_store_ps(output0_tm + 8, _sum1); _mm256_store_ps(output0_tm + 8 * 2, _sum2); _mm256_store_ps(output0_tm + 8 * 3, _sum3); output0_tm += 8 * 4; } for (; i + 1 < tiles; i += 2) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* k0 = kernel0_tm.row(r); int nn = inch * 8; // inch always > 0 __m256 _sum0 = _mm256_setzero_ps(); __m256 _sum1 = _mm256_setzero_ps(); for (int j = 0; j < nn; j++) { __m256 _w0 = _mm256_load_ps(k0); __m256 _val0 = _mm256_broadcast_ss(r0); __m256 _val1 = _mm256_broadcast_ss(r0 + 1); _sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1); r0 += 2; k0 += 8; } _mm256_store_ps(output0_tm, _sum0); _mm256_store_ps(output0_tm + 8, _sum1); output0_tm += 8 * 2; } for (; i < tiles; i++) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* k0 = kernel0_tm.row(r); int nn = inch * 8; // inch always > 0 __m256 _sum0 = _mm256_setzero_ps(); for (int j = 0; j < nn; j++) { __m256 _w0 = _mm256_load_ps(k0); __m256 _val0 = _mm256_broadcast_ss(r0); _sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0); r0 += 1; k0 += 8; } _mm256_store_ps(output0_tm, _sum0); output0_tm += 8; } } } }
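The packing and GEMM loops above consume tile columns in groups of 12, 8, 4, 2 and 1, and locate the packed row for a tile column i with the expression i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2. A small sketch of the same arithmetic written with running remainders (the helper name is made up for illustration):

/* Sketch: packed-row index used by tm2.row(...) above. Tile columns are
   consumed in blocks of 12, then 8, 4, 2 and 1, and each block occupies
   one row of the packed buffer. */
static int packed_row_for_tile(int i)
{
    int row = i / 12;      /* complete 12-wide blocks before column i */
    int rem = i % 12;
    row += rem / 8;        /* a preceding 8-wide block, if any        */
    rem %= 8;
    row += rem / 4;        /* 4-wide block                            */
    rem %= 4;
    row += rem / 2;        /* 2-wide block                            */
    rem %= 2;
    row += rem;            /* single leftover column                  */
    return row;
}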
DRB021-reductionmissing-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* A kernel with two level parallelizable loop with reduction: if reduction(+:sum) is missing, there is race condition. Data race pairs: we allow multiple pairs to preserve the pattern. sum@70:7 vs. sum@70:7 sum@70:7 vs. sum@70:13 */ #include <stdio.h> int main(int argc, char* argv[]) { int i,j; float temp, sum=0.0; int len=100; float u[100][100]; #pragma omp parallel for private(i, j) for (i = 0; i < len; i++) #pragma omp parallel for private(j) for (j = 0; j < len; j++) u[i][j] = 0.5; #pragma omp parallel for private (temp, j) reduction(+:sum) collapse(2) for (i = 0; i < len; i++) for (j = 0; j < len; j++) { temp = u[i][j]; sum = sum + temp * temp; } printf ("sum = %f\n", sum); return 0; }
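The header comment above describes the race that appears when the reduction clause is left off the accumulation loop, while the loop as written does carry reduction(+:sum) (plus a collapse(2)). For reference, a minimal sketch of both variants of that loop: the racy one the comment refers to, and the reduction form that removes the race.

/* Racy variant described in the comment: sum is shared, and the
   read-modify-write "sum = sum + temp * temp" is unsynchronized. */
#pragma omp parallel for private(temp, j)
for (i = 0; i < len; i++)
  for (j = 0; j < len; j++) {
    temp = u[i][j];
    sum = sum + temp * temp;   /* data race on sum */
  }

/* Race-free variant: each thread accumulates into a private copy of
   sum; the copies are combined when the loop finishes. */
#pragma omp parallel for private(temp, j) reduction(+:sum)
for (i = 0; i < len; i++)
  for (j = 0; j < len; j++) {
    temp = u[i][j];
    sum = sum + temp * temp;
  }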
test.c
#include <stdio.h>
#include <omp.h>
#include <math.h>
#include "../utilities/check.h"
#include "../utilities/utilities.h"

int main(void) {
  check_offloading();

  double A = 2.0;
  float B = 2.0;

#pragma omp target map(A, B)
  {
    A = sqrt(pow(A, B)) + pow(A, 4);
  }

  printf("%lf\n", A - pow(2, 4));

  return 0;
}
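In the target region above, map(A, B) names the scalars without a map-type; the default map-type is tofrom, so A is copied to the device, updated there (sqrt(pow(2, 2)) + pow(2, 4) = 18), and copied back, and the program is expected to print 2.000000. The same mapping spelled out explicitly:

#pragma omp target map(tofrom: A, B)
{
    A = sqrt(pow(A, B)) + pow(A, 4);   /* 2 + 16 = 18 on the device */
}
/* back on the host: A - pow(2, 4) == 2.0 */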
test.c
#include <stdio.h> #include <omp.h> #include "../utilities/check.h" #include "../utilities/utilities.h" #define MAX_N 25000 #define TRIALS (1) #define N (1024*3) #define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i+1;}) #define ZERO(X) ZERO_ARRAY(N, X) #define DUMP_SUCCESS6() { \ if (cpuExec) { \ DUMP_SUCCESS(3*6); \ } \ } void reset_input(double *a, double *a_h, double *b, double *c) { for(int i = 0 ; i < MAX_N ; i++) { a[i] = a_h[i] = i; b[i] = i*2; c[i] = i-3; } } // // FIXME: // Add support for 'shared', 'lastprivate' // int main(void) { check_offloading(); double A[N], B[N], C[N], D[N], E[N]; double S[N]; double p[2]; int cpuExec = 0; #pragma omp target map(tofrom: cpuExec) { cpuExec = omp_is_initial_device(); } int max_teams = 256; int gpu_threads = 256; int cpu_threads = 32; int max_threads = cpuExec ? cpu_threads : gpu_threads; INIT(); // // Test: proc_bind clause // #undef TARGET_TDPARALLEL_FOR_SIMD_CLAUSES #define TARGET_TDPARALLEL_FOR_SIMD_CLAUSES proc_bind(master) #include "ttdpfs_defines.h" for (int tms = 1; tms <= max_teams; tms *= 3) { for (int t = 1; t <= max_threads; t+=78) { int threads[1]; threads[0] = t; int num_teams = cpuExec? 1 : tms; TARGET_TDPARALLEL_FOR_SIMD1( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) N/2*(N+1))) TARGET_TDPARALLEL_FOR_SIMD2( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) N/2*(N+1))) TARGET_TDPARALLEL_FOR_SIMD3( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) N/2*(N+1))) TARGET_TDPARALLEL_FOR_SIMD4( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) N/2*(N+1))) TARGET_TDPARALLEL_FOR_SIMD5( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) N/2*(N+1))) TARGET_TDPARALLEL_FOR_SIMD6( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) N/2*(N+1))) } DUMP_SUCCESS6() } #undef TARGET_TDPARALLEL_FOR_SIMD_CLAUSES #define TARGET_TDPARALLEL_FOR_SIMD_CLAUSES proc_bind(close) #include "ttdpfs_defines.h" for (int tms = 1; tms <= max_teams; tms *= 3) { for (int t = 1; t <= max_threads; t+=78) { int threads[1]; threads[0] = t; int num_teams = cpuExec? 
1 : tms; TARGET_TDPARALLEL_FOR_SIMD1( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) N/2*(N+1))) TARGET_TDPARALLEL_FOR_SIMD2( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) N/2*(N+1))) TARGET_TDPARALLEL_FOR_SIMD3( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) N/2*(N+1))) TARGET_TDPARALLEL_FOR_SIMD4( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) N/2*(N+1))) TARGET_TDPARALLEL_FOR_SIMD5( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) N/2*(N+1))) TARGET_TDPARALLEL_FOR_SIMD6( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) N/2*(N+1))) } DUMP_SUCCESS6() } #undef TARGET_TDPARALLEL_FOR_SIMD_CLAUSES #define TARGET_TDPARALLEL_FOR_SIMD_CLAUSES proc_bind(spread) #include "ttdpfs_defines.h" for (int tms = 1; tms <= max_teams; tms *= 3) { for (int t = 1; t <= max_threads; t+=78) { int threads[1]; threads[0] = t; int num_teams = cpuExec? 
1 : tms; TARGET_TDPARALLEL_FOR_SIMD1( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) N/2*(N+1))) TARGET_TDPARALLEL_FOR_SIMD2( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) N/2*(N+1))) TARGET_TDPARALLEL_FOR_SIMD3( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) N/2*(N+1))) TARGET_TDPARALLEL_FOR_SIMD4( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) N/2*(N+1))) TARGET_TDPARALLEL_FOR_SIMD5( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) N/2*(N+1))) TARGET_TDPARALLEL_FOR_SIMD6( { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } }, for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i]; \ B[i] += D[i] + E[i]; \ }, { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) N/2*(N+1))) } DUMP_SUCCESS6() } // // Test: private, shared clauses on omp target teams distribute parallel for simd. // #undef TARGET_TDPARALLEL_FOR_SIMD_CLAUSES #define TARGET_TDPARALLEL_FOR_SIMD_CLAUSES private(p,q) shared(A,B,C,D,E) #include "ttdpfs_defines.h" // FIXME: shared(a) where 'a' is an implicitly mapped scalar does not work. // FIXME: shared(A) private(A) does not generate correct results. for (int tms = 1; tms <= max_teams; tms *= 3) { for (int t = 1; t <= max_threads; t+=78) { int threads[1]; threads[0] = t; int num_teams = cpuExec? 
1 : tms; TARGET_TDPARALLEL_FOR_SIMD1( double p = 2; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ p = C[i] + D[i]; \ q = D[i] + E[i]; \ A[i] += p; \ B[i] += q; \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) 6 + N/2*(N+1))) TARGET_TDPARALLEL_FOR_SIMD2( double p = 2; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ p = C[i] + D[i]; \ q = D[i] + E[i]; \ A[i] += p; \ B[i] += q; \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) 6 + N/2*(N+1))) TARGET_TDPARALLEL_FOR_SIMD3( double p = 2; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ p = C[i] + D[i]; \ q = D[i] + E[i]; \ A[i] += p; \ B[i] += q; \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) 6 + N/2*(N+1))) TARGET_TDPARALLEL_FOR_SIMD4( double p = 2; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ p = C[i] + D[i]; \ q = D[i] + E[i]; \ A[i] += p; \ B[i] += q; \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) 6 + N/2*(N+1))) TARGET_TDPARALLEL_FOR_SIMD5( double p = 2; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ p = C[i] + D[i]; \ q = D[i] + E[i]; \ A[i] += p; \ B[i] += q; \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) 6 + N/2*(N+1))) TARGET_TDPARALLEL_FOR_SIMD6( double p = 2; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ p = C[i] + D[i]; \ q = D[i] + E[i]; \ A[i] += p; \ B[i] += q; \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) 6 + N/2*(N+1))) } DUMP_SUCCESS6() } // // Test: firstprivate clause on omp target teams distribute parallel for simd. // #undef TARGET_TDPARALLEL_FOR_SIMD_CLAUSES #define TARGET_TDPARALLEL_FOR_SIMD_CLAUSES firstprivate(p,q) #include "ttdpfs_defines.h" for (int tms = 1; tms <= max_teams; tms *= 3) { for (int t = 1; t <= max_threads; t+=78) { int threads[1]; threads[0] = t; int num_teams = cpuExec? 
1 : tms; TARGET_TDPARALLEL_FOR_SIMD1( double p = -4; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i] + p; \ B[i] += D[i] + E[i] + q; \ if (i == N-1) { \ p += 6; \ q += 9; \ } \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) N/2*(N+1))) TARGET_TDPARALLEL_FOR_SIMD2( double p = -4; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i] + p; \ B[i] += D[i] + E[i] + q; \ if (i == N-1) { \ p += 6; \ q += 9; \ } \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) N/2*(N+1))) TARGET_TDPARALLEL_FOR_SIMD3( double p = -4; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i] + p; \ B[i] += D[i] + E[i] + q; \ if (i == N-1) { \ p += 6; \ q += 9; \ } \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) N/2*(N+1))) TARGET_TDPARALLEL_FOR_SIMD4( double p = -4; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i] + p; \ B[i] += D[i] + E[i] + q; \ if (i == N-1) { \ p += 6; \ q += 9; \ } \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) N/2*(N+1))) TARGET_TDPARALLEL_FOR_SIMD5( double p = -4; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i] + p; \ B[i] += D[i] + E[i] + q; \ if (i == N-1) { \ p += 6; \ q += 9; \ } \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) N/2*(N+1))) TARGET_TDPARALLEL_FOR_SIMD6( double p = -4; \ double q = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i] + p; \ B[i] += D[i] + E[i] + q; \ if (i == N-1) { \ p += 6; \ q += 9; \ } \ }, { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) N/2*(N+1))) } DUMP_SUCCESS6() } #if 0 FIXME // // Test: lastprivate clause on omp target teams distribute parallel for simd. // #undef TARGET_TDPARALLEL_FOR_SIMD_CLAUSES #define TARGET_TDPARALLEL_FOR_SIMD_CLAUSES lastprivate(q) #include "ttdpfs_defines.h" // FIXME: modify to t=1 and in ttdpfs_defines.h to use host after bug fix. // FIXME: variable is not private. for (int tms = 1; tms <= max_teams; tms *= 3) { for (int t = 0; t <= max_threads; t++) { int threads[1]; threads[0] = t; int num_teams = tms; TARGET_TDPARALLEL_FOR_SIMD1( double p[1]; \ double q[1]; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ p[0] = C[i] + D[i]; \ q[0] = D[i] + E[i]; \ A[i] = p[0]; \ B[i] = q[0]; \ }, { double tmp = p[0] + q[0]; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) N+1+ N/2*(N+1))) } FIXME: private of non-scalar does not work. // // Test: private clause on omp parallel for. 
// #undef PARALLEL_FOR_CLAUSES #define PARALLEL_FOR_CLAUSES private(p) #include "ttdpfs_defines.h" for (int t = 0; t <= 224; t++) { int threads[1]; threads[0] = t; PARALLEL_FOR( p[0] = 2; p[1] = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ p[0] = C[i] + D[i]; \ p[1] = D[i] + E[i]; \ A[i] += p[0]; \ B[i] += p[1]; \ } , { double tmp = p[0] + p[1]; for (int i = 0; i < N; i++) { tmp += A[i] + B[i]; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) 6 + SUMS * (N/2*(N+1)))) } FIXME: private of non-scalar does not work. // // Test: firstprivate clause on omp parallel for. // #undef PARALLEL_FOR_CLAUSES #define PARALLEL_FOR_CLAUSES firstprivate(p) #include "ttdpfs_defines.h" for (int t = 0; t <= 224; t++) { int threads[1]; threads[0] = t; PARALLEL_FOR( p[0] = -4; p[1] = 4; \ S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < N; i++) { \ A[i] += C[i] + D[i] + p[0]; \ B[i] += D[i] + E[i] + p[1]; \ if (i == N-1) { \ p[0] += 6; \ p[1] += 9; \ } \ } , { double tmp = p[0] + p[1]; for (int i = 0; i < N; i++) { tmp += A[i] + B[i]; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) SUMS * (N/2*(N+1)))) } } #endif // // Test: collapse clause on omp target teams distribute parallel for simd. // #undef TARGET_TDPARALLEL_FOR_SIMD_CLAUSES #define TARGET_TDPARALLEL_FOR_SIMD_CLAUSES collapse(2) #include "ttdpfs_defines.h" for (int tms = 1; tms <= max_teams; tms *= 3) { for (int t = 1; t <= max_threads; t+=78) { int threads[1]; threads[0] = t; int num_teams = cpuExec? 1 : tms; TARGET_TDPARALLEL_FOR_SIMD1( S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < 1024; i++) { \ for (int j = 0; j < 3; j++) { \ A[i*3+j] += C[i*3+j] + D[i*3+j]; \ B[i*3+j] += D[i*3+j] + E[i*3+j]; \ } \ } , { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) (N/2*(N+1)))) TARGET_TDPARALLEL_FOR_SIMD2( S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < 1024; i++) { \ for (int j = 0; j < 3; j++) { \ A[i*3+j] += C[i*3+j] + D[i*3+j]; \ B[i*3+j] += D[i*3+j] + E[i*3+j]; \ } \ } , { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) (N/2*(N+1)))) TARGET_TDPARALLEL_FOR_SIMD3( S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < 1024; i++) { \ for (int j = 0; j < 3; j++) { \ A[i*3+j] += C[i*3+j] + D[i*3+j]; \ B[i*3+j] += D[i*3+j] + E[i*3+j]; \ } \ } , { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) (N/2*(N+1)))) TARGET_TDPARALLEL_FOR_SIMD4( S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < 1024; i++) { \ for (int j = 0; j < 3; j++) { \ A[i*3+j] += C[i*3+j] + D[i*3+j]; \ B[i*3+j] += D[i*3+j] + E[i*3+j]; \ } \ } , { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) (N/2*(N+1)))) TARGET_TDPARALLEL_FOR_SIMD5( S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < 1024; i++) { \ for (int j = 0; j < 3; j++) { \ A[i*3+j] += C[i*3+j] + D[i*3+j]; \ B[i*3+j] += D[i*3+j] + E[i*3+j]; \ } \ } , { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) (N/2*(N+1)))) TARGET_TDPARALLEL_FOR_SIMD6( S[0] = 0; \ for (int i = 0; i < N; i++) { \ A[i] = B[i] = 0; \ } , for (int i = 0; i < 1024; i++) 
{ \ for (int j = 0; j < 3; j++) { \ A[i*3+j] += C[i*3+j] + D[i*3+j]; \ B[i*3+j] += D[i*3+j] + E[i*3+j]; \ } \ } , { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; }, VERIFY(0, 1, S[0], (double) (N/2*(N+1)))) } DUMP_SUCCESS6() } double * a = (double *) malloc(MAX_N * sizeof(double)); double * a_h = (double *) malloc(MAX_N * sizeof(double)); double * b = (double *) malloc(MAX_N * sizeof(double)); double * c = (double *) malloc(MAX_N * sizeof(double)); #pragma omp target enter data map(to:a[:MAX_N],b[:MAX_N],c[:MAX_N]) // 1. no schedule clauses printf("no schedule clauses\n"); for (int n = 32 ; n < MAX_N ; n+=5000) { reset_input(a, a_h, b, c); #pragma omp target update to(a[:n],b[:n],c[:n]) int t = 0; for (int tms = 1 ; tms <= 256 ; tms *= 2) { // 8 times for (int ths = 32 ; ths <= 1024 ; ths *= 2) { // 6 times t++; #pragma omp target teams distribute parallel for simd num_teams(tms) thread_limit(ths) for (int i = 0; i < n; ++i) { a[i] += b[i] + c[i]; } } // loop over 'ths' } // loop over 'tms' // check results for each 'n' for (int times = 0 ; times < t ; times++) for (int i = 0; i < n; ++i) a_h[i] += b[i] + c[i]; #pragma omp target update from(a[:n]) for (int i = 0; i < n; ++i) { if (a_h[i] != a[i]) { printf("Error at n = %d, i = %d: host = %lf, device = %lf\n", n, i, a_h[i], a[i]); return 1; } } } // loop over 'n' printf("Succeeded\n"); // 2. schedule static no chunk printf("schedule static no chunk\n"); for (int n = 32 ; n < MAX_N ; n+=5000) { reset_input(a, a_h, b, c); #pragma omp target update to(a[:n],b[:n],c[:n]) int t = 0; for (int tms = 1 ; tms <= 256 ; tms *= 2) { // 8 times for (int ths = 32 ; ths <= 1024 ; ths *= 2) { // 6 times t++; #pragma omp target teams distribute parallel for simd schedule(static) num_teams(tms) thread_limit(ths) for (int i = 0; i < n; ++i) { a[i] += b[i] + c[i]; } } // loop over 'ths' } // loop over 'tms' // check results for each 'n' for (int times = 0 ; times < t ; times++) for (int i = 0; i < n; ++i) a_h[i] += b[i] + c[i]; #pragma omp target update from(a[:n]) for (int i = 0; i < n; ++i) { if (a_h[i] != a[i]) { printf("Error at n = %d, i = %d: host = %lf, device = %lf\n", n, i, a_h[i], a[i]); return 1; } } } // loop over 'n' printf("Succeeded\n"); // 3. schedule static chunk printf("schedule static chunk\n"); for (int n = 32 ; n < MAX_N ; n+=5000) { int t = 0; reset_input(a, a_h, b, c); #pragma omp target update to(a[:n],b[:n],c[:n]) for (int tms = 1 ; tms <= 256 ; tms *= 2) { // 8 times for (int ths = 32 ; ths <= 1024 ; ths *= 2) { // 6 times for(int sch = 1 ; sch <= n ; sch *= 3000) { t++; #pragma omp target teams distribute parallel for simd schedule(static,sch) num_teams(tms) thread_limit(ths) for (int i = 0; i < n; ++i) { a[i] += b[i] + c[i]; } } // loop 'sch' } // loop 'ths' } // loop 'tms' // check results for each 'n' for (int times = 0 ; times < t ; times++) for (int i = 0; i < n; ++i) a_h[i] += b[i] + c[i]; #pragma omp target update from(a[:n]) for (int i = 0; i < n; ++i) { if (a_h[i] != a[i]) { printf("Error at n = %d, i = %d: host = %lf, device = %lf\n", n, i, a_h[i], a[i]); return 1; } } } // loop 'n' printf("Succeeded\n"); // 4. 
schedule dynamic no chunk (debugging) printf("schedule dynamic no chunk\n"); for (int n = 32 ; n < MAX_N ; n+=5000) { int t = 0; reset_input(a, a_h, b, c); #pragma omp target update to(a[:n],b[:n],c[:n]) for (int tms = 1 ; tms <= 256 ; tms *= 2) { // 8 times for (int ths = 32 ; ths <= 1024 ; ths *= 2) { // 6 times t++; #pragma omp target teams distribute parallel for simd schedule(dynamic) num_teams(tms) thread_limit(ths) for (int i = 0; i < n; ++i) { a[i] += b[i] + c[i]; } } // loop 'ths' } // loop 'tms' // check results for each 'n' for (int times = 0 ; times < t ; times++) for (int i = 0; i < n; ++i) a_h[i] += b[i] + c[i]; #pragma omp target update from(a[:n]) for (int i = 0; i < n; ++i) { if (a_h[i] != a[i]) { printf("Error at n = %d, i = %d: host = %lf, device = %lf\n", n, i, a_h[i], a[i]); return 1; } } } // loop 'n' printf("Succeeded\n"); // 5. schedule dynamic chunk (debugging) printf("schedule dynamic chunk\n"); for (int n = 32 ; n < MAX_N ; n+=5000) { int t = 0; reset_input(a, a_h, b, c); #pragma omp target update to(a[:n],b[:n],c[:n]) for (int tms = 1 ; tms <= 256 ; tms *= 2) { // 8 times for (int ths = 32 ; ths <= 1024 ; ths *= 2) { // 6 times for(int sch = 1 ; sch <= n ; sch *= 1200) { t++; #pragma omp target teams distribute parallel for simd schedule(dynamic, sch) num_teams(tms) thread_limit(ths) for (int i = 0; i < n; ++i) { a[i] += b[i] + c[i]; } } // loop 'sch' } // loop 'ths' } // loop 'tms' // check results for each 'n' for (int times = 0 ; times < t ; times++) for (int i = 0; i < n; ++i) a_h[i] += b[i] + c[i]; #pragma omp target update from(a[:n]) for (int i = 0; i < n; ++i) { if (a_h[i] != a[i]) { printf("Error at n = %d, i = %d: host = %lf, device = %lf\n", n, i, a_h[i], a[i]); return 1; } } } // loop 'n' printf("Succeeded\n"); // 6. dist_schedule static no chunk printf("dist_schedule static no chunk\n"); for (int n = 32 ; n < MAX_N ; n+=5000) { reset_input(a, a_h, b, c); #pragma omp target update to(a[:n],b[:n],c[:n]) int t = 0; for (int tms = 1 ; tms <= 256 ; tms *= 2) { // 8 times for (int ths = 32 ; ths <= 1024 ; ths *= 2) { // 6 times t++; #pragma omp target teams distribute parallel for simd dist_schedule(static) num_teams(tms) thread_limit(ths) for (int i = 0; i < n; ++i) { a[i] += b[i] + c[i]; } } } // check results for each 'n' for (int times = 0 ; times < t ; times++) for (int i = 0; i < n; ++i) a_h[i] += b[i] + c[i]; #pragma omp target update from(a[:n]) for (int i = 0; i < n; ++i) { if (a_h[i] != a[i]) { printf("Error at n = %d, i = %d: host = %lf, device = %lf\n", n, i, a_h[i], a[i]); return 1; } } } // loop over 'n' printf("Succeeded\n"); // 7. dist_schedule static chunk printf("dist_schedule static chunk\n"); for (int n = 32 ; n < MAX_N ; n+=5000) { int t = 0; reset_input(a, a_h, b, c); #pragma omp target update to(a[:n],b[:n],c[:n]) for (int tms = 1 ; tms <= 256 ; tms *= 2) { // 8 times for (int ths = 32 ; ths <= 1024 ; ths *= 2) { // 6 times for(int sch = 128 ; sch <= n ; sch *= 10000) { t++; #pragma omp target teams distribute parallel for simd dist_schedule(static,sch) num_teams(tms) thread_limit(ths) for (int i = 0; i < n; ++i) { a[i] += b[i] + c[i]; } } // loop 'sch' } // loop 'ths' } // loop 'tms' // check results for each 'n' for (int times = 0 ; times < t ; times++) for (int i = 0; i < n; ++i) a_h[i] += b[i] + c[i]; #pragma omp target update from(a[:n]) for (int i = 0; i < n; ++i) { if (a_h[i] != a[i]) { printf("Error at n = %d, i = %d: host = %lf, device = %lf\n", n, i, a_h[i], a[i]); return 1; } } } // loop 'n' printf("Succeeded\n"); // 8. 
dist_schedule static no chunk, schedule static no chunk printf("dist_schedule static no chunk, schedule static no chunk\n"); for (int n = 32 ; n < MAX_N ; n+=5000) { reset_input(a, a_h, b, c); #pragma omp target update to(a[:n],b[:n],c[:n]) int t = 0; for (int tms = 1 ; tms <= 256 ; tms *= 2) { // 8 times for (int ths = 32 ; ths <= 1024 ; ths *= 2) { // 6 times t++; #pragma omp target teams distribute parallel for simd dist_schedule(static) schedule(static) num_teams(tms) thread_limit(ths) for (int i = 0; i < n; ++i) { a[i] += b[i] + c[i]; } } } // check results for each 'n' for (int times = 0 ; times < t ; times++) for (int i = 0; i < n; ++i) a_h[i] += b[i] + c[i]; #pragma omp target update from(a[:n]) for (int i = 0; i < n; ++i) { if (a_h[i] != a[i]) { printf("Error at n = %d, i = %d: host = %lf, device = %lf\n", n, i, a_h[i], a[i]); return 1; } } } // loop over 'n' printf("Succeeded\n"); // 9. dist_schedule static no chunk, schedule static chunk printf("dist_schedule static no chunk, schedule static chunk\n"); for (int n = 32 ; n < MAX_N ; n+=5000) { int t = 0; reset_input(a, a_h, b, c); #pragma omp target update to(a[:n],b[:n],c[:n]) for (int tms = 1 ; tms <= 256 ; tms *= 2) { // 8 times for (int ths = 32 ; ths <= 1024 ; ths *= 2) { // 6 times for(int sch = 1 ; sch <= n ; sch *= 1000) { // speed up very slow tests t++; #pragma omp target teams distribute parallel for simd dist_schedule(static) schedule(static,sch) num_teams(tms) thread_limit(ths) for (int i = 0; i < n; ++i) { a[i] += b[i] + c[i]; } } // loop 'sch' } // loop 'ths' } // loop 'tms' // check results for each 'n' for (int times = 0 ; times < t ; times++) for (int i = 0; i < n; ++i) a_h[i] += b[i] + c[i]; #pragma omp target update from(a[:n]) for (int i = 0; i < n; ++i) { if (a_h[i] != a[i]) { printf("Error at n = %d, i = %d: host = %lf, device = %lf\n", n, i, a_h[i], a[i]); return 1; } } } // loop 'n' printf("Succeeded\n"); // 10. dist_schedule static chunk, schedule static no chunk printf("dist_schedule static chunk, schedule static no chunk\n"); for (int n = 32 ; n < MAX_N ; n+=5000) { int t = 0; reset_input(a, a_h, b, c); #pragma omp target update to(a[:n],b[:n],c[:n]) for (int tms = 1 ; tms <= 256 ; tms *= 2) { // 8 times for (int ths = 32 ; ths <= 1024 ; ths *= 2) { // 6 times for(int sch = 128 ; sch <= n ; sch *= 1200) { t++; #pragma omp target teams distribute parallel for simd dist_schedule(static,sch) schedule(static) num_teams(tms) thread_limit(ths) for (int i = 0; i < n; ++i) { a[i] += b[i] + c[i]; } } // loop 'sch' } // loop 'ths' } // loop 'tms' // check results for each 'n' for (int times = 0 ; times < t ; times++) for (int i = 0; i < n; ++i) a_h[i] += b[i] + c[i]; #pragma omp target update from(a[:n]) for (int i = 0; i < n; ++i) { if (a_h[i] != a[i]) { printf("Error at n = %d, i = %d: host = %lf, device = %lf\n", n, i, a_h[i], a[i]); return 1; } } } // loop 'n' printf("Succeeded\n"); // 11. 
dist_schedule static chunk, schedule static chunk printf("dist_schedule static chunk, schedule static chunk\n"); for (int n = 32 ; n < MAX_N ; n+=5000) { int t = 0; reset_input(a, a_h, b, c); #pragma omp target update to(a[:n],b[:n],c[:n]) for (int tms = 1 ; tms <= 256 ; tms *= 2) { // 8 times for (int ths = 32 ; ths <= 1024 ; ths *= 2) { // 6 times for(int dssch = 128 ; dssch <= n ; dssch *= 1200) { for(int sch = 100 ; sch <= n ; sch *= 3000) { t++; #pragma omp target teams distribute parallel for simd dist_schedule(static,dssch) schedule(static,sch) num_teams(tms) thread_limit(ths) for (int i = 0; i < n; ++i) { a[i] += b[i] + c[i]; } } // loop 'sch' } // loop 'dssch' } // loop 'ths' } // loop 'tms' // check results for each 'n' for (int times = 0 ; times < t ; times++) for (int i = 0; i < n; ++i) a_h[i] += b[i] + c[i]; #pragma omp target update from(a[:n]) for (int i = 0; i < n; ++i) { if (a_h[i] != a[i]) { printf("Error at n = %d, i = %d: host = %lf, device = %lf\n", n, i, a_h[i], a[i]); return 1; } } } // loop 'n' printf("Succeeded\n"); // 12. dist_schedule static chunk, schedule dynamic no chunk printf("dist_schedule static chunk, schedule dynamic no chunk\n"); for (int n = 32 ; n < MAX_N ; n+=5000) { int t = 0; reset_input(a, a_h, b, c); #pragma omp target update to(a[:n],b[:n],c[:n]) for (int tms = 1 ; tms <= 256 ; tms *= 2) { // 8 times for (int ths = 32 ; ths <= 1024 ; ths *= 2) { // 6 times for(int sch = 128 ; sch <= n ; sch *= 3000) { t++; #pragma omp target teams distribute parallel for simd dist_schedule(static,sch) schedule(dynamic) num_teams(tms) thread_limit(ths) for (int i = 0; i < n; ++i) { a[i] += b[i] + c[i]; } } // loop 'sch' } // loop 'ths' } // loop 'tms' // check results for each 'n' for (int times = 0 ; times < t ; times++) for (int i = 0; i < n; ++i) a_h[i] += b[i] + c[i]; #pragma omp target update from(a[:n]) for (int i = 0; i < n; ++i) { if (a_h[i] != a[i]) { printf("Error at n = %d, i = %d: host = %lf, device = %lf\n", n, i, a_h[i], a[i]); return 1; } } } // loop 'n' printf("Succeeded\n"); // 13. dist_schedule static chunk, schedule dynamic chunk printf("dist_schedule static chunk, schedule dynamic chunk\n"); for (int n = 32 ; n < MAX_N ; n+=5000) { int t = 0; reset_input(a, a_h, b, c); #pragma omp target update to(a[:n],b[:n],c[:n]) for (int tms = 1 ; tms <= 256 ; tms *= 2) { // 8 times for (int ths = 32 ; ths <= 1024 ; ths *= 2) { // 6 times for(int dssch = 128 ; dssch <= n ; dssch *= 3000) { for(int sch = 1000 ; sch <= n ; sch *= 3000) { t++; #pragma omp target teams distribute parallel for simd dist_schedule(static,dssch) schedule(dynamic,sch) num_teams(tms) thread_limit(ths) for (int i = 0; i < n; ++i) { a[i] += b[i] + c[i]; } } // loop 'sch' } // loop 'dssch' } // loop 'ths' } // loop 'tms' // check results for each 'n' for (int times = 0 ; times < t ; times++) for (int i = 0; i < n; ++i) a_h[i] += b[i] + c[i]; #pragma omp target update from(a[:n]) for (int i = 0; i < n; ++i) { if (a_h[i] != a[i]) { printf("Error at n = %d, i = %d: host = %lf, device = %lf\n", n, i, a_h[i], a[i]); return 1; } } } // loop 'n' printf("Succeeded\n"); // 14. 
dist_schedule static chunk, schedule guided no chunk printf("dist_schedule static chunk, schedule guided no chunk\n"); for (int n = 32 ; n < MAX_N ; n+=5000) { int t = 0; reset_input(a, a_h, b, c); #pragma omp target update to(a[:n],b[:n],c[:n]) for (int tms = 1 ; tms <= 256 ; tms *= 2) { // 8 times for (int ths = 32 ; ths <= 1024 ; ths *= 2) { // 6 times for(int sch = 1000 ; sch <= n ; sch *= 3000) { t++; #pragma omp target teams distribute parallel for simd dist_schedule(static,sch) schedule(guided) num_teams(tms) thread_limit(ths) for (int i = 0; i < n; ++i) { a[i] += b[i] + c[i]; } } // loop 'sch' } // loop 'ths' } // loop 'tms' // check results for each 'n' for (int times = 0 ; times < t ; times++) for (int i = 0; i < n; ++i) a_h[i] += b[i] + c[i]; #pragma omp target update from(a[:n]) for (int i = 0; i < n; ++i) { if (a_h[i] != a[i]) { printf("Error at n = %d, i = %d: host = %lf, device = %lf\n", n, i, a_h[i], a[i]); return 1; } } } // loop 'n' printf("Succeeded\n"); // 15. dist_schedule static chunk, schedule guided chunk printf("dist_schedule static chunk, schedule guided chunk\n"); for (int n = 32 ; n < MAX_N ; n+=5000) { int t = 0; reset_input(a, a_h, b, c); #pragma omp target update to(a[:n],b[:n],c[:n]) for (int tms = 1 ; tms <= 256 ; tms *= 2) { // 8 times for (int ths = 32 ; ths <= 1024 ; ths *= 2) { // 6 times for(int dssch = 1000 ; dssch <= n ; dssch *= 3000) { for(int sch = 1000 ; sch <= n ; sch *= 3000) { t++; #pragma omp target teams distribute parallel for simd dist_schedule(static,dssch) schedule(guided,sch) num_teams(tms) thread_limit(ths) for (int i = 0; i < n; ++i) { a[i] += b[i] + c[i]; } } // loop 'sch' } // loop 'dssch' } // loop 'ths' } // loop 'tms' // check results for each 'n' for (int times = 0 ; times < t ; times++) for (int i = 0; i < n; ++i) a_h[i] += b[i] + c[i]; #pragma omp target update from(a[:n]) for (int i = 0; i < n; ++i) { if (a_h[i] != a[i]) { printf("Error at n = %d, i = %d: host = %lf, device = %lf\n", n, i, a_h[i], a[i]); return 1; } } } // loop 'n' printf("Succeeded\n"); // 16. dist_schedule static chunk, schedule auto printf("dist_schedule static chunk, schedule auto\n"); for (int n = 32 ; n < MAX_N ; n+=5000) { int t = 0; reset_input(a, a_h, b, c); #pragma omp target update to(a[:n],b[:n],c[:n]) for (int tms = 1 ; tms <= 256 ; tms *= 2) { // 8 times for (int ths = 32 ; ths <= 1024 ; ths *= 2) { // 6 times for(int sch = 1000 ; sch <= n ; sch *= 3000) { t++; #pragma omp target teams distribute parallel for simd dist_schedule(static,sch) schedule(auto) num_teams(tms) thread_limit(ths) for (int i = 0; i < n; ++i) { a[i] += b[i] + c[i]; } } // loop 'sch' } // loop 'ths' } // loop 'tms' // check results for each 'n' for (int times = 0 ; times < t ; times++) for (int i = 0; i < n; ++i) a_h[i] += b[i] + c[i]; #pragma omp target update from(a[:n]) for (int i = 0; i < n; ++i) { if (a_h[i] != a[i]) { printf("Error at n = %d, i = %d: host = %lf, device = %lf\n", n, i, a_h[i], a[i]); return 1; } } } // loop 'n' printf("Succeeded\n"); // 17. 
dist_schedule static chunk, schedule runtime printf("dist_schedule static chunk, schedule runtime\n"); for (int n = 32 ; n < MAX_N ; n+=5000) { int t = 0; reset_input(a, a_h, b, c); #pragma omp target update to(a[:n],b[:n],c[:n]) for (int tms = 1 ; tms <= 256 ; tms *= 2) { // 8 times for (int ths = 32 ; ths <= 1024 ; ths *= 2) { // 6 times for(int sch = 1000 ; sch <= n ; sch *= 3000) { t++; #pragma omp target teams distribute parallel for simd dist_schedule(static,sch) schedule(runtime) num_teams(tms) thread_limit(ths) for (int i = 0; i < n; ++i) { a[i] += b[i] + c[i]; } } // loop 'sch' } // loop 'ths' } // loop 'tms' // check results for each 'n' for (int times = 0 ; times < t ; times++) for (int i = 0; i < n; ++i) a_h[i] += b[i] + c[i]; #pragma omp target update from(a[:n]) for (int i = 0; i < n; ++i) { if (a_h[i] != a[i]) { printf("Error at n = %d, i = %d: host = %lf, device = %lf\n", n, i, a_h[i], a[i]); return 1; } } } // loop 'n' printf("Succeeded\n"); #pragma omp target exit data map(release:a[:MAX_N],b[:MAX_N],c[:MAX_N]) return 0; }
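The numbered cases above sweep the two scheduling levels of the combined construct: dist_schedule splits the iteration space across teams, and schedule splits each team's portion across the threads of that team. A minimal sketch of one such combination (the chunk sizes and team/thread counts here are illustrative, not taken from the tests):

/* Distribute chunks of 256 iterations to the teams, then let each team's
   threads pick up that chunk in dynamic pieces of 32 iterations. */
#pragma omp target teams distribute parallel for simd \
        dist_schedule(static, 256) schedule(dynamic, 32) \
        num_teams(8) thread_limit(128)
for (int i = 0; i < n; ++i)
    a[i] += b[i] + c[i];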
ten_tusscher_3_EPI_RS_CPU.c
#include "model_common.h" #include <assert.h> #include <stdlib.h> #include "ten_tusscher_3_RS.h" #define EPI GET_CELL_MODEL_DATA(init_cell_model_data) { assert(cell_model); if(get_initial_v) cell_model->initial_v = INITIAL_V; if(get_neq) cell_model->number_of_ode_equations = NEQ; } SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) { sv[0] = -86.2f; // V; millivolt sv[1] = 0.0f; //M sv[2] = 0.75; //H sv[3] = 0.75; //J sv[4] = 0.0f; //Xr1 sv[5] = 0.0f; //Xs sv[6] = 1.0f; //S sv[7] = 1.0f; //F sv[8] = 1.0f; //F2 sv[9] = 0.0; //D_INF sv[10] = 0.0; //R_INF sv[11] = 0.0; //Xr2_INF} } SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) { uint32_t sv_id; real *fibrosis; // Default values for a healthy cell /////////// real atpi = 6.8f; real Ko = 5.4f; real Ki = 138.3f; real Vm_change = 0.0; real GNa_multiplicator = 1.0f; real GCa_multiplicator = 1.0f; //////////////////////////////////// int num_extra_parameters = 6; size_t extra_parameters_size = num_extra_parameters*sizeof(real); if(extra_data) { fibrosis = ((real*)extra_data) + num_extra_parameters; //pointer } else { extra_data = malloc(extra_parameters_size); ((real*)extra_data)[0] = atpi; ((real*)extra_data)[1] = Ko; ((real*)extra_data)[2] = Ki; ((real*)extra_data)[3] = Vm_change; ((real*)extra_data)[4] = GNa_multiplicator; ((real*)extra_data)[5] = GCa_multiplicator; fibrosis = calloc(num_cells_to_solve, sizeof(real)); } int i; #pragma omp parallel for private(sv_id) for (i = 0; i < num_cells_to_solve; i++) { if(cells_to_solve) sv_id = cells_to_solve[i]; else sv_id = i; for (int j = 0; j < num_steps; ++j) { solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i], fibrosis[i], extra_data); } } if(extra_data == NULL) free(fibrosis); } void solve_model_ode_cpu(real dt, real *sv, real stim_current, real fibrosis, real *extra_parameters) { assert(sv); real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu(rY, rDY, stim_current, dt, fibrosis, extra_parameters); //THIS MODEL USES THE Rush Larsen Method TO SOLVE THE EDOS sv[0] = dt*rDY[0] + rY[0]; sv[1] = rDY[1]; sv[2] = rDY[2]; sv[3] = rDY[3]; sv[4] = rDY[4]; sv[5] = rDY[5]; sv[6] = rDY[6]; sv[7] = rDY[7]; sv[8] = rDY[8]; sv[9] = rDY[9]; sv[10] = rDY[10]; sv[11] = rDY[11]; } void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt, real fibrosis, real *extra_parameters) { //fibrosis = 0 means that the cell is fibrotic, 1 is not fibrotic. 
Anything between 0 and 1 means border zone const real svolt = sv[0]; //printf("%lf, %lf, %lf, %lf, %lf\n", atpi, Ko, Ki_multiplicator, acidosis, fibrosis); real atpi = extra_parameters[0]; real Ko = extra_parameters[1]; real Ki = extra_parameters[2]; real Vm_modifier = extra_parameters[3]; real GNa_multiplicator = extra_parameters[4]; real GCa_multiplicator = extra_parameters[5]; Vm_modifier = Vm_modifier - Vm_modifier*fibrosis; //These values are from In Electrophysiologic effects of acute myocardial ischemia: a theoretical //study of altered cell excitability and action potential duration real svolt_acid = svolt - Vm_modifier; const real sh =sv[2]; const real sm =sv[1]; const real sj =sv[3]; const real sxr1 =sv[4]; const real sxs =sv[5]; const real ss =sv[6]; const real sf =sv[7]; const real sf2 =sv[8]; const real D_INF =sv[9]; const real Xr2_INF =sv[10]; const real R_INF =sv[11]; const real natp = 0.24; // K dependence of ATP-sensitive K current const real nicholsarea = 0.00005; // Nichol's areas (cm^2) const real hatp = 2; // Hill coefficient //Extracellular potassium concentration was elevated //from its default value of 5.4 mM to values between 6.0 and 8.0 mM //Ref: A Comparison of Two Models of Human Ventricular Tissue: Simulated Ischemia and Re-entry real Ko_change = 5.4f - Ko; Ko = Ko + Ko_change*fibrosis; //Linear changing of atpi depending on the fibrosis and distance from the center of the scar (only for border zone cells) real atpi_change = 6.8f - atpi; atpi = atpi + atpi_change*fibrosis; //real katp = 0.306; //Ref: A Comparison of Two Models of Human Ventricular Tissue: Simulated Ischaemia and Re-entry const real katp = -0.0942857142857f*atpi + 0.683142857143f; const real patp = 1.0f/(1.0f + powf((atpi/katp),hatp)); const real gkatp = 0.000195f/nicholsarea; const real gkbaratp = gkatp*patp*powf((Ko/4),natp); const real katp2= 1.4; const real hatp2 = 2.6; const real pcal = 1.0f/(1.0f + powf((katp2/atpi),hatp2)); const real Cao=2.0; const real Nao=140.0; const real Cai=0.00007; const real Nai=7.67; //This paramter changes with acidosis. //In Electrophysiologic effects of acute myocardial ischemia: a theoretical //study of altered cell excitability and action potential duration //the authors change Ki by multiplying it to 0.863259669. Should we do the same here? 
//This changes are based on data from rat and guinea pig real Ki_change = 138.3f - Ki; Ki = Ki + Ki_change*fibrosis; real GNa_multiplicator_change = 1.0f - GNa_multiplicator; GNa_multiplicator = GNa_multiplicator + GNa_multiplicator_change*fibrosis; real GCa_multiplicator_change = 1.0f - GCa_multiplicator; GCa_multiplicator = GCa_multiplicator + GCa_multiplicator_change*fibrosis; //printf("Ki = %lf\n", Ki); //Constants const real R = 8314.472; const real F = 96485.3415f; const real T = 310.0; const real RTONF = (R*T)/F; //Parameters for currents //Parameters for IKr const real Gkr=0.101; //Parameters for Iks const real pKNa=0.03; #ifdef EPI const real Gks=0.257; #endif #ifdef ENDO const real Gks=0.392; #endif #ifdef MCELL const real Gks=0.098; #endif //Parameters for Ik1 const real GK1=5.405; //Parameters for Ito #ifdef EPI const real Gto=0.294; #endif #ifdef ENDO const real Gto=0.073; #endif #ifdef MCELL const real Gto=0.294; #endif //Parameters for INa //if acidosis this has to change to 0.75*GNa real GNa=14.838; GNa = GNa*GNa_multiplicator; //Parameters for IbNa const real GbNa=0.00029; //Parameters for INaK const real KmK=1.0; const real KmNa=40.0; const real knak=2.724; //Parameters for ICaL //if acidosis this has to change to 0.75*GCaL real GCaL=0.2786f*pcal; GCaL = GCaL*GCa_multiplicator; //Parameters for IbCa const real GbCa=0.000592; //Parameters for INaCa const real knaca=1000; const real KmNai=87.5; const real KmCa=1.38; const real ksat=0.1; const real n=0.35; //Parameters for IpCa const real GpCa=0.1238; const real KpCa=0.0005; //Parameters for IpK; const real GpK=0.0293; const real Ek=RTONF*(logf((Ko/Ki))); const real Ena=RTONF*(logf((Nao/Nai))); const real Eks=RTONF*(logf((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); const real Eca=0.5f*RTONF*(logf((Cao/Cai))); real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real IKatp; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real Xr1_INF; real Xr2_INF_new; real TAU_Xr1; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF_new; real S_INF; real TAU_S; real Af; real Bf; real Cf; real Af2; real Bf2; real Cf2; real D_INF_new; real TAU_F; real F_INF; real TAU_F2; real F2_INF; real sItot; //Needed to compute currents Ak1=0.1f/(1.0f+expf(0.06f*(svolt-Ek-200.0f))); Bk1=(3.0f*expf(0.0002f*(svolt-Ek+100.0f))+ expf(0.1f*(svolt-Ek-10.0f)))/(1.0f+expf(-0.5f*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1.0f/(1.0f+0.1245f*expf(-0.1f*svolt_acid*F/(R*T))+0.0353f*expf(-svolt_acid*F/(R*T)))); rec_ipK=1.0f/(1.0f+expf((25.0f-svolt)/5.98f)); //According to In Electrophysiologic effects of acute myocardial ischemia: a theoretical //study of altered cell excitability and action potential duration //Vm_acid = Vm -3.4 for all sodium current computation //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt_acid-Ena); ICaL=GCaL*D_INF*sf*sf2*(svolt-60); Ito=Gto*R_INF*ss*(svolt-Ek); IKr=Gkr*sqrtf(Ko/5.4f)*sxr1*Xr2_INF*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1.0f/(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1.0f/(KmCa+Cao))* (1.0f/(1.0f+ksat*expf((n-1.0f)*svolt_acid*F/(R*T))))* (expf(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- expf((n-1.0f)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5f); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); 
IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt_acid-Ena); IbCa=GbCa*(svolt-Eca); IKatp = gkbaratp*(svolt-Ek); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + IKatp + stim_current; //compute steady state values and time constants AM=1.0f/(1.0f+expf((-60.0f-svolt)/5.0f)); BM=0.1f/(1.0f+expf((svolt+35.0f)/5.0f))+0.10f/(1.0f+expf((svolt-50.0f)/200.0f)); TAU_M=AM*BM; M_INF=1.0f/((1.0f+expf((-56.86f-svolt)/9.03f))*(1.0f+expf((-56.86f-svolt)/9.03f))); if (svolt>=-40.) { AH_1=0.0f; BH_1=(0.77f/(0.13f*(1.0f+expf(-(svolt+10.66f)/11.1f)))); TAU_H= 1.0f/(AH_1+BH_1); } else { AH_2=(0.057f*expf(-(svolt+80.0f)/6.8f)); BH_2=(2.7f*expf(0.079f*svolt)+(3.1e5f)*expf(0.3485f*svolt)); TAU_H=1.0f/(AH_2+BH_2); } H_INF=1.0f/((1.0f+expf((svolt+71.55f)/7.43f))*(1.0f+expf((svolt+71.55f)/7.43f))); if(svolt>=-40.0f) { AJ_1=0.0f; BJ_1=(0.6f*expf((0.057f)*svolt)/(1.0f+expf(-0.1f*(svolt+32.0f)))); TAU_J= 1.0f/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4f)*expf(0.2444f*svolt)-(6.948e-6f)*expf(-0.04391f*svolt))*(svolt+37.78f)/ (1.0f+expf(0.311f*(svolt+79.23f)))); BJ_2=(0.02424f*expf(-0.01052f*svolt)/(1.0f+expf(-0.1378f*(svolt+40.14f)))); TAU_J= 1.0f/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1.0f/(1.0f+expf((-26.0f-svolt)/7.0f)); axr1=450.0f/(1.0f+expf((-45.0f-svolt)/10.0f)); bxr1=6.0f/(1.0f+expf((svolt-(-30.0f))/11.5f)); TAU_Xr1=axr1*bxr1; Xr2_INF_new=1.0f/(1.0f+expf((svolt-(-88.0f))/24.0f)); Xs_INF=1.0f/(1.0f+expf((-5.0f-svolt)/14.0f)); Axs=(1400.0f/(sqrtf(1.0f+expf((5.0f-svolt)/6.0f)))); Bxs=(1.0f/(1.0f+expf((svolt-35.0f)/15.0f))); TAU_Xs=Axs*Bxs+80; #ifdef EPI R_INF_new=1./(1.+expf((20-svolt)/6.)); S_INF=1./(1.+expf((svolt+20)/5.)); TAU_S=85.*expf(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+expf((svolt-20.)/5.))+3.; #endif #ifdef ENDO R_INF_new=1.0f/(1.0f+expf((20.0f-svolt)/6.0f)); S_INF=1.0f/(1.0f+expf((svolt+28.0f)/5.0f)); TAU_S=1000.0f*expf(-(svolt+67.0f)*(svolt+67.0f)/1000.0f)+8.0f; #endif #ifdef MCELL R_INF_new=1./(1.+expf((20-svolt)/6.)); S_INF=1./(1.+expf((svolt+20)/5.)); TAU_S=85.*expf(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+expf((svolt-20.)/5.))+3.; #endif D_INF_new=1.0f/(1.0f+expf((-8.0f-svolt)/7.5f)); F_INF=1.0f/(1.0f+expf((svolt+20)/7)); Af=1102.5f*expf(-(svolt+27)*(svolt+27.0f)/225.0f); Bf=200.0f/(1.0f+expf((13.0f-svolt)/10.f)); Cf=(180.0f/(1.0f+expf((svolt+30.0f)/10.0f)))+20.0f; TAU_F=Af+Bf+Cf; F2_INF=0.67f/(1.0f+expf((svolt+35.0f)/7.0f))+0.33f; Af2=600.0f*expf(-(svolt+27.0f)*(svolt+27.0f)/170.0f); Bf2=7.75f/(1.0f+expf((25.0f-svolt)/10.0f)); Cf2=16.0f/(1.0f+expf((svolt+30.0f)/10.0f)); TAU_F2=Af2+Bf2+Cf2; //update voltage rDY_[0] = -sItot; //Update gates rDY_[1] = M_INF-(M_INF-sm)*expf(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*expf(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*expf(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*expf(-dt/TAU_Xr1); rDY_[5] = Xs_INF-(Xs_INF-sxs)*expf(-dt/TAU_Xs); rDY_[6]= S_INF-(S_INF-ss)*expf(-dt/TAU_S); rDY_[7] =F_INF-(F_INF-sf)*expf(-dt/TAU_F); rDY_[8] =F2_INF-(F2_INF-sf2)*expf(-dt/TAU_F2); rDY_[9] = D_INF_new; rDY_[10] = R_INF_new; rDY_[11] = Xr2_INF_new; }
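As the comment in solve_model_ode_cpu notes, the gating variables are advanced with the Rush-Larsen scheme: RHS_cpu already returns the updated gate values (for example M_INF - (M_INF - sm) * expf(-dt / TAU_M)), so the caller only assigns them, while the membrane voltage gets an explicit Euler step. Each gate obeys dy/dt = (y_inf - y) / tau with voltage-dependent y_inf and tau, and the update is the exact solution of that equation over one time step. A compact sketch of the update as a helper (the function name is made up for illustration):

/* Rush-Larsen step for one gating variable: exact solution of
   dy/dt = (y_inf - y) / tau over a step dt, with y_inf and tau held at
   their values for the current membrane voltage. */
static inline real rush_larsen_step(real y, real y_inf, real tau, real dt)
{
    return y_inf - (y_inf - y) * expf(-dt / tau);
}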
single_value.c
#include <stdio.h>
#include <omp.h>

int main() {
  int x = 0;

#pragma omp parallel
  {
    int y = x;
    printf("Y: %d\n", y);

    if (omp_get_thread_num() == 0) {
      x = 42;
    }
  }

  printf("X: %d\n", x);
}
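Every thread copies the shared x into its private y while thread 0 may be writing 42 to x, so the read and the write race and the printed Y values are not deterministic (typically 0 or 42); the final X: 42 only becomes reliably visible after the implicit barrier that ends the parallel region. A sketch of one way to make the reads well defined, by ordering them after the write with an explicit barrier:

#pragma omp parallel
{
    if (omp_get_thread_num() == 0)
        x = 42;               /* single writer */

#pragma omp barrier           /* everyone waits until the write is done */

    int y = x;                /* every thread now reads 42 */
    printf("Y: %d\n", y);
}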
cgeswp.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgeswp.c, normal z -> c, Fri Sep 28 17:38:06 2018 * **/ #include "plasma.h" #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_tuning.h" #include "plasma_types.h" /******************************************************************************/ int plasma_cgeswp(plasma_enum_t colrow, int m, int n, plasma_complex32_t *pA, int lda, int *ipiv, int incx) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); return PlasmaErrorNotInitialized; } // Check input arguments. if ((colrow != PlasmaColumnwise) && (colrow != PlasmaRowwise)) { plasma_error("illegal value of colrow"); return -1; } if (m < 0) { plasma_error("illegal value of m"); return -2; } if (n < 0) { plasma_error("illegal value of n"); return -3; } if (lda < imax(1, m)) { plasma_error("illegal value of lda"); return -5; } // quick return if (imin(n, m) == 0) return PlasmaSuccess; // Tune parameters. if (plasma->tuning) plasma_tune_geswp(plasma, PlasmaComplexFloat, m, n); // Set tiling parameters. int nb = plasma->nb; // Create tile matrices. plasma_desc_t A; int retval; retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb, m, n, 0, 0, m, n, &A); if (retval != PlasmaSuccess) { plasma_error("plasma_general_desc_create() failed"); return retval; } // Initialize sequence. plasma_sequence_t sequence; retval = plasma_sequence_init(&sequence); // Initialize request. plasma_request_t request; retval = plasma_request_init(&request); // asynchronous block #pragma omp parallel #pragma omp master { // Translate to tile layout. plasma_omp_cge2desc(pA, lda, A, &sequence, &request); // Call tile async function. plasma_omp_cgeswp(colrow, A, ipiv, incx, &sequence, &request); // Translate back to LAPACK layout. plasma_omp_cdesc2ge(A, pA, lda, &sequence, &request); } // implicit synchronization // Free matrices in tile layout. plasma_desc_destroy(&A); // Return status. int status = sequence.status; return status; } /******************************************************************************/ void plasma_omp_cgeswp(plasma_enum_t colrow, plasma_desc_t A, int *ipiv, int incx, plasma_sequence_t *sequence, plasma_request_t *request) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // Check input arguments. if ((colrow != PlasmaColumnwise) && (colrow != PlasmaRowwise)) { plasma_error("illegal value of colrow"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(A) != PlasmaSuccess) { plasma_error("invalid A"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (sequence == NULL) { plasma_error("NULL sequence"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (request == NULL) { plasma_error("NULL request"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // quick return if (imin(A.m, A.n) == 0) return; // Call the parallel function. plasma_pcgeswp(colrow, A, ipiv, incx, sequence, request); }
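The "asynchronous block" in plasma_cgeswp follows the usual PLASMA pattern: a parallel region is opened and a single (master) thread calls the plasma_omp_* routines, which are expected to only enqueue OpenMP tasks; the remaining threads execute those tasks, and the implicit barrier at the end of the parallel region is the synchronization point the "implicit synchronization" comment refers to. A stripped-down, runnable sketch of that structure, with made-up task bodies and a plain scalar standing in for the tile descriptor:

#include <stdio.h>
#include <omp.h>

int main(void)
{
    double tile = 0.0;                 /* stand-in for the tile-layout data */

#pragma omp parallel
#pragma omp master
    {
#pragma omp task depend(out: tile)
        tile = 1.0;                    /* "translate to tile layout" */

#pragma omp task depend(inout: tile)
        tile += 1.0;                   /* "apply the swaps" */

#pragma omp task depend(in: tile)
        printf("done: %f\n", tile);    /* "translate back to LAPACK layout" */
    }                                  /* implicit barrier: all tasks complete */

    return 0;
}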
OpenMPClause.h
//===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // /// \file /// This file defines OpenMP AST classes for clauses. /// There are clauses for executable directives, clauses for declarative /// directives and clauses which can be used in both kinds of directives. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H #define LLVM_CLANG_AST_OPENMPCLAUSE_H #include "clang/AST/ASTFwd.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include "llvm/Frontend/OpenMP/OMPContext.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/TrailingObjects.h" #include <cassert> #include <cstddef> #include <iterator> #include <utility> namespace clang { class ASTContext; //===----------------------------------------------------------------------===// // AST classes for clauses. //===----------------------------------------------------------------------===// /// This is a basic class for representing single OpenMP clause. class OMPClause { /// Starting location of the clause (the clause keyword). SourceLocation StartLoc; /// Ending location of the clause. SourceLocation EndLoc; /// Kind of the clause. OpenMPClauseKind Kind; protected: OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc) : StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {} public: /// Returns the starting location of the clause. SourceLocation getBeginLoc() const { return StartLoc; } /// Returns the ending location of the clause. SourceLocation getEndLoc() const { return EndLoc; } /// Sets the starting location of the clause. void setLocStart(SourceLocation Loc) { StartLoc = Loc; } /// Sets the ending location of the clause. void setLocEnd(SourceLocation Loc) { EndLoc = Loc; } /// Returns kind of OpenMP clause (private, shared, reduction, etc.). OpenMPClauseKind getClauseKind() const { return Kind; } bool isImplicit() const { return StartLoc.isInvalid(); } using child_iterator = StmtIterator; using const_child_iterator = ConstStmtIterator; using child_range = llvm::iterator_range<child_iterator>; using const_child_range = llvm::iterator_range<const_child_iterator>; child_range children(); const_child_range children() const { auto Children = const_cast<OMPClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } /// Get the iterator range for the expressions used in the clauses. Used /// expressions include only the children that must be evaluated at the /// runtime before entering the construct. 
  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *) { return true; }
};

/// Class that handles pre-initialization statement for some clauses, like
/// 'schedule', 'firstprivate' etc.
class OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Pre-initialization statement for the clause.
  Stmt *PreInit = nullptr;

  /// Region that captures the associated stmt.
  OpenMPDirectiveKind CaptureRegion = llvm::omp::OMPD_unknown;

protected:
  OMPClauseWithPreInit(const OMPClause *This) {
    assert(get(This) && "get is not tuned for pre-init.");
  }

  /// Set pre-initialization statement for the clause.
  void setPreInitStmt(Stmt *S,
                      OpenMPDirectiveKind ThisRegion = llvm::omp::OMPD_unknown) {
    PreInit = S;
    CaptureRegion = ThisRegion;
  }

public:
  /// Get pre-initialization statement for the clause.
  const Stmt *getPreInitStmt() const { return PreInit; }

  /// Get pre-initialization statement for the clause.
  Stmt *getPreInitStmt() { return PreInit; }

  /// Get capture region for the stmt in the clause.
  OpenMPDirectiveKind getCaptureRegion() const { return CaptureRegion; }

  static OMPClauseWithPreInit *get(OMPClause *C);
  static const OMPClauseWithPreInit *get(const OMPClause *C);
};

/// Class that handles post-update expression for some clauses, like
/// 'lastprivate', 'reduction' etc.
class OMPClauseWithPostUpdate : public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Post-update expression for the clause.
  Expr *PostUpdate = nullptr;

protected:
  OMPClauseWithPostUpdate(const OMPClause *This) : OMPClauseWithPreInit(This) {
    assert(get(This) && "get is not tuned for post-update.");
  }

  /// Set post-update expression for the clause.
  void setPostUpdateExpr(Expr *S) { PostUpdate = S; }

public:
  /// Get post-update expression for the clause.
  const Expr *getPostUpdateExpr() const { return PostUpdate; }

  /// Get post-update expression for the clause.
  Expr *getPostUpdateExpr() { return PostUpdate; }

  static OMPClauseWithPostUpdate *get(OMPClause *C);
  static const OMPClauseWithPostUpdate *get(const OMPClause *C);
};

/// This structure contains most locations needed by an OMPVarListClause.
struct OMPVarListLocTy {
  /// Starting location of the clause (the clause keyword).
  SourceLocation StartLoc;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// Ending location of the clause.
  SourceLocation EndLoc;

  OMPVarListLocTy() = default;
  OMPVarListLocTy(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc)
      : StartLoc(StartLoc), LParenLoc(LParenLoc), EndLoc(EndLoc) {}
};

/// This represents clauses with the list of variables like 'private',
/// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the
/// '#pragma omp ...' directives.
template <class T> class OMPVarListClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number of variables in the list.
  unsigned NumVars;

protected:
  /// Build a clause with \a N variables.
  ///
  /// \param K Kind of the clause.
  /// \param StartLoc Starting location of the clause (the clause keyword).
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {} /// Fetches list of variables associated with this clause. MutableArrayRef<Expr *> getVarRefs() { return MutableArrayRef<Expr *>( static_cast<T *>(this)->template getTrailingObjects<Expr *>(), NumVars); } /// Sets the list of variables for this clause. void setVarRefs(ArrayRef<Expr *> VL) { assert(VL.size() == NumVars && "Number of variables is not the same as the preallocated buffer"); std::copy(VL.begin(), VL.end(), static_cast<T *>(this)->template getTrailingObjects<Expr *>()); } public: using varlist_iterator = MutableArrayRef<Expr *>::iterator; using varlist_const_iterator = ArrayRef<const Expr *>::iterator; using varlist_range = llvm::iterator_range<varlist_iterator>; using varlist_const_range = llvm::iterator_range<varlist_const_iterator>; unsigned varlist_size() const { return NumVars; } bool varlist_empty() const { return NumVars == 0; } varlist_range varlists() { return varlist_range(varlist_begin(), varlist_end()); } varlist_const_range varlists() const { return varlist_const_range(varlist_begin(), varlist_end()); } varlist_iterator varlist_begin() { return getVarRefs().begin(); } varlist_iterator varlist_end() { return getVarRefs().end(); } varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); } varlist_const_iterator varlist_end() const { return getVarRefs().end(); } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Fetches list of all variables in the clause. ArrayRef<const Expr *> getVarRefs() const { return llvm::makeArrayRef( static_cast<const T *>(this)->template getTrailingObjects<Expr *>(), NumVars); } }; /// This represents 'allocator' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp allocate(a) allocator(omp_default_mem_alloc) /// \endcode /// In this example directive '#pragma omp allocate' has simple 'allocator' /// clause with the allocator 'omp_default_mem_alloc'. class OMPAllocatorClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Expression with the allocator. Stmt *Allocator = nullptr; /// Set allocator. void setAllocator(Expr *A) { Allocator = A; } public: /// Build 'allocator' clause with the given allocator. /// /// \param A Allocator. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPAllocatorClause(Expr *A, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_allocator, StartLoc, EndLoc), LParenLoc(LParenLoc), Allocator(A) {} /// Build an empty clause. OMPAllocatorClause() : OMPClause(llvm::omp::OMPC_allocator, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns allocator. 
Expr *getAllocator() const { return cast_or_null<Expr>(Allocator); } child_range children() { return child_range(&Allocator, &Allocator + 1); } const_child_range children() const { return const_child_range(&Allocator, &Allocator + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_allocator; } }; /// This represents clause 'allocate' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel private(a) allocate(omp_default_mem_alloc :a) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'private' /// and clause 'allocate' for the variable 'a'. class OMPAllocateClause final : public OMPVarListClause<OMPAllocateClause>, private llvm::TrailingObjects<OMPAllocateClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Allocator specified in the clause, or 'nullptr' if the default one is /// used. Expr *Allocator = nullptr; /// Position of the ':' delimiter in the clause; SourceLocation ColonLoc; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Allocator Allocator expression. /// \param ColonLoc Location of ':' delimiter. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPAllocateClause(SourceLocation StartLoc, SourceLocation LParenLoc, Expr *Allocator, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPAllocateClause>(llvm::omp::OMPC_allocate, StartLoc, LParenLoc, EndLoc, N), Allocator(Allocator), ColonLoc(ColonLoc) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPAllocateClause(unsigned N) : OMPVarListClause<OMPAllocateClause>(llvm::omp::OMPC_allocate, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Sets location of ':' symbol in clause. void setColonLoc(SourceLocation CL) { ColonLoc = CL; } void setAllocator(Expr *A) { Allocator = A; } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Allocator Allocator expression. /// \param ColonLoc Location of ':' delimiter. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. static OMPAllocateClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, Expr *Allocator, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Returns the allocator expression or nullptr, if no allocator is specified. Expr *getAllocator() const { return Allocator; } /// Returns the location of the ':' delimiter. SourceLocation getColonLoc() const { return ColonLoc; } /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
static OMPAllocateClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPAllocateClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_allocate; } }; /// This represents 'if' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp parallel if(parallel:a > 5) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'if' clause with /// condition 'a > 5' and directive name modifier 'parallel'. class OMPIfClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'if' clause. Stmt *Condition = nullptr; /// Location of ':' (if any). SourceLocation ColonLoc; /// Directive name modifier for the clause. OpenMPDirectiveKind NameModifier = llvm::omp::OMPD_unknown; /// Name modifier location. SourceLocation NameModifierLoc; /// Set condition. void setCondition(Expr *Cond) { Condition = Cond; } /// Set directive name modifier for the clause. void setNameModifier(OpenMPDirectiveKind NM) { NameModifier = NM; } /// Set location of directive name modifier for the clause. void setNameModifierLoc(SourceLocation Loc) { NameModifierLoc = Loc; } /// Set location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } public: /// Build 'if' clause with condition \a Cond. /// /// \param NameModifier [OpenMP 4.1] Directive name modifier of clause. /// \param Cond Condition of the clause. /// \param HelperCond Helper condition for the clause. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param NameModifierLoc Location of directive name modifier. /// \param ColonLoc [OpenMP 4.1] Location of ':'. /// \param EndLoc Ending location of the clause. OMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Cond, Stmt *HelperCond, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_if, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond), ColonLoc(ColonLoc), NameModifier(NameModifier), NameModifierLoc(NameModifierLoc) { setPreInitStmt(HelperCond, CaptureRegion); } /// Build an empty clause. OMPIfClause() : OMPClause(llvm::omp::OMPC_if, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } /// Return directive name modifier associated with the clause. OpenMPDirectiveKind getNameModifier() const { return NameModifier; } /// Return the location of directive name modifier. 
SourceLocation getNameModifierLoc() const { return NameModifierLoc; } child_range children() { return child_range(&Condition, &Condition + 1); } const_child_range children() const { return const_child_range(&Condition, &Condition + 1); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPIfClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_if; } }; /// This represents 'final' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp task final(a > 5) /// \endcode /// In this example directive '#pragma omp task' has simple 'final' /// clause with condition 'a > 5'. class OMPFinalClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'if' clause. Stmt *Condition = nullptr; /// Set condition. void setCondition(Expr *Cond) { Condition = Cond; } public: /// Build 'final' clause with condition \a Cond. /// /// \param Cond Condition of the clause. /// \param HelperCond Helper condition for the construct. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPFinalClause(Expr *Cond, Stmt *HelperCond, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_final, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond) { setPreInitStmt(HelperCond, CaptureRegion); } /// Build an empty clause. OMPFinalClause() : OMPClause(llvm::omp::OMPC_final, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } child_range children() { return child_range(&Condition, &Condition + 1); } const_child_range children() const { return const_child_range(&Condition, &Condition + 1); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPFinalClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_final; } }; /// This represents 'num_threads' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp parallel num_threads(6) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'num_threads' /// clause with number of threads '6'. class OMPNumThreadsClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'num_threads' clause. Stmt *NumThreads = nullptr; /// Set condition. void setNumThreads(Expr *NThreads) { NumThreads = NThreads; } public: /// Build 'num_threads' clause with condition \a NumThreads. /// /// \param NumThreads Number of threads for the construct. /// \param HelperNumThreads Helper Number of threads for the construct. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. 
/// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPNumThreadsClause(Expr *NumThreads, Stmt *HelperNumThreads, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_num_threads, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumThreads(NumThreads) { setPreInitStmt(HelperNumThreads, CaptureRegion); } /// Build an empty clause. OMPNumThreadsClause() : OMPClause(llvm::omp::OMPC_num_threads, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns number of threads. Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); } child_range children() { return child_range(&NumThreads, &NumThreads + 1); } const_child_range children() const { return const_child_range(&NumThreads, &NumThreads + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_num_threads; } }; /// This represents 'safelen' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd safelen(4) /// \endcode /// In this example directive '#pragma omp simd' has clause 'safelen' /// with single expression '4'. /// If the safelen clause is used then no two iterations executed /// concurrently with SIMD instructions can have a greater distance /// in the logical iteration space than its value. The parameter of /// the safelen clause must be a constant positive integer expression. class OMPSafelenClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Safe iteration space distance. Stmt *Safelen = nullptr; /// Set safelen. void setSafelen(Expr *Len) { Safelen = Len; } public: /// Build 'safelen' clause. /// /// \param Len Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_safelen, StartLoc, EndLoc), LParenLoc(LParenLoc), Safelen(Len) {} /// Build an empty clause. explicit OMPSafelenClause() : OMPClause(llvm::omp::OMPC_safelen, SourceLocation(), SourceLocation()) { } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return safe iteration space distance. Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); } child_range children() { return child_range(&Safelen, &Safelen + 1); } const_child_range children() const { return const_child_range(&Safelen, &Safelen + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_safelen; } }; /// This represents 'simdlen' clause in the '#pragma omp ...' /// directive. 
/// /// \code /// #pragma omp simd simdlen(4) /// \endcode /// In this example directive '#pragma omp simd' has clause 'simdlen' /// with single expression '4'. /// If the 'simdlen' clause is used then it specifies the preferred number of /// iterations to be executed concurrently. The parameter of the 'simdlen' /// clause must be a constant positive integer expression. class OMPSimdlenClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Safe iteration space distance. Stmt *Simdlen = nullptr; /// Set simdlen. void setSimdlen(Expr *Len) { Simdlen = Len; } public: /// Build 'simdlen' clause. /// /// \param Len Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPSimdlenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_simdlen, StartLoc, EndLoc), LParenLoc(LParenLoc), Simdlen(Len) {} /// Build an empty clause. explicit OMPSimdlenClause() : OMPClause(llvm::omp::OMPC_simdlen, SourceLocation(), SourceLocation()) { } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return safe iteration space distance. Expr *getSimdlen() const { return cast_or_null<Expr>(Simdlen); } child_range children() { return child_range(&Simdlen, &Simdlen + 1); } const_child_range children() const { return const_child_range(&Simdlen, &Simdlen + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_simdlen; } }; /// This represents 'collapse' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd collapse(3) /// \endcode /// In this example directive '#pragma omp simd' has clause 'collapse' /// with single expression '3'. /// The parameter must be a constant positive integer expression, it specifies /// the number of nested loops that should be collapsed into a single iteration /// space. class OMPCollapseClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Number of for-loops. Stmt *NumForLoops = nullptr; /// Set the number of associated for-loops. void setNumForLoops(Expr *Num) { NumForLoops = Num; } public: /// Build 'collapse' clause. /// /// \param Num Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPCollapseClause(Expr *Num, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_collapse, StartLoc, EndLoc), LParenLoc(LParenLoc), NumForLoops(Num) {} /// Build an empty clause. explicit OMPCollapseClause() : OMPClause(llvm::omp::OMPC_collapse, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the number of associated for-loops. 
Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); } child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); } const_child_range children() const { return const_child_range(&NumForLoops, &NumForLoops + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_collapse; } }; /// This represents 'default' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp parallel default(shared) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'default' /// clause with kind 'shared'. class OMPDefaultClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'default' clause. llvm::omp::DefaultKind Kind = llvm::omp::OMP_DEFAULT_unknown; /// Start location of the kind in source code. SourceLocation KindKwLoc; /// Set kind of the clauses. /// /// \param K Argument of clause. void setDefaultKind(llvm::omp::DefaultKind K) { Kind = K; } /// Set argument location. /// /// \param KLoc Argument location. void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// Build 'default' clause with argument \a A ('none' or 'shared'). /// /// \param A Argument of the clause ('none' or 'shared'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPDefaultClause(llvm::omp::DefaultKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// Build an empty clause. OMPDefaultClause() : OMPClause(llvm::omp::OMPC_default, SourceLocation(), SourceLocation()) { } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. llvm::omp::DefaultKind getDefaultKind() const { return Kind; } /// Returns location of clause kind. SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_default; } }; /// This represents 'proc_bind' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp parallel proc_bind(master) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'proc_bind' /// clause with kind 'master'. class OMPProcBindClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'proc_bind' clause. llvm::omp::ProcBindKind Kind = llvm::omp::OMP_PROC_BIND_unknown; /// Start location of the kind in source code. SourceLocation KindKwLoc; /// Set kind of the clause. /// /// \param K Kind of clause. 
void setProcBindKind(llvm::omp::ProcBindKind K) { Kind = K; } /// Set clause kind location. /// /// \param KLoc Kind location. void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// Build 'proc_bind' clause with argument \a A ('master', 'close' or /// 'spread'). /// /// \param A Argument of the clause ('master', 'close' or 'spread'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPProcBindClause(llvm::omp::ProcBindKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_proc_bind, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// Build an empty clause. OMPProcBindClause() : OMPClause(llvm::omp::OMPC_proc_bind, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. llvm::omp::ProcBindKind getProcBindKind() const { return Kind; } /// Returns location of clause kind. SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_proc_bind; } }; /// This represents 'unified_address' clause in the '#pragma omp requires' /// directive. /// /// \code /// #pragma omp requires unified_address /// \endcode /// In this example directive '#pragma omp requires' has 'unified_address' /// clause. class OMPUnifiedAddressClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'unified_address' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_unified_address, StartLoc, EndLoc) {} /// Build an empty clause. OMPUnifiedAddressClause() : OMPClause(llvm::omp::OMPC_unified_address, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_unified_address; } }; /// This represents 'unified_shared_memory' clause in the '#pragma omp requires' /// directive. /// /// \code /// #pragma omp requires unified_shared_memory /// \endcode /// In this example directive '#pragma omp requires' has 'unified_shared_memory' /// clause. class OMPUnifiedSharedMemoryClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'unified_shared_memory' clause. /// /// \param StartLoc Starting location of the clause. 
/// \param EndLoc Ending location of the clause. OMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_unified_shared_memory, StartLoc, EndLoc) {} /// Build an empty clause. OMPUnifiedSharedMemoryClause() : OMPClause(llvm::omp::OMPC_unified_shared_memory, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_unified_shared_memory; } }; /// This represents 'reverse_offload' clause in the '#pragma omp requires' /// directive. /// /// \code /// #pragma omp requires reverse_offload /// \endcode /// In this example directive '#pragma omp requires' has 'reverse_offload' /// clause. class OMPReverseOffloadClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'reverse_offload' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_reverse_offload, StartLoc, EndLoc) {} /// Build an empty clause. OMPReverseOffloadClause() : OMPClause(llvm::omp::OMPC_reverse_offload, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_reverse_offload; } }; /// This represents 'dynamic_allocators' clause in the '#pragma omp requires' /// directive. /// /// \code /// #pragma omp requires dynamic_allocators /// \endcode /// In this example directive '#pragma omp requires' has 'dynamic_allocators' /// clause. class OMPDynamicAllocatorsClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'dynamic_allocators' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_dynamic_allocators, StartLoc, EndLoc) {} /// Build an empty clause. OMPDynamicAllocatorsClause() : OMPClause(llvm::omp::OMPC_dynamic_allocators, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_dynamic_allocators; } }; /// This represents 'atomic_default_mem_order' clause in the '#pragma omp /// requires' directive. 
///
/// \code
/// #pragma omp requires atomic_default_mem_order(seq_cst)
/// \endcode
/// In this example directive '#pragma omp requires' has simple
/// 'atomic_default_mem_order' clause with kind 'seq_cst'.
class OMPAtomicDefaultMemOrderClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('
  SourceLocation LParenLoc;

  /// A kind of the 'atomic_default_mem_order' clause.
  OpenMPAtomicDefaultMemOrderClauseKind Kind =
      OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clause.
  ///
  /// \param K Kind of clause.
  void setAtomicDefaultMemOrderKind(OpenMPAtomicDefaultMemOrderClauseKind K) {
    Kind = K;
  }

  /// Set clause kind location.
  ///
  /// \param KLoc Kind location.
  void setAtomicDefaultMemOrderKindKwLoc(SourceLocation KLoc) {
    KindKwLoc = KLoc;
  }

public:
  /// Build 'atomic_default_mem_order' clause with argument \a A ('seq_cst',
  /// 'acq_rel' or 'relaxed').
  ///
  /// \param A Argument of the clause ('seq_cst', 'acq_rel' or 'relaxed').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPAtomicDefaultMemOrderClause(OpenMPAtomicDefaultMemOrderClauseKind A,
                                 SourceLocation ALoc, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_atomic_default_mem_order, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPAtomicDefaultMemOrderClause()
      : OMPClause(llvm::omp::OMPC_atomic_default_mem_order, SourceLocation(),
                  SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  OpenMPAtomicDefaultMemOrderClauseKind getAtomicDefaultMemOrderKind() const {
    return Kind;
  }

  /// Returns location of clause kind.
  SourceLocation getAtomicDefaultMemOrderKindKwLoc() const {
    return KindKwLoc;
  }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_atomic_default_mem_order;
  }
};

/// This represents 'schedule' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp for' has 'schedule' clause with
/// arguments 'static' and '3'.
class OMPScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'schedule' clause.
  OpenMPScheduleClauseKind Kind = OMPC_SCHEDULE_unknown;

  /// Modifiers for 'schedule' clause.
  enum { FIRST, SECOND, NUM_MODIFIERS };
  OpenMPScheduleClauseModifier Modifiers[NUM_MODIFIERS];

  /// Locations of modifiers.
  SourceLocation ModifiersLoc[NUM_MODIFIERS];

  /// Start location of the schedule kind in source code.
  SourceLocation KindLoc;

  /// Location of ',' (if any).
  SourceLocation CommaLoc;

  /// Chunk size.
  Expr *ChunkSize = nullptr;

  /// Set schedule kind.
  ///
  /// \param K Schedule kind.
void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; } /// Set the first schedule modifier. /// /// \param M Schedule modifier. void setFirstScheduleModifier(OpenMPScheduleClauseModifier M) { Modifiers[FIRST] = M; } /// Set the second schedule modifier. /// /// \param M Schedule modifier. void setSecondScheduleModifier(OpenMPScheduleClauseModifier M) { Modifiers[SECOND] = M; } /// Set location of the first schedule modifier. void setFirstScheduleModifierLoc(SourceLocation Loc) { ModifiersLoc[FIRST] = Loc; } /// Set location of the second schedule modifier. void setSecondScheduleModifierLoc(SourceLocation Loc) { ModifiersLoc[SECOND] = Loc; } /// Set schedule modifier location. /// /// \param M Schedule modifier location. void setScheduleModifer(OpenMPScheduleClauseModifier M) { if (Modifiers[FIRST] == OMPC_SCHEDULE_MODIFIER_unknown) Modifiers[FIRST] = M; else { assert(Modifiers[SECOND] == OMPC_SCHEDULE_MODIFIER_unknown); Modifiers[SECOND] = M; } } /// Sets the location of '('. /// /// \param Loc Location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Set schedule kind start location. /// /// \param KLoc Schedule kind location. void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; } /// Set location of ','. /// /// \param Loc Location of ','. void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; } /// Set chunk size. /// /// \param E Chunk size. void setChunkSize(Expr *E) { ChunkSize = E; } public: /// Build 'schedule' clause with schedule kind \a Kind and chunk size /// expression \a ChunkSize. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param KLoc Starting location of the argument. /// \param CommaLoc Location of ','. /// \param EndLoc Ending location of the clause. /// \param Kind Schedule kind. /// \param ChunkSize Chunk size. /// \param HelperChunkSize Helper chunk size for combined directives. /// \param M1 The first modifier applied to 'schedule' clause. /// \param M1Loc Location of the first modifier /// \param M2 The second modifier applied to 'schedule' clause. /// \param M2Loc Location of the second modifier OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KLoc, SourceLocation CommaLoc, SourceLocation EndLoc, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, Stmt *HelperChunkSize, OpenMPScheduleClauseModifier M1, SourceLocation M1Loc, OpenMPScheduleClauseModifier M2, SourceLocation M2Loc) : OMPClause(llvm::omp::OMPC_schedule, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) { setPreInitStmt(HelperChunkSize); Modifiers[FIRST] = M1; Modifiers[SECOND] = M2; ModifiersLoc[FIRST] = M1Loc; ModifiersLoc[SECOND] = M2Loc; } /// Build an empty clause. explicit OMPScheduleClause() : OMPClause(llvm::omp::OMPC_schedule, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) { Modifiers[FIRST] = OMPC_SCHEDULE_MODIFIER_unknown; Modifiers[SECOND] = OMPC_SCHEDULE_MODIFIER_unknown; } /// Get kind of the clause. OpenMPScheduleClauseKind getScheduleKind() const { return Kind; } /// Get the first modifier of the clause. OpenMPScheduleClauseModifier getFirstScheduleModifier() const { return Modifiers[FIRST]; } /// Get the second modifier of the clause. OpenMPScheduleClauseModifier getSecondScheduleModifier() const { return Modifiers[SECOND]; } /// Get location of '('. SourceLocation getLParenLoc() { return LParenLoc; } /// Get kind location. 
SourceLocation getScheduleKindLoc() { return KindLoc; } /// Get the first modifier location. SourceLocation getFirstScheduleModifierLoc() const { return ModifiersLoc[FIRST]; } /// Get the second modifier location. SourceLocation getSecondScheduleModifierLoc() const { return ModifiersLoc[SECOND]; } /// Get location of ','. SourceLocation getCommaLoc() { return CommaLoc; } /// Get chunk size. Expr *getChunkSize() { return ChunkSize; } /// Get chunk size. const Expr *getChunkSize() const { return ChunkSize; } child_range children() { return child_range(reinterpret_cast<Stmt **>(&ChunkSize), reinterpret_cast<Stmt **>(&ChunkSize) + 1); } const_child_range children() const { auto Children = const_cast<OMPScheduleClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_schedule; } }; /// This represents 'ordered' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for ordered (2) /// \endcode /// In this example directive '#pragma omp for' has 'ordered' clause with /// parameter 2. class OMPOrderedClause final : public OMPClause, private llvm::TrailingObjects<OMPOrderedClause, Expr *> { friend class OMPClauseReader; friend TrailingObjects; /// Location of '('. SourceLocation LParenLoc; /// Number of for-loops. Stmt *NumForLoops = nullptr; /// Real number of loops. unsigned NumberOfLoops = 0; /// Build 'ordered' clause. /// /// \param Num Expression, possibly associated with this clause. /// \param NumLoops Number of loops, associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPOrderedClause(Expr *Num, unsigned NumLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_ordered, StartLoc, EndLoc), LParenLoc(LParenLoc), NumForLoops(Num), NumberOfLoops(NumLoops) {} /// Build an empty clause. explicit OMPOrderedClause(unsigned NumLoops) : OMPClause(llvm::omp::OMPC_ordered, SourceLocation(), SourceLocation()), NumberOfLoops(NumLoops) {} /// Set the number of associated for-loops. void setNumForLoops(Expr *Num) { NumForLoops = Num; } public: /// Build 'ordered' clause. /// /// \param Num Expression, possibly associated with this clause. /// \param NumLoops Number of loops, associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. static OMPOrderedClause *Create(const ASTContext &C, Expr *Num, unsigned NumLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Build an empty clause. static OMPOrderedClause* CreateEmpty(const ASTContext &C, unsigned NumLoops); /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the number of associated for-loops. Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); } /// Set number of iterations for the specified loop. void setLoopNumIterations(unsigned NumLoop, Expr *NumIterations); /// Get number of iterations for all the loops. 
ArrayRef<Expr *> getLoopNumIterations() const; /// Set loop counter for the specified loop. void setLoopCounter(unsigned NumLoop, Expr *Counter); /// Get loops counter for the specified loop. Expr *getLoopCounter(unsigned NumLoop); const Expr *getLoopCounter(unsigned NumLoop) const; child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); } const_child_range children() const { return const_child_range(&NumForLoops, &NumForLoops + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_ordered; } }; /// This represents 'nowait' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for nowait /// \endcode /// In this example directive '#pragma omp for' has 'nowait' clause. class OMPNowaitClause : public OMPClause { public: /// Build 'nowait' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_nowait, StartLoc, EndLoc) {} /// Build an empty clause. OMPNowaitClause() : OMPClause(llvm::omp::OMPC_nowait, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_nowait; } }; /// This represents 'untied' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp task untied /// \endcode /// In this example directive '#pragma omp task' has 'untied' clause. class OMPUntiedClause : public OMPClause { public: /// Build 'untied' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_untied, StartLoc, EndLoc) {} /// Build an empty clause. OMPUntiedClause() : OMPClause(llvm::omp::OMPC_untied, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_untied; } }; /// This represents 'mergeable' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp task mergeable /// \endcode /// In this example directive '#pragma omp task' has 'mergeable' clause. class OMPMergeableClause : public OMPClause { public: /// Build 'mergeable' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. 
OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_mergeable, StartLoc, EndLoc) {} /// Build an empty clause. OMPMergeableClause() : OMPClause(llvm::omp::OMPC_mergeable, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_mergeable; } }; /// This represents 'read' clause in the '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic read /// \endcode /// In this example directive '#pragma omp atomic' has 'read' clause. class OMPReadClause : public OMPClause { public: /// Build 'read' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_read, StartLoc, EndLoc) {} /// Build an empty clause. OMPReadClause() : OMPClause(llvm::omp::OMPC_read, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_read; } }; /// This represents 'write' clause in the '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic write /// \endcode /// In this example directive '#pragma omp atomic' has 'write' clause. class OMPWriteClause : public OMPClause { public: /// Build 'write' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_write, StartLoc, EndLoc) {} /// Build an empty clause. OMPWriteClause() : OMPClause(llvm::omp::OMPC_write, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_write; } }; /// This represents 'update' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic update /// \endcode /// In this example directive '#pragma omp atomic' has 'update' clause. /// Also, this class represents 'update' clause in '#pragma omp depobj' /// directive. /// /// \code /// #pragma omp depobj(a) update(in) /// \endcode /// In this example directive '#pragma omp depobj' has 'update' clause with 'in' /// dependence kind. 
class OMPUpdateClause final : public OMPClause, private llvm::TrailingObjects<OMPUpdateClause, SourceLocation, OpenMPDependClauseKind> { friend class OMPClauseReader; friend TrailingObjects; /// true if extended version of the clause for 'depobj' directive. bool IsExtended = false; /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. size_t numTrailingObjects(OverloadToken<SourceLocation>) const { // 2 locations: for '(' and argument location. return IsExtended ? 2 : 0; } /// Sets the the location of '(' in clause for 'depobj' directive. void setLParenLoc(SourceLocation Loc) { assert(IsExtended && "Expected extended clause."); *getTrailingObjects<SourceLocation>() = Loc; } /// Sets the the location of '(' in clause for 'depobj' directive. void setArgumentLoc(SourceLocation Loc) { assert(IsExtended && "Expected extended clause."); *std::next(getTrailingObjects<SourceLocation>(), 1) = Loc; } /// Sets the dependence kind for the clause for 'depobj' directive. void setDependencyKind(OpenMPDependClauseKind DK) { assert(IsExtended && "Expected extended clause."); *getTrailingObjects<OpenMPDependClauseKind>() = DK; } /// Build 'update' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc, bool IsExtended) : OMPClause(llvm::omp::OMPC_update, StartLoc, EndLoc), IsExtended(IsExtended) {} /// Build an empty clause. OMPUpdateClause(bool IsExtended) : OMPClause(llvm::omp::OMPC_update, SourceLocation(), SourceLocation()), IsExtended(IsExtended) {} public: /// Creates clause for 'atomic' directive. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. static OMPUpdateClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates clause for 'depobj' directive. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ArgumentLoc Location of the argument. /// \param DK Dependence kind. /// \param EndLoc Ending location of the clause. static OMPUpdateClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ArgumentLoc, OpenMPDependClauseKind DK, SourceLocation EndLoc); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param IsExtended true if extended clause for 'depobj' directive must be /// created. static OMPUpdateClause *CreateEmpty(const ASTContext &C, bool IsExtended); /// Checks if the clause is the extended clauses for 'depobj' directive. bool isExtended() const { return IsExtended; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } /// Gets the the location of '(' in clause for 'depobj' directive. SourceLocation getLParenLoc() const { assert(IsExtended && "Expected extended clause."); return *getTrailingObjects<SourceLocation>(); } /// Gets the the location of argument in clause for 'depobj' directive. 
SourceLocation getArgumentLoc() const { assert(IsExtended && "Expected extended clause."); return *std::next(getTrailingObjects<SourceLocation>(), 1); } /// Gets the dependence kind in clause for 'depobj' directive. OpenMPDependClauseKind getDependencyKind() const { assert(IsExtended && "Expected extended clause."); return *getTrailingObjects<OpenMPDependClauseKind>(); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_update; } }; /// This represents 'capture' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic capture /// \endcode /// In this example directive '#pragma omp atomic' has 'capture' clause. class OMPCaptureClause : public OMPClause { public: /// Build 'capture' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_capture, StartLoc, EndLoc) {} /// Build an empty clause. OMPCaptureClause() : OMPClause(llvm::omp::OMPC_capture, SourceLocation(), SourceLocation()) { } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_capture; } }; /// This represents 'seq_cst' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic seq_cst /// \endcode /// In this example directive '#pragma omp atomic' has 'seq_cst' clause. class OMPSeqCstClause : public OMPClause { public: /// Build 'seq_cst' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_seq_cst, StartLoc, EndLoc) {} /// Build an empty clause. OMPSeqCstClause() : OMPClause(llvm::omp::OMPC_seq_cst, SourceLocation(), SourceLocation()) { } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_seq_cst; } }; /// This represents 'acq_rel' clause in the '#pragma omp atomic|flush' /// directives. /// /// \code /// #pragma omp flush acq_rel /// \endcode /// In this example directive '#pragma omp flush' has 'acq_rel' clause. class OMPAcqRelClause final : public OMPClause { public: /// Build 'ack_rel' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPAcqRelClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_acq_rel, StartLoc, EndLoc) {} /// Build an empty clause. 
OMPAcqRelClause() : OMPClause(llvm::omp::OMPC_acq_rel, SourceLocation(), SourceLocation()) { } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_acq_rel; } }; /// This represents 'acquire' clause in the '#pragma omp atomic|flush' /// directives. /// /// \code /// #pragma omp flush acquire /// \endcode /// In this example directive '#pragma omp flush' has 'acquire' clause. class OMPAcquireClause final : public OMPClause { public: /// Build 'acquire' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPAcquireClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_acquire, StartLoc, EndLoc) {} /// Build an empty clause. OMPAcquireClause() : OMPClause(llvm::omp::OMPC_acquire, SourceLocation(), SourceLocation()) { } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_acquire; } }; /// This represents 'release' clause in the '#pragma omp atomic|flush' /// directives. /// /// \code /// #pragma omp flush release /// \endcode /// In this example directive '#pragma omp flush' has 'release' clause. class OMPReleaseClause final : public OMPClause { public: /// Build 'release' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPReleaseClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_release, StartLoc, EndLoc) {} /// Build an empty clause. OMPReleaseClause() : OMPClause(llvm::omp::OMPC_release, SourceLocation(), SourceLocation()) { } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_release; } }; /// This represents 'relaxed' clause in the '#pragma omp atomic' /// directives. /// /// \code /// #pragma omp atomic relaxed /// \endcode /// In this example directive '#pragma omp atomic' has 'relaxed' clause. class OMPRelaxedClause final : public OMPClause { public: /// Build 'relaxed' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPRelaxedClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_relaxed, StartLoc, EndLoc) {} /// Build an empty clause. 
OMPRelaxedClause() : OMPClause(llvm::omp::OMPC_relaxed, SourceLocation(), SourceLocation()) { } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_relaxed; } }; /// This represents clause 'private' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel private(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'private' /// with the variables 'a' and 'b'. class OMPPrivateClause final : public OMPVarListClause<OMPPrivateClause>, private llvm::TrailingObjects<OMPPrivateClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPPrivateClause>(llvm::omp::OMPC_private, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPPrivateClause(unsigned N) : OMPVarListClause<OMPPrivateClause>(llvm::omp::OMPC_private, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Sets the list of references to private copies with initializers for /// new private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// Gets the list of references to private copies with initializers for /// new private variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param PrivateVL List of references to private copies with initializers. static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
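  ///
  /// A sketch of the trailing expression storage the clause uses (derived
  /// from the getPrivateCopies() accessor above; the exact allocation is an
  /// implementation detail):
  /// \code
  /// { Vars[N]; PrivateCopies[N]; }
  /// \endcode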
static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N); using private_copies_iterator = MutableArrayRef<Expr *>::iterator; using private_copies_const_iterator = ArrayRef<const Expr *>::iterator; using private_copies_range = llvm::iterator_range<private_copies_iterator>; using private_copies_const_range = llvm::iterator_range<private_copies_const_iterator>; private_copies_range private_copies() { return private_copies_range(getPrivateCopies().begin(), getPrivateCopies().end()); } private_copies_const_range private_copies() const { return private_copies_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPPrivateClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_private; } }; /// This represents clause 'firstprivate' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp parallel firstprivate(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'firstprivate' /// with the variables 'a' and 'b'. class OMPFirstprivateClause final : public OMPVarListClause<OMPFirstprivateClause>, public OMPClauseWithPreInit, private llvm::TrailingObjects<OMPFirstprivateClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPFirstprivateClause>(llvm::omp::OMPC_firstprivate, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPreInit(this) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPFirstprivateClause(unsigned N) : OMPVarListClause<OMPFirstprivateClause>( llvm::omp::OMPC_firstprivate, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPreInit(this) {} /// Sets the list of references to private copies with initializers for /// new private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// Gets the list of references to private copies with initializers for /// new private variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Sets the list of references to initializer variables for new /// private variables. /// \param VL List of references. void setInits(ArrayRef<Expr *> VL); /// Gets the list of references to initializer variables for new /// private variables. MutableArrayRef<Expr *> getInits() { return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size()); } ArrayRef<const Expr *> getInits() const { return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. 
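  /// The created clause lays out its trailing expression storage as
  /// \code
  /// { Vars[]; PrivateCopies[]; Inits[]; }
  /// \endcode
  /// with one entry per variable in each array, matching getPrivateCopies()
  /// and getInits() above.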
/// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the original variables. /// \param PrivateVL List of references to private copies with initializers. /// \param InitVL List of references to auto generated variables used for /// initialization of a single array element. Used if firstprivate variable is /// of array type. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. static OMPFirstprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL, ArrayRef<Expr *> InitVL, Stmt *PreInit); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N); using private_copies_iterator = MutableArrayRef<Expr *>::iterator; using private_copies_const_iterator = ArrayRef<const Expr *>::iterator; using private_copies_range = llvm::iterator_range<private_copies_iterator>; using private_copies_const_range = llvm::iterator_range<private_copies_const_iterator>; private_copies_range private_copies() { return private_copies_range(getPrivateCopies().begin(), getPrivateCopies().end()); } private_copies_const_range private_copies() const { return private_copies_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } using inits_iterator = MutableArrayRef<Expr *>::iterator; using inits_const_iterator = ArrayRef<const Expr *>::iterator; using inits_range = llvm::iterator_range<inits_iterator>; using inits_const_range = llvm::iterator_range<inits_const_iterator>; inits_range inits() { return inits_range(getInits().begin(), getInits().end()); } inits_const_range inits() const { return inits_const_range(getInits().begin(), getInits().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPFirstprivateClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range used_children() const { auto Children = const_cast<OMPFirstprivateClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_firstprivate; } }; /// This represents clause 'lastprivate' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp simd lastprivate(a,b) /// \endcode /// In this example directive '#pragma omp simd' has clause 'lastprivate' /// with the variables 'a' and 'b'. class OMPLastprivateClause final : public OMPVarListClause<OMPLastprivateClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPLastprivateClause, Expr *> { // There are 4 additional tail-allocated arrays at the end of the class: // 1. Contains list of pseudo variables with the default initialization for // each non-firstprivate variables. Used in codegen for initialization of // lastprivate copies. // 2. List of helper expressions for proper generation of assignment operation // required for lastprivate clause. 
  // This list represents private variables (for arrays, single array
  // element).
  // 3. List of helper expressions for proper generation of assignment
  // operation required for lastprivate clause. This list represents original
  // variables (for arrays, single array element).
  // 4. List of helper expressions that represents assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of final assignment performed by the
  // lastprivate clause.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Optional lastprivate kind, e.g. 'conditional', if specified by user.
  OpenMPLastprivateModifier LPKind;
  /// Optional location of the lastprivate kind, if specified by user.
  SourceLocation LPKindLoc;
  /// Optional colon location, if specified by user.
  SourceLocation ColonLoc;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param LPKind Lastprivate kind, e.g. 'conditional'.
  /// \param LPKindLoc Location of the lastprivate kind.
  /// \param ColonLoc Location of the ':' symbol if lastprivate kind is used.
  /// \param N Number of the variables in the clause.
  OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, OpenMPLastprivateModifier LPKind,
                       SourceLocation LPKindLoc, SourceLocation ColonLoc,
                       unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(llvm::omp::OMPC_lastprivate,
                                               StartLoc, LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), LPKind(LPKind), LPKindLoc(LPKindLoc),
        ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPLastprivateClause(unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(
            llvm::omp::OMPC_lastprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Get the list of helper expressions for initialization of private
  /// copies for lastprivate variables.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private variables (for arrays,
  /// single array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent original variables (for arrays,
  /// single array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign private copy of the variable to original variable.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions.
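  /// (Overall trailing storage, one entry per variable in each array:
  /// \code
  /// { Vars[]; PrivateCopies[]; SourceExprs[]; DestinationExprs[];
  ///   AssignmentOps[]; }
  /// \endcode
  /// as reflected by the chained accessors above.)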
MutableArrayRef<Expr *> getAssignmentOps() { return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size()); } ArrayRef<const Expr *> getAssignmentOps() const { return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size()); } /// Sets lastprivate kind. void setKind(OpenMPLastprivateModifier Kind) { LPKind = Kind; } /// Sets location of the lastprivate kind. void setKindLoc(SourceLocation Loc) { LPKindLoc = Loc; } /// Sets colon symbol location. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param SrcExprs List of helper expressions for proper generation of /// assignment operation required for lastprivate clause. This list represents /// private variables (for arrays, single array element). /// \param DstExprs List of helper expressions for proper generation of /// assignment operation required for lastprivate clause. This list represents /// original variables (for arrays, single array element). /// \param AssignmentOps List of helper expressions that represents assignment /// operation: /// \code /// DstExprs = SrcExprs; /// \endcode /// Required for proper codegen of final assignment performed by the /// lastprivate clause. /// \param LPKind Lastprivate kind, e.g. 'conditional'. /// \param LPKindLoc Location of the lastprivate kind. /// \param ColonLoc Location of the ':' symbol if lastprivate kind is used. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. /// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. static OMPLastprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs, ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N); /// Lastprivate kind. OpenMPLastprivateModifier getKind() const { return LPKind; } /// Returns the location of the lastprivate kind. SourceLocation getKindLoc() const { return LPKindLoc; } /// Returns the location of the ':' symbol, if any. SourceLocation getColonLoc() const { return ColonLoc; } using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; /// Set list of helper expressions, required for generation of private /// copies of original lastprivate variables. 
void setPrivateCopies(ArrayRef<Expr *> PrivateCopies); helper_expr_const_range private_copies() const { return helper_expr_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } helper_expr_range private_copies() { return helper_expr_range(getPrivateCopies().begin(), getPrivateCopies().end()); } helper_expr_const_range source_exprs() const { return helper_expr_const_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_range source_exprs() { return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_const_range destination_exprs() const { return helper_expr_const_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_range destination_exprs() { return helper_expr_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_const_range assignment_ops() const { return helper_expr_const_range(getAssignmentOps().begin(), getAssignmentOps().end()); } helper_expr_range assignment_ops() { return helper_expr_range(getAssignmentOps().begin(), getAssignmentOps().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPLastprivateClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_lastprivate; } }; /// This represents clause 'shared' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel shared(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'shared' /// with the variables 'a' and 'b'. class OMPSharedClause final : public OMPVarListClause<OMPSharedClause>, private llvm::TrailingObjects<OMPSharedClause, Expr *> { friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPSharedClause>(llvm::omp::OMPC_shared, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPSharedClause(unsigned N) : OMPVarListClause<OMPSharedClause>(llvm::omp::OMPC_shared, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
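  ///
  /// Unlike private/firstprivate/lastprivate, 'shared' needs no helper
  /// expressions, so the only trailing storage is the variable list itself:
  /// \code
  /// { Vars[N]; }
  /// \endcode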
static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPSharedClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_shared; } }; /// This represents clause 'reduction' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp parallel reduction(+:a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'reduction' /// with operator '+' and the variables 'a' and 'b'. class OMPReductionClause final : public OMPVarListClause<OMPReductionClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPReductionClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Reduction modifier. OpenMPReductionClauseModifier Modifier = OMPC_REDUCTION_unknown; /// Reduction modifier location. SourceLocation ModifierLoc; /// Location of ':'. SourceLocation ColonLoc; /// Nested name specifier for C++. NestedNameSpecifierLoc QualifierLoc; /// Name of custom operator. DeclarationNameInfo NameInfo; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ModifierLoc Modifier location. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, OpenMPReductionClauseModifier Modifier, unsigned N, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo) : OMPVarListClause<OMPReductionClause>(llvm::omp::OMPC_reduction, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPostUpdate(this), Modifier(Modifier), ModifierLoc(ModifierLoc), ColonLoc(ColonLoc), QualifierLoc(QualifierLoc), NameInfo(NameInfo) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPReductionClause(unsigned N) : OMPVarListClause<OMPReductionClause>(llvm::omp::OMPC_reduction, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPostUpdate(this) {} /// Sets reduction modifier. void setModifier(OpenMPReductionClauseModifier M) { Modifier = M; } /// Sets location of the modifier. void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; } /// Sets location of ':' symbol in clause. void setColonLoc(SourceLocation CL) { ColonLoc = CL; } /// Sets the name info for specified reduction identifier. void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; } /// Sets the nested name specifier. void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent private copy of the reduction /// variable. void setPrivates(ArrayRef<Expr *> Privates); /// Get the list of helper privates. 
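  /// (The full trailing expression storage, in the order of the accessors
  /// below, is
  /// \code
  /// { Vars[]; Privates[]; LHSExprs[]; RHSExprs[]; ReductionOps[];
  ///   InscanCopyOps[]; InscanCopyArrayTemps[]; InscanCopyArrayElems[]; }
  /// \endcode
  /// with one entry per variable in each array; the inscan arrays are used
  /// only when the 'inscan' reduction modifier is present.)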
MutableArrayRef<Expr *> getPrivates() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivates() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent LHS expression in the final /// reduction expression performed by the reduction clause. void setLHSExprs(ArrayRef<Expr *> LHSExprs); /// Get the list of helper LHS expressions. MutableArrayRef<Expr *> getLHSExprs() { return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size()); } ArrayRef<const Expr *> getLHSExprs() const { return llvm::makeArrayRef(getPrivates().end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent RHS expression in the final /// reduction expression performed by the reduction clause. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. void setRHSExprs(ArrayRef<Expr *> RHSExprs); /// Get the list of helper destination expressions. MutableArrayRef<Expr *> getRHSExprs() { return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getRHSExprs() const { return llvm::makeArrayRef(getLHSExprs().end(), varlist_size()); } /// Set list of helper reduction expressions, required for proper /// codegen of the clause. These expressions are binary expressions or /// operator/custom reduction call that calculates new value from source /// helper expressions to destination helper expressions. void setReductionOps(ArrayRef<Expr *> ReductionOps); /// Get the list of helper reduction expressions. MutableArrayRef<Expr *> getReductionOps() { return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getReductionOps() const { return llvm::makeArrayRef(getRHSExprs().end(), varlist_size()); } /// Set list of helper copy operations for inscan reductions. /// The form is: Temps[i] = LHS[i]; void setInscanCopyOps(ArrayRef<Expr *> Ops); /// Get the list of helper inscan copy operations. MutableArrayRef<Expr *> getInscanCopyOps() { return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size()); } ArrayRef<const Expr *> getInscanCopyOps() const { return llvm::makeArrayRef(getReductionOps().end(), varlist_size()); } /// Set list of helper temp vars for inscan copy array operations. void setInscanCopyArrayTemps(ArrayRef<Expr *> CopyArrayTemps); /// Get the list of helper inscan copy temps. MutableArrayRef<Expr *> getInscanCopyArrayTemps() { return MutableArrayRef<Expr *>(getInscanCopyOps().end(), varlist_size()); } ArrayRef<const Expr *> getInscanCopyArrayTemps() const { return llvm::makeArrayRef(getInscanCopyOps().end(), varlist_size()); } /// Set list of helper temp elements vars for inscan copy array operations. void setInscanCopyArrayElems(ArrayRef<Expr *> CopyArrayElems); /// Get the list of helper inscan copy temps. MutableArrayRef<Expr *> getInscanCopyArrayElems() { return MutableArrayRef<Expr *>(getInscanCopyArrayTemps().end(), varlist_size()); } ArrayRef<const Expr *> getInscanCopyArrayElems() const { return llvm::makeArrayRef(getInscanCopyArrayTemps().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ModifierLoc Modifier location. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. 
/// \param VL The variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. /// \param Privates List of helper expressions for proper generation of /// private copies. /// \param LHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// LHSs of the reduction expressions. /// \param RHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// RHSs of the reduction expressions. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. /// \param ReductionOps List of helper expressions that represents reduction /// expressions: /// \code /// LHSExprs binop RHSExprs; /// operator binop(LHSExpr, RHSExpr); /// <CutomReduction>(LHSExpr, RHSExpr); /// \endcode /// Required for proper codegen of final reduction operation performed by the /// reduction clause. /// \param CopyOps List of copy operations for inscan reductions: /// \code /// TempExprs = LHSExprs; /// \endcode /// \param CopyArrayTemps Temp arrays for prefix sums. /// \param CopyArrayElems Temp arrays for prefix sums. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. /// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. static OMPReductionClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, OpenMPReductionClauseModifier Modifier, ArrayRef<Expr *> VL, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates, ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> CopyOps, ArrayRef<Expr *> CopyArrayTemps, ArrayRef<Expr *> CopyArrayElems, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// \param Modifier Reduction modifier. static OMPReductionClause * CreateEmpty(const ASTContext &C, unsigned N, OpenMPReductionClauseModifier Modifier); /// Returns modifier. OpenMPReductionClauseModifier getModifier() const { return Modifier; } /// Returns modifier location. SourceLocation getModifierLoc() const { return ModifierLoc; } /// Gets location of ':' symbol in clause. SourceLocation getColonLoc() const { return ColonLoc; } /// Gets the name info for specified reduction identifier. const DeclarationNameInfo &getNameInfo() const { return NameInfo; } /// Gets the nested name specifier. 
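  /// (Non-empty only for C++ qualified reduction identifiers, e.g. the
  /// 'mylib::' qualifier in
  /// \code
  /// #pragma omp parallel reduction(mylib::minimum : a)
  /// \endcode
  /// where 'mylib::minimum' names a user-declared reduction; the names are
  /// illustrative.)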
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; } using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; helper_expr_const_range privates() const { return helper_expr_const_range(getPrivates().begin(), getPrivates().end()); } helper_expr_range privates() { return helper_expr_range(getPrivates().begin(), getPrivates().end()); } helper_expr_const_range lhs_exprs() const { return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_range lhs_exprs() { return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_const_range rhs_exprs() const { return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_range rhs_exprs() { return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_const_range reduction_ops() const { return helper_expr_const_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_range reduction_ops() { return helper_expr_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_const_range copy_ops() const { return helper_expr_const_range(getInscanCopyOps().begin(), getInscanCopyOps().end()); } helper_expr_range copy_ops() { return helper_expr_range(getInscanCopyOps().begin(), getInscanCopyOps().end()); } helper_expr_const_range copy_array_temps() const { return helper_expr_const_range(getInscanCopyArrayTemps().begin(), getInscanCopyArrayTemps().end()); } helper_expr_range copy_array_temps() { return helper_expr_range(getInscanCopyArrayTemps().begin(), getInscanCopyArrayTemps().end()); } helper_expr_const_range copy_array_elems() const { return helper_expr_const_range(getInscanCopyArrayElems().begin(), getInscanCopyArrayElems().end()); } helper_expr_range copy_array_elems() { return helper_expr_range(getInscanCopyArrayElems().begin(), getInscanCopyArrayElems().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPReductionClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range used_children() const { auto Children = const_cast<OMPReductionClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_reduction; } }; /// This represents clause 'task_reduction' in the '#pragma omp taskgroup' /// directives. /// /// \code /// #pragma omp taskgroup task_reduction(+:a,b) /// \endcode /// In this example directive '#pragma omp taskgroup' has clause /// 'task_reduction' with operator '+' and the variables 'a' and 'b'. class OMPTaskReductionClause final : public OMPVarListClause<OMPTaskReductionClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPTaskReductionClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of ':'. SourceLocation ColonLoc; /// Nested name specifier for C++. NestedNameSpecifierLoc QualifierLoc; /// Name of custom operator. 
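  /// For a user-defined reduction this is the declare-reduction identifier,
  /// e.g. 'merge' in
  /// \code
  /// #pragma omp taskgroup task_reduction(merge : v)
  /// \endcode
  /// (identifier and variable are illustrative); for builtin reductions it
  /// holds the operator itself.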
DeclarationNameInfo NameInfo; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param ColonLoc Location of ':'. /// \param N Number of the variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. OMPTaskReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo) : OMPVarListClause<OMPTaskReductionClause>( llvm::omp::OMPC_task_reduction, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc), QualifierLoc(QualifierLoc), NameInfo(NameInfo) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPTaskReductionClause(unsigned N) : OMPVarListClause<OMPTaskReductionClause>( llvm::omp::OMPC_task_reduction, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPostUpdate(this) {} /// Sets location of ':' symbol in clause. void setColonLoc(SourceLocation CL) { ColonLoc = CL; } /// Sets the name info for specified reduction identifier. void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; } /// Sets the nested name specifier. void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent private copy of the reduction variable. void setPrivates(ArrayRef<Expr *> Privates); /// Get the list of helper privates. MutableArrayRef<Expr *> getPrivates() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivates() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent LHS expression in the final reduction /// expression performed by the reduction clause. void setLHSExprs(ArrayRef<Expr *> LHSExprs); /// Get the list of helper LHS expressions. MutableArrayRef<Expr *> getLHSExprs() { return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size()); } ArrayRef<const Expr *> getLHSExprs() const { return llvm::makeArrayRef(getPrivates().end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent RHS expression in the final reduction /// expression performed by the reduction clause. Also, variables in these /// expressions are used for proper initialization of reduction copies. void setRHSExprs(ArrayRef<Expr *> RHSExprs); /// Get the list of helper destination expressions. MutableArrayRef<Expr *> getRHSExprs() { return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getRHSExprs() const { return llvm::makeArrayRef(getLHSExprs().end(), varlist_size()); } /// Set list of helper reduction expressions, required for proper /// codegen of the clause. These expressions are binary expressions or /// operator/custom reduction call that calculates new value from source /// helper expressions to destination helper expressions. void setReductionOps(ArrayRef<Expr *> ReductionOps); /// Get the list of helper reduction expressions. 
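  /// (Each entry takes one of the forms documented on Create() below, e.g.
  /// \code
  /// LHSExprs[i] binop RHSExprs[i]
  /// operator binop(LHSExprs[i], RHSExprs[i])
  /// <CustomReduction>(LHSExprs[i], RHSExprs[i])
  /// \endcode
  /// one per variable in the clause.)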
MutableArrayRef<Expr *> getReductionOps() { return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getReductionOps() const { return llvm::makeArrayRef(getRHSExprs().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL The variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. /// \param Privates List of helper expressions for proper generation of /// private copies. /// \param LHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// LHSs of the reduction expressions. /// \param RHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// RHSs of the reduction expressions. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. /// \param ReductionOps List of helper expressions that represents reduction /// expressions: /// \code /// LHSExprs binop RHSExprs; /// operator binop(LHSExpr, RHSExpr); /// <CutomReduction>(LHSExpr, RHSExpr); /// \endcode /// Required for proper codegen of final reduction operation performed by the /// reduction clause. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. /// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. static OMPTaskReductionClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates, ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPTaskReductionClause *CreateEmpty(const ASTContext &C, unsigned N); /// Gets location of ':' symbol in clause. SourceLocation getColonLoc() const { return ColonLoc; } /// Gets the name info for specified reduction identifier. const DeclarationNameInfo &getNameInfo() const { return NameInfo; } /// Gets the nested name specifier. 
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; } using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; helper_expr_const_range privates() const { return helper_expr_const_range(getPrivates().begin(), getPrivates().end()); } helper_expr_range privates() { return helper_expr_range(getPrivates().begin(), getPrivates().end()); } helper_expr_const_range lhs_exprs() const { return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_range lhs_exprs() { return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_const_range rhs_exprs() const { return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_range rhs_exprs() { return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_const_range reduction_ops() const { return helper_expr_const_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_range reduction_ops() { return helper_expr_range(getReductionOps().begin(), getReductionOps().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPTaskReductionClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_task_reduction; } }; /// This represents clause 'in_reduction' in the '#pragma omp task' directives. /// /// \code /// #pragma omp task in_reduction(+:a,b) /// \endcode /// In this example directive '#pragma omp task' has clause 'in_reduction' with /// operator '+' and the variables 'a' and 'b'. class OMPInReductionClause final : public OMPVarListClause<OMPInReductionClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPInReductionClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of ':'. SourceLocation ColonLoc; /// Nested name specifier for C++. NestedNameSpecifierLoc QualifierLoc; /// Name of custom operator. DeclarationNameInfo NameInfo; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param ColonLoc Location of ':'. /// \param N Number of the variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. OMPInReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo) : OMPVarListClause<OMPInReductionClause>(llvm::omp::OMPC_in_reduction, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc), QualifierLoc(QualifierLoc), NameInfo(NameInfo) {} /// Build an empty clause. /// /// \param N Number of variables. 
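  // Illustrative note: each in_reduction list item must also appear in a
  // task_reduction (or reduction) clause of an enclosing construct, e.g.
  //
  //   #pragma omp taskgroup task_reduction(+ : a)
  //   {
  //   #pragma omp task in_reduction(+ : a)
  //     a += compute();
  //   }
  //
  // and the taskgroup descriptors stored below record that pairing
  // ('compute()' is a placeholder).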
explicit OMPInReductionClause(unsigned N) : OMPVarListClause<OMPInReductionClause>( llvm::omp::OMPC_in_reduction, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPostUpdate(this) {} /// Sets location of ':' symbol in clause. void setColonLoc(SourceLocation CL) { ColonLoc = CL; } /// Sets the name info for specified reduction identifier. void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; } /// Sets the nested name specifier. void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent private copy of the reduction variable. void setPrivates(ArrayRef<Expr *> Privates); /// Get the list of helper privates. MutableArrayRef<Expr *> getPrivates() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivates() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent LHS expression in the final reduction /// expression performed by the reduction clause. void setLHSExprs(ArrayRef<Expr *> LHSExprs); /// Get the list of helper LHS expressions. MutableArrayRef<Expr *> getLHSExprs() { return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size()); } ArrayRef<const Expr *> getLHSExprs() const { return llvm::makeArrayRef(getPrivates().end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent RHS expression in the final reduction /// expression performed by the reduction clause. Also, variables in these /// expressions are used for proper initialization of reduction copies. void setRHSExprs(ArrayRef<Expr *> RHSExprs); /// Get the list of helper destination expressions. MutableArrayRef<Expr *> getRHSExprs() { return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getRHSExprs() const { return llvm::makeArrayRef(getLHSExprs().end(), varlist_size()); } /// Set list of helper reduction expressions, required for proper /// codegen of the clause. These expressions are binary expressions or /// operator/custom reduction call that calculates new value from source /// helper expressions to destination helper expressions. void setReductionOps(ArrayRef<Expr *> ReductionOps); /// Get the list of helper reduction expressions. MutableArrayRef<Expr *> getReductionOps() { return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getReductionOps() const { return llvm::makeArrayRef(getRHSExprs().end(), varlist_size()); } /// Set list of helper reduction taskgroup descriptors. void setTaskgroupDescriptors(ArrayRef<Expr *> ReductionOps); /// Get the list of helper reduction taskgroup descriptors. MutableArrayRef<Expr *> getTaskgroupDescriptors() { return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size()); } ArrayRef<const Expr *> getTaskgroupDescriptors() const { return llvm::makeArrayRef(getReductionOps().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL The variables in the clause. 
/// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. /// \param Privates List of helper expressions for proper generation of /// private copies. /// \param LHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// LHSs of the reduction expressions. /// \param RHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// RHSs of the reduction expressions. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. /// \param ReductionOps List of helper expressions that represents reduction /// expressions: /// \code /// LHSExprs binop RHSExprs; /// operator binop(LHSExpr, RHSExpr); /// <CutomReduction>(LHSExpr, RHSExpr); /// \endcode /// Required for proper codegen of final reduction operation performed by the /// reduction clause. /// \param TaskgroupDescriptors List of helper taskgroup descriptors for /// corresponding items in parent taskgroup task_reduction clause. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. /// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. static OMPInReductionClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates, ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> TaskgroupDescriptors, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPInReductionClause *CreateEmpty(const ASTContext &C, unsigned N); /// Gets location of ':' symbol in clause. SourceLocation getColonLoc() const { return ColonLoc; } /// Gets the name info for specified reduction identifier. const DeclarationNameInfo &getNameInfo() const { return NameInfo; } /// Gets the nested name specifier. 
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; } using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; helper_expr_const_range privates() const { return helper_expr_const_range(getPrivates().begin(), getPrivates().end()); } helper_expr_range privates() { return helper_expr_range(getPrivates().begin(), getPrivates().end()); } helper_expr_const_range lhs_exprs() const { return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_range lhs_exprs() { return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_const_range rhs_exprs() const { return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_range rhs_exprs() { return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_const_range reduction_ops() const { return helper_expr_const_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_range reduction_ops() { return helper_expr_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_const_range taskgroup_descriptors() const { return helper_expr_const_range(getTaskgroupDescriptors().begin(), getTaskgroupDescriptors().end()); } helper_expr_range taskgroup_descriptors() { return helper_expr_range(getTaskgroupDescriptors().begin(), getTaskgroupDescriptors().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPInReductionClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_in_reduction; } }; /// This represents clause 'linear' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp simd linear(a,b : 2) /// \endcode /// In this example directive '#pragma omp simd' has clause 'linear' /// with variables 'a', 'b' and linear step '2'. class OMPLinearClause final : public OMPVarListClause<OMPLinearClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPLinearClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Modifier of 'linear' clause. OpenMPLinearClauseKind Modifier = OMPC_LINEAR_val; /// Location of linear modifier if any. SourceLocation ModifierLoc; /// Location of ':'. SourceLocation ColonLoc; /// Sets the linear step for clause. void setStep(Expr *Step) { *(getFinals().end()) = Step; } /// Sets the expression to calculate linear step for clause. void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; } /// Build 'linear' clause with given number of variables \a NumVars. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param NumVars Number of variables. 
OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned NumVars) : OMPVarListClause<OMPLinearClause>(llvm::omp::OMPC_linear, StartLoc, LParenLoc, EndLoc, NumVars), OMPClauseWithPostUpdate(this), Modifier(Modifier), ModifierLoc(ModifierLoc), ColonLoc(ColonLoc) {} /// Build an empty clause. /// /// \param NumVars Number of variables. explicit OMPLinearClause(unsigned NumVars) : OMPVarListClause<OMPLinearClause>(llvm::omp::OMPC_linear, SourceLocation(), SourceLocation(), SourceLocation(), NumVars), OMPClauseWithPostUpdate(this) {} /// Gets the list of initial values for linear variables. /// /// There are NumVars expressions with initial values allocated after the /// varlist, they are followed by NumVars update expressions (used to update /// the linear variable's value on current iteration) and they are followed by /// NumVars final expressions (used to calculate the linear variable's /// value after the loop body). After these lists, there are 2 helper /// expressions - linear step and a helper to calculate it before the /// loop body (used when the linear step is not constant): /// /// { Vars[] /* in OMPVarListClause */; Privates[]; Inits[]; Updates[]; /// Finals[]; Step; CalcStep; } MutableArrayRef<Expr *> getPrivates() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivates() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } MutableArrayRef<Expr *> getInits() { return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size()); } ArrayRef<const Expr *> getInits() const { return llvm::makeArrayRef(getPrivates().end(), varlist_size()); } /// Sets the list of update expressions for linear variables. MutableArrayRef<Expr *> getUpdates() { return MutableArrayRef<Expr *>(getInits().end(), varlist_size()); } ArrayRef<const Expr *> getUpdates() const { return llvm::makeArrayRef(getInits().end(), varlist_size()); } /// Sets the list of final update expressions for linear variables. MutableArrayRef<Expr *> getFinals() { return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size()); } ArrayRef<const Expr *> getFinals() const { return llvm::makeArrayRef(getUpdates().end(), varlist_size()); } /// Gets the list of used expressions for linear variables. MutableArrayRef<Expr *> getUsedExprs() { return MutableArrayRef<Expr *>(getFinals().end() + 2, varlist_size() + 1); } ArrayRef<const Expr *> getUsedExprs() const { return llvm::makeArrayRef(getFinals().end() + 2, varlist_size() + 1); } /// Sets the list of the copies of original linear variables. /// \param PL List of expressions. void setPrivates(ArrayRef<Expr *> PL); /// Sets the list of the initial values for linear variables. /// \param IL List of expressions. void setInits(ArrayRef<Expr *> IL); public: /// Creates clause with a list of variables \a VL and a linear step /// \a Step. /// /// \param C AST Context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Modifier Modifier of 'linear' clause. /// \param ModifierLoc Modifier location. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param PL List of private copies of original variables. /// \param IL List of initial values for the variables. /// \param Step Linear step. /// \param CalcStep Calculation of the linear step. 
/// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. /// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. static OMPLinearClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param NumVars Number of variables. static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars); /// Set modifier. void setModifier(OpenMPLinearClauseKind Kind) { Modifier = Kind; } /// Return modifier. OpenMPLinearClauseKind getModifier() const { return Modifier; } /// Set modifier location. void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; } /// Return modifier location. SourceLocation getModifierLoc() const { return ModifierLoc; } /// Sets the location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// Returns the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// Returns linear step. Expr *getStep() { return *(getFinals().end()); } /// Returns linear step. const Expr *getStep() const { return *(getFinals().end()); } /// Returns expression to calculate linear step. Expr *getCalcStep() { return *(getFinals().end() + 1); } /// Returns expression to calculate linear step. const Expr *getCalcStep() const { return *(getFinals().end() + 1); } /// Sets the list of update expressions for linear variables. /// \param UL List of expressions. void setUpdates(ArrayRef<Expr *> UL); /// Sets the list of final update expressions for linear variables. /// \param FL List of expressions. void setFinals(ArrayRef<Expr *> FL); /// Sets the list of used expressions for the linear clause. 
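  // A minimal read-back sketch for the trailing storage documented above
  // (assumes an already-built clause object `LC`; the local names are
  // hypothetical and only illustrate accessors declared in this class):
  //
  //   Expr *Step = LC->getStep();          // slot right past Finals[]
  //   Expr *CalcStep = LC->getCalcStep();  // the following slot
  //   for (Expr *Priv : LC->privates())    // NumVars private copies
  //     (void)Priv;
  //   for (Expr *Upd : LC->updates())      // NumVars update expressions
  //     (void)Upd;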
void setUsedExprs(ArrayRef<Expr *> UE); using privates_iterator = MutableArrayRef<Expr *>::iterator; using privates_const_iterator = ArrayRef<const Expr *>::iterator; using privates_range = llvm::iterator_range<privates_iterator>; using privates_const_range = llvm::iterator_range<privates_const_iterator>; privates_range privates() { return privates_range(getPrivates().begin(), getPrivates().end()); } privates_const_range privates() const { return privates_const_range(getPrivates().begin(), getPrivates().end()); } using inits_iterator = MutableArrayRef<Expr *>::iterator; using inits_const_iterator = ArrayRef<const Expr *>::iterator; using inits_range = llvm::iterator_range<inits_iterator>; using inits_const_range = llvm::iterator_range<inits_const_iterator>; inits_range inits() { return inits_range(getInits().begin(), getInits().end()); } inits_const_range inits() const { return inits_const_range(getInits().begin(), getInits().end()); } using updates_iterator = MutableArrayRef<Expr *>::iterator; using updates_const_iterator = ArrayRef<const Expr *>::iterator; using updates_range = llvm::iterator_range<updates_iterator>; using updates_const_range = llvm::iterator_range<updates_const_iterator>; updates_range updates() { return updates_range(getUpdates().begin(), getUpdates().end()); } updates_const_range updates() const { return updates_const_range(getUpdates().begin(), getUpdates().end()); } using finals_iterator = MutableArrayRef<Expr *>::iterator; using finals_const_iterator = ArrayRef<const Expr *>::iterator; using finals_range = llvm::iterator_range<finals_iterator>; using finals_const_range = llvm::iterator_range<finals_const_iterator>; finals_range finals() { return finals_range(getFinals().begin(), getFinals().end()); } finals_const_range finals() const { return finals_const_range(getFinals().begin(), getFinals().end()); } using used_expressions_iterator = MutableArrayRef<Expr *>::iterator; using used_expressions_const_iterator = ArrayRef<const Expr *>::iterator; using used_expressions_range = llvm::iterator_range<used_expressions_iterator>; using used_expressions_const_range = llvm::iterator_range<used_expressions_const_iterator>; used_expressions_range used_expressions() { return finals_range(getUsedExprs().begin(), getUsedExprs().end()); } used_expressions_const_range used_expressions() const { return finals_const_range(getUsedExprs().begin(), getUsedExprs().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPLinearClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPLinearClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_linear; } }; /// This represents clause 'aligned' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp simd aligned(a,b : 8) /// \endcode /// In this example directive '#pragma omp simd' has clause 'aligned' /// with variables 'a', 'b' and alignment '8'. class OMPAlignedClause final : public OMPVarListClause<OMPAlignedClause>, private llvm::TrailingObjects<OMPAlignedClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of ':'. 
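  // Storage sketch for 'aligned' (assumes a built clause object `AC`;
  // illustrative only): the clause keeps exactly one extra expression, the
  // alignment, in the slot immediately after the variable list.
  //
  //   Expr *Align = AC->getAlignment();   // reads *varlist_end()
  //   unsigned N = AC->varlist_size();    // number of aligned variables
  //   (void)Align; (void)N;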
SourceLocation ColonLoc; /// Sets the alignment for clause. void setAlignment(Expr *A) { *varlist_end() = A; } /// Build 'aligned' clause with given number of variables \a NumVars. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param NumVars Number of variables. OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned NumVars) : OMPVarListClause<OMPAlignedClause>(llvm::omp::OMPC_aligned, StartLoc, LParenLoc, EndLoc, NumVars), ColonLoc(ColonLoc) {} /// Build an empty clause. /// /// \param NumVars Number of variables. explicit OMPAlignedClause(unsigned NumVars) : OMPVarListClause<OMPAlignedClause>(llvm::omp::OMPC_aligned, SourceLocation(), SourceLocation(), SourceLocation(), NumVars) {} public: /// Creates clause with a list of variables \a VL and alignment \a A. /// /// \param C AST Context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param A Alignment. static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, Expr *A); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param NumVars Number of variables. static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars); /// Sets the location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// Returns the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// Returns alignment. Expr *getAlignment() { return *varlist_end(); } /// Returns alignment. const Expr *getAlignment() const { return *varlist_end(); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPAlignedClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_aligned; } }; /// This represents clause 'copyin' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel copyin(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'copyin' /// with the variables 'a' and 'b'. class OMPCopyinClause final : public OMPVarListClause<OMPCopyinClause>, private llvm::TrailingObjects<OMPCopyinClause, Expr *> { // Class has 3 additional tail allocated arrays: // 1. List of helper expressions for proper generation of assignment operation // required for copyin clause. This list represents sources. // 2. List of helper expressions for proper generation of assignment operation // required for copyin clause. This list represents destinations. // 3. 
List of helper expressions that represents assignment operation: // \code // DstExprs = SrcExprs; // \endcode // Required for proper codegen of propagation of master's thread values of // threadprivate variables to local instances of that variables in other // implicit threads. friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPCopyinClause>(llvm::omp::OMPC_copyin, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPCopyinClause(unsigned N) : OMPVarListClause<OMPCopyinClause>(llvm::omp::OMPC_copyin, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent source expression in the final /// assignment statement performed by the copyin clause. void setSourceExprs(ArrayRef<Expr *> SrcExprs); /// Get the list of helper source expressions. MutableArrayRef<Expr *> getSourceExprs() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getSourceExprs() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent destination expression in the final /// assignment statement performed by the copyin clause. void setDestinationExprs(ArrayRef<Expr *> DstExprs); /// Get the list of helper destination expressions. MutableArrayRef<Expr *> getDestinationExprs() { return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size()); } ArrayRef<const Expr *> getDestinationExprs() const { return llvm::makeArrayRef(getSourceExprs().end(), varlist_size()); } /// Set list of helper assignment expressions, required for proper /// codegen of the clause. These expressions are assignment expressions that /// assign source helper expressions to destination helper expressions /// correspondingly. void setAssignmentOps(ArrayRef<Expr *> AssignmentOps); /// Get the list of helper assignment expressions. MutableArrayRef<Expr *> getAssignmentOps() { return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size()); } ArrayRef<const Expr *> getAssignmentOps() const { return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param SrcExprs List of helper expressions for proper generation of /// assignment operation required for copyin clause. This list represents /// sources. /// \param DstExprs List of helper expressions for proper generation of /// assignment operation required for copyin clause. This list represents /// destinations. 
/// \param AssignmentOps List of helper expressions that represents assignment /// operation: /// \code /// DstExprs = SrcExprs; /// \endcode /// Required for proper codegen of propagation of master's thread values of /// threadprivate variables to local instances of that variables in other /// implicit threads. static OMPCopyinClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs, ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps); /// Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N); using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; helper_expr_const_range source_exprs() const { return helper_expr_const_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_range source_exprs() { return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_const_range destination_exprs() const { return helper_expr_const_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_range destination_exprs() { return helper_expr_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_const_range assignment_ops() const { return helper_expr_const_range(getAssignmentOps().begin(), getAssignmentOps().end()); } helper_expr_range assignment_ops() { return helper_expr_range(getAssignmentOps().begin(), getAssignmentOps().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPCopyinClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_copyin; } }; /// This represents clause 'copyprivate' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp single copyprivate(a,b) /// \endcode /// In this example directive '#pragma omp single' has clause 'copyprivate' /// with the variables 'a' and 'b'. class OMPCopyprivateClause final : public OMPVarListClause<OMPCopyprivateClause>, private llvm::TrailingObjects<OMPCopyprivateClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPCopyprivateClause>(llvm::omp::OMPC_copyprivate, StartLoc, LParenLoc, EndLoc, N) { } /// Build an empty clause. /// /// \param N Number of variables. 
explicit OMPCopyprivateClause(unsigned N) : OMPVarListClause<OMPCopyprivateClause>( llvm::omp::OMPC_copyprivate, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent source expression in the final /// assignment statement performed by the copyprivate clause. void setSourceExprs(ArrayRef<Expr *> SrcExprs); /// Get the list of helper source expressions. MutableArrayRef<Expr *> getSourceExprs() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getSourceExprs() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent destination expression in the final /// assignment statement performed by the copyprivate clause. void setDestinationExprs(ArrayRef<Expr *> DstExprs); /// Get the list of helper destination expressions. MutableArrayRef<Expr *> getDestinationExprs() { return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size()); } ArrayRef<const Expr *> getDestinationExprs() const { return llvm::makeArrayRef(getSourceExprs().end(), varlist_size()); } /// Set list of helper assignment expressions, required for proper /// codegen of the clause. These expressions are assignment expressions that /// assign source helper expressions to destination helper expressions /// correspondingly. void setAssignmentOps(ArrayRef<Expr *> AssignmentOps); /// Get the list of helper assignment expressions. MutableArrayRef<Expr *> getAssignmentOps() { return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size()); } ArrayRef<const Expr *> getAssignmentOps() const { return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param SrcExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// sources. /// \param DstExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// destinations. /// \param AssignmentOps List of helper expressions that represents assignment /// operation: /// \code /// DstExprs = SrcExprs; /// \endcode /// Required for proper codegen of final assignment performed by the /// copyprivate clause. static OMPCopyprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs, ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps); /// Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
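  // Codegen-style walk sketch (assumes a built clause object `CC`; the
  // lockstep iteration is illustrative and uses only the ranges declared in
  // this class): SrcExprs[i], DstExprs[i] and AssignmentOps[i] together
  // describe one per-variable assignment DstExprs[i] = SrcExprs[i].
  //
  //   auto SrcI = CC->source_exprs().begin();
  //   auto DstI = CC->destination_exprs().begin();
  //   for (const Expr *Op : CC->assignment_ops()) {
  //     (void)*SrcI++;
  //     (void)*DstI++;
  //     (void)Op;   // pre-built assignment expression for this variable
  //   }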
static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N); using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; helper_expr_const_range source_exprs() const { return helper_expr_const_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_range source_exprs() { return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_const_range destination_exprs() const { return helper_expr_const_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_range destination_exprs() { return helper_expr_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_const_range assignment_ops() const { return helper_expr_const_range(getAssignmentOps().begin(), getAssignmentOps().end()); } helper_expr_range assignment_ops() { return helper_expr_range(getAssignmentOps().begin(), getAssignmentOps().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPCopyprivateClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_copyprivate; } }; /// This represents implicit clause 'flush' for the '#pragma omp flush' /// directive. /// This clause does not exist by itself, it can be only as a part of 'omp /// flush' directive. This clause is introduced to keep the original structure /// of \a OMPExecutableDirective class and its derivatives and to use the /// existing infrastructure of clauses with the list of variables. /// /// \code /// #pragma omp flush(a,b) /// \endcode /// In this example directive '#pragma omp flush' has implicit clause 'flush' /// with the variables 'a' and 'b'. class OMPFlushClause final : public OMPVarListClause<OMPFlushClause>, private llvm::TrailingObjects<OMPFlushClause, Expr *> { friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPFlushClause>(llvm::omp::OMPC_flush, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPFlushClause(unsigned N) : OMPVarListClause<OMPFlushClause>(llvm::omp::OMPC_flush, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. 
  static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                SourceLocation LParenLoc,
                                SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFlushClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_flush;
  }
};

/// This represents implicit clause 'depobj' for the '#pragma omp depobj'
/// directive.
/// This clause does not exist by itself; it can only appear as part of the
/// 'omp depobj' directive. The clause is introduced to keep the original
/// structure of the \a OMPExecutableDirective class and its derivatives and
/// to reuse the existing infrastructure of clauses with a list of variables.
///
/// \code
/// #pragma omp depobj(a) destroy
/// \endcode
/// In this example directive '#pragma omp depobj' has implicit clause 'depobj'
/// with the depobj 'a'.
class OMPDepobjClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Depobj expression associated with the clause.
  Expr *Depobj = nullptr;

  /// Build a clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDepobjClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_depobj, StartLoc, EndLoc),
        LParenLoc(LParenLoc) {}

  /// Build an empty clause.
  explicit OMPDepobjClause()
      : OMPClause(llvm::omp::OMPC_depobj, SourceLocation(), SourceLocation()) {}

  void setDepobj(Expr *E) { Depobj = E; }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

public:
  /// Creates a clause.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param Depobj depobj expression associated with the 'depobj' directive.
  static OMPDepobjClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc, Expr *Depobj);

  /// Creates an empty clause.
  ///
  /// \param C AST context.
  static OMPDepobjClause *CreateEmpty(const ASTContext &C);

  /// Returns depobj expression associated with the clause.
  Expr *getDepobj() { return Depobj; }
  const Expr *getDepobj() const { return Depobj; }

  /// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; } child_range children() { return child_range(reinterpret_cast<Stmt **>(&Depobj), reinterpret_cast<Stmt **>(&Depobj) + 1); } const_child_range children() const { auto Children = const_cast<OMPDepobjClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_depobj; } }; /// This represents implicit clause 'depend' for the '#pragma omp task' /// directive. /// /// \code /// #pragma omp task depend(in:a,b) /// \endcode /// In this example directive '#pragma omp task' with clause 'depend' with the /// variables 'a' and 'b' with dependency 'in'. class OMPDependClause final : public OMPVarListClause<OMPDependClause>, private llvm::TrailingObjects<OMPDependClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Dependency type (one of in, out, inout). OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown; /// Dependency type location. SourceLocation DepLoc; /// Colon location. SourceLocation ColonLoc; /// Number of loops, associated with the depend clause. unsigned NumLoops = 0; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// \param NumLoops Number of loops that is associated with this depend /// clause. OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N, unsigned NumLoops) : OMPVarListClause<OMPDependClause>(llvm::omp::OMPC_depend, StartLoc, LParenLoc, EndLoc, N), NumLoops(NumLoops) {} /// Build an empty clause. /// /// \param N Number of variables. /// \param NumLoops Number of loops that is associated with this depend /// clause. explicit OMPDependClause(unsigned N, unsigned NumLoops) : OMPVarListClause<OMPDependClause>(llvm::omp::OMPC_depend, SourceLocation(), SourceLocation(), SourceLocation(), N), NumLoops(NumLoops) {} /// Set dependency kind. void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; } /// Set dependency kind and its location. void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; } /// Set colon location. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// Sets optional dependency modifier. void setModifier(Expr *DepModifier); public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param DepKind Dependency type. /// \param DepLoc Location of the dependency type. /// \param ColonLoc Colon location. /// \param VL List of references to the variables. /// \param NumLoops Number of loops that is associated with this depend /// clause. static OMPDependClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, Expr *DepModifier, OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VL, unsigned NumLoops); /// Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
/// \param NumLoops Number of loops that is associated with this depend /// clause. static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N, unsigned NumLoops); /// Get dependency type. OpenMPDependClauseKind getDependencyKind() const { return DepKind; } /// Return optional depend modifier. Expr *getModifier(); const Expr *getModifier() const { return const_cast<OMPDependClause *>(this)->getModifier(); } /// Get dependency type location. SourceLocation getDependencyLoc() const { return DepLoc; } /// Get colon location. SourceLocation getColonLoc() const { return ColonLoc; } /// Get number of loops associated with the clause. unsigned getNumLoops() const { return NumLoops; } /// Set the loop data for the depend clauses with 'sink|source' kind of /// dependency. void setLoopData(unsigned NumLoop, Expr *Cnt); /// Get the loop data. Expr *getLoopData(unsigned NumLoop); const Expr *getLoopData(unsigned NumLoop) const; child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPDependClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_depend; } }; /// This represents 'device' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp target device(a) /// \endcode /// In this example directive '#pragma omp target' has clause 'device' /// with single expression 'a'. class OMPDeviceClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Device clause modifier. OpenMPDeviceClauseModifier Modifier = OMPC_DEVICE_unknown; /// Location of the modifier. SourceLocation ModifierLoc; /// Device number. Stmt *Device = nullptr; /// Set the device number. /// /// \param E Device number. void setDevice(Expr *E) { Device = E; } /// Sets modifier. void setModifier(OpenMPDeviceClauseModifier M) { Modifier = M; } /// Setst modifier location. void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; } public: /// Build 'device' clause. /// /// \param Modifier Clause modifier. /// \param E Expression associated with this clause. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param ModifierLoc Modifier location. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_device, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Modifier(Modifier), ModifierLoc(ModifierLoc), Device(E) { setPreInitStmt(HelperE, CaptureRegion); } /// Build an empty clause. OMPDeviceClause() : OMPClause(llvm::omp::OMPC_device, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. 
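  // Usage sketch (assumes a built clause object `DC`; purely illustrative,
  // using only accessors declared in this class):
  //
  //   if (DC->getModifier() == OMPC_DEVICE_unknown)
  //     ;                                // plain device(<expr>) form, no modifier written
  //   Expr *DevNum = DC->getDevice();    // the device-number expression
  //   (void)DevNum;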
SourceLocation getLParenLoc() const { return LParenLoc; } /// Return device number. Expr *getDevice() { return cast<Expr>(Device); } /// Return device number. Expr *getDevice() const { return cast<Expr>(Device); } /// Gets modifier. OpenMPDeviceClauseModifier getModifier() const { return Modifier; } /// Gets modifier location. SourceLocation getModifierLoc() const { return ModifierLoc; } child_range children() { return child_range(&Device, &Device + 1); } const_child_range children() const { return const_child_range(&Device, &Device + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_device; } }; /// This represents 'threads' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp ordered threads /// \endcode /// In this example directive '#pragma omp ordered' has simple 'threads' clause. class OMPThreadsClause : public OMPClause { public: /// Build 'threads' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_threads, StartLoc, EndLoc) {} /// Build an empty clause. OMPThreadsClause() : OMPClause(llvm::omp::OMPC_threads, SourceLocation(), SourceLocation()) { } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_threads; } }; /// This represents 'simd' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp ordered simd /// \endcode /// In this example directive '#pragma omp ordered' has simple 'simd' clause. class OMPSIMDClause : public OMPClause { public: /// Build 'simd' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_simd, StartLoc, EndLoc) {} /// Build an empty clause. OMPSIMDClause() : OMPClause(llvm::omp::OMPC_simd, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_simd; } }; /// Struct that defines common infrastructure to handle mappable /// expressions used in OpenMP clauses. class OMPClauseMappableExprCommon { public: /// Class that represents a component of a mappable expression. E.g. /// for an expression S.a, the first component is a declaration reference /// expression associated with 'S' and the second is a member expression /// associated with the field declaration 'a'. 
If the expression is an array /// subscript it may not have any associated declaration. In that case the /// associated declaration is set to nullptr. class MappableComponent { /// Expression associated with the component. Expr *AssociatedExpression = nullptr; /// Declaration associated with the declaration. If the component does /// not have a declaration (e.g. array subscripts or section), this is set /// to nullptr. ValueDecl *AssociatedDeclaration = nullptr; public: explicit MappableComponent() = default; explicit MappableComponent(Expr *AssociatedExpression, ValueDecl *AssociatedDeclaration) : AssociatedExpression(AssociatedExpression), AssociatedDeclaration( AssociatedDeclaration ? cast<ValueDecl>(AssociatedDeclaration->getCanonicalDecl()) : nullptr) {} Expr *getAssociatedExpression() const { return AssociatedExpression; } ValueDecl *getAssociatedDeclaration() const { return AssociatedDeclaration; } }; // List of components of an expression. This first one is the whole // expression and the last one is the base expression. using MappableExprComponentList = SmallVector<MappableComponent, 8>; using MappableExprComponentListRef = ArrayRef<MappableComponent>; // List of all component lists associated to the same base declaration. // E.g. if both 'S.a' and 'S.b' are a mappable expressions, each will have // their component list but the same base declaration 'S'. using MappableExprComponentLists = SmallVector<MappableExprComponentList, 8>; using MappableExprComponentListsRef = ArrayRef<MappableExprComponentList>; protected: // Return the total number of elements in a list of component lists. static unsigned getComponentsTotalNumber(MappableExprComponentListsRef ComponentLists); // Return the total number of elements in a list of declarations. All // declarations are expected to be canonical. static unsigned getUniqueDeclarationsTotalNumber(ArrayRef<const ValueDecl *> Declarations); }; /// This structure contains all sizes needed for by an /// OMPMappableExprListClause. struct OMPMappableExprListSizeTy { /// Number of expressions listed. unsigned NumVars; /// Number of unique base declarations. unsigned NumUniqueDeclarations; /// Number of component lists. unsigned NumComponentLists; /// Total number of expression components. unsigned NumComponents; OMPMappableExprListSizeTy() = default; OMPMappableExprListSizeTy(unsigned NumVars, unsigned NumUniqueDeclarations, unsigned NumComponentLists, unsigned NumComponents) : NumVars(NumVars), NumUniqueDeclarations(NumUniqueDeclarations), NumComponentLists(NumComponentLists), NumComponents(NumComponents) {} }; /// This represents clauses with a list of expressions that are mappable. /// Examples of these clauses are 'map' in /// '#pragma omp target [enter|exit] [data]...' directives, and 'to' and 'from /// in '#pragma omp target update...' directives. template <class T> class OMPMappableExprListClause : public OMPVarListClause<T>, public OMPClauseMappableExprCommon { friend class OMPClauseReader; /// Number of unique declarations in this clause. unsigned NumUniqueDeclarations; /// Number of component lists in this clause. unsigned NumComponentLists; /// Total number of components in this clause. unsigned NumComponents; /// Whether this clause is possible to have user-defined mappers associated. /// It should be true for map, to, and from clauses, and false for /// use_device_ptr and is_device_ptr. const bool SupportsMapper; /// C++ nested name specifier for the associated user-defined mapper. 
NestedNameSpecifierLoc MapperQualifierLoc; /// The associated user-defined mapper identifier information. DeclarationNameInfo MapperIdInfo; protected: /// Build a clause for \a NumUniqueDeclarations declarations, \a /// NumComponentLists total component lists, and \a NumComponents total /// components. /// /// \param K Kind of the clause. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. /// \param SupportsMapper Indicates whether this clause is possible to have /// user-defined mappers associated. /// \param MapperQualifierLocPtr C++ nested name specifier for the associated /// user-defined mapper. /// \param MapperIdInfoPtr The identifier of associated user-defined mapper. OMPMappableExprListClause( OpenMPClauseKind K, const OMPVarListLocTy &Locs, const OMPMappableExprListSizeTy &Sizes, bool SupportsMapper = false, NestedNameSpecifierLoc *MapperQualifierLocPtr = nullptr, DeclarationNameInfo *MapperIdInfoPtr = nullptr) : OMPVarListClause<T>(K, Locs.StartLoc, Locs.LParenLoc, Locs.EndLoc, Sizes.NumVars), NumUniqueDeclarations(Sizes.NumUniqueDeclarations), NumComponentLists(Sizes.NumComponentLists), NumComponents(Sizes.NumComponents), SupportsMapper(SupportsMapper) { if (MapperQualifierLocPtr) MapperQualifierLoc = *MapperQualifierLocPtr; if (MapperIdInfoPtr) MapperIdInfo = *MapperIdInfoPtr; } /// Get the unique declarations that are in the trailing objects of the /// class. MutableArrayRef<ValueDecl *> getUniqueDeclsRef() { return MutableArrayRef<ValueDecl *>( static_cast<T *>(this)->template getTrailingObjects<ValueDecl *>(), NumUniqueDeclarations); } /// Get the unique declarations that are in the trailing objects of the /// class. ArrayRef<ValueDecl *> getUniqueDeclsRef() const { return ArrayRef<ValueDecl *>( static_cast<const T *>(this) ->template getTrailingObjects<ValueDecl *>(), NumUniqueDeclarations); } /// Set the unique declarations that are in the trailing objects of the /// class. void setUniqueDecls(ArrayRef<ValueDecl *> UDs) { assert(UDs.size() == NumUniqueDeclarations && "Unexpected amount of unique declarations."); std::copy(UDs.begin(), UDs.end(), getUniqueDeclsRef().begin()); } /// Get the number of lists per declaration that are in the trailing /// objects of the class. MutableArrayRef<unsigned> getDeclNumListsRef() { return MutableArrayRef<unsigned>( static_cast<T *>(this)->template getTrailingObjects<unsigned>(), NumUniqueDeclarations); } /// Get the number of lists per declaration that are in the trailing /// objects of the class. ArrayRef<unsigned> getDeclNumListsRef() const { return ArrayRef<unsigned>( static_cast<const T *>(this)->template getTrailingObjects<unsigned>(), NumUniqueDeclarations); } /// Set the number of lists per declaration that are in the trailing /// objects of the class. 
void setDeclNumLists(ArrayRef<unsigned> DNLs) { assert(DNLs.size() == NumUniqueDeclarations && "Unexpected amount of list numbers."); std::copy(DNLs.begin(), DNLs.end(), getDeclNumListsRef().begin()); } /// Get the cumulative component lists sizes that are in the trailing /// objects of the class. They are appended after the number of lists. MutableArrayRef<unsigned> getComponentListSizesRef() { return MutableArrayRef<unsigned>( static_cast<T *>(this)->template getTrailingObjects<unsigned>() + NumUniqueDeclarations, NumComponentLists); } /// Get the cumulative component lists sizes that are in the trailing /// objects of the class. They are appended after the number of lists. ArrayRef<unsigned> getComponentListSizesRef() const { return ArrayRef<unsigned>( static_cast<const T *>(this)->template getTrailingObjects<unsigned>() + NumUniqueDeclarations, NumComponentLists); } /// Set the cumulative component lists sizes that are in the trailing /// objects of the class. void setComponentListSizes(ArrayRef<unsigned> CLSs) { assert(CLSs.size() == NumComponentLists && "Unexpected amount of component lists."); std::copy(CLSs.begin(), CLSs.end(), getComponentListSizesRef().begin()); } /// Get the components that are in the trailing objects of the class. MutableArrayRef<MappableComponent> getComponentsRef() { return MutableArrayRef<MappableComponent>( static_cast<T *>(this) ->template getTrailingObjects<MappableComponent>(), NumComponents); } /// Get the components that are in the trailing objects of the class. ArrayRef<MappableComponent> getComponentsRef() const { return ArrayRef<MappableComponent>( static_cast<const T *>(this) ->template getTrailingObjects<MappableComponent>(), NumComponents); } /// Set the components that are in the trailing objects of the class. /// This requires the list sizes so that it can also fill the original /// expressions, which are the first component of each list. void setComponents(ArrayRef<MappableComponent> Components, ArrayRef<unsigned> CLSs) { assert(Components.size() == NumComponents && "Unexpected amount of component lists."); assert(CLSs.size() == NumComponentLists && "Unexpected amount of list sizes."); std::copy(Components.begin(), Components.end(), getComponentsRef().begin()); } /// Fill the clause information from the list of declarations and /// associated component lists. void setClauseInfo(ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists) { // Perform some checks to make sure the data sizes are consistent with the // information available when the clause was created. assert(getUniqueDeclarationsTotalNumber(Declarations) == NumUniqueDeclarations && "Unexpected number of mappable expression info entries!"); assert(getComponentsTotalNumber(ComponentLists) == NumComponents && "Unexpected total number of components!"); assert(Declarations.size() == ComponentLists.size() && "Declaration and component lists size is not consistent!"); assert(Declarations.size() == NumComponentLists && "Unexpected declaration and component lists size!"); // Organize the components by declaration and retrieve the original // expression. Original expressions are always the first component of the // mappable component list. 
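    // Worked illustration of the encoding filled in below (hypothetical
    // numbers): two declarations, the first with one list of 2 components and
    // the second with one list of 3 components, are stored as
    //   UniqueDecls        = {D1, D2}
    //   DeclNumLists       = {1, 1}
    //   ComponentListSizes = {2, 5}   // cumulative component counts
    //   Components         = {c1, c2, c3, c4, c5}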
llvm::MapVector<ValueDecl *, SmallVector<MappableExprComponentListRef, 8>> ComponentListMap; { auto CI = ComponentLists.begin(); for (auto DI = Declarations.begin(), DE = Declarations.end(); DI != DE; ++DI, ++CI) { assert(!CI->empty() && "Invalid component list!"); ComponentListMap[*DI].push_back(*CI); } } // Iterators of the target storage. auto UniqueDeclarations = getUniqueDeclsRef(); auto UDI = UniqueDeclarations.begin(); auto DeclNumLists = getDeclNumListsRef(); auto DNLI = DeclNumLists.begin(); auto ComponentListSizes = getComponentListSizesRef(); auto CLSI = ComponentListSizes.begin(); auto Components = getComponentsRef(); auto CI = Components.begin(); // Variable to compute the accumulation of the number of components. unsigned PrevSize = 0u; // Scan all the declarations and associated component lists. for (auto &M : ComponentListMap) { // The declaration. auto *D = M.first; // The component lists. auto CL = M.second; // Initialize the entry. *UDI = D; ++UDI; *DNLI = CL.size(); ++DNLI; // Obtain the cumulative sizes and concatenate all the components in the // reserved storage. for (auto C : CL) { // Accumulate with the previous size. PrevSize += C.size(); // Save the size. *CLSI = PrevSize; ++CLSI; // Append components after the current components iterator. CI = std::copy(C.begin(), C.end(), CI); } } } /// Set the nested name specifier of associated user-defined mapper. void setMapperQualifierLoc(NestedNameSpecifierLoc NNSL) { MapperQualifierLoc = NNSL; } /// Set the name of associated user-defined mapper. void setMapperIdInfo(DeclarationNameInfo MapperId) { MapperIdInfo = MapperId; } /// Get the user-defined mapper references that are in the trailing objects of /// the class. MutableArrayRef<Expr *> getUDMapperRefs() { assert(SupportsMapper && "Must be a clause that is possible to have user-defined mappers"); return llvm::makeMutableArrayRef<Expr *>( static_cast<T *>(this)->template getTrailingObjects<Expr *>() + OMPVarListClause<T>::varlist_size(), OMPVarListClause<T>::varlist_size()); } /// Get the user-defined mappers references that are in the trailing objects /// of the class. ArrayRef<Expr *> getUDMapperRefs() const { assert(SupportsMapper && "Must be a clause that is possible to have user-defined mappers"); return llvm::makeArrayRef<Expr *>( static_cast<const T *>(this)->template getTrailingObjects<Expr *>() + OMPVarListClause<T>::varlist_size(), OMPVarListClause<T>::varlist_size()); } /// Set the user-defined mappers that are in the trailing objects of the /// class. void setUDMapperRefs(ArrayRef<Expr *> DMDs) { assert(DMDs.size() == OMPVarListClause<T>::varlist_size() && "Unexpected number of user-defined mappers."); assert(SupportsMapper && "Must be a clause that is possible to have user-defined mappers"); std::copy(DMDs.begin(), DMDs.end(), getUDMapperRefs().begin()); } public: /// Return the number of unique base declarations in this clause. unsigned getUniqueDeclarationsNum() const { return NumUniqueDeclarations; } /// Return the number of lists derived from the clause expressions. unsigned getTotalComponentListNum() const { return NumComponentLists; } /// Return the total number of components in all lists derived from the /// clause. unsigned getTotalComponentsNum() const { return NumComponents; } /// Gets the nested name specifier for associated user-defined mapper. NestedNameSpecifierLoc getMapperQualifierLoc() const { return MapperQualifierLoc; } /// Gets the name info for associated user-defined mapper. 
const DeclarationNameInfo &getMapperIdInfo() const { return MapperIdInfo; } /// Iterator that browse the components by lists. It also allows /// browsing components of a single declaration. class const_component_lists_iterator : public llvm::iterator_adaptor_base< const_component_lists_iterator, MappableExprComponentListRef::const_iterator, std::forward_iterator_tag, MappableComponent, ptrdiff_t, MappableComponent, MappableComponent> { // The declaration the iterator currently refers to. ArrayRef<ValueDecl *>::iterator DeclCur; // The list number associated with the current declaration. ArrayRef<unsigned>::iterator NumListsCur; // Whether this clause is possible to have user-defined mappers associated. const bool SupportsMapper; // The user-defined mapper associated with the current declaration. ArrayRef<Expr *>::iterator MapperCur; // Remaining lists for the current declaration. unsigned RemainingLists = 0; // The cumulative size of the previous list, or zero if there is no previous // list. unsigned PrevListSize = 0; // The cumulative sizes of the current list - it will delimit the remaining // range of interest. ArrayRef<unsigned>::const_iterator ListSizeCur; ArrayRef<unsigned>::const_iterator ListSizeEnd; // Iterator to the end of the components storage. MappableExprComponentListRef::const_iterator End; public: /// Construct an iterator that scans all lists. explicit const_component_lists_iterator( ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum, ArrayRef<unsigned> CumulativeListSizes, MappableExprComponentListRef Components, bool SupportsMapper, ArrayRef<Expr *> Mappers) : const_component_lists_iterator::iterator_adaptor_base( Components.begin()), DeclCur(UniqueDecls.begin()), NumListsCur(DeclsListNum.begin()), SupportsMapper(SupportsMapper), ListSizeCur(CumulativeListSizes.begin()), ListSizeEnd(CumulativeListSizes.end()), End(Components.end()) { assert(UniqueDecls.size() == DeclsListNum.size() && "Inconsistent number of declarations and list sizes!"); if (!DeclsListNum.empty()) RemainingLists = *NumListsCur; if (SupportsMapper) MapperCur = Mappers.begin(); } /// Construct an iterator that scan lists for a given declaration \a /// Declaration. explicit const_component_lists_iterator( const ValueDecl *Declaration, ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum, ArrayRef<unsigned> CumulativeListSizes, MappableExprComponentListRef Components, bool SupportsMapper, ArrayRef<Expr *> Mappers) : const_component_lists_iterator(UniqueDecls, DeclsListNum, CumulativeListSizes, Components, SupportsMapper, Mappers) { // Look for the desired declaration. While we are looking for it, we // update the state so that we know the component where a given list // starts. for (; DeclCur != UniqueDecls.end(); ++DeclCur, ++NumListsCur) { if (*DeclCur == Declaration) break; assert(*NumListsCur > 0 && "No lists associated with declaration??"); // Skip the lists associated with the current declaration, but save the // last list size that was skipped. std::advance(ListSizeCur, *NumListsCur - 1); PrevListSize = *ListSizeCur; ++ListSizeCur; if (SupportsMapper) ++MapperCur; } // If we didn't find any declaration, advance the iterator to after the // last component and set remaining lists to zero. if (ListSizeCur == CumulativeListSizes.end()) { this->I = End; RemainingLists = 0u; return; } // Set the remaining lists with the total number of lists of the current // declaration. RemainingLists = *NumListsCur; // Adjust the list size end iterator to the end of the relevant range. 
ListSizeEnd = ListSizeCur; std::advance(ListSizeEnd, RemainingLists); // Given that the list sizes are cumulative, the index of the component // that start the list is the size of the previous list. std::advance(this->I, PrevListSize); } // Return the array with the current list. The sizes are cumulative, so the // array size is the difference between the current size and previous one. std::tuple<const ValueDecl *, MappableExprComponentListRef, const ValueDecl *> operator*() const { assert(ListSizeCur != ListSizeEnd && "Invalid iterator!"); const ValueDecl *Mapper = nullptr; if (SupportsMapper && *MapperCur) Mapper = cast<ValueDecl>(cast<DeclRefExpr>(*MapperCur)->getDecl()); return std::make_tuple( *DeclCur, MappableExprComponentListRef(&*this->I, *ListSizeCur - PrevListSize), Mapper); } std::tuple<const ValueDecl *, MappableExprComponentListRef, const ValueDecl *> operator->() const { return **this; } // Skip the components of the current list. const_component_lists_iterator &operator++() { assert(ListSizeCur != ListSizeEnd && RemainingLists && "Invalid iterator!"); // If we don't have more lists just skip all the components. Otherwise, // advance the iterator by the number of components in the current list. if (std::next(ListSizeCur) == ListSizeEnd) { this->I = End; RemainingLists = 0; } else { std::advance(this->I, *ListSizeCur - PrevListSize); PrevListSize = *ListSizeCur; // We are done with a declaration, move to the next one. if (!(--RemainingLists)) { ++DeclCur; ++NumListsCur; if (SupportsMapper) ++MapperCur; RemainingLists = *NumListsCur; assert(RemainingLists && "No lists in the following declaration??"); } } ++ListSizeCur; return *this; } }; using const_component_lists_range = llvm::iterator_range<const_component_lists_iterator>; /// Iterators for all component lists. const_component_lists_iterator component_lists_begin() const { return const_component_lists_iterator( getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(), getComponentsRef(), SupportsMapper, SupportsMapper ? getUDMapperRefs() : llvm::None); } const_component_lists_iterator component_lists_end() const { return const_component_lists_iterator( ArrayRef<ValueDecl *>(), ArrayRef<unsigned>(), ArrayRef<unsigned>(), MappableExprComponentListRef(getComponentsRef().end(), getComponentsRef().end()), SupportsMapper, llvm::None); } const_component_lists_range component_lists() const { return {component_lists_begin(), component_lists_end()}; } /// Iterators for component lists associated with the provided /// declaration. const_component_lists_iterator decl_component_lists_begin(const ValueDecl *VD) const { return const_component_lists_iterator( VD, getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(), getComponentsRef(), SupportsMapper, SupportsMapper ? getUDMapperRefs() : llvm::None); } const_component_lists_iterator decl_component_lists_end() const { return component_lists_end(); } const_component_lists_range decl_component_lists(const ValueDecl *VD) const { return {decl_component_lists_begin(VD), decl_component_lists_end()}; } /// Iterators to access all the declarations, number of lists, list sizes, and /// components. 
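  // Iteration sketch over the per-declaration component lists (assumes a
  // built mappable clause `MC`; names are illustrative). Each element is the
  // tuple produced by const_component_lists_iterator::operator*():
  //
  //   for (const auto &L : MC->component_lists()) {
  //     const ValueDecl *Base = std::get<0>(L);              // base declaration
  //     MappableExprComponentListRef Comps = std::get<1>(L); // its components
  //     const ValueDecl *Mapper = std::get<2>(L);            // may be null
  //     (void)Base; (void)Comps; (void)Mapper;
  //   }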
using const_all_decls_iterator = ArrayRef<ValueDecl *>::iterator; using const_all_decls_range = llvm::iterator_range<const_all_decls_iterator>; const_all_decls_range all_decls() const { auto A = getUniqueDeclsRef(); return const_all_decls_range(A.begin(), A.end()); } using const_all_num_lists_iterator = ArrayRef<unsigned>::iterator; using const_all_num_lists_range = llvm::iterator_range<const_all_num_lists_iterator>; const_all_num_lists_range all_num_lists() const { auto A = getDeclNumListsRef(); return const_all_num_lists_range(A.begin(), A.end()); } using const_all_lists_sizes_iterator = ArrayRef<unsigned>::iterator; using const_all_lists_sizes_range = llvm::iterator_range<const_all_lists_sizes_iterator>; const_all_lists_sizes_range all_lists_sizes() const { auto A = getComponentListSizesRef(); return const_all_lists_sizes_range(A.begin(), A.end()); } using const_all_components_iterator = ArrayRef<MappableComponent>::iterator; using const_all_components_range = llvm::iterator_range<const_all_components_iterator>; const_all_components_range all_components() const { auto A = getComponentsRef(); return const_all_components_range(A.begin(), A.end()); } using mapperlist_iterator = MutableArrayRef<Expr *>::iterator; using mapperlist_const_iterator = ArrayRef<const Expr *>::iterator; using mapperlist_range = llvm::iterator_range<mapperlist_iterator>; using mapperlist_const_range = llvm::iterator_range<mapperlist_const_iterator>; mapperlist_iterator mapperlist_begin() { return getUDMapperRefs().begin(); } mapperlist_iterator mapperlist_end() { return getUDMapperRefs().end(); } mapperlist_const_iterator mapperlist_begin() const { return getUDMapperRefs().begin(); } mapperlist_const_iterator mapperlist_end() const { return getUDMapperRefs().end(); } mapperlist_range mapperlists() { return mapperlist_range(mapperlist_begin(), mapperlist_end()); } mapperlist_const_range mapperlists() const { return mapperlist_const_range(mapperlist_begin(), mapperlist_end()); } }; /// This represents clause 'map' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp target map(a,b) /// \endcode /// In this example directive '#pragma omp target' has clause 'map' /// with the variables 'a' and 'b'. class OMPMapClause final : public OMPMappableExprListClause<OMPMapClause>, private llvm::TrailingObjects< OMPMapClause, Expr *, ValueDecl *, unsigned, OMPClauseMappableExprCommon::MappableComponent> { friend class OMPClauseReader; friend OMPMappableExprListClause; friend OMPVarListClause; friend TrailingObjects; /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. size_t numTrailingObjects(OverloadToken<Expr *>) const { // There are varlist_size() of expressions, and varlist_size() of // user-defined mappers. return 2 * varlist_size(); } size_t numTrailingObjects(OverloadToken<ValueDecl *>) const { return getUniqueDeclarationsNum(); } size_t numTrailingObjects(OverloadToken<unsigned>) const { return getUniqueDeclarationsNum() + getTotalComponentListNum(); } private: /// Map-type-modifiers for the 'map' clause. OpenMPMapModifierKind MapTypeModifiers[NumberOfOMPMapClauseModifiers] = { OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown}; /// Location of map-type-modifiers for the 'map' clause. SourceLocation MapTypeModifiersLoc[NumberOfOMPMapClauseModifiers]; /// Map type for the 'map' clause. 
OpenMPMapClauseKind MapType = OMPC_MAP_unknown; /// Is this an implicit map type or not. bool MapTypeIsImplicit = false; /// Location of the map type. SourceLocation MapLoc; /// Colon location. SourceLocation ColonLoc; /// Build a clause for \a NumVars listed expressions, \a /// NumUniqueDeclarations declarations, \a NumComponentLists total component /// lists, and \a NumComponents total expression components. /// /// \param MapModifiers Map-type-modifiers. /// \param MapModifiersLoc Locations of map-type-modifiers. /// \param MapperQualifierLoc C++ nested name specifier for the associated /// user-defined mapper. /// \param MapperIdInfo The identifier of associated user-defined mapper. /// \param MapType Map type. /// \param MapTypeIsImplicit Map type is inferred implicitly. /// \param MapLoc Location of the map type. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPMapClause(ArrayRef<OpenMPMapModifierKind> MapModifiers, ArrayRef<SourceLocation> MapModifiersLoc, NestedNameSpecifierLoc MapperQualifierLoc, DeclarationNameInfo MapperIdInfo, OpenMPMapClauseKind MapType, bool MapTypeIsImplicit, SourceLocation MapLoc, const OMPVarListLocTy &Locs, const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_map, Locs, Sizes, /*SupportsMapper=*/true, &MapperQualifierLoc, &MapperIdInfo), MapType(MapType), MapTypeIsImplicit(MapTypeIsImplicit), MapLoc(MapLoc) { assert(llvm::array_lengthof(MapTypeModifiers) == MapModifiers.size() && "Unexpected number of map type modifiers."); llvm::copy(MapModifiers, std::begin(MapTypeModifiers)); assert(llvm::array_lengthof(MapTypeModifiersLoc) == MapModifiersLoc.size() && "Unexpected number of map type modifier locations."); llvm::copy(MapModifiersLoc, std::begin(MapTypeModifiersLoc)); } /// Build an empty clause. /// /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPMapClause(const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_map, OMPVarListLocTy(), Sizes, /*SupportsMapper=*/true) {} /// Set map-type-modifier for the clause. /// /// \param I index for map-type-modifier. /// \param T map-type-modifier for the clause. void setMapTypeModifier(unsigned I, OpenMPMapModifierKind T) { assert(I < NumberOfOMPMapClauseModifiers && "Unexpected index to store map type modifier, exceeds array size."); MapTypeModifiers[I] = T; } /// Set location for the map-type-modifier. /// /// \param I index for map-type-modifier location. /// \param TLoc map-type-modifier location. 
void setMapTypeModifierLoc(unsigned I, SourceLocation TLoc) { assert(I < NumberOfOMPMapClauseModifiers && "Index to store map type modifier location exceeds array size."); MapTypeModifiersLoc[I] = TLoc; } /// Set type for the clause. /// /// \param T Type for the clause. void setMapType(OpenMPMapClauseKind T) { MapType = T; } /// Set type location. /// /// \param TLoc Type location. void setMapLoc(SourceLocation TLoc) { MapLoc = TLoc; } /// Set colon location. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Vars The original expression used in the clause. /// \param Declarations Declarations used in the clause. /// \param ComponentLists Component lists used in the clause. /// \param UDMapperRefs References to user-defined mappers associated with /// expressions used in the clause. /// \param MapModifiers Map-type-modifiers. /// \param MapModifiersLoc Location of map-type-modifiers. /// \param UDMQualifierLoc C++ nested name specifier for the associated /// user-defined mapper. /// \param MapperId The identifier of associated user-defined mapper. /// \param Type Map type. /// \param TypeIsImplicit Map type is inferred implicitly. /// \param TypeLoc Location of the map type. static OMPMapClause * Create(const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists, ArrayRef<Expr *> UDMapperRefs, ArrayRef<OpenMPMapModifierKind> MapModifiers, ArrayRef<SourceLocation> MapModifiersLoc, NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId, OpenMPMapClauseKind Type, bool TypeIsImplicit, SourceLocation TypeLoc); /// Creates an empty clause with the place for \a NumVars original /// expressions, \a NumUniqueDeclarations declarations, \NumComponentLists /// lists, and \a NumComponents expression components. /// /// \param C AST context. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. static OMPMapClause *CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes); /// Fetches mapping kind for the clause. OpenMPMapClauseKind getMapType() const LLVM_READONLY { return MapType; } /// Is this an implicit map type? /// We have to capture 'IsMapTypeImplicit' from the parser for more /// informative error messages. It helps distinguish map(r) from /// map(tofrom: r), which is important to print more helpful error /// messages for some target directives. bool isImplicitMapType() const LLVM_READONLY { return MapTypeIsImplicit; } /// Fetches the map-type-modifier at 'Cnt' index of array of modifiers. /// /// \param Cnt index for map-type-modifier. OpenMPMapModifierKind getMapTypeModifier(unsigned Cnt) const LLVM_READONLY { assert(Cnt < NumberOfOMPMapClauseModifiers && "Requested modifier exceeds the total number of modifiers."); return MapTypeModifiers[Cnt]; } /// Fetches the map-type-modifier location at 'Cnt' index of array of /// modifiers' locations. 
/// /// \param Cnt index for map-type-modifier location. SourceLocation getMapTypeModifierLoc(unsigned Cnt) const LLVM_READONLY { assert(Cnt < NumberOfOMPMapClauseModifiers && "Requested modifier location exceeds total number of modifiers."); return MapTypeModifiersLoc[Cnt]; } /// Fetches ArrayRef of map-type-modifiers. ArrayRef<OpenMPMapModifierKind> getMapTypeModifiers() const LLVM_READONLY { return llvm::makeArrayRef(MapTypeModifiers); } /// Fetches ArrayRef of location of map-type-modifiers. ArrayRef<SourceLocation> getMapTypeModifiersLoc() const LLVM_READONLY { return llvm::makeArrayRef(MapTypeModifiersLoc); } /// Fetches location of clause mapping kind. SourceLocation getMapLoc() const LLVM_READONLY { return MapLoc; } /// Get colon location. SourceLocation getColonLoc() const { return ColonLoc; } child_range children() { return child_range( reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPMapClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { if (MapType == OMPC_MAP_to || MapType == OMPC_MAP_tofrom) return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { auto Children = const_cast<OMPMapClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_map; } }; /// This represents 'num_teams' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp teams num_teams(n) /// \endcode /// In this example directive '#pragma omp teams' has clause 'num_teams' /// with single expression 'n'. class OMPNumTeamsClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// NumTeams number. Stmt *NumTeams = nullptr; /// Set the NumTeams number. /// /// \param E NumTeams number. void setNumTeams(Expr *E) { NumTeams = E; } public: /// Build 'num_teams' clause. /// /// \param E Expression associated with this clause. /// \param HelperE Helper Expression associated with this clause. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPNumTeamsClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_num_teams, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumTeams(E) { setPreInitStmt(HelperE, CaptureRegion); } /// Build an empty clause. OMPNumTeamsClause() : OMPClause(llvm::omp::OMPC_num_teams, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return NumTeams number. Expr *getNumTeams() { return cast<Expr>(NumTeams); } /// Return NumTeams number. 
Expr *getNumTeams() const { return cast<Expr>(NumTeams); } child_range children() { return child_range(&NumTeams, &NumTeams + 1); } const_child_range children() const { return const_child_range(&NumTeams, &NumTeams + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_num_teams; } }; /// This represents 'thread_limit' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp teams thread_limit(n) /// \endcode /// In this example directive '#pragma omp teams' has clause 'thread_limit' /// with single expression 'n'. class OMPThreadLimitClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// ThreadLimit number. Stmt *ThreadLimit = nullptr; /// Set the ThreadLimit number. /// /// \param E ThreadLimit number. void setThreadLimit(Expr *E) { ThreadLimit = E; } public: /// Build 'thread_limit' clause. /// /// \param E Expression associated with this clause. /// \param HelperE Helper Expression associated with this clause. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPThreadLimitClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_thread_limit, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadLimit(E) { setPreInitStmt(HelperE, CaptureRegion); } /// Build an empty clause. OMPThreadLimitClause() : OMPClause(llvm::omp::OMPC_thread_limit, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return ThreadLimit number. Expr *getThreadLimit() { return cast<Expr>(ThreadLimit); } /// Return ThreadLimit number. Expr *getThreadLimit() const { return cast<Expr>(ThreadLimit); } child_range children() { return child_range(&ThreadLimit, &ThreadLimit + 1); } const_child_range children() const { return const_child_range(&ThreadLimit, &ThreadLimit + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_thread_limit; } }; /// This represents 'priority' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp task priority(n) /// \endcode /// In this example directive '#pragma omp task' has clause 'priority' with /// single expression 'n'. class OMPPriorityClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Priority number. Stmt *Priority = nullptr; /// Set the Priority number. /// /// \param E Priority number. void setPriority(Expr *E) { Priority = E; } public: /// Build 'priority' clause. /// /// \param Priority Expression associated with this clause.
/// \param HelperPriority Helper priority for the construct. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPPriorityClause(Expr *Priority, Stmt *HelperPriority, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_priority, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Priority(Priority) { setPreInitStmt(HelperPriority, CaptureRegion); } /// Build an empty clause. OMPPriorityClause() : OMPClause(llvm::omp::OMPC_priority, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return Priority number. Expr *getPriority() { return cast<Expr>(Priority); } /// Return Priority number. Expr *getPriority() const { return cast<Expr>(Priority); } child_range children() { return child_range(&Priority, &Priority + 1); } const_child_range children() const { return const_child_range(&Priority, &Priority + 1); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPPriorityClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_priority; } }; /// This represents 'grainsize' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp taskloop grainsize(4) /// \endcode /// In this example directive '#pragma omp taskloop' has clause 'grainsize' /// with single expression '4'. class OMPGrainsizeClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Grainsize expression. Stmt *Grainsize = nullptr; /// Set the grainsize expression. void setGrainsize(Expr *Size) { Grainsize = Size; } public: /// Build 'grainsize' clause. /// /// \param Size Expression associated with this clause. /// \param HelperSize Helper grainsize for the construct. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPGrainsizeClause(Expr *Size, Stmt *HelperSize, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_grainsize, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Grainsize(Size) { setPreInitStmt(HelperSize, CaptureRegion); } /// Build an empty clause. explicit OMPGrainsizeClause() : OMPClause(llvm::omp::OMPC_grainsize, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the grainsize expression.
Expr *getGrainsize() const { return cast_or_null<Expr>(Grainsize); } child_range children() { return child_range(&Grainsize, &Grainsize + 1); } const_child_range children() const { return const_child_range(&Grainsize, &Grainsize + 1); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPGrainsizeClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_grainsize; } }; /// This represents 'nogroup' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp taskloop nogroup /// \endcode /// In this example directive '#pragma omp taskloop' has 'nogroup' clause. class OMPNogroupClause : public OMPClause { public: /// Build 'nogroup' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_nogroup, StartLoc, EndLoc) {} /// Build an empty clause. OMPNogroupClause() : OMPClause(llvm::omp::OMPC_nogroup, SourceLocation(), SourceLocation()) { } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_nogroup; } }; /// This represents 'num_tasks' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp taskloop num_tasks(4) /// \endcode /// In this example directive '#pragma omp taskloop' has clause 'num_tasks' /// with single expression '4'. class OMPNumTasksClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Number of tasks. Stmt *NumTasks = nullptr; /// Set the number of tasks. void setNumTasks(Expr *Size) { NumTasks = Size; } public: /// Build 'num_tasks' clause. /// /// \param Size Expression associated with this clause. /// \param HelperSize Helper num_tasks expression for the construct. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPNumTasksClause(Expr *Size, Stmt *HelperSize, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_num_tasks, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumTasks(Size) { setPreInitStmt(HelperSize, CaptureRegion); } /// Build an empty clause. explicit OMPNumTasksClause() : OMPClause(llvm::omp::OMPC_num_tasks, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the number of tasks.
Expr *getNumTasks() const { return cast_or_null<Expr>(NumTasks); } child_range children() { return child_range(&NumTasks, &NumTasks + 1); } const_child_range children() const { return const_child_range(&NumTasks, &NumTasks + 1); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPNumTasksClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_num_tasks; } }; /// This represents 'hint' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp critical (name) hint(6) /// \endcode /// In this example directive '#pragma omp critical' has name 'name' and clause /// 'hint' with argument '6'. class OMPHintClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Hint expression of the 'hint' clause. Stmt *Hint = nullptr; /// Set hint expression. void setHint(Expr *H) { Hint = H; } public: /// Build 'hint' clause with expression \a Hint. /// /// \param Hint Hint expression. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_hint, StartLoc, EndLoc), LParenLoc(LParenLoc), Hint(Hint) {} /// Build an empty clause. OMPHintClause() : OMPClause(llvm::omp::OMPC_hint, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns the hint expression of the clause. Expr *getHint() const { return cast_or_null<Expr>(Hint); } child_range children() { return child_range(&Hint, &Hint + 1); } const_child_range children() const { return const_child_range(&Hint, &Hint + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_hint; } }; /// This represents 'dist_schedule' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp distribute dist_schedule(static, 3) /// \endcode /// In this example directive '#pragma omp distribute' has 'dist_schedule' /// clause with arguments 'static' and '3'. class OMPDistScheduleClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'dist_schedule' clause. OpenMPDistScheduleClauseKind Kind = OMPC_DIST_SCHEDULE_unknown; /// Start location of the schedule kind in source code. SourceLocation KindLoc; /// Location of ',' (if any). SourceLocation CommaLoc; /// Chunk size. Expr *ChunkSize = nullptr; /// Set schedule kind. /// /// \param K Schedule kind. void setDistScheduleKind(OpenMPDistScheduleClauseKind K) { Kind = K; } /// Sets the location of '('. /// /// \param Loc Location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Set schedule kind start location. /// /// \param KLoc Schedule kind location. void setDistScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; } /// Set location of ','. /// /// \param Loc Location of ','. void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; } /// Set chunk size.
/// /// \param E Chunk size. void setChunkSize(Expr *E) { ChunkSize = E; } public: /// Build 'dist_schedule' clause with schedule kind \a Kind and chunk /// size expression \a ChunkSize. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param KLoc Starting location of the argument. /// \param CommaLoc Location of ','. /// \param EndLoc Ending location of the clause. /// \param Kind DistSchedule kind. /// \param ChunkSize Chunk size. /// \param HelperChunkSize Helper chunk size for combined directives. OMPDistScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KLoc, SourceLocation CommaLoc, SourceLocation EndLoc, OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, Stmt *HelperChunkSize) : OMPClause(llvm::omp::OMPC_dist_schedule, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) { setPreInitStmt(HelperChunkSize); } /// Build an empty clause. explicit OMPDistScheduleClause() : OMPClause(llvm::omp::OMPC_dist_schedule, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Get kind of the clause. OpenMPDistScheduleClauseKind getDistScheduleKind() const { return Kind; } /// Get location of '('. SourceLocation getLParenLoc() { return LParenLoc; } /// Get kind location. SourceLocation getDistScheduleKindLoc() { return KindLoc; } /// Get location of ','. SourceLocation getCommaLoc() { return CommaLoc; } /// Get chunk size. Expr *getChunkSize() { return ChunkSize; } /// Get chunk size. const Expr *getChunkSize() const { return ChunkSize; } child_range children() { return child_range(reinterpret_cast<Stmt **>(&ChunkSize), reinterpret_cast<Stmt **>(&ChunkSize) + 1); } const_child_range children() const { auto Children = const_cast<OMPDistScheduleClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_dist_schedule; } }; /// This represents 'defaultmap' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp target defaultmap(tofrom: scalar) /// \endcode /// In this example directive '#pragma omp target' has 'defaultmap' clause of kind /// 'scalar' with modifier 'tofrom'. class OMPDefaultmapClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Modifiers for 'defaultmap' clause. OpenMPDefaultmapClauseModifier Modifier = OMPC_DEFAULTMAP_MODIFIER_unknown; /// Locations of modifiers. SourceLocation ModifierLoc; /// A kind of the 'defaultmap' clause. OpenMPDefaultmapClauseKind Kind = OMPC_DEFAULTMAP_unknown; /// Start location of the defaultmap kind in source code. SourceLocation KindLoc; /// Set defaultmap kind. /// /// \param K Defaultmap kind. void setDefaultmapKind(OpenMPDefaultmapClauseKind K) { Kind = K; } /// Set the defaultmap modifier. /// /// \param M Defaultmap modifier. void setDefaultmapModifier(OpenMPDefaultmapClauseModifier M) { Modifier = M; } /// Set location of the defaultmap modifier. void setDefaultmapModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; } /// Sets the location of '('. /// /// \param Loc Location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Set defaultmap kind start location. 
/// /// \param KLoc Defaultmap kind location. void setDefaultmapKindLoc(SourceLocation KLoc) { KindLoc = KLoc; } public: /// Build 'defaultmap' clause with defaultmap kind \a Kind /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param KLoc Starting location of the argument. /// \param EndLoc Ending location of the clause. /// \param Kind Defaultmap kind. /// \param M The modifier applied to 'defaultmap' clause. /// \param MLoc Location of the modifier OMPDefaultmapClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KLoc, SourceLocation EndLoc, OpenMPDefaultmapClauseKind Kind, OpenMPDefaultmapClauseModifier M) : OMPClause(llvm::omp::OMPC_defaultmap, StartLoc, EndLoc), LParenLoc(LParenLoc), Modifier(M), ModifierLoc(MLoc), Kind(Kind), KindLoc(KLoc) {} /// Build an empty clause. explicit OMPDefaultmapClause() : OMPClause(llvm::omp::OMPC_defaultmap, SourceLocation(), SourceLocation()) {} /// Get kind of the clause. OpenMPDefaultmapClauseKind getDefaultmapKind() const { return Kind; } /// Get the modifier of the clause. OpenMPDefaultmapClauseModifier getDefaultmapModifier() const { return Modifier; } /// Get location of '('. SourceLocation getLParenLoc() { return LParenLoc; } /// Get kind location. SourceLocation getDefaultmapKindLoc() { return KindLoc; } /// Get the modifier location. SourceLocation getDefaultmapModifierLoc() const { return ModifierLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_defaultmap; } }; /// This represents clause 'to' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp target update to(a,b) /// \endcode /// In this example directive '#pragma omp target update' has clause 'to' /// with the variables 'a' and 'b'. class OMPToClause final : public OMPMappableExprListClause<OMPToClause>, private llvm::TrailingObjects< OMPToClause, Expr *, ValueDecl *, unsigned, OMPClauseMappableExprCommon::MappableComponent> { friend class OMPClauseReader; friend OMPMappableExprListClause; friend OMPVarListClause; friend TrailingObjects; /// Motion-modifiers for the 'to' clause. OpenMPMotionModifierKind MotionModifiers[NumberOfOMPMotionModifiers] = { OMPC_MOTION_MODIFIER_unknown, OMPC_MOTION_MODIFIER_unknown}; /// Location of motion-modifiers for the 'to' clause. SourceLocation MotionModifiersLoc[NumberOfOMPMotionModifiers]; /// Colon location. SourceLocation ColonLoc; /// Build clause with number of variables \a NumVars. /// /// \param TheMotionModifiers Motion-modifiers. /// \param TheMotionModifiersLoc Locations of motion-modifiers. /// \param MapperQualifierLoc C++ nested name specifier for the associated /// user-defined mapper. /// \param MapperIdInfo The identifier of associated user-defined mapper. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Sizes All required sizes to build a mappable clause. 
It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPToClause(ArrayRef<OpenMPMotionModifierKind> TheMotionModifiers, ArrayRef<SourceLocation> TheMotionModifiersLoc, NestedNameSpecifierLoc MapperQualifierLoc, DeclarationNameInfo MapperIdInfo, const OMPVarListLocTy &Locs, const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_to, Locs, Sizes, /*SupportsMapper=*/true, &MapperQualifierLoc, &MapperIdInfo) { assert(llvm::array_lengthof(MotionModifiers) == TheMotionModifiers.size() && "Unexpected number of motion modifiers."); llvm::copy(TheMotionModifiers, std::begin(MotionModifiers)); assert(llvm::array_lengthof(MotionModifiersLoc) == TheMotionModifiersLoc.size() && "Unexpected number of motion modifier locations."); llvm::copy(TheMotionModifiersLoc, std::begin(MotionModifiersLoc)); } /// Build an empty clause. /// /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPToClause(const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_to, OMPVarListLocTy(), Sizes, /*SupportsMapper=*/true) {} /// Set motion-modifier for the clause. /// /// \param I index for motion-modifier. /// \param T motion-modifier for the clause. void setMotionModifier(unsigned I, OpenMPMotionModifierKind T) { assert(I < NumberOfOMPMotionModifiers && "Unexpected index to store motion modifier, exceeds array size."); MotionModifiers[I] = T; } /// Set location for the motion-modifier. /// /// \param I index for motion-modifier location. /// \param TLoc motion-modifier location. void setMotionModifierLoc(unsigned I, SourceLocation TLoc) { assert(I < NumberOfOMPMotionModifiers && "Index to store motion modifier location exceeds array size."); MotionModifiersLoc[I] = TLoc; } /// Set colon location. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. size_t numTrailingObjects(OverloadToken<Expr *>) const { // There are varlist_size() of expressions, and varlist_size() of // user-defined mappers. return 2 * varlist_size(); } size_t numTrailingObjects(OverloadToken<ValueDecl *>) const { return getUniqueDeclarationsNum(); } size_t numTrailingObjects(OverloadToken<unsigned>) const { return getUniqueDeclarationsNum() + getTotalComponentListNum(); } public: /// Creates clause with a list of variables \a Vars. /// /// \param C AST context. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Vars The original expression used in the clause. /// \param Declarations Declarations used in the clause. /// \param ComponentLists Component lists used in the clause. /// \param MotionModifiers Motion-modifiers. /// \param MotionModifiersLoc Location of motion-modifiers. 
/// \param UDMapperRefs References to user-defined mappers associated with /// expressions used in the clause. /// \param UDMQualifierLoc C++ nested name specifier for the associated /// user-defined mapper. /// \param MapperId The identifier of associated user-defined mapper. static OMPToClause *Create(const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists, ArrayRef<Expr *> UDMapperRefs, ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. static OMPToClause *CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes); /// Fetches the motion-modifier at 'Cnt' index of array of modifiers. /// /// \param Cnt index for motion-modifier. OpenMPMotionModifierKind getMotionModifier(unsigned Cnt) const LLVM_READONLY { assert(Cnt < NumberOfOMPMotionModifiers && "Requested modifier exceeds the total number of modifiers."); return MotionModifiers[Cnt]; } /// Fetches the motion-modifier location at 'Cnt' index of array of modifiers' /// locations. /// /// \param Cnt index for motion-modifier location. SourceLocation getMotionModifierLoc(unsigned Cnt) const LLVM_READONLY { assert(Cnt < NumberOfOMPMotionModifiers && "Requested modifier location exceeds total number of modifiers."); return MotionModifiersLoc[Cnt]; } /// Fetches ArrayRef of motion-modifiers. ArrayRef<OpenMPMotionModifierKind> getMotionModifiers() const LLVM_READONLY { return llvm::makeArrayRef(MotionModifiers); } /// Fetches ArrayRef of location of motion-modifiers. ArrayRef<SourceLocation> getMotionModifiersLoc() const LLVM_READONLY { return llvm::makeArrayRef(MotionModifiersLoc); } /// Get colon location. SourceLocation getColonLoc() const { return ColonLoc; } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPToClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_to; } }; /// This represents clause 'from' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp target update from(a,b) /// \endcode /// In this example directive '#pragma omp target update' has clause 'from' /// with the variables 'a' and 'b'. class OMPFromClause final : public OMPMappableExprListClause<OMPFromClause>, private llvm::TrailingObjects< OMPFromClause, Expr *, ValueDecl *, unsigned, OMPClauseMappableExprCommon::MappableComponent> { friend class OMPClauseReader; friend OMPMappableExprListClause; friend OMPVarListClause; friend TrailingObjects; /// Motion-modifiers for the 'from' clause. 
OpenMPMotionModifierKind MotionModifiers[NumberOfOMPMotionModifiers] = { OMPC_MOTION_MODIFIER_unknown, OMPC_MOTION_MODIFIER_unknown}; /// Location of motion-modifiers for the 'from' clause. SourceLocation MotionModifiersLoc[NumberOfOMPMotionModifiers]; /// Colon location. SourceLocation ColonLoc; /// Build clause with number of variables \a NumVars. /// /// \param TheMotionModifiers Motion-modifiers. /// \param TheMotionModifiersLoc Locations of motion-modifiers. /// \param MapperQualifierLoc C++ nested name specifier for the associated /// user-defined mapper. /// \param MapperIdInfo The identifier of associated user-defined mapper. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPFromClause(ArrayRef<OpenMPMotionModifierKind> TheMotionModifiers, ArrayRef<SourceLocation> TheMotionModifiersLoc, NestedNameSpecifierLoc MapperQualifierLoc, DeclarationNameInfo MapperIdInfo, const OMPVarListLocTy &Locs, const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_from, Locs, Sizes, /*SupportsMapper=*/true, &MapperQualifierLoc, &MapperIdInfo) { assert(llvm::array_lengthof(MotionModifiers) == TheMotionModifiers.size() && "Unexpected number of motion modifiers."); llvm::copy(TheMotionModifiers, std::begin(MotionModifiers)); assert(llvm::array_lengthof(MotionModifiersLoc) == TheMotionModifiersLoc.size() && "Unexpected number of motion modifier locations."); llvm::copy(TheMotionModifiersLoc, std::begin(MotionModifiersLoc)); } /// Build an empty clause. /// /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPFromClause(const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_from, OMPVarListLocTy(), Sizes, /*SupportsMapper=*/true) {} /// Set motion-modifier for the clause. /// /// \param I index for motion-modifier. /// \param T motion-modifier for the clause. void setMotionModifier(unsigned I, OpenMPMotionModifierKind T) { assert(I < NumberOfOMPMotionModifiers && "Unexpected index to store motion modifier, exceeds array size."); MotionModifiers[I] = T; } /// Set location for the motion-modifier. /// /// \param I index for motion-modifier location. /// \param TLoc motion-modifier location. void setMotionModifierLoc(unsigned I, SourceLocation TLoc) { assert(I < NumberOfOMPMotionModifiers && "Index to store motion modifier location exceeds array size."); MotionModifiersLoc[I] = TLoc; } /// Set colon location. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. 
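/// (Illustrative layout sketch, assuming the order of the TrailingObjects
/// template arguments above; not normative.) For a 'from' clause with V listed
/// variables, D unique declarations and L component lists the trailing storage
/// is laid out as:
/// \code
/// Expr*             [ V original expressions | V user-defined mapper refs ]
/// ValueDecl*        [ D unique declarations ]
/// unsigned          [ D per-declaration list counts | L cumulative list sizes ]
/// MappableComponent [ all components of all lists, flattened ]
/// \endcode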
size_t numTrailingObjects(OverloadToken<Expr *>) const { // There are varlist_size() of expressions, and varlist_size() of // user-defined mappers. return 2 * varlist_size(); } size_t numTrailingObjects(OverloadToken<ValueDecl *>) const { return getUniqueDeclarationsNum(); } size_t numTrailingObjects(OverloadToken<unsigned>) const { return getUniqueDeclarationsNum() + getTotalComponentListNum(); } public: /// Creates clause with a list of variables \a Vars. /// /// \param C AST context. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Vars The original expression used in the clause. /// \param Declarations Declarations used in the clause. /// \param ComponentLists Component lists used in the clause. /// \param MotionModifiers Motion-modifiers. /// \param MotionModifiersLoc Location of motion-modifiers. /// \param UDMapperRefs References to user-defined mappers associated with /// expressions used in the clause. /// \param UDMQualifierLoc C++ nested name specifier for the associated /// user-defined mapper. /// \param MapperId The identifier of associated user-defined mapper. static OMPFromClause * Create(const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists, ArrayRef<Expr *> UDMapperRefs, ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. static OMPFromClause *CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes); /// Fetches the motion-modifier at 'Cnt' index of array of modifiers. /// /// \param Cnt index for motion-modifier. OpenMPMotionModifierKind getMotionModifier(unsigned Cnt) const LLVM_READONLY { assert(Cnt < NumberOfOMPMotionModifiers && "Requested modifier exceeds the total number of modifiers."); return MotionModifiers[Cnt]; } /// Fetches the motion-modifier location at 'Cnt' index of array of modifiers' /// locations. /// /// \param Cnt index for motion-modifier location. SourceLocation getMotionModifierLoc(unsigned Cnt) const LLVM_READONLY { assert(Cnt < NumberOfOMPMotionModifiers && "Requested modifier location exceeds total number of modifiers."); return MotionModifiersLoc[Cnt]; } /// Fetches ArrayRef of motion-modifiers. ArrayRef<OpenMPMotionModifierKind> getMotionModifiers() const LLVM_READONLY { return llvm::makeArrayRef(MotionModifiers); } /// Fetches ArrayRef of location of motion-modifiers. ArrayRef<SourceLocation> getMotionModifiersLoc() const LLVM_READONLY { return llvm::makeArrayRef(MotionModifiersLoc); } /// Get colon location. 
SourceLocation getColonLoc() const { return ColonLoc; } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPFromClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_from; } }; /// This represents clause 'use_device_ptr' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp target data use_device_ptr(a,b) /// \endcode /// In this example directive '#pragma omp target data' has clause /// 'use_device_ptr' with the variables 'a' and 'b'. class OMPUseDevicePtrClause final : public OMPMappableExprListClause<OMPUseDevicePtrClause>, private llvm::TrailingObjects< OMPUseDevicePtrClause, Expr *, ValueDecl *, unsigned, OMPClauseMappableExprCommon::MappableComponent> { friend class OMPClauseReader; friend OMPMappableExprListClause; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a NumVars. /// /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPUseDevicePtrClause(const OMPVarListLocTy &Locs, const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_use_device_ptr, Locs, Sizes) { } /// Build an empty clause. /// /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPUseDevicePtrClause(const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_use_device_ptr, OMPVarListLocTy(), Sizes) {} /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. size_t numTrailingObjects(OverloadToken<Expr *>) const { return 3 * varlist_size(); } size_t numTrailingObjects(OverloadToken<ValueDecl *>) const { return getUniqueDeclarationsNum(); } size_t numTrailingObjects(OverloadToken<unsigned>) const { return getUniqueDeclarationsNum() + getTotalComponentListNum(); } /// Sets the list of references to private copies with initializers for new /// private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// Gets the list of references to private copies with initializers for new /// private variables. 
MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Sets the list of references to initializer variables for new private /// variables. /// \param VL List of references. void setInits(ArrayRef<Expr *> VL); /// Gets the list of references to initializer variables for new private /// variables. MutableArrayRef<Expr *> getInits() { return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size()); } ArrayRef<const Expr *> getInits() const { return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size()); } public: /// Creates clause with a list of variables \a Vars. /// /// \param C AST context. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Vars The original expression used in the clause. /// \param PrivateVars Expressions referring to private copies. /// \param Inits Expressions referring to private copy initializers. /// \param Declarations Declarations used in the clause. /// \param ComponentLists Component lists used in the clause. static OMPUseDevicePtrClause * Create(const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars, ArrayRef<Expr *> PrivateVars, ArrayRef<Expr *> Inits, ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. 
static OMPUseDevicePtrClause * CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes); using private_copies_iterator = MutableArrayRef<Expr *>::iterator; using private_copies_const_iterator = ArrayRef<const Expr *>::iterator; using private_copies_range = llvm::iterator_range<private_copies_iterator>; using private_copies_const_range = llvm::iterator_range<private_copies_const_iterator>; private_copies_range private_copies() { return private_copies_range(getPrivateCopies().begin(), getPrivateCopies().end()); } private_copies_const_range private_copies() const { return private_copies_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } using inits_iterator = MutableArrayRef<Expr *>::iterator; using inits_const_iterator = ArrayRef<const Expr *>::iterator; using inits_range = llvm::iterator_range<inits_iterator>; using inits_const_range = llvm::iterator_range<inits_const_iterator>; inits_range inits() { return inits_range(getInits().begin(), getInits().end()); } inits_const_range inits() const { return inits_const_range(getInits().begin(), getInits().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPUseDevicePtrClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_use_device_ptr; } }; /// This represents clause 'use_device_addr' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp target data use_device_addr(a,b) /// \endcode /// In this example directive '#pragma omp target data' has clause /// 'use_device_addr' with the variables 'a' and 'b'. class OMPUseDeviceAddrClause final : public OMPMappableExprListClause<OMPUseDeviceAddrClause>, private llvm::TrailingObjects< OMPUseDeviceAddrClause, Expr *, ValueDecl *, unsigned, OMPClauseMappableExprCommon::MappableComponent> { friend class OMPClauseReader; friend OMPMappableExprListClause; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a NumVars. /// /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPUseDeviceAddrClause(const OMPVarListLocTy &Locs, const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_use_device_addr, Locs, Sizes) {} /// Build an empty clause. /// /// \param Sizes All required sizes to build a mappable clause. 
It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPUseDeviceAddrClause(const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_use_device_addr, OMPVarListLocTy(), Sizes) {} /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. size_t numTrailingObjects(OverloadToken<Expr *>) const { return varlist_size(); } size_t numTrailingObjects(OverloadToken<ValueDecl *>) const { return getUniqueDeclarationsNum(); } size_t numTrailingObjects(OverloadToken<unsigned>) const { return getUniqueDeclarationsNum() + getTotalComponentListNum(); } public: /// Creates clause with a list of variables \a Vars. /// /// \param C AST context. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Vars The original expression used in the clause. /// \param Declarations Declarations used in the clause. /// \param ComponentLists Component lists used in the clause. static OMPUseDeviceAddrClause * Create(const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. static OMPUseDeviceAddrClause * CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPUseDeviceAddrClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_use_device_addr; } }; /// This represents clause 'is_device_ptr' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp target is_device_ptr(a,b) /// \endcode /// In this example directive '#pragma omp target' has clause /// 'is_device_ptr' with the variables 'a' and 'b'. class OMPIsDevicePtrClause final : public OMPMappableExprListClause<OMPIsDevicePtrClause>, private llvm::TrailingObjects< OMPIsDevicePtrClause, Expr *, ValueDecl *, unsigned, OMPClauseMappableExprCommon::MappableComponent> { friend class OMPClauseReader; friend OMPMappableExprListClause; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a NumVars. /// /// \param Locs Locations needed to build a mappable clause. 
It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPIsDevicePtrClause(const OMPVarListLocTy &Locs, const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_is_device_ptr, Locs, Sizes) {} /// Build an empty clause. /// /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPIsDevicePtrClause(const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_is_device_ptr, OMPVarListLocTy(), Sizes) {} /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. size_t numTrailingObjects(OverloadToken<Expr *>) const { return varlist_size(); } size_t numTrailingObjects(OverloadToken<ValueDecl *>) const { return getUniqueDeclarationsNum(); } size_t numTrailingObjects(OverloadToken<unsigned>) const { return getUniqueDeclarationsNum() + getTotalComponentListNum(); } public: /// Creates clause with a list of variables \a Vars. /// /// \param C AST context. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Vars The original expression used in the clause. /// \param Declarations Declarations used in the clause. /// \param ComponentLists Component lists used in the clause. static OMPIsDevicePtrClause * Create(const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. 
static OMPIsDevicePtrClause * CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPIsDevicePtrClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_is_device_ptr; } }; /// This represents clause 'nontemporal' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp simd nontemporal(a) /// \endcode /// In this example directive '#pragma omp simd' has clause 'nontemporal' for /// the variable 'a'. class OMPNontemporalClause final : public OMPVarListClause<OMPNontemporalClause>, private llvm::TrailingObjects<OMPNontemporalClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPNontemporalClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPNontemporalClause>(llvm::omp::OMPC_nontemporal, StartLoc, LParenLoc, EndLoc, N) { } /// Build an empty clause. /// /// \param N Number of variables. explicit OMPNontemporalClause(unsigned N) : OMPVarListClause<OMPNontemporalClause>( llvm::omp::OMPC_nontemporal, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Get the list of privatied copies if the member expression was captured by /// one of the privatization clauses. MutableArrayRef<Expr *> getPrivateRefs() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateRefs() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. static OMPNontemporalClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPNontemporalClause *CreateEmpty(const ASTContext &C, unsigned N); /// Sets the list of references to private copies created in private clauses. /// \param VL List of references. 
void setPrivateRefs(ArrayRef<Expr *> VL); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPNontemporalClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range private_refs() { return child_range(reinterpret_cast<Stmt **>(getPrivateRefs().begin()), reinterpret_cast<Stmt **>(getPrivateRefs().end())); } const_child_range private_refs() const { auto Children = const_cast<OMPNontemporalClause *>(this)->private_refs(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_nontemporal; } }; /// This represents 'order' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp simd order(concurrent) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'order' /// clause with kind 'concurrent'. class OMPOrderClause final : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'default' clause. OpenMPOrderClauseKind Kind = OMPC_ORDER_unknown; /// Start location of the kind in source code. SourceLocation KindKwLoc; /// Set kind of the clause. /// /// \param K Argument of clause. void setKind(OpenMPOrderClauseKind K) { Kind = K; } /// Set argument location. /// /// \param KLoc Argument location. void setKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// Build 'order' clause with argument \p A ('concurrent'). /// /// \param A Argument of the clause ('concurrent'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPOrderClause(OpenMPOrderClauseKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_order, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// Build an empty clause. OMPOrderClause() : OMPClause(llvm::omp::OMPC_order, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. OpenMPOrderClauseKind getKind() const { return Kind; } /// Returns location of clause kind. SourceLocation getKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_order; } }; /// This represents 'destroy' clause in the '#pragma omp depobj' /// directive. /// /// \code /// #pragma omp depobj(a) destroy /// \endcode /// In this example directive '#pragma omp depobj' has 'destroy' clause. 
class OMPDestroyClause final : public OMPClause { public: /// Build 'destroy' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPDestroyClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_destroy, StartLoc, EndLoc) {} /// Build an empty clause. OMPDestroyClause() : OMPClause(llvm::omp::OMPC_destroy, SourceLocation(), SourceLocation()) { } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_destroy; } }; /// This represents 'detach' clause in the '#pragma omp task' directive. /// /// \code /// #pragma omp task detach(evt) /// \endcode /// In this example directive '#pragma omp detach' has simple 'detach' clause /// with the variable 'evt'. class OMPDetachClause final : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Expression of the 'detach' clause. Stmt *Evt = nullptr; /// Set condition. void setEventHandler(Expr *E) { Evt = E; } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } public: /// Build 'detach' clause with event-handler \a Evt. /// /// \param Evt Event handler expression. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPDetachClause(Expr *Evt, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_detach, StartLoc, EndLoc), LParenLoc(LParenLoc), Evt(Evt) {} /// Build an empty clause. OMPDetachClause() : OMPClause(llvm::omp::OMPC_detach, SourceLocation(), SourceLocation()) {} /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns event-handler expression. Expr *getEventHandler() const { return cast_or_null<Expr>(Evt); } child_range children() { return child_range(&Evt, &Evt + 1); } const_child_range children() const { return const_child_range(&Evt, &Evt + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_detach; } }; /// This represents clause 'inclusive' in the '#pragma omp scan' directive. /// /// \code /// #pragma omp scan inclusive(a,b) /// \endcode /// In this example directive '#pragma omp scan' has clause 'inclusive' /// with the variables 'a' and 'b'. class OMPInclusiveClause final : public OMPVarListClause<OMPInclusiveClause>, private llvm::TrailingObjects<OMPInclusiveClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. 
OMPInclusiveClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPInclusiveClause>(llvm::omp::OMPC_inclusive, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPInclusiveClause(unsigned N) : OMPVarListClause<OMPInclusiveClause>(llvm::omp::OMPC_inclusive, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the original variables. static OMPInclusiveClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPInclusiveClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPInclusiveClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_inclusive; } }; /// This represents clause 'exclusive' in the '#pragma omp scan' directive. /// /// \code /// #pragma omp scan exclusive(a,b) /// \endcode /// In this example directive '#pragma omp scan' has clause 'exclusive' /// with the variables 'a' and 'b'. class OMPExclusiveClause final : public OMPVarListClause<OMPExclusiveClause>, private llvm::TrailingObjects<OMPExclusiveClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPExclusiveClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPExclusiveClause>(llvm::omp::OMPC_exclusive, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPExclusiveClause(unsigned N) : OMPVarListClause<OMPExclusiveClause>(llvm::omp::OMPC_exclusive, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the original variables. static OMPExclusiveClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
static OMPExclusiveClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPExclusiveClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_exclusive; } }; /// This represents clause 'uses_allocators' in the '#pragma omp target'-based /// directives. /// /// \code /// #pragma omp target uses_allocators(default_allocator, my_allocator(traits)) /// \endcode /// In this example directive '#pragma omp target' has clause 'uses_allocators' /// with the allocators 'default_allocator' and user-defined 'my_allocator'. class OMPUsesAllocatorsClause final : public OMPClause, private llvm::TrailingObjects<OMPUsesAllocatorsClause, Expr *, SourceLocation> { public: /// Data for list of allocators. struct Data { /// Allocator. Expr *Allocator = nullptr; /// Allocator traits. Expr *AllocatorTraits = nullptr; /// Locations of '(' and ')' symbols. SourceLocation LParenLoc, RParenLoc; }; private: friend class OMPClauseReader; friend TrailingObjects; enum class ExprOffsets { Allocator, AllocatorTraits, Total, }; enum class ParenLocsOffsets { LParen, RParen, Total, }; /// Location of '('. SourceLocation LParenLoc; /// Total number of allocators in the clause. unsigned NumOfAllocators = 0; /// Build clause. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of allocators asssociated with the clause. OMPUsesAllocatorsClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPClause(llvm::omp::OMPC_uses_allocators, StartLoc, EndLoc), LParenLoc(LParenLoc), NumOfAllocators(N) {} /// Build an empty clause. /// \param N Number of allocators asssociated with the clause. /// explicit OMPUsesAllocatorsClause(unsigned N) : OMPClause(llvm::omp::OMPC_uses_allocators, SourceLocation(), SourceLocation()), NumOfAllocators(N) {} unsigned numTrailingObjects(OverloadToken<Expr *>) const { return NumOfAllocators * static_cast<int>(ExprOffsets::Total); } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Sets the allocators data for the clause. void setAllocatorsData(ArrayRef<OMPUsesAllocatorsClause::Data> Data); public: /// Creates clause with a list of allocators \p Data. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param Data List of allocators. static OMPUsesAllocatorsClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<OMPUsesAllocatorsClause::Data> Data); /// Creates an empty clause with the place for \p N allocators. /// /// \param C AST context. /// \param N The number of allocators. static OMPUsesAllocatorsClause *CreateEmpty(const ASTContext &C, unsigned N); /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns number of allocators associated with the clause. 
unsigned getNumberOfAllocators() const { return NumOfAllocators; } /// Returns data for the specified allocator. OMPUsesAllocatorsClause::Data getAllocatorData(unsigned I) const; // Iterators child_range children() { Stmt **Begin = reinterpret_cast<Stmt **>(getTrailingObjects<Expr *>()); return child_range(Begin, Begin + NumOfAllocators * static_cast<int>(ExprOffsets::Total)); } const_child_range children() const { Stmt *const *Begin = reinterpret_cast<Stmt *const *>(getTrailingObjects<Expr *>()); return const_child_range( Begin, Begin + NumOfAllocators * static_cast<int>(ExprOffsets::Total)); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_uses_allocators; } }; /// This represents clause 'affinity' in the '#pragma omp task'-based /// directives. /// /// \code /// #pragma omp task affinity(iterator(i = 0:n) : ([3][n])a, b[:n], c[i]) /// \endcode /// In this example directive '#pragma omp task' has clause 'affinity' with the /// affinity modifer 'iterator(i = 0:n)' and locator items '([3][n])a', 'b[:n]' /// and 'c[i]'. class OMPAffinityClause final : public OMPVarListClause<OMPAffinityClause>, private llvm::TrailingObjects<OMPAffinityClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of ':' symbol. SourceLocation ColonLoc; /// Build clause. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param N Number of locators asssociated with the clause. OMPAffinityClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPAffinityClause>(llvm::omp::OMPC_affinity, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// \param N Number of locators asssociated with the clause. /// explicit OMPAffinityClause(unsigned N) : OMPVarListClause<OMPAffinityClause>(llvm::omp::OMPC_affinity, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Sets the affinity modifier for the clause, if any. void setModifier(Expr *E) { getTrailingObjects<Expr *>()[varlist_size()] = E; } /// Sets the location of ':' symbol. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } public: /// Creates clause with a modifier a list of locator items. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param Locators List of locator items. static OMPAffinityClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, Expr *Modifier, ArrayRef<Expr *> Locators); /// Creates an empty clause with the place for \p N locator items. /// /// \param C AST context. /// \param N The number of locator items. static OMPAffinityClause *CreateEmpty(const ASTContext &C, unsigned N); /// Gets affinity modifier. Expr *getModifier() { return getTrailingObjects<Expr *>()[varlist_size()]; } Expr *getModifier() const { return getTrailingObjects<Expr *>()[varlist_size()]; } /// Gets the location of ':' symbol. 
SourceLocation getColonLoc() const { return ColonLoc; } // Iterators child_range children() { int Offset = getModifier() ? 1 : 0; return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end() + Offset)); } const_child_range children() const { auto Children = const_cast<OMPAffinityClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_affinity; } }; /// This class implements a simple visitor for OMPClause /// subclasses. template<class ImplClass, template <typename> class Ptr, typename RetTy> class OMPClauseVisitorBase { public: #define PTR(CLASS) Ptr<CLASS> #define DISPATCH(CLASS) \ return static_cast<ImplClass*>(this)->Visit##CLASS(static_cast<PTR(CLASS)>(S)) #define OMP_CLAUSE_CLASS(Enum, Str, Class) \ RetTy Visit ## Class (PTR(Class) S) { DISPATCH(Class); } #include "llvm/Frontend/OpenMP/OMPKinds.def" RetTy Visit(PTR(OMPClause) S) { // Top switch clause: visit each OMPClause. switch (S->getClauseKind()) { #define OMP_CLAUSE_CLASS(Enum, Str, Class) \ case llvm::omp::Clause::Enum: \ return Visit##Class(static_cast<PTR(Class)>(S)); #define OMP_CLAUSE_NO_CLASS(Enum, Str) \ case llvm::omp::Clause::Enum: \ break; #include "llvm/Frontend/OpenMP/OMPKinds.def" default: break; } } // Base case, ignore it. :) RetTy VisitOMPClause(PTR(OMPClause) Node) { return RetTy(); } #undef PTR #undef DISPATCH }; template <typename T> using const_ptr = std::add_pointer_t<std::add_const_t<T>>; template <class ImplClass, typename RetTy = void> class OMPClauseVisitor : public OMPClauseVisitorBase<ImplClass, std::add_pointer_t, RetTy> {}; template<class ImplClass, typename RetTy = void> class ConstOMPClauseVisitor : public OMPClauseVisitorBase <ImplClass, const_ptr, RetTy> {}; class OMPClausePrinter final : public OMPClauseVisitor<OMPClausePrinter> { raw_ostream &OS; const PrintingPolicy &Policy; /// Process clauses with list of variables. template <typename T> void VisitOMPClauseList(T *Node, char StartSym); /// Process motion clauses. template <typename T> void VisitOMPMotionClause(T *Node); public: OMPClausePrinter(raw_ostream &OS, const PrintingPolicy &Policy) : OS(OS), Policy(Policy) {} #define OMP_CLAUSE_CLASS(Enum, Str, Class) \ void Visit##Class(Class *S); #include "llvm/Frontend/OpenMP/OMPKinds.def" }; struct OMPTraitProperty { llvm::omp::TraitProperty Kind = llvm::omp::TraitProperty::invalid; /// The raw string as we parsed it. This is needed for the `isa` trait set /// (which accepts anything) and (later) extensions. StringRef RawString; }; struct OMPTraitSelector { Expr *ScoreOrCondition = nullptr; llvm::omp::TraitSelector Kind = llvm::omp::TraitSelector::invalid; llvm::SmallVector<OMPTraitProperty, 1> Properties; }; struct OMPTraitSet { llvm::omp::TraitSet Kind = llvm::omp::TraitSet::invalid; llvm::SmallVector<OMPTraitSelector, 2> Selectors; }; /// Helper data structure representing the traits in a match clause of an /// `declare variant` or `metadirective`. The outer level is an ordered /// collection of selector sets, each with an associated kind and an ordered /// collection of selectors. A selector has a kind, an optional score/condition, /// and an ordered collection of properties. 
class OMPTraitInfo { /// Private constructor accesible only by ASTContext. OMPTraitInfo() {} friend class ASTContext; public: /// Reconstruct a (partial) OMPTraitInfo object from a mangled name. OMPTraitInfo(StringRef MangledName); /// The outermost level of selector sets. llvm::SmallVector<OMPTraitSet, 2> Sets; bool anyScoreOrCondition( llvm::function_ref<bool(Expr *&, bool /* IsScore */)> Cond) { return llvm::any_of(Sets, [&](OMPTraitSet &Set) { return llvm::any_of( Set.Selectors, [&](OMPTraitSelector &Selector) { return Cond(Selector.ScoreOrCondition, /* IsScore */ Selector.Kind != llvm::omp::TraitSelector::user_condition); }); }); } /// Create a variant match info object from this trait info object. While the /// former is a flat representation the actual main difference is that the /// latter uses clang::Expr to store the score/condition while the former is /// independent of clang. Thus, expressions and conditions are evaluated in /// this method. void getAsVariantMatchInfo(ASTContext &ASTCtx, llvm::omp::VariantMatchInfo &VMI) const; /// Return a string representation identifying this context selector. std::string getMangledName() const; /// Print a human readable representation into \p OS. void print(llvm::raw_ostream &OS, const PrintingPolicy &Policy) const; }; llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo &TI); llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo *TI); /// Clang specific specialization of the OMPContext to lookup target features. struct TargetOMPContext final : public llvm::omp::OMPContext { TargetOMPContext(ASTContext &ASTCtx, std::function<void(StringRef)> &&DiagUnknownTrait, const FunctionDecl *CurrentFunctionDecl); virtual ~TargetOMPContext() = default; /// See llvm::omp::OMPContext::matchesISATrait bool matchesISATrait(StringRef RawString) const override; private: std::function<bool(StringRef)> FeatureValidityCheck; std::function<void(StringRef)> DiagUnknownTrait; llvm::StringMap<bool> FeatureMap; }; /// Contains data for OpenMP directives: clauses, children /// expressions/statements (helpers for codegen) and associated statement, if /// any. class OMPChildren final : private llvm::TrailingObjects<OMPChildren, OMPClause *, Stmt *> { friend TrailingObjects; friend class OMPClauseReader; friend class OMPExecutableDirective; template <typename T> friend class OMPDeclarativeDirective; /// Numbers of clauses. unsigned NumClauses = 0; /// Number of child expressions/stmts. unsigned NumChildren = 0; /// true if the directive has associated statement. bool HasAssociatedStmt = false; /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. 
size_t numTrailingObjects(OverloadToken<OMPClause *>) const { return NumClauses; } OMPChildren() = delete; OMPChildren(unsigned NumClauses, unsigned NumChildren, bool HasAssociatedStmt) : NumClauses(NumClauses), NumChildren(NumChildren), HasAssociatedStmt(HasAssociatedStmt) {} static size_t size(unsigned NumClauses, bool HasAssociatedStmt, unsigned NumChildren); static OMPChildren *Create(void *Mem, ArrayRef<OMPClause *> Clauses); static OMPChildren *Create(void *Mem, ArrayRef<OMPClause *> Clauses, Stmt *S, unsigned NumChildren = 0); static OMPChildren *CreateEmpty(void *Mem, unsigned NumClauses, bool HasAssociatedStmt = false, unsigned NumChildren = 0); public: unsigned getNumClauses() const { return NumClauses; } unsigned getNumChildren() const { return NumChildren; } bool hasAssociatedStmt() const { return HasAssociatedStmt; } /// Set associated statement. void setAssociatedStmt(Stmt *S) { getTrailingObjects<Stmt *>()[NumChildren] = S; } void setChildren(ArrayRef<Stmt *> Children); /// Sets the list of variables for this clause. /// /// \param Clauses The list of clauses for the directive. /// void setClauses(ArrayRef<OMPClause *> Clauses); /// Returns statement associated with the directive. const Stmt *getAssociatedStmt() const { return const_cast<OMPChildren *>(this)->getAssociatedStmt(); } Stmt *getAssociatedStmt() { assert(HasAssociatedStmt && "Expected directive with the associated statement."); return getTrailingObjects<Stmt *>()[NumChildren]; } /// Get the clauses storage. MutableArrayRef<OMPClause *> getClauses() { return llvm::makeMutableArrayRef(getTrailingObjects<OMPClause *>(), NumClauses); } ArrayRef<OMPClause *> getClauses() const { return const_cast<OMPChildren *>(this)->getClauses(); } /// Returns the captured statement associated with the /// component region within the (combined) directive. /// /// \param RegionKind Component region kind. const CapturedStmt * getCapturedStmt(OpenMPDirectiveKind RegionKind, ArrayRef<OpenMPDirectiveKind> CaptureRegions) const { assert(llvm::any_of( CaptureRegions, [=](const OpenMPDirectiveKind K) { return K == RegionKind; }) && "RegionKind not found in OpenMP CaptureRegions."); auto *CS = cast<CapturedStmt>(getAssociatedStmt()); for (auto ThisCaptureRegion : CaptureRegions) { if (ThisCaptureRegion == RegionKind) return CS; CS = cast<CapturedStmt>(CS->getCapturedStmt()); } llvm_unreachable("Incorrect RegionKind specified for directive."); } /// Get innermost captured statement for the construct. 
CapturedStmt * getInnermostCapturedStmt(ArrayRef<OpenMPDirectiveKind> CaptureRegions) { assert(hasAssociatedStmt() && "Must have associated captured statement."); assert(!CaptureRegions.empty() && "At least one captured statement must be provided."); auto *CS = cast<CapturedStmt>(getAssociatedStmt()); for (unsigned Level = CaptureRegions.size(); Level > 1; --Level) CS = cast<CapturedStmt>(CS->getCapturedStmt()); return CS; } const CapturedStmt * getInnermostCapturedStmt(ArrayRef<OpenMPDirectiveKind> CaptureRegions) const { return const_cast<OMPChildren *>(this)->getInnermostCapturedStmt( CaptureRegions); } MutableArrayRef<Stmt *> getChildren(); ArrayRef<Stmt *> getChildren() const { return const_cast<OMPChildren *>(this)->getChildren(); } Stmt *getRawStmt() { assert(HasAssociatedStmt && "Expected directive with the associated statement."); if (auto *CS = dyn_cast<CapturedStmt>(getAssociatedStmt())) { Stmt *S = nullptr; do { S = CS->getCapturedStmt(); CS = dyn_cast<CapturedStmt>(S); } while (CS); return S; } return getAssociatedStmt(); } const Stmt *getRawStmt() const { return const_cast<OMPChildren *>(this)->getRawStmt(); } Stmt::child_range getAssociatedStmtAsRange() { if (!HasAssociatedStmt) return Stmt::child_range(Stmt::child_iterator(), Stmt::child_iterator()); return Stmt::child_range(&getTrailingObjects<Stmt *>()[NumChildren], &getTrailingObjects<Stmt *>()[NumChildren + 1]); } }; } // namespace clang #endif // LLVM_CLANG_AST_OPENMPCLAUSE_H
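// ---------------------------------------------------------------------------
// Illustrative sketch only (appended for clarity; not part of OpenMPClause.h):
// it shows how client code typically consumes the accessors declared above,
// here for the 'uses_allocators' and 'detach' clauses. The helper names
// hasAllocatorTraits and getDetachEvent are hypothetical.
// ---------------------------------------------------------------------------
// Returns true if any allocator in a uses_allocators clause was written with
// traits, e.g. 'uses_allocators(my_allocator(traits))'.
static bool hasAllocatorTraits(const clang::OMPUsesAllocatorsClause &C) {
  for (unsigned I = 0, E = C.getNumberOfAllocators(); I != E; ++I) {
    clang::OMPUsesAllocatorsClause::Data D = C.getAllocatorData(I);
    if (D.AllocatorTraits) // null when only a plain allocator was given
      return true;
  }
  return false;
}

// classof() on each clause class enables the usual LLVM-style dyn_cast dispatch.
static const clang::Expr *getDetachEvent(const clang::OMPClause *C) {
  if (const auto *DC = llvm::dyn_cast<clang::OMPDetachClause>(C))
    return DC->getEventHandler();
  return nullptr;
}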
blackscholes.c
/* * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. */ #include <math.h> #include <stdlib.h> #include <stdio.h> #include <accel.h> #include "timer.h" #ifdef FP64 typedef double real; #define SQRT(x) sqrt((x)) #define EXP(x) exp((x)) #define FABS(x) fabs((x)) #define LOG(x) log((x)) #else typedef float real; #define SQRT(x) sqrtf((x)) #define EXP(x) expf((x)) #define FABS(x) fabsf((x)) #define LOG(x) logf((x)) #endif const float RISKFREE = 0.02f; const float VOLATILITY = 0.30f; /////////////////////////////////////////////////////////////////////////////// // Polynomial approximation of cumulative normal distribution function /////////////////////////////////////////////////////////////////////////////// real CND(real d) { const real A1 = (real)0.31938153; const real A2 = (real)-0.356563782; const real A3 = (real)1.781477937; const real A4 = (real)-1.821255978; const real A5 = (real)1.330274429; const real RSQRT2PI = (real)0.39894228040143267793994605993438; real K = (real)1.0 / ((real)1.0 + (real)0.2316419 * FABS(d)); real cnd = RSQRT2PI * EXP(- (real)0.5 * d * d) * (K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5))))); if(d > 0) cnd = (real)1.0 - cnd; return cnd; } //////////////////////////////////////////////////////////////////////////////// // Process an array of optN options //////////////////////////////////////////////////////////////////////////////// void BlackScholes( real * restrict callResult, real * restrict putResult, real * restrict stockPrice, real * restrict optionStrike, real * restrict optionYears, real Riskfree, real Volatility, int optN, int accelerate) { #pragma acc kernels loop if (accelerate) #pragma omp parallel for if (accelerate) for(int opt = 0; opt < optN; opt++) { real S = stockPrice[opt]; real X = optionStrike[opt]; real T = optionYears[opt]; real R = Riskfree, V = Volatility; real sqrtT = SQRT(T); real d1 = (LOG(S / X) + (R + (real)0.5 * V * V) * T) / (V * sqrtT); real d2 = d1 - V * sqrtT; real CNDD1 = CND(d1); real CNDD2 = CND(d2); //Calculate Call and Put simultaneously real expRT = EXP(- R * T); callResult[opt] = (real)(S * CNDD1 - X * expRT * CNDD2); putResult[opt] = (real)(X * expRT * ((real)1.0 - CNDD2) - S * ((real)1.0 - CNDD1)); } } float RandFloat(float low, float high){ float t = (float)rand() / (float)RAND_MAX; return (1.0f - t) * low + t * high; } int main(int argc, char **argv) { int OPT_N = 4000000; int OPT_SZ = OPT_N * sizeof(float); int iterations = 10; if (argc >= 2) iterations = atoi(argv[1]); real //Results calculated by CPU for reference *callResultCPU, *putResultCPU, //GPU results *callResultGPU, *putResultGPU, //CPU instance of input data *stockPrice, *optionStrike, *optionYears; real delta, ref, sum_delta, sum_ref, max_delta, L1norm, gpuTime; printf("Initializing data...\n"); callResultCPU = (real *)malloc(OPT_SZ); putResultCPU = (real *)malloc(OPT_SZ); callResultGPU = (real *)malloc(OPT_SZ); putResultGPU = (real *)malloc(OPT_SZ); stockPrice = (real *)malloc(OPT_SZ); optionStrike = (real *)malloc(OPT_SZ); optionYears = (real *)malloc(OPT_SZ); srand(5347); //Generate options set for(int i = 0; i < OPT_N; i++){ callResultCPU[i] = (real)0.0; putResultCPU[i] = (real)-1.0; 
callResultGPU[i] = (real)0.0; putResultGPU[i] = (real)-1.0; stockPrice[i] = (real)RandFloat(5.0f, 30.0f); optionStrike[i] = (real)RandFloat(1.0f, 100.0f); optionYears[i] = (real)RandFloat(0.25f, 10.0f); } #ifdef _OPENACC // run once outside timer to initialize/prime acc_init(acc_device_nvidia); #endif BlackScholes( callResultGPU, putResultGPU, stockPrice, optionStrike, optionYears, RISKFREE, VOLATILITY, OPT_N, 1 ); printf("Running Unaccelerated Version %d iterations...\n", 10); StartTimer(); for (int i = 0; i < 10; i++) { BlackScholes( callResultCPU, putResultCPU, stockPrice, optionStrike, optionYears, RISKFREE, VOLATILITY, OPT_N, 0 ); } double ms = GetTimer() / 10; printf("Running Accelerated Version %d iterations...\n", iterations); StartTimer(); for (int i = 0; i < iterations; i++) { BlackScholes( callResultGPU, putResultGPU, stockPrice, optionStrike, optionYears, RISKFREE, VOLATILITY, OPT_N, 1 ); } double msAccelerated = GetTimer() / iterations; //Both call and put is calculated printf("Options count : %i \n", 2 * OPT_N); printf("Unaccelerated:\n"); printf("\tBlackScholes() time : %f msec\n", ms); printf("\t%f GB/s, %f GOptions/s\n", ((double)(5 * OPT_N * sizeof(float)) * 1E-9) / (ms * 1E-3), ((double)(2 * OPT_N) * 1E-9) / (ms * 1E-3)); printf("Accelerated:\n"); printf("\tBlackScholes() time : %f msec\n", msAccelerated); printf("\t%f GB/s, %f GOptions/s\n", ((double)(5 * OPT_N * sizeof(float)) * 1E-9) / (msAccelerated * 1E-3), ((double)(2 * OPT_N) * 1E-9) / (msAccelerated * 1E-3)); printf("Comparing the results...\n"); //Calculate max absolute difference and L1 distance //between CPU and GPU results sum_delta = 0; sum_ref = 0; max_delta = 0; for(int i = 0; i < OPT_N; i++){ ref = callResultCPU[i]; delta = fabs(callResultCPU[i] - callResultGPU[i]); if(delta > max_delta) max_delta = delta; sum_delta += delta; sum_ref += fabs(ref); } L1norm = sum_delta / sum_ref; printf("L1 norm: %E\n", L1norm); printf("Max absolute error: %E\n\n", max_delta); if (max_delta > 2.0e-5) { printf("Test FAILED\n"); } else { printf("Test PASSED\n"); } free(callResultCPU); free(putResultCPU); free(callResultGPU); free(putResultGPU); free(stockPrice); free(optionStrike); free(optionYears); return 0; }
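/*
 * Illustrative sketch (appended for clarity; not part of the NVIDIA sample
 * above). Put-call parity, C - P = S - X*exp(-R*T), holds exactly for the
 * Black-Scholes formulas evaluated in BlackScholes(), so the largest parity
 * residual is a cheap extra sanity check on either result array. The helper
 * name MaxParityError is hypothetical.
 */
static real MaxParityError(const real *call, const real *put,
                           const real *S, const real *X, const real *T,
                           real R, int n)
{
    real max_err = (real)0.0;
    for (int i = 0; i < n; i++) {
        /* residual of C - P = S - X*exp(-R*T) for option i */
        real err = FABS(call[i] - put[i] - (S[i] - X[i] * EXP(-R * T[i])));
        if (err > max_err)
            max_err = err;
    }
    return max_err;
}
/* Example use after the accelerated run:
 *   printf("parity error: %E\n",
 *          (double) MaxParityError(callResultGPU, putResultGPU,
 *                                  stockPrice, optionStrike, optionYears,
 *                                  RISKFREE, OPT_N));                        */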
statistic.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC % % SS T A A T I SS T I C % % SSS T AAAAA T I SSS T I C % % SS T A A T I SS T I C % % SSSSS T A A T IIIII SSSSS T IIIII CCCC % % % % % % MagickCore Image Statistical Methods % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/property.h" #include "magick/animate.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/compress.h" #include "magick/constitute.h" #include "magick/deprecate.h" #include "magick/display.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/list.h" #include "magick/image-private.h" #include "magick/magic.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel-private.h" #include "magick/profile.h" #include "magick/quantize.h" #include "magick/random_.h" #include "magick/random-private.h" #include "magick/segment.h" #include "magick/semaphore.h" #include "magick/signature-private.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/timer.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E v a l u a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EvaluateImage() applies a value to the image with an arithmetic, relational, % or logical operator to an image. Use these operations to lighten or darken % an image, to increase or decrease contrast in an image, or to produce the % "negative" of an image. 
% % The format of the EvaluateImageChannel method is: % % MagickBooleanType EvaluateImage(Image *image, % const MagickEvaluateOperator op,const double value, % ExceptionInfo *exception) % MagickBooleanType EvaluateImages(Image *images, % const MagickEvaluateOperator op,const double value, % ExceptionInfo *exception) % MagickBooleanType EvaluateImageChannel(Image *image, % const ChannelType channel,const MagickEvaluateOperator op, % const double value,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o op: A channel op. % % o value: A value value. % % o exception: return any errors or warnings in this structure. % */ static MagickPixelPacket **DestroyPixelThreadSet(MagickPixelPacket **pixels) { register ssize_t i; assert(pixels != (MagickPixelPacket **) NULL); for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++) if (pixels[i] != (MagickPixelPacket *) NULL) pixels[i]=(MagickPixelPacket *) RelinquishMagickMemory(pixels[i]); pixels=(MagickPixelPacket **) RelinquishMagickMemory(pixels); return(pixels); } static MagickPixelPacket **AcquirePixelThreadSet(const Image *image, const size_t number_images) { register ssize_t i, j; MagickPixelPacket **pixels; size_t length, number_threads; number_threads=GetOpenMPMaximumThreads(); pixels=(MagickPixelPacket **) AcquireQuantumMemory(number_threads, sizeof(*pixels)); if (pixels == (MagickPixelPacket **) NULL) return((MagickPixelPacket **) NULL); (void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels)); for (i=0; i < (ssize_t) number_threads; i++) { length=image->columns; if (length < number_images) length=number_images; pixels[i]=(MagickPixelPacket *) AcquireQuantumMemory(length, sizeof(**pixels)); if (pixels[i] == (MagickPixelPacket *) NULL) return(DestroyPixelThreadSet(pixels)); for (j=0; j < (ssize_t) length; j++) GetMagickPixelPacket(image,&pixels[i][j]); } return(pixels); } static inline double MagickMax(const double x,const double y) { if (x > y) return(x); return(y); } #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int IntensityCompare(const void *x,const void *y) { const MagickPixelPacket *color_1, *color_2; int intensity; color_1=(const MagickPixelPacket *) x; color_2=(const MagickPixelPacket *) y; intensity=(int) MagickPixelIntensity(color_2)- (int) MagickPixelIntensity(color_1); return(intensity); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static inline double MagickMin(const double x,const double y) { if (x < y) return(x); return(y); } static MagickRealType ApplyEvaluateOperator(RandomInfo *random_info, Quantum pixel,const MagickEvaluateOperator op,const MagickRealType value) { MagickRealType result; result=0.0; switch (op) { case UndefinedEvaluateOperator: break; case AbsEvaluateOperator: { result=(MagickRealType) fabs((double) (pixel+value)); break; } case AddEvaluateOperator: { result=(MagickRealType) (pixel+value); break; } case AddModulusEvaluateOperator: { /* This returns a 'floored modulus' of the addition which is a positive result. It differs from % or fmod() which returns a 'truncated modulus' result, where floor() is replaced by trunc() and could return a negative result (which is clipped). 
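For example, at quantum depth 8 (QuantumRange 255) a sum of -10 maps to -10 - 256*floor(-10/256) = 246, whereas a truncated modulus such as fmod(-10,256) yields -10 and is clipped to 0.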
*/ result=pixel+value; result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0)); break; } case AndEvaluateOperator: { result=(MagickRealType) ((size_t) pixel & (size_t) (value+0.5)); break; } case CosineEvaluateOperator: { result=(MagickRealType) (QuantumRange*(0.5*cos((double) (2.0*MagickPI* QuantumScale*pixel*value))+0.5)); break; } case DivideEvaluateOperator: { result=pixel/(value == 0.0 ? 1.0 : value); break; } case ExponentialEvaluateOperator: { result=(MagickRealType) (QuantumRange*exp((double) (value*QuantumScale* pixel))); break; } case GaussianNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, GaussianNoise,value); break; } case ImpulseNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, ImpulseNoise,value); break; } case LaplacianNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, LaplacianNoise,value); break; } case LeftShiftEvaluateOperator: { result=(MagickRealType) ((size_t) pixel << (size_t) (value+0.5)); break; } case LogEvaluateOperator: { result=(MagickRealType) (QuantumRange*log((double) (QuantumScale*value* pixel+1.0))/log((double) (value+1.0))); break; } case MaxEvaluateOperator: { result=(MagickRealType) MagickMax((double) pixel,value); break; } case MeanEvaluateOperator: { result=(MagickRealType) (pixel+value); break; } case MedianEvaluateOperator: { result=(MagickRealType) (pixel+value); break; } case MinEvaluateOperator: { result=(MagickRealType) MagickMin((double) pixel,value); break; } case MultiplicativeNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, MultiplicativeGaussianNoise,value); break; } case MultiplyEvaluateOperator: { result=(MagickRealType) (value*pixel); break; } case OrEvaluateOperator: { result=(MagickRealType) ((size_t) pixel | (size_t) (value+0.5)); break; } case PoissonNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, PoissonNoise,value); break; } case PowEvaluateOperator: { result=(MagickRealType) (QuantumRange*pow((double) (QuantumScale*pixel), (double) value)); break; } case RightShiftEvaluateOperator: { result=(MagickRealType) ((size_t) pixel >> (size_t) (value+0.5)); break; } case SetEvaluateOperator: { result=value; break; } case SineEvaluateOperator: { result=(MagickRealType) (QuantumRange*(0.5*sin((double) (2.0*MagickPI* QuantumScale*pixel*value))+0.5)); break; } case SubtractEvaluateOperator: { result=(MagickRealType) (pixel-value); break; } case ThresholdEvaluateOperator: { result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 : QuantumRange); break; } case ThresholdBlackEvaluateOperator: { result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 : pixel); break; } case ThresholdWhiteEvaluateOperator: { result=(MagickRealType) (((MagickRealType) pixel > value) ? 
QuantumRange : pixel); break; } case UniformNoiseEvaluateOperator: { result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel, UniformNoise,value); break; } case XorEvaluateOperator: { result=(MagickRealType) ((size_t) pixel ^ (size_t) (value+0.5)); break; } } return(result); } MagickExport MagickBooleanType EvaluateImage(Image *image, const MagickEvaluateOperator op,const double value,ExceptionInfo *exception) { MagickBooleanType status; status=EvaluateImageChannel(image,CompositeChannels,op,value,exception); return(status); } MagickExport Image *EvaluateImages(const Image *images, const MagickEvaluateOperator op,ExceptionInfo *exception) { #define EvaluateImageTag "Evaluate/Image" CacheView *evaluate_view; const Image *next; Image *evaluate_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket **restrict evaluate_pixels, zero; RandomInfo **restrict random_info; size_t number_images; ssize_t y; /* Ensure the image are the same size. */ assert(images != (Image *) NULL); assert(images->signature == MagickSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); for (next=images; next != (Image *) NULL; next=GetNextImageInList(next)) if ((next->columns != images->columns) || (next->rows != images->rows)) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "ImageWidthsOrHeightsDiffer","`%s'",images->filename); return((Image *) NULL); } /* Initialize evaluate next attributes. */ evaluate_image=CloneImage(images,images->columns,images->rows,MagickTrue, exception); if (evaluate_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(evaluate_image,DirectClass) == MagickFalse) { InheritException(exception,&evaluate_image->exception); evaluate_image=DestroyImage(evaluate_image); return((Image *) NULL); } number_images=GetImageListLength(images); evaluate_pixels=AcquirePixelThreadSet(images,number_images); if (evaluate_pixels == (MagickPixelPacket **) NULL) { evaluate_image=DestroyImage(evaluate_image); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return((Image *) NULL); } /* Evaluate image pixels. 
*/ status=MagickTrue; progress=0; GetMagickPixelPacket(images,&zero); random_info=AcquireRandomInfoThreadSet(); evaluate_view=AcquireCacheView(evaluate_image); if (op == MedianEvaluateOperator) #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(progress,status) #endif for (y=0; y < (ssize_t) evaluate_image->rows; y++) { CacheView *image_view; const Image *next; const int id = GetOpenMPThreadId(); register IndexPacket *restrict evaluate_indexes; register MagickPixelPacket *evaluate_pixel; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,evaluate_image->columns, 1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view); evaluate_pixel=evaluate_pixels[id]; for (x=0; x < (ssize_t) evaluate_image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) number_images; i++) evaluate_pixel[i]=zero; next=images; for (i=0; i < (ssize_t) number_images; i++) { register const IndexPacket *indexes; register const PixelPacket *p; image_view=AcquireCacheView(next); p=GetCacheViewVirtualPixels(image_view,x,y,1,1,exception); if (p == (const PixelPacket *) NULL) { image_view=DestroyCacheView(image_view); break; } indexes=GetCacheViewVirtualIndexQueue(image_view); evaluate_pixel[i].red=ApplyEvaluateOperator(random_info[id], GetPixelRed(p),op,evaluate_pixel[i].red); evaluate_pixel[i].green=ApplyEvaluateOperator(random_info[id], GetPixelGreen(p),op,evaluate_pixel[i].green); evaluate_pixel[i].blue=ApplyEvaluateOperator(random_info[id], GetPixelBlue(p),op,evaluate_pixel[i].blue); evaluate_pixel[i].opacity=ApplyEvaluateOperator(random_info[id], GetPixelOpacity(p),op,evaluate_pixel[i].opacity); if (evaluate_image->colorspace == CMYKColorspace) evaluate_pixel[i].index=ApplyEvaluateOperator(random_info[id], *indexes,op,evaluate_pixel[i].index); image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel), IntensityCompare); SetPixelRed(q,ClampToQuantum(evaluate_pixel[i/2].red)); SetPixelGreen(q,ClampToQuantum(evaluate_pixel[i/2].green)); SetPixelBlue(q,ClampToQuantum(evaluate_pixel[i/2].blue)); if (evaluate_image->matte == MagickFalse) SetPixelOpacity(q,ClampToQuantum( evaluate_pixel[i/2].opacity)); else SetPixelAlpha(q,ClampToQuantum(evaluate_pixel[i/2].opacity)); if (evaluate_image->colorspace == CMYKColorspace) SetPixelIndex(evaluate_indexes+i,ClampToQuantum( evaluate_pixel[i/2].index)); q++; } if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_EvaluateImages) #endif proceed=SetImageProgress(images,EvaluateImageTag,progress++, evaluate_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } else #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(progress,status) #endif for (y=0; y < (ssize_t) evaluate_image->rows; y++) { CacheView *image_view; const Image *next; const int id = GetOpenMPThreadId(); register IndexPacket *restrict evaluate_indexes; register ssize_t i, x; register MagickPixelPacket *evaluate_pixel; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,evaluate_image->columns, 1,exception); if 
(q == (PixelPacket *) NULL) { status=MagickFalse; continue; } evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view); evaluate_pixel=evaluate_pixels[id]; for (x=0; x < (ssize_t) evaluate_image->columns; x++) evaluate_pixel[x]=zero; next=images; for (i=0; i < (ssize_t) number_images; i++) { register const IndexPacket *indexes; register const PixelPacket *p; image_view=AcquireCacheView(next); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) { image_view=DestroyCacheView(image_view); break; } indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) next->columns; x++) { evaluate_pixel[x].red=ApplyEvaluateOperator(random_info[id], GetPixelRed(p),i == 0 ? AddEvaluateOperator : op,evaluate_pixel[x].red); evaluate_pixel[x].green=ApplyEvaluateOperator(random_info[id], GetPixelGreen(p),i == 0 ? AddEvaluateOperator : op, evaluate_pixel[x].green); evaluate_pixel[x].blue=ApplyEvaluateOperator(random_info[id], GetPixelBlue(p),i == 0 ? AddEvaluateOperator : op, evaluate_pixel[x].blue); evaluate_pixel[x].opacity=ApplyEvaluateOperator(random_info[id], GetPixelOpacity(p),i == 0 ? AddEvaluateOperator : op, evaluate_pixel[x].opacity); if (evaluate_image->colorspace == CMYKColorspace) evaluate_pixel[x].index=ApplyEvaluateOperator(random_info[id], GetPixelIndex(indexes+x),i == 0 ? AddEvaluateOperator : op,evaluate_pixel[x].index); p++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (op == MeanEvaluateOperator) for (x=0; x < (ssize_t) evaluate_image->columns; x++) { evaluate_pixel[x].red/=number_images; evaluate_pixel[x].green/=number_images; evaluate_pixel[x].blue/=number_images; evaluate_pixel[x].opacity/=number_images; evaluate_pixel[x].index/=number_images; } if (op == MultiplyEvaluateOperator) for (x=0; x < (ssize_t) evaluate_image->columns; x++) { register ssize_t j; for (j=0; x < (ssize_t) (number_images-1); j++) { evaluate_pixel[x].red*=QuantumScale; evaluate_pixel[x].green*=QuantumScale; evaluate_pixel[x].blue*=QuantumScale; evaluate_pixel[x].opacity*=QuantumScale; evaluate_pixel[x].index*=QuantumScale; } } for (x=0; x < (ssize_t) evaluate_image->columns; x++) { SetPixelRed(q,ClampToQuantum(evaluate_pixel[x].red)); SetPixelGreen(q,ClampToQuantum(evaluate_pixel[x].green)); SetPixelBlue(q,ClampToQuantum(evaluate_pixel[x].blue)); if (evaluate_image->matte == MagickFalse) SetPixelOpacity(q,ClampToQuantum(evaluate_pixel[x].opacity)); else SetPixelAlpha(q,ClampToQuantum(evaluate_pixel[x].opacity)); if (evaluate_image->colorspace == CMYKColorspace) SetPixelIndex(evaluate_indexes+x,ClampToQuantum( evaluate_pixel[x].index)); q++; } if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse) status=MagickFalse; if (images->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_EvaluateImages) #endif proceed=SetImageProgress(images,EvaluateImageTag,progress++, evaluate_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } evaluate_view=DestroyCacheView(evaluate_view); evaluate_pixels=DestroyPixelThreadSet(evaluate_pixels); random_info=DestroyRandomInfoThreadSet(random_info); if (status == MagickFalse) evaluate_image=DestroyImage(evaluate_image); return(evaluate_image); } MagickExport MagickBooleanType EvaluateImageChannel(Image *image, const ChannelType channel,const MagickEvaluateOperator op,const double value, ExceptionInfo *exception) { CacheView *image_view; 
MagickBooleanType status; MagickOffsetType progress; RandomInfo **restrict random_info; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) { InheritException(exception,&image->exception); return(MagickFalse); } status=MagickTrue; progress=0; random_info=AcquireRandomInfoThreadSet(); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(ApplyEvaluateOperator( random_info[id],GetPixelRed(q),op,value))); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(ApplyEvaluateOperator( random_info[id],GetPixelGreen(q),op,value))); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(ApplyEvaluateOperator( random_info[id],GetPixelBlue(q),op,value))); if ((channel & OpacityChannel) != 0) { if (image->matte == MagickFalse) SetPixelOpacity(q,ClampToQuantum(ApplyEvaluateOperator( random_info[id],GetPixelOpacity(q),op,value))); else SetPixelAlpha(q,ClampToQuantum(ApplyEvaluateOperator( random_info[id],(Quantum) GetPixelAlpha(q),op,value))); } if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL)) SetPixelIndex(indexes+x,ClampToQuantum(ApplyEvaluateOperator( random_info[id],GetPixelIndex(indexes+x),op,value))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_EvaluateImageChannel) #endif proceed=SetImageProgress(image,EvaluateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F u n c t i o n I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FunctionImage() applies a value to the image with an arithmetic, relational, % or logical operator to an image. Use these operations to lighten or darken % an image, to increase or decrease contrast in an image, or to produce the % "negative" of an image. 
% % The format of the FunctionImageChannel method is: % % MagickBooleanType FunctionImage(Image *image, % const MagickFunction function,const ssize_t number_parameters, % const double *parameters,ExceptionInfo *exception) % MagickBooleanType FunctionImageChannel(Image *image, % const ChannelType channel,const MagickFunction function, % const ssize_t number_parameters,const double *argument, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o function: A channel function. % % o parameters: one or more parameters. % % o exception: return any errors or warnings in this structure. % */ static Quantum ApplyFunction(Quantum pixel,const MagickFunction function, const size_t number_parameters,const double *parameters, ExceptionInfo *exception) { MagickRealType result; register ssize_t i; (void) exception; result=0.0; switch (function) { case PolynomialFunction: { /* * Polynomial * Parameters: polynomial constants, highest to lowest order * For example: c0*x^3 + c1*x^2 + c2*x + c3 */ result=0.0; for (i=0; i < (ssize_t) number_parameters; i++) result = result*QuantumScale*pixel + parameters[i]; result *= QuantumRange; break; } case SinusoidFunction: { /* Sinusoid Function * Parameters: Freq, Phase, Ampl, bias */ double freq,phase,ampl,bias; freq = ( number_parameters >= 1 ) ? parameters[0] : 1.0; phase = ( number_parameters >= 2 ) ? parameters[1] : 0.0; ampl = ( number_parameters >= 3 ) ? parameters[2] : 0.5; bias = ( number_parameters >= 4 ) ? parameters[3] : 0.5; result=(MagickRealType) (QuantumRange*(ampl*sin((double) (2.0*MagickPI* (freq*QuantumScale*pixel + phase/360.0) )) + bias ) ); break; } case ArcsinFunction: { /* Arcsin Function (peged at range limits for invalid results) * Parameters: Width, Center, Range, Bias */ double width,range,center,bias; width = ( number_parameters >= 1 ) ? parameters[0] : 1.0; center = ( number_parameters >= 2 ) ? parameters[1] : 0.5; range = ( number_parameters >= 3 ) ? parameters[2] : 1.0; bias = ( number_parameters >= 4 ) ? parameters[3] : 0.5; result = 2.0/width*(QuantumScale*pixel - center); if ( result <= -1.0 ) result = bias - range/2.0; else if ( result >= 1.0 ) result = bias + range/2.0; else result=(MagickRealType) (range/MagickPI*asin((double) result)+bias); result *= QuantumRange; break; } case ArctanFunction: { /* Arctan Function * Parameters: Slope, Center, Range, Bias */ double slope,range,center,bias; slope = ( number_parameters >= 1 ) ? parameters[0] : 1.0; center = ( number_parameters >= 2 ) ? parameters[1] : 0.5; range = ( number_parameters >= 3 ) ? parameters[2] : 1.0; bias = ( number_parameters >= 4 ) ? 
parameters[3] : 0.5; result=(MagickRealType) (MagickPI*slope*(QuantumScale*pixel-center)); result=(MagickRealType) (QuantumRange*(range/MagickPI*atan((double) result) + bias ) ); break; } case UndefinedFunction: break; } return(ClampToQuantum(result)); } MagickExport MagickBooleanType FunctionImage(Image *image, const MagickFunction function,const size_t number_parameters, const double *parameters,ExceptionInfo *exception) { MagickBooleanType status; status=FunctionImageChannel(image,CompositeChannels,function,number_parameters, parameters,exception); return(status); } MagickExport MagickBooleanType FunctionImageChannel(Image *image, const ChannelType channel,const MagickFunction function, const size_t number_parameters,const double *parameters, ExceptionInfo *exception) { #define FunctionImageTag "Function/Image " CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) { InheritException(exception,&image->exception); return(MagickFalse); } status=MagickTrue; progress=0; image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,ApplyFunction(GetPixelRed(q), function,number_parameters,parameters,exception)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ApplyFunction(GetPixelGreen(q), function,number_parameters,parameters,exception)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ApplyFunction(GetPixelBlue(q), function,number_parameters,parameters,exception)); if ((channel & OpacityChannel) != 0) { if (image->matte == MagickFalse) SetPixelOpacity(q,ApplyFunction( GetPixelOpacity(q),function,number_parameters,parameters, exception)); else SetPixelAlpha(q,ApplyFunction((Quantum) GetPixelAlpha(q),function,number_parameters,parameters, exception)); } if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL)) SetPixelIndex(indexes+x,ApplyFunction(GetPixelIndex( indexes+x),function,number_parameters,parameters,exception)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FunctionImageChannel) #endif proceed=SetImageProgress(image,FunctionImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e C h a n n e l E x t r e m a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelExtrema() returns the extrema of 
one or more image channels. % % The format of the GetImageChannelExtrema method is: % % MagickBooleanType GetImageChannelExtrema(const Image *image, % const ChannelType channel,size_t *minima,size_t *maxima, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o minima: the minimum value in the channel. % % o maxima: the maximum value in the channel. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetImageExtrema(const Image *image, size_t *minima,size_t *maxima,ExceptionInfo *exception) { return(GetImageChannelExtrema(image,CompositeChannels,minima,maxima,exception)); } MagickExport MagickBooleanType GetImageChannelExtrema(const Image *image, const ChannelType channel,size_t *minima,size_t *maxima, ExceptionInfo *exception) { double max, min; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=GetImageChannelRange(image,channel,&min,&max,exception); *minima=(size_t) ceil(min-0.5); *maxima=(size_t) floor(max+0.5); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l M e a n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelMean() returns the mean and standard deviation of one or more % image channels. % % The format of the GetImageChannelMean method is: % % MagickBooleanType GetImageChannelMean(const Image *image, % const ChannelType channel,double *mean,double *standard_deviation, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o mean: the average value in the channel. % % o standard_deviation: the standard deviation of the channel. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean, double *standard_deviation,ExceptionInfo *exception) { MagickBooleanType status; status=GetImageChannelMean(image,CompositeChannels,mean,standard_deviation, exception); return(status); } MagickExport MagickBooleanType GetImageChannelMean(const Image *image, const ChannelType channel,double *mean,double *standard_deviation, ExceptionInfo *exception) { ChannelStatistics *channel_statistics; size_t channels; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); channel_statistics=GetImageChannelStatistics(image,exception); if (channel_statistics == (ChannelStatistics *) NULL) return(MagickFalse); channels=0; channel_statistics[CompositeChannels].mean=0.0; channel_statistics[CompositeChannels].standard_deviation=0.0; if ((channel & RedChannel) != 0) { channel_statistics[CompositeChannels].mean+= channel_statistics[RedChannel].mean; channel_statistics[CompositeChannels].standard_deviation+= channel_statistics[RedChannel].variance- channel_statistics[RedChannel].mean* channel_statistics[RedChannel].mean; channels++; } if ((channel & GreenChannel) != 0) { channel_statistics[CompositeChannels].mean+= channel_statistics[GreenChannel].mean; channel_statistics[CompositeChannels].standard_deviation+= channel_statistics[GreenChannel].variance- channel_statistics[GreenChannel].mean* channel_statistics[GreenChannel].mean; channels++; } if ((channel & BlueChannel) != 0) { channel_statistics[CompositeChannels].mean+= channel_statistics[BlueChannel].mean; channel_statistics[CompositeChannels].standard_deviation+= channel_statistics[BlueChannel].variance- channel_statistics[BlueChannel].mean* channel_statistics[BlueChannel].mean; channels++; } if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) { channel_statistics[CompositeChannels].mean+= channel_statistics[OpacityChannel].mean; channel_statistics[CompositeChannels].standard_deviation+= channel_statistics[OpacityChannel].variance- channel_statistics[OpacityChannel].mean* channel_statistics[OpacityChannel].mean; channels++; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { channel_statistics[CompositeChannels].mean+= channel_statistics[BlackChannel].mean; channel_statistics[CompositeChannels].standard_deviation+= channel_statistics[BlackChannel].variance- channel_statistics[BlackChannel].mean* channel_statistics[BlackChannel].mean; channels++; } channel_statistics[CompositeChannels].mean/=channels; channel_statistics[CompositeChannels].standard_deviation= sqrt(channel_statistics[CompositeChannels].standard_deviation/channels); *mean=channel_statistics[CompositeChannels].mean; *standard_deviation=channel_statistics[CompositeChannels].standard_deviation; channel_statistics=(ChannelStatistics *) RelinquishMagickMemory( channel_statistics); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l K u r t o s i s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelKurtosis() returns the kurtosis and skewness of one or more % image channels. 
% % The format of the GetImageChannelKurtosis method is: % % MagickBooleanType GetImageChannelKurtosis(const Image *image, % const ChannelType channel,double *kurtosis,double *skewness, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o kurtosis: the kurtosis of the channel. % % o skewness: the skewness of the channel. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetImageKurtosis(const Image *image, double *kurtosis,double *skewness,ExceptionInfo *exception) { MagickBooleanType status; status=GetImageChannelKurtosis(image,CompositeChannels,kurtosis,skewness, exception); return(status); } MagickExport MagickBooleanType GetImageChannelKurtosis(const Image *image, const ChannelType channel,double *kurtosis,double *skewness, ExceptionInfo *exception) { double area, mean, standard_deviation, sum_squares, sum_cubes, sum_fourth_power; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); *kurtosis=0.0; *skewness=0.0; area=0.0; mean=0.0; standard_deviation=0.0; sum_squares=0.0; sum_cubes=0.0; sum_fourth_power=0.0; for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetVirtualIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) { mean+=GetPixelRed(p); sum_squares+=(double) GetPixelRed(p)*GetPixelRed(p); sum_cubes+=(double) GetPixelRed(p)*GetPixelRed(p)* GetPixelRed(p); sum_fourth_power+=(double) GetPixelRed(p)* GetPixelRed(p)*GetPixelRed(p)* GetPixelRed(p); area++; } if ((channel & GreenChannel) != 0) { mean+=GetPixelGreen(p); sum_squares+=(double) GetPixelGreen(p)* GetPixelGreen(p); sum_cubes+=(double) GetPixelGreen(p)* GetPixelGreen(p)*GetPixelGreen(p); sum_fourth_power+=(double) GetPixelGreen(p)* GetPixelGreen(p)*GetPixelGreen(p)* GetPixelGreen(p); area++; } if ((channel & BlueChannel) != 0) { mean+=GetPixelBlue(p); sum_squares+=(double) GetPixelBlue(p)* GetPixelBlue(p); sum_cubes+=(double) GetPixelBlue(p)*GetPixelBlue(p)* GetPixelBlue(p); sum_fourth_power+=(double) GetPixelBlue(p)* GetPixelBlue(p)*GetPixelBlue(p)* GetPixelBlue(p); area++; } if ((channel & OpacityChannel) != 0) { mean+=GetPixelOpacity(p); sum_squares+=(double) GetPixelOpacity(p)* GetPixelOpacity(p); sum_cubes+=(double) GetPixelOpacity(p)* GetPixelOpacity(p)*GetPixelOpacity(p); sum_fourth_power+=(double) GetPixelOpacity(p)* GetPixelOpacity(p)*GetPixelOpacity(p)* GetPixelOpacity(p); area++; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { mean+=GetPixelIndex(indexes+x); sum_squares+=(double) GetPixelIndex(indexes+x)* GetPixelIndex(indexes+x); sum_cubes+=(double) GetPixelIndex(indexes+x)* GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x); sum_fourth_power+=(double) GetPixelIndex(indexes+x)* GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)* GetPixelIndex(indexes+x); area++; } p++; } } if (y < (ssize_t) image->rows) return(MagickFalse); if (area != 0.0) { mean/=area; sum_squares/=area; sum_cubes/=area; sum_fourth_power/=area; } standard_deviation=sqrt(sum_squares-(mean*mean)); if (standard_deviation != 0.0) { 
*kurtosis=sum_fourth_power-4.0*mean*sum_cubes+6.0*mean*mean*sum_squares- 3.0*mean*mean*mean*mean; *kurtosis/=standard_deviation*standard_deviation*standard_deviation* standard_deviation; *kurtosis-=3.0; *skewness=sum_cubes-3.0*mean*sum_squares+2.0*mean*mean*mean; *skewness/=standard_deviation*standard_deviation*standard_deviation; } return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l R a n g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelRange() returns the range of one or more image channels. % % The format of the GetImageChannelRange method is: % % MagickBooleanType GetImageChannelRange(const Image *image, % const ChannelType channel,double *minima,double *maxima, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o minima: the minimum value in the channel. % % o maxima: the maximum value in the channel. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetImageRange(const Image *image, double *minima,double *maxima,ExceptionInfo *exception) { return(GetImageChannelRange(image,CompositeChannels,minima,maxima,exception)); } MagickExport MagickBooleanType GetImageChannelRange(const Image *image, const ChannelType channel,double *minima,double *maxima, ExceptionInfo *exception) { MagickPixelPacket pixel; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); *maxima=(-1.0E-37); *minima=1.0E+37; GetMagickPixelPacket(image,&pixel); for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetVirtualIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,p,indexes+x,&pixel); if ((channel & RedChannel) != 0) { if (pixel.red < *minima) *minima=(double) pixel.red; if (pixel.red > *maxima) *maxima=(double) pixel.red; } if ((channel & GreenChannel) != 0) { if (pixel.green < *minima) *minima=(double) pixel.green; if (pixel.green > *maxima) *maxima=(double) pixel.green; } if ((channel & BlueChannel) != 0) { if (pixel.blue < *minima) *minima=(double) pixel.blue; if (pixel.blue > *maxima) *maxima=(double) pixel.blue; } if ((channel & OpacityChannel) != 0) { if (pixel.opacity < *minima) *minima=(double) pixel.opacity; if (pixel.opacity > *maxima) *maxima=(double) pixel.opacity; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { if ((double) GetPixelIndex(indexes+x) < *minima) *minima=(double) GetPixelIndex(indexes+x); if ((double) GetPixelIndex(indexes+x) > *maxima) *maxima=(double) GetPixelIndex(indexes+x); } p++; } } return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l S t a t i s t i c s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelStatistics() returns statistics for each channel in the % image. 
The statistics include the channel depth, its minima, maxima, mean, % standard deviation, kurtosis and skewness. You can access the red channel % mean, for example, like this: % % channel_statistics=GetImageChannelStatistics(image,exception); % red_mean=channel_statistics[RedChannel].mean; % % Use MagickRelinquishMemory() to free the statistics buffer. % % The format of the GetImageChannelStatistics method is: % % ChannelStatistics *GetImageChannelStatistics(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport ChannelStatistics *GetImageChannelStatistics(const Image *image, ExceptionInfo *exception) { ChannelStatistics *channel_statistics; double area; MagickStatusType status; QuantumAny range; register ssize_t i; size_t channels, depth, length; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); length=CompositeChannels+1UL; channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(length, sizeof(*channel_statistics)); if (channel_statistics == (ChannelStatistics *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(channel_statistics,0,length* sizeof(*channel_statistics)); for (i=0; i <= (ssize_t) CompositeChannels; i++) { channel_statistics[i].depth=1; channel_statistics[i].maxima=(-1.0E-37); channel_statistics[i].minima=1.0E+37; } for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetVirtualIndexQueue(image); for (x=0; x < (ssize_t) image->columns; ) { if (channel_statistics[RedChannel].depth != MAGICKCORE_QUANTUM_DEPTH) { depth=channel_statistics[RedChannel].depth; range=GetQuantumRange(depth); status=GetPixelRed(p) != ScaleAnyToQuantum( ScaleQuantumToAny(GetPixelRed(p),range),range) ? MagickTrue : MagickFalse; if (status != MagickFalse) { channel_statistics[RedChannel].depth++; continue; } } if (channel_statistics[GreenChannel].depth != MAGICKCORE_QUANTUM_DEPTH) { depth=channel_statistics[GreenChannel].depth; range=GetQuantumRange(depth); status=GetPixelGreen(p) != ScaleAnyToQuantum( ScaleQuantumToAny(GetPixelGreen(p),range),range) ? MagickTrue : MagickFalse; if (status != MagickFalse) { channel_statistics[GreenChannel].depth++; continue; } } if (channel_statistics[BlueChannel].depth != MAGICKCORE_QUANTUM_DEPTH) { depth=channel_statistics[BlueChannel].depth; range=GetQuantumRange(depth); status=GetPixelBlue(p) != ScaleAnyToQuantum( ScaleQuantumToAny(GetPixelBlue(p),range),range) ? MagickTrue : MagickFalse; if (status != MagickFalse) { channel_statistics[BlueChannel].depth++; continue; } } if (image->matte != MagickFalse) { if (channel_statistics[OpacityChannel].depth != MAGICKCORE_QUANTUM_DEPTH) { depth=channel_statistics[OpacityChannel].depth; range=GetQuantumRange(depth); status=GetPixelOpacity(p) != ScaleAnyToQuantum( ScaleQuantumToAny(GetPixelOpacity(p),range),range) ? 
MagickTrue : MagickFalse; if (status != MagickFalse) { channel_statistics[OpacityChannel].depth++; continue; } } } if (image->colorspace == CMYKColorspace) { if (channel_statistics[BlackChannel].depth != MAGICKCORE_QUANTUM_DEPTH) { depth=channel_statistics[BlackChannel].depth; range=GetQuantumRange(depth); status=GetPixelIndex(indexes+x) != ScaleAnyToQuantum(ScaleQuantumToAny(GetPixelIndex( indexes+x),range),range) ? MagickTrue : MagickFalse; if (status != MagickFalse) { channel_statistics[BlackChannel].depth++; continue; } } } if ((double) GetPixelRed(p) < channel_statistics[RedChannel].minima) channel_statistics[RedChannel].minima=(double) GetPixelRed(p); if ((double) GetPixelRed(p) > channel_statistics[RedChannel].maxima) channel_statistics[RedChannel].maxima=(double) GetPixelRed(p); channel_statistics[RedChannel].sum+=GetPixelRed(p); channel_statistics[RedChannel].sum_squared+=(double) GetPixelRed(p)* GetPixelRed(p); channel_statistics[RedChannel].sum_cubed+=(double) GetPixelRed(p)*GetPixelRed(p)* GetPixelRed(p); channel_statistics[RedChannel].sum_fourth_power+=(double) GetPixelRed(p)*GetPixelRed(p)* GetPixelRed(p)*GetPixelRed(p); if ((double) GetPixelGreen(p) < channel_statistics[GreenChannel].minima) channel_statistics[GreenChannel].minima=(double) GetPixelGreen(p); if ((double) GetPixelGreen(p) > channel_statistics[GreenChannel].maxima) channel_statistics[GreenChannel].maxima=(double) GetPixelGreen(p); channel_statistics[GreenChannel].sum+=GetPixelGreen(p); channel_statistics[GreenChannel].sum_squared+=(double) GetPixelGreen(p)*GetPixelGreen(p); channel_statistics[GreenChannel].sum_cubed+=(double) GetPixelGreen(p)*GetPixelGreen(p)* GetPixelGreen(p); channel_statistics[GreenChannel].sum_fourth_power+=(double) GetPixelGreen(p)*GetPixelGreen(p)* GetPixelGreen(p)*GetPixelGreen(p); if ((double) GetPixelBlue(p) < channel_statistics[BlueChannel].minima) channel_statistics[BlueChannel].minima=(double) GetPixelBlue(p); if ((double) GetPixelBlue(p) > channel_statistics[BlueChannel].maxima) channel_statistics[BlueChannel].maxima=(double) GetPixelBlue(p); channel_statistics[BlueChannel].sum+=GetPixelBlue(p); channel_statistics[BlueChannel].sum_squared+=(double) GetPixelBlue(p)*GetPixelBlue(p); channel_statistics[BlueChannel].sum_cubed+=(double) GetPixelBlue(p)*GetPixelBlue(p)* GetPixelBlue(p); channel_statistics[BlueChannel].sum_fourth_power+=(double) GetPixelBlue(p)*GetPixelBlue(p)* GetPixelBlue(p)*GetPixelBlue(p); if (image->matte != MagickFalse) { if ((double) GetPixelOpacity(p) < channel_statistics[OpacityChannel].minima) channel_statistics[OpacityChannel].minima=(double) GetPixelOpacity(p); if ((double) GetPixelOpacity(p) > channel_statistics[OpacityChannel].maxima) channel_statistics[OpacityChannel].maxima=(double) GetPixelOpacity(p); channel_statistics[OpacityChannel].sum+=GetPixelOpacity(p); channel_statistics[OpacityChannel].sum_squared+=(double) GetPixelOpacity(p)*GetPixelOpacity(p); channel_statistics[OpacityChannel].sum_cubed+=(double) GetPixelOpacity(p)*GetPixelOpacity(p)* GetPixelOpacity(p); channel_statistics[OpacityChannel].sum_fourth_power+=(double) GetPixelOpacity(p)*GetPixelOpacity(p)* GetPixelOpacity(p)*GetPixelOpacity(p); } if (image->colorspace == CMYKColorspace) { if ((double) GetPixelIndex(indexes+x) < channel_statistics[BlackChannel].minima) channel_statistics[BlackChannel].minima=(double) GetPixelIndex(indexes+x); if ((double) GetPixelIndex(indexes+x) > channel_statistics[BlackChannel].maxima) channel_statistics[BlackChannel].maxima=(double) GetPixelIndex(indexes+x); 
channel_statistics[BlackChannel].sum+= GetPixelIndex(indexes+x); channel_statistics[BlackChannel].sum_squared+=(double) GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x); channel_statistics[BlackChannel].sum_cubed+=(double) GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)* GetPixelIndex(indexes+x); channel_statistics[BlackChannel].sum_fourth_power+=(double) GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)* GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x); } x++; p++; } } area=(double) image->columns*image->rows; for (i=0; i < (ssize_t) CompositeChannels; i++) { channel_statistics[i].sum/=area; channel_statistics[i].sum_squared/=area; channel_statistics[i].sum_cubed/=area; channel_statistics[i].sum_fourth_power/=area; channel_statistics[i].mean=channel_statistics[i].sum; channel_statistics[i].variance=channel_statistics[i].sum_squared; channel_statistics[i].standard_deviation=sqrt( channel_statistics[i].variance-(channel_statistics[i].mean* channel_statistics[i].mean)); } for (i=0; i < (ssize_t) CompositeChannels; i++) { channel_statistics[CompositeChannels].depth=(size_t) MagickMax((double) channel_statistics[CompositeChannels].depth,(double) channel_statistics[i].depth); channel_statistics[CompositeChannels].minima=MagickMin( channel_statistics[CompositeChannels].minima, channel_statistics[i].minima); channel_statistics[CompositeChannels].maxima=MagickMax( channel_statistics[CompositeChannels].maxima, channel_statistics[i].maxima); channel_statistics[CompositeChannels].sum+=channel_statistics[i].sum; channel_statistics[CompositeChannels].sum_squared+= channel_statistics[i].sum_squared; channel_statistics[CompositeChannels].sum_cubed+= channel_statistics[i].sum_cubed; channel_statistics[CompositeChannels].sum_fourth_power+= channel_statistics[i].sum_fourth_power; channel_statistics[CompositeChannels].mean+=channel_statistics[i].mean; channel_statistics[CompositeChannels].variance+= channel_statistics[i].variance-channel_statistics[i].mean* channel_statistics[i].mean; channel_statistics[CompositeChannels].standard_deviation+= channel_statistics[i].variance-channel_statistics[i].mean* channel_statistics[i].mean; } channels=3; if (image->matte != MagickFalse) channels++; if (image->colorspace == CMYKColorspace) channels++; channel_statistics[CompositeChannels].sum/=channels; channel_statistics[CompositeChannels].sum_squared/=channels; channel_statistics[CompositeChannels].sum_cubed/=channels; channel_statistics[CompositeChannels].sum_fourth_power/=channels; channel_statistics[CompositeChannels].mean/=channels; channel_statistics[CompositeChannels].variance/=channels; channel_statistics[CompositeChannels].standard_deviation= sqrt(channel_statistics[CompositeChannels].standard_deviation/channels); channel_statistics[CompositeChannels].kurtosis/=channels; channel_statistics[CompositeChannels].skewness/=channels; for (i=0; i <= (ssize_t) CompositeChannels; i++) { if (channel_statistics[i].standard_deviation == 0.0) continue; channel_statistics[i].skewness=(channel_statistics[i].sum_cubed- 3.0*channel_statistics[i].mean*channel_statistics[i].sum_squared+ 2.0*channel_statistics[i].mean*channel_statistics[i].mean* channel_statistics[i].mean)/(channel_statistics[i].standard_deviation* channel_statistics[i].standard_deviation* channel_statistics[i].standard_deviation); channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power- 4.0*channel_statistics[i].mean*channel_statistics[i].sum_cubed+ 6.0*channel_statistics[i].mean*channel_statistics[i].mean* 
channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean* channel_statistics[i].mean*1.0*channel_statistics[i].mean* channel_statistics[i].mean)/(channel_statistics[i].standard_deviation* channel_statistics[i].standard_deviation* channel_statistics[i].standard_deviation* channel_statistics[i].standard_deviation)-3.0; } return(channel_statistics); }
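GetImageChannelStatistics() above accumulates each channel as raw power sums (sum, sum_squared, sum_cubed, sum_fourth_power) and only converts them to central moments at the end. A minimal standalone sketch of that conversion, using the same formulas but hypothetical names that are not part of MagickCore:

#include <math.h>

/* Hypothetical helper: turn raw power sums (already divided by the sample
   count, as in GetImageChannelStatistics) into mean, standard deviation,
   skewness and excess kurtosis. */
static void moments_from_sums(double sum,double sum_squared,double sum_cubed,
  double sum_fourth_power,double *mean,double *standard_deviation,
  double *skewness,double *kurtosis)
{
  double m = sum;                               /* E[x] */
  double variance = sum_squared-m*m;            /* E[x^2]-E[x]^2 */
  double s = sqrt(variance);
  *mean=m;
  *standard_deviation=s;
  if (s == 0.0) { *skewness=0.0; *kurtosis=0.0; return; }
  *skewness=(sum_cubed-3.0*m*sum_squared+2.0*m*m*m)/(s*s*s);
  *kurtosis=(sum_fourth_power-4.0*m*sum_cubed+6.0*m*m*sum_squared-
    3.0*m*m*m*m)/(s*s*s*s)-3.0;                 /* excess kurtosis */
}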
GB_critical_section.c
//------------------------------------------------------------------------------ // Source/Template/GB_critical_section: execute code in a critical section //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // All access to the global matrix queue, via GB_queue_* operations, must // be done through a critical section. No other part of SuiteSparse:GraphBLAS // uses this critical section; it is only used for accessing the global matrix // queue via GB_queue_*. All GB_queue_* operations use the GB_CRITICAL macro // to check the result, and if the critical section fails (ok == false), // they return GrB_PANIC. // Critical sections for Windows threads and ANSI C11 threads are listed below // as drafts, but these threading models are not yet supported. // not parallel: this function does O(1) work and is already thread-safe. { //-------------------------------------------------------------------------- // POSIX pthreads //-------------------------------------------------------------------------- #if defined (USER_POSIX_THREADS) { if (GB_Global.user_multithreaded) { ok = (pthread_mutex_lock (&GB_sync) == 0) ; } GB_CRITICAL_SECTION ; if (GB_Global.user_multithreaded) { ok = ok && (pthread_mutex_unlock (&GB_sync) == 0) ; } } //-------------------------------------------------------------------------- // Microsoft Windows //-------------------------------------------------------------------------- #elif defined (USER_WINDOWS_THREADS) { // This is not yet supported. if (GB_Global.user_multithreaded) { EnterCriticalSection (&GB_sync) ; } GB_CRITICAL_SECTION ; if (GB_Global.user_multithreaded) { LeaveCriticalSection (&GB_sync) ; } } //-------------------------------------------------------------------------- // ANSI C11 threads //-------------------------------------------------------------------------- #elif defined (USER_ANSI_THREADS) { // This should work per the ANSI C11 Spec, but is not yet supported. if (GB_Global.user_multithreaded) { ok = (mtx_lock (&GB_sync) == thrd_success) ; } GB_CRITICAL_SECTION ; if (GB_Global.user_multithreaded) { ok = ok && (mtx_unlock (&GB_sync) == thrd_success) ; } } //-------------------------------------------------------------------------- // OpenMP //-------------------------------------------------------------------------- #else // USER_OPENMP_THREADS or USER_NO_THREADS { // default: use a named OpenMP critical section. If OpenMP is not // available, then the #pragma is ignored and this becomes vanilla, // single-threaded code. #pragma omp critical (GB_critical_section) GB_CRITICAL_SECTION ; } #endif } #undef GB_CRITICAL_SECTION
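In the default OpenMP branch above, the caller supplies the code to be serialized through the GB_CRITICAL_SECTION macro, and the named critical construct provides the mutual exclusion. A minimal self-contained illustration of that pattern, with hypothetical names that are not part of GraphBLAS:

#include <omp.h>
#include <stdio.h>

static int queue [1024] ;
static int queue_len = 0 ;

/* body to execute atomically; GraphBLAS supplies this via GB_CRITICAL_SECTION */
#define DEMO_CRITICAL_SECTION { queue [queue_len++] = item ; }

int main (void)
{
    #pragma omp parallel for
    for (int item = 0 ; item < 100 ; item++)
    {
        // all critical regions sharing the same name are mutually exclusive,
        // which is all the global matrix queue requires
        #pragma omp critical (demo_critical_section)
        DEMO_CRITICAL_SECTION ;
    }
    printf ("queued %d items\n", queue_len) ;
    return (0) ;
}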
static-chunk-size-one.c
/* { dg-do compile } */ /* { dg-options "-fopenmp -O2 -fdump-tree-optimized -fno-tree-pre" } */ int bar () { int a = 0, i; #pragma omp parallel for num_threads (3) reduction (+:a) schedule(static, 1) for (i = 0; i < 10; i++) a += i; return a; } /* Two phis for reduction, one in loop header, one in loop exit. One phi for iv in loop header. */ /* { dg-final { scan-tree-dump-times "PHI" 3 "optimized" } } */
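schedule(static, 1) deals the iterations out round-robin in chunks of one, so with num_threads(3) thread t executes iterations t, t+3, t+6, and so on. A small sketch, independent of the test above, that records this mapping:

#include <stdio.h>
#include <omp.h>

int
main ()
{
  int owner[10], i;
#pragma omp parallel for num_threads (3) schedule(static, 1)
  for (i = 0; i < 10; i++)
    owner[i] = omp_get_thread_num ();
  /* Expected owners: 0 1 2 0 1 2 0 1 2 0.  */
  for (i = 0; i < 10; i++)
    printf ("iteration %d ran on thread %d\n", i, owner[i]);
  return 0;
}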
GB_binop__land_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__land_int8) // A.*B function (eWiseMult): GB (_AemultB_08__land_int8) // A.*B function (eWiseMult): GB (_AemultB_02__land_int8) // A.*B function (eWiseMult): GB (_AemultB_04__land_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__land_int8) // A*D function (colscale): GB (_AxD__land_int8) // D*A function (rowscale): GB (_DxB__land_int8) // C+=B function (dense accum): GB (_Cdense_accumB__land_int8) // C+=b function (dense accum): GB (_Cdense_accumb__land_int8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_int8) // C=scalar+B GB (_bind1st__land_int8) // C=scalar+B' GB (_bind1st_tran__land_int8) // C=A+scalar GB (_bind2nd__land_int8) // C=A'+scalar GB (_bind2nd_tran__land_int8) // C type: int8_t // A type: int8_t // A pattern? 0 // B type: int8_t // B pattern? 0 // BinaryOp: cij = ((aij != 0) && (bij != 0)) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ((x != 0) && (y != 0)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LAND || GxB_NO_INT8 || GxB_NO_LAND_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__land_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__land_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__land_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__land_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__land_int8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__land_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int8_t alpha_scalar ; int8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int8_t *) alpha_scalar_in)) ; beta_scalar = (*((int8_t *) 
beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__land_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__land_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__land_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__land_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__land_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = ((x != 0) && (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__land_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = GBX (Ax, p, false) ; Cx [p] = ((aij != 0) && (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((x != 0) && (aij != 0)) ; \ } GrB_Info GB (_bind1st_tran__land_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int 
nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((aij != 0) && (y != 0)) ; \ } GrB_Info GB (_bind2nd_tran__land_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
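Stripped of the GBB/GBX bitmap and iso-value macros, the bind2nd kernel above is an elementwise logical AND of every stored entry of A against the scalar y. A plain-C sketch of that inner loop, with illustrative names rather than the generated GraphBLAS API:

#include <stdint.h>

// cij = ((aij != 0) && (y != 0)) for every entry, as in GB_BINOP above
static void land_int8_bind2nd (int8_t *Cx, const int8_t *Ax, int8_t y,
    int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int8_t aij = Ax [p] ;
        Cx [p] = ((aij != 0) && (y != 0)) ;
    }
}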
cache_conflict.c
/* * Benchmark to force severe cache conflicts * * * * * * */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <omp.h> #ifdef USE_CALI #include <caliper/cali.h> #endif #ifndef SIZE #define SIZE 16384 // 2x doubles in A64FX L1 #endif #ifndef NUM_OPS #define NUM_OPS 268435456 #endif #define AVAL 3.0 // initial value of A #define BVAL 5.0 // initial value of B #define TOL 0.001 // tolerance used to check the result #define TYPE double #define TRUE 1 #define FALSE 0 struct Inputs { int threads; int offset; }; void loop_over_data(TYPE* A, size_t size, size_t offset, size_t num_loops); void get_input(int argc, char **argv, struct Inputs* input); void vector_init(TYPE** A, TYPE** B, TYPE** C, size_t row_len); void vector_free(TYPE* A, TYPE* B, TYPE* C, size_t size); void print_mat(TYPE* C); // main function int main(int argc, char **argv) { size_t i,j,k,r; size_t size,offset,num_loops; double run_time, start, end; struct Inputs input; get_input(argc, argv, &input); omp_set_num_threads(input.threads); #ifdef USE_CALI cali_id_t thread_attr = cali_create_attribute("thread_id", CALI_TYPE_INT, CALI_ATTR_ASVALUE | CALI_ATTR_SKIP_EVENTS); #pragma omp parallel { cali_set_int(thread_attr, omp_get_thread_num()); } #endif TYPE *A, *B, *C; size = SIZE; offset = input.offset; num_loops = NUM_OPS * offset / size; vector_init(&A, &B, &C, size); printf("Settings:\n"); printf(" Size: %d\n", size); printf(" Num Ops: %d\n", NUM_OPS); printf(" Offset: %d\n", offset); printf(" Num Loops: %d\n", num_loops); start = omp_get_wtime(); loop_over_data(A, size, offset, num_loops); end = omp_get_wtime(); vector_free(A,B,C,size); printf("Run time: %f\n", end - start); return 0; } void loop_over_data(TYPE* A, size_t size, size_t offset, size_t num_loops) { TYPE scale = 1.012; for (int i = 0; i < num_loops; i++) { for (int j = 0; j < size; j += offset) { A[j] = scale*A[j]; } } } /*************************************************************\ Utility Functions \*************************************************************/ void get_input(int argc, char **argv, struct Inputs* input) { int i = 1; input->offset = 16; input->threads = 4; for(i = 1; i < argc; i++) { if ( !(strcmp("-t", argv[i])) || !(strcmp("--threads", argv[i])) ) { if (i++ < argc){ input->threads = atoi(argv[i]); } else { printf("Please include a thread count that option\n"); exit(1); } } if ( !(strcmp("-o", argv[i])) || !(strcmp("--offset", argv[i])) ) { if (i++ < argc){ input->offset = atoi(argv[i]); } else { printf("Please include an integer that option\n"); exit(1); } } } } // Initialize the vectors (uniform values to make an easier check) void vector_init(TYPE** A, TYPE** B, TYPE** C, size_t row_len) { size_t i, j; if( ((row_len) % 64) != 0 ) { printf("ERROR aligning memory; make sure size is multiple of 64 bytes.\n"); exit(1); } (*A) = (TYPE*)aligned_alloc(64, row_len*sizeof(TYPE)); (*B) = (TYPE*)aligned_alloc(64, row_len*sizeof(TYPE)); (*C) = (TYPE*)aligned_alloc(64, row_len*sizeof(TYPE)); if( ((*A) == NULL) || ((*B) == NULL) || ((*C) == NULL) ) { printf("ERROR allocating memory\n"); exit(1); } for (j=0; j<row_len; j++) { (*A)[j] = AVAL; (*B)[j] = BVAL; (*C)[j] = 0.0; } } void vector_free(TYPE* A, TYPE* B, TYPE* C, size_t size) { free(A); free(B); free(C); }
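loop_over_data only touches A[0], A[offset], A[2*offset], ..., so whenever offset*sizeof(double) is a multiple of the cache line size times the number of sets, every access maps to the same set of a set-associative cache and conflict misses dominate. A small sketch that prints which set each touched element falls into; the cache geometry here is an assumption, not queried from the hardware:

#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    // assumed L1 geometry: 64-byte lines, 64 sets (a 16 KiB 4-way cache)
    const size_t line_bytes = 64, num_sets = 64;
    size_t offset = (argc > 1) ? (size_t)atoi(argv[1]) : 512; // offset in doubles

    for (size_t j = 0; j < 8*offset; j += offset) {
        size_t byte_addr = j*sizeof(double);
        size_t set = (byte_addr/line_bytes) % num_sets;
        printf("A[%6zu] -> set %3zu\n", j, set);
    }
    return 0;
}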
reduce_demo.c
//------------------------------------------------------------------------------ // GraphBLAS/Demo/Program/reduce_demo: reduce a matrix to a scalar //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ #include "GraphBLAS.h" // #define N 65536 #define N 16384 int main (void) { #if defined ( _OPENMP ) double t0 = omp_get_wtime ( ) ; #endif // start GraphBLAS GrB_init (GrB_NONBLOCKING) ; int nthreads ; GxB_get (GxB_NTHREADS, &nthreads) ; printf ("demo: reduce a matrix to a scalar, nthreads: %d\n", nthreads) ; int nthreads_max ; GxB_Global_Option_get (GxB_NTHREADS, &nthreads_max) ; printf ("# of threads: %d\n", nthreads_max) ; #if defined ( _OPENMP ) t0 = omp_get_wtime ( ) - t0 ; printf ("GPU warmup time: %g\n", t0) ; t0 = omp_get_wtime ( ) ; #endif GrB_Index nrows = N ; GrB_Index ncols = N ; GrB_Matrix A ; GrB_Matrix_new (&A, GrB_INT64, nrows, ncols) ; GrB_Index *I = malloc (nrows * ncols * sizeof (GrB_Index)) ; GrB_Index *J = malloc (nrows * ncols * sizeof (GrB_Index)) ; int64_t *X = malloc (nrows * ncols * sizeof (int64_t)) ; int64_t k ; #pragma omp parallel for num_threads(nthreads_max) schedule(static) for (k = 0 ; k < N*N ; k++) { // k = i * N + j ; int64_t i = k / N ; int64_t j = k % N ; // int x = (int) (rand ( ) & 0xFF) ; int x = (int) (k & 0xFF) ; I [k] = i ; J [k] = j ; X [k] = x ; } GrB_Index nvals = N*N ; GrB_Matrix_build_INT64 (A, I, J, X, nvals, GrB_PLUS_INT64) ; free (I) ; free (J) ; free (X) ; #if defined ( _OPENMP ) t0 = omp_get_wtime ( ) - t0 ; printf ("time to create matrix: %g\n", t0) ; #endif GrB_Index result ; double t1 ; printf ("\nreduce to a scalar:\n") ; for (int nthreads = 1 ; nthreads <= nthreads_max ; nthreads++) { GxB_Global_Option_set (GxB_NTHREADS, nthreads) ; #if defined ( _OPENMP ) double t = omp_get_wtime ( ) ; #endif GrB_Matrix_reduce_UINT64 (&result, NULL, GxB_PLUS_INT64_MONOID, A, NULL) ; #if defined ( _OPENMP ) t = omp_get_wtime ( ) - t ; if (nthreads == 1) t1 = t ; printf ("nthreads %3d time: %12.6f speedup %8.2f\n", nthreads, t, t1/t) ; #endif } printf ("result %"PRId64"\n", result) ; // free everyting GrB_Matrix_free (&A) ; GrB_finalize ( ) ; }
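Since the demo fills X with k & 0xFF, the reduction result is deterministic, so it can be cross-checked against a plain OpenMP reduction over the same values. A minimal standalone sketch of that check (not part of the demo):

#include <stdio.h>
#include <stdint.h>

#define N 16384

int main (void)
{
    int64_t k, sum = 0 ;
    #pragma omp parallel for reduction(+:sum) schedule(static)
    for (k = 0 ; k < (int64_t) N * N ; k++)
    {
        sum += (k & 0xFF) ;
    }
    // should agree with the 'result' printed by reduce_demo for the same N
    printf ("expected result %lld\n", (long long) sum) ;
    return (0) ;
}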
matrixadd_column.c
#include<stdio.h>
#include<stdlib.h>
#include "generic.h"

#define size 10000
#define NT 8

int A[size][size];
int B[size][size];
int C[size][size];
int flag[size]; // unused in this benchmark

int main(int argc, char *argv[]){
	if(argc < 2){
		printf("Usage: %s <integer-seed-value>\n", argv[0]);
		return 1;
	}
	srand(atoi(argv[1])); // seed the random number generator with the command-line integer
	// fill A and B with random values
	for(int i=0;i<size;i++){
		for(int j=0;j<size;j++){
			A[i][j]=rand()%1048576;
			B[i][j]=rand()%1048576;
		}
	}
	double t1=rtclock();
	#pragma omp parallel for num_threads(NT)
	for(int i=0;i<size;i++)
		for(int j=0;j<size;j++)
			C[j][i]=A[j][i]+B[j][i]; // column-order accesses: each step strides a full row
	double t2=rtclock();
	printf("\nTIME =%f \n",(t2-t1)*1000); // milliseconds
	return 0;
}
/* Run: executable-path <integer-seed-value>
 * example: ./a.out 3
 */
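The kernel above parallelizes the outer i loop but indexes C[j][i]=A[j][i]+B[j][i], so every step jumps a full row in memory and spatial locality is lost; given the file name, that strided pattern appears to be the point of the benchmark. For comparison, a row-contiguous variant of the same addition (a sketch, not part of the benchmark):

// row-major friendly version: each thread owns whole rows of C and the
// inner j loop walks memory contiguously
static void matrixadd_row(int n, int A[n][n], int B[n][n], int C[n][n])
{
	#pragma omp parallel for
	for(int i=0;i<n;i++)
		for(int j=0;j<n;j++)
			C[i][j]=A[i][j]+B[i][j];
}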
OpenmpUtil.h
#ifndef OPENMP_UTILITIES
#define OPENMP_UTILITIES

//#include <stdio.h>
#include <omp.h>
#include <assert.h>
#include <stdbool.h> // required for 'bool' when this header is compiled as C

// Returns the size of the default OpenMP thread team: thread 0 reports
// omp_get_num_threads() from inside a parallel region.
int get_CPU_num_threads()
{
  int procs;
  #pragma omp parallel shared(procs)
  {
    int th_id = omp_get_thread_num();
    if(th_id == 0)
    {
      procs = omp_get_num_threads();
    }
  }
  bool valid_procs = (procs > 0) && (procs <= 1024);
  assert(valid_procs && "Number of threads NOT in {1, ..., 1024}");
  return procs;
}

#endif //OPENMP_UTILITIES
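A typical use of the helper above, assuming the header is on the include path and the program is built with OpenMP enabled (the driver below is illustrative):

#include <stdio.h>
#include "OpenmpUtil.h"

int main()
{
  int procs = get_CPU_num_threads();
  printf("OpenMP team size: %d\n", procs);
  return 0;
}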
kacz_hybrid_split.c
#include "ghost/kacz_hybrid_split.h" #include <limits.h> typedef enum { MIN_LOWER = 0, MAX_LOWER = 1, MIN_UPPER = 2, MAX_UPPER = 3 }zone_extrema; //returns the virtual column index; ie takes into account the permutation of halo elements also #define virtual_col(col_idx)\ (mat->context->flags & GHOST_PERM_NO_DISTINCTION)?( (col_ptr[col_idx]<(mat->context->col_map->dim-mat->context->col_map->nhalo))?col_ptr[col_idx]:mat->context->col_map->loc_perm[col_ptr[col_idx]] ):col_ptr[col_idx]\ ghost_error find_zone_extrema(ghost_sparsemat *mat, int **extrema, ghost_lidx a, ghost_lidx b) { //for SELL-1-1 ghost_lidx *chunk_ptr = mat->chunkStart; ghost_lidx *col_ptr = mat->col;//virtual_col would be used ghost_error ret = GHOST_SUCCESS; GHOST_CALL_GOTO(ghost_malloc((void **)extrema,sizeof(int)*4),err,ret); ghost_lidx max_lower = 0; ghost_lidx max_upper = 0; ghost_lidx min_lower = INT_MAX; ghost_lidx min_upper = INT_MAX; ghost_lidx chunk = 0; ghost_lidx rowinchunk = 0; ghost_lidx chunkheight = mat->traits.C; //TODO work on virtual columns for(int i=a; i< b; ++i) { chunk = i/chunkheight; rowinchunk = i%chunkheight; min_lower = MIN( min_lower, virtual_col(chunk_ptr[chunk])+rowinchunk); max_lower = MAX( max_lower, virtual_col(chunk_ptr[chunk])+rowinchunk); min_upper = MIN( min_upper, virtual_col(chunk_ptr[chunk]+rowinchunk+chunkheight*(mat->chunkLen[chunk]-1)) ); max_upper = MAX( max_upper, virtual_col(chunk_ptr[chunk]+rowinchunk+chunkheight*(mat->chunkLen[chunk]-1)) ); } (*extrema)[MIN_LOWER] = min_lower; (*extrema)[MAX_LOWER] = max_lower; (*extrema)[MIN_UPPER] = min_upper; (*extrema)[MAX_UPPER] = max_upper; goto out; err: out: return ret; } //This check is not necessary since this should not fail, if implemented correctly //Right now this works only for 1 processor since if multiple processor the remote entries gets moved to the end ghost_error checker(ghost_sparsemat *mat) { ghost_error ret = GHOST_SUCCESS; ghost_lidx *zones = mat->context->zone_ptr; //for SELL-1-1 ghost_lidx *row_ptr = mat->chunkStart; //TODO give virtual columns ghost_lidx *col_ptr = mat->col; int *extrema_pure, *extrema_red, *extrema_black, *extrema_trans, *extrema_trans_1, *extrema_trans_2; int pure_min, pure_max, red_min, red_max, black_min, black_max, trans_min, trans_max; find_zone_extrema(mat, &extrema_pure, zones[0], zones[1]); find_zone_extrema(mat, &extrema_red, zones[1], zones[2]); find_zone_extrema(mat, &extrema_trans, zones[2], zones[3]); find_zone_extrema(mat, &extrema_black, zones[3], zones[4]); for(int i=1; i<mat->context->kacz_setting.active_threads; ++i) { pure_max = extrema_pure[MAX_UPPER]; free(extrema_pure); find_zone_extrema(mat, &extrema_pure, zones[4*i], zones[4*i+1]); pure_min = extrema_pure[MIN_LOWER]; //check pure zones if( (zones[4*i] != zones[4*i+1]) && ((zones[4*(i-1)] != zones[4*(i-1)+1])) && pure_min <= pure_max ) { ret = GHOST_ERR_BLOCKCOLOR; GHOST_ERROR_LOG("ERR 1"); GHOST_ERROR_LOG("pure_min = %d, pure_max=%d, btw [%d-%d] and [%d-%d]",pure_min,pure_max, zones[4*(i-1)], zones[4*(i-1)+1],zones[4*i],zones[4*i+1]); // break; } red_max = extrema_red[MAX_UPPER]; free(extrema_red); find_zone_extrema(mat, &extrema_red, zones[4*i+1], zones[4*i+2]); red_min = extrema_red[MIN_LOWER]; //check transition zones //check red color in transition zones if( (zones[4*i+1] != zones[4*i+2]) && (zones[4*(i-1)+1] != zones[4*(i-1)+2]) && red_min <= red_max) {//col_ptr[row_ptr[zones[4*i+1]]] <= col_ptr[row_ptr[zones[4*i-2]]-1] ) { ret = GHOST_ERR_BLOCKCOLOR; printf("ERR 2\n"); // break; } black_max = extrema_black[MAX_UPPER]; 
free(extrema_black); find_zone_extrema(mat, &extrema_black, zones[4*i+3], zones[4*i+4]); black_min = extrema_black[MIN_LOWER]; //check black color in transition zones if( (zones[4*i+3] != zones[4*i+4]) &&(zones[4*(i-1)+3] != zones[4*(i-1)+4]) && black_min <= black_max) {//col_ptr[row_ptr[zones[4*i+3]]] <= col_ptr[row_ptr[zones[4*i]]-1] ) { printf("check lower = %"PRLIDX", upper = %"PRLIDX"\n",virtual_col(row_ptr[zones[4*i+3]]),virtual_col(row_ptr[zones[4*i]]-1) ); ret = GHOST_ERR_BLOCKCOLOR; printf("ERR 3\n"); // break; } //check transition in transition zones, if we are using one sweep method, if(mat->context->kacz_setting.kacz_method == GHOST_KACZ_METHOD_BMC_one_sweep) { trans_max = extrema_trans[MAX_UPPER]; free(extrema_trans); find_zone_extrema(mat, &extrema_trans, zones[4*i+2], zones[4*i+3]); trans_min = extrema_trans[MIN_LOWER]; if( (zones[4*i+2] != zones[4*i+3]) &&(zones[4*(i-1)+2] != zones[4*(i-1)+3]) && trans_min <= trans_max) {//col_ptr[row_ptr[zones[4*i+2]]] <= col_ptr[row_ptr[zones[4*i-1]]-1] ) { ret = GHOST_ERR_BLOCKCOLOR; printf("ERR 4\n"); // break; } } else if(i <mat->context->kacz_setting.active_threads-1) { find_zone_extrema(mat, &extrema_trans_1, zones[4*(i-1)+2], zones[4*(i-1)+3]); trans_max = extrema_trans_1[MAX_UPPER]; free(extrema_trans_1); find_zone_extrema(mat, &extrema_trans_2, zones[4*i+6], zones[4*i+7]); trans_min = extrema_trans_2[MIN_LOWER]; free(extrema_trans_2); if( (zones[4*i+6] != zones[4*i+7]) &&(zones[4*(i-1)+2] != zones[4*(i-1)+3]) && trans_min <= trans_max) {//col_ptr[row_ptr[zones[4*i+2]]] <= col_ptr[row_ptr[zones[4*i-1]]-1] ) { printf("check between %d-%d and %d-%d zoneptr\n",4*i-2,4*i-1,4*i+6,4*i+7); ret = GHOST_ERR_BLOCKCOLOR; printf("ERR 5\n"); // break; } } } if(extrema_pure != NULL) free(extrema_pure); if(extrema_red != NULL) free(extrema_red); if(extrema_black != NULL) free(extrema_black); if(extrema_trans != NULL) free(extrema_trans); if(ret == GHOST_ERR_BLOCKCOLOR) GHOST_ERROR_LOG("ERROR in BLOCK COLORING, Check hybrid splitting"); return ret; } //finds lower and upper bandwidth of the matrix ghost_error mat_bandwidth(ghost_sparsemat *mat, int *lower_bw, int *upper_bw, int a, int b) { int lower = 0; int upper = 0; ghost_lidx* chunk_ptr = mat->chunkStart; ghost_lidx* col_ptr = mat->col;//TODO give virtual colums int start_col, end_col; ghost_lidx chunk; ghost_lidx rowinchunk; ghost_lidx chunkheight = mat->traits.C; ghost_lidx idx = 0; /* for(int i=a; i<b; ++i){ * start_col = SPM_NROWS(mat) + mat->context->col_map->dimpad; * end_col = 0; * for(int j=chunk_ptr[i]; j<chunk_ptr[i+1]; ++j) { * start_col = MIN(start_col, virtual_col(j)); * end_col = MAX(end_col, virtual_col(j)); } lower = MAX(lower,i-start_col); upper = MAX(upper,end_col-i); } printf("Bandwidth from calculation crs = %d\n", lower+upper); lower = 0; upper = 0; */ for(int i=a; i<b; ++i){ chunk = i/chunkheight; //can avoid this by doing reminder loops rowinchunk = i%chunkheight; start_col = SPM_NROWS(mat) + mat->context->col_map->dimpad; end_col = 0; idx = chunk_ptr[chunk]+rowinchunk; for(int j=0; j<mat->chunkLen[chunk]; ++j) { if(j==0 || virtual_col(idx)!=0) { //TODO somehow fix it, since filling dummy columns with 0 start_col = MIN(start_col, virtual_col(idx)); end_col = MAX(end_col, virtual_col(idx)); } idx+=chunkheight; } lower = MAX(lower,i-start_col); upper = MAX(upper,end_col-i); } *lower_bw = lower; *upper_bw = upper; return GHOST_SUCCESS; } ghost_error split_transition(ghost_sparsemat *mat) { ghost_error ret = GHOST_SUCCESS; //ghost_lidx *row_ptr = mat->chunkStart; 
//ghost_lidx *col_ptr = mat->col; //height might vary from nrows if we have multicoloring ghost_lidx height = mat->context->zone_ptr[mat->context->nzones]; //width might vary from ncols if we consider remote permutations also //ghost_lidx width = mat->context->maxColRange+1; int n_zones = mat->context->kacz_setting.active_threads;//nthread[0]; ghost_lidx *new_zone_ptr = NULL; mat->context->nzones = mat->context->nzones + 2*(n_zones);//add the new zones //GHOST_CALL_GOTO(ghost_malloc((void **)&new_zone_ptr,sizeof(ghost_lidx)*(4*n_zones+2)),err,ret); new_zone_ptr = (ghost_lidx*) malloc(sizeof(ghost_lidx)*(4*n_zones+2)); ghost_lidx lower_bw = 0; ghost_lidx upper_bw = 0; //the bandwidth might have changed due to previous permutations (it can also increase) //Further bandwidth only from 0 to height has to be calculated mat_bandwidth(mat, &lower_bw, &upper_bw, 0, height); ghost_lidx total_bw = lower_bw + upper_bw;//lower_bw + upper_bw; //printf("New lower b/w =%d, upper b/w=%d, total =%d",lower_bw,upper_bw,total_bw); //printf("HEIGHT = %d, WIDTH = %d\n",height,width); //double diagonal_slope = (double)(height)/width; //ghost_lidx separation = (int)(ceil((diagonal_slope*total_bw))); for (int i=0; i<n_zones; ++i) { new_zone_ptr[4*i] = mat->context->zone_ptr[2*i]; new_zone_ptr[4*i+1] = mat->context->zone_ptr[2*i+1]; } new_zone_ptr[4*(n_zones)] = mat->context->zone_ptr[2*(n_zones)] ; new_zone_ptr[4*(n_zones)+1] = mat->context->zone_ptr[2*(n_zones)] ; //simply for ease of calculation, not accessible by user for (int i=0; i<n_zones; ++i) { ghost_lidx black_start = new_zone_ptr[4*i] + total_bw; ghost_lidx red_end = new_zone_ptr[4*i+5] - total_bw; if(i==n_zones-1) red_end = new_zone_ptr[4*i+5]; if(black_start < new_zone_ptr[4*i+1]) black_start = new_zone_ptr[4*i+1]; if(black_start > new_zone_ptr[4*i+4]) black_start = new_zone_ptr[4*i+4]; if(red_end > new_zone_ptr[4*i+4]) red_end = new_zone_ptr[4*i+4]; if(red_end < new_zone_ptr[4*i+1]) red_end = new_zone_ptr[4*i+1]; //now check the cases if(black_start <= red_end) { int median =(int)( (black_start+red_end)/2.0); new_zone_ptr[4*i+2] = median; //else can leave as it is , but idea is to reduce this transition zones, will have to check load balancing new_zone_ptr[4*i+3] = median; } else { new_zone_ptr[4*i+2] = red_end; new_zone_ptr[4*i+3] = black_start; } } //now check whether the transition in transition is overlapping- if one region overlaps we use 2 sweep method (with threads/2) , else one sweep method mat->context->kacz_setting.kacz_method = GHOST_KACZ_METHOD_BMC_one_sweep; for(int i=1; i<n_zones; ++i) { // ghost_gidx lower = virtual_col(row_ptr[new_zone_ptr[4*i+2]]); //This might not work if the matrix is not RCM permuted // ghost_gidx upper = virtual_col(row_ptr[new_zone_ptr[4*i-1]]-1); int *extrema_lower_trans = NULL, *extrema_upper_trans = NULL; find_zone_extrema(mat, &extrema_lower_trans, new_zone_ptr[4*i+2], new_zone_ptr[4*i+3]); ghost_lidx lower = extrema_lower_trans[MIN_LOWER]; find_zone_extrema(mat, &extrema_upper_trans, new_zone_ptr[4*i-2], new_zone_ptr[4*i-1]); ghost_lidx upper = extrema_lower_trans[MAX_UPPER]; if(lower <= upper) { //printf("check lower = %d and upper =%d\n",virtual_col(row_ptr[new_zone_ptr[4*i+2]]) , virtual_col(row_ptr[new_zone_ptr[4*i-1]]-1)); mat->context->kacz_setting.kacz_method = GHOST_KACZ_METHOD_BMC_two_sweep; GHOST_WARNING_LOG("ONLY half the available threads would be used for transitional sweep\n"); break; } if(extrema_lower_trans != NULL) free(extrema_lower_trans); if(extrema_upper_trans != NULL) 
free(extrema_upper_trans); } mat->context->zone_ptr = new_zone_ptr; #ifdef GHOST_KACZ_ANALYZE kacz_analyze_print(mat); #endif //currently be done only if CHUNKHEIGHT==1, and NO_DISTINCTION is on, since if no distinction is not on //further permutation occurs after ghost_sparsemat_fromfunc_common , which permutes remote entries //this causes problem for checking although the result is correct if(mat->traits.C == 1/* && mat->context->flags & GHOST_PERM_NO_DISTINCTION*/) { GHOST_INFO_LOG("CHECKING BLOCK COLORING") checker(mat); GHOST_INFO_LOG("CHECKING FINISHED") } return ret; } //To be used only when the requirement is satisfied ghost_error split_analytical(ghost_sparsemat *mat) { //for KACZ_ANALYZE #ifdef GHOST_KACZ_ANALYZE //ghost_lidx line_size, n_lines, rem_lines; //int start=0 , end=0; //ghost_lidx *rows; //ghost_lidx *nnz; #endif int height = SPM_NROWS(mat); int width = mat->context->maxColRange+1; double diagonal_slope = (double)(height)/width; //int separation = (int)ceil(diagonal_slope*mat->context->bandwidth); int possible_threads = (int) ((double)height/mat->context->bandwidth); //height/separation ghost_error ret = GHOST_SUCCESS; int nthread; #ifdef GHOST_HAVE_OPENMP #pragma omp parallel { #pragma omp master nthread = ghost_omp_nthread(); } #else nthread = 1; #endif int current_threads = nthread; if( current_threads > possible_threads) { GHOST_WARNING_LOG("Specified number of threads cannot be used for the specified KACZ kernel, setting from %d to %d",current_threads,possible_threads); current_threads = possible_threads; // disable dynamic thread adjustments ghost_omp_set_dynamic(0); ghost_omp_nthread_set(current_threads); } mat->context->kacz_setting.active_threads = current_threads; //ghost_lidx *chunk_ptr = mat->chunkStart; //ghost_lidx *col_ptr = mat->col; //ghost_lidx chunkheight = mat->traits.C; mat->context->nzones = 4*current_threads; ghost_malloc((void **)&mat->context->zone_ptr,(mat->context->nzones+2)*sizeof(ghost_lidx)); //one extra zone added for convenience ghost_lidx *zone_ptr = mat->context->zone_ptr; int pure_gap = (int)( ((double)height/current_threads)); int pure_thickness = (int)( ( ((double)height/current_threads)-mat->context->bandwidth*diagonal_slope)) ; int red_ctr = 0; ; int black_ctr = 0; ; int black_start = 0; int red_end = 0; int median = 0; for(int i=0; i<current_threads; ++i) { zone_ptr[4*i] = i*pure_gap; zone_ptr[4*i+1] = zone_ptr[4*i] + pure_thickness; } zone_ptr[4*current_threads] = height; zone_ptr[4*current_threads+1] = height+pure_thickness;//dummy for(int i=0; i<current_threads; ++i) { black_start = zone_ptr[4*i] + (int) ceil(mat->context->bandwidth*diagonal_slope); red_end = zone_ptr[4*(i+1)+1] - (int) ceil(mat->context->bandwidth*diagonal_slope); if(black_start<zone_ptr[4*i+1]) { black_start = zone_ptr[4*i+1]; black_ctr += 1; } if(red_end>zone_ptr[4*(i+1)]) { red_end = zone_ptr[4*(i+1)]; red_ctr += 1; } if(black_start <= red_end) { median = (int) ((black_start+red_end)/2.0); zone_ptr[4*i+2] = median; zone_ptr[4*i+3] = median; } else { zone_ptr[4*i+2] = red_end; zone_ptr[4*i+3] = black_start; } } if( (red_ctr == current_threads) && (black_ctr == current_threads) ) { GHOST_INFO_LOG("USING RED BLACK SWEEP WITHOUT TRANSITION"); } //multicoloring is also dummy initialise pointers so we can use the same kacz kernel mat->context->ncolors = 1;//mat->context->zone_ptr[mat->context->nzones+1] - mat->context->zone_ptr[mat->context->nzones]; ghost_malloc((void **)&mat->context->color_ptr,(mat->context->ncolors+1)*sizeof(ghost_lidx)); for(int i=0; 
i<mat->context->ncolors+1; ++i) { mat->context->color_ptr[i] = mat->context->zone_ptr[mat->context->nzones]; } mat->context->kacz_setting.kacz_method = GHOST_KACZ_METHOD_BMC_one_sweep; for(int i=1; i<current_threads; ++i) { //ghost_lidx chunk_lower = zone_ptr[4*i+2]/chunkheight; //ghost_lidx rowinchunk_lower = zone_ptr[4*i+2]%chunkheight; //ghost_lidx chunk_upper = (zone_ptr[4*i-1]-1)/chunkheight; //ghost_lidx rowinchunk_upper = (zone_ptr[4*i-1]-1)%chunkheight; int *extrema_lower_trans = NULL, *extrema_upper_trans = NULL; find_zone_extrema(mat, &extrema_lower_trans, zone_ptr[4*i+2], zone_ptr[4*i+3]); ghost_lidx lower = extrema_lower_trans[MIN_LOWER]; find_zone_extrema(mat, &extrema_upper_trans, zone_ptr[4*i-2], zone_ptr[4*i-1]); ghost_lidx upper = extrema_lower_trans[MAX_UPPER]; /*ghost_lidx lower = virtual_col(chunk_ptr[chunk_lower]+rowinchunk_lower); // TODO : A scanning might be required * ghost_lidx upper = virtual_col(chunk_ptr[chunk_upper]+rowinchunk_upper+chunkheight*(mat->chunkLen[chunk_upper]-1)); */ if(lower <= upper) { //printf("check lower = %d and upper =%d\n",virtual_col(row_ptr[zone_ptr[4*i+2]]) , virtual_col(row_ptr[zone_ptr[4*i-1]]-1)); mat->context->kacz_setting.kacz_method = GHOST_KACZ_METHOD_BMC_two_sweep; GHOST_WARNING_LOG("ONLY half the available threads would be used for transitional sweep\n"); break; } if(extrema_lower_trans != NULL) free(extrema_lower_trans); if(extrema_upper_trans != NULL) free(extrema_upper_trans); } #ifdef GHOST_KACZ_ANALYZE kacz_analyze_print(mat); #endif if(mat->traits.C == 1) { GHOST_INFO_LOG("CHECKING BLOCK COLORING") checker(mat); GHOST_INFO_LOG("CHECKING FINISHED") } return ret; } //not used - expensive, eventhough it refines in detail, it might then lead to load balancing /*ghost_error split_transition(ghost_sparsemat *mat) * { * ghost_error ret = GHOST_SUCCESS; * * //for SELL-1-1 * ghost_lidx *row_ptr = mat->chunkStart; * ghost_lidx *col_ptr = mat->col; * * int nthread[1]; * #ifdef GHOST_HAVE_OPENMP * #pragma omp parallel * { * #pragma omp master * nthread[0] = ghost_omp_nthread(); * } * #else * nthread[0] = 1; * #endif * * * int n_zones = nthread[0]; * ghost_lidx *new_zone_ptr = NULL; * * mat->context->kacz_setting.active_threads = nthread[0];//TODO add this to sparsemat * * mat->context->nzones = mat->context->nzones + 2*(n_zones-1);//add the new zones * * GHOST_CALL_GOTO(ghost_malloc((void **)&new_zone_ptr,sizeof(ghost_lidx)*(4*n_zones-2)),err,ret); * * for (int i=0; i<n_zones; ++i) { * new_zone_ptr[4*i] = mat->context->zone_ptr[2*i]; * new_zone_ptr[4*i+1] = mat->context->zone_ptr[2*i+1]; * } * * * * for (int i=0; i<n_zones-1; ++i) { * ghost_lidx red_start = new_zone_ptr[4*i+1]; * ghost_lidx red_end = -1; * ghost_lidx black_start = -1; * ghost_lidx black_end = new_zone_ptr[4*i+4]; * * int *extrema_pre, *extrema_post; * * find_zone_extrema(mat, &extrema_pre, new_zone_ptr[4*i], new_zone_ptr[4*i+1]); * find_zone_extrema(mat, &extrema_post, new_zone_ptr[4*i+4], new_zone_ptr[4*i+5]); * * ghost_lidx red_end_col = extrema_pre[MIN_UPPER];//col_ptr[row_ptr[new_zone_ptr[4*i]+1]-1]; * ghost_lidx black_start_col = extrema_post[MAX_LOWER];//col_ptr[row_ptr[new_zone_ptr[4*i+5]]]; * * bool flag_red = 0; * bool flag_black = 0; * * //now find corresponding rows of red_end and black_start * for (int j=red_start; j<black_end; ++j) { * if(flag_red == 0 || flag_black == 0) { * if(flag_red == 0 && col_ptr[row_ptr[j]] >= red_end_col) { * red_end = j; * flag_red = 1; * } * if(flag_black == 0 && col_ptr[row_ptr[j+1]-1] >= black_start_col) { * black_start = j; 
* flag_black = 1; * } * } * } * * free(extrema_pre); * free(extrema_post); * * //now check the cases * if(flag_red == 0 && flag_black == 0) { * new_zone_ptr[4*i+2] = black_end; * new_zone_ptr[4*i+3] = black_end; * } * else if(flag_red == 0 && flag_black == 1) { * new_zone_ptr[4*i+2] = black_start; * new_zone_ptr[4*i+3] = black_start; * } * else if(flag_red == 1 && flag_black == 0) { * new_zone_ptr[4*i+2] = red_end; * new_zone_ptr[4*i+3] = red_end; * } * //this is the only case where we have transition zones * else if(black_start < red_end) { * new_zone_ptr[4*i+2] = black_start; * new_zone_ptr[4*i+3] = red_end; * } * else if(black_start >= red_end) { * int median = (int)( (black_start+red_end)/2.0 ); * new_zone_ptr[4*i+2] = median; * new_zone_ptr[4*i+3] = median; * } * else { * EROOR_LOG("GHOST UNKNOWN ERROR in hybrid splitting \n"); * ret = GHOST_ERR_UNKNOWN; * goto err; * } * * } * * //now check whether the transition in transition is overlapping- if one region overlaps we use 2 sweep method (with threads/2) , else one sweep method * for(int i=1; i<n_zones-1; ++i) { * mat->context->kacz_setting.kacz_method = GHOST_KACZ_METHOD_BMC_one_sweep; * * if(col_ptr[row_ptr[new_zone_ptr[4*i+2]]] <= col_ptr[row_ptr[new_zone_ptr[[4*i-1]]]-1]) { * mat->context->kacz_setting.kacz_method = GHOST_KACZ_METHOD_BMC_two_sweep; * break; * } * } * * * mat->context->zone_ptr = new_zone_ptr; * goto out; * * err: * * out: * return ret; * * }*/
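// A minimal standalone sketch (C++) of the zone-splitting rule used in
// split_transition()/split_analytical() above: given one thread block's pure
// zone boundaries and the total bandwidth, derive the two transition pointers.
// The helper name, the plain int types and the numbers in main() are
// hypothetical and only illustrate the arithmetic; the real code operates on
// the ghost_lidx zone_ptr array inside the context.
#include <algorithm>
#include <cstdio>

struct ZoneSplit { int pure_end, trans1_end, trans2_end; };

// zone_start..next_block_start bound this block's rows, pure_end closes its
// pure zone, next_pure_end closes the following block's pure zone, and
// total_bw is the lower plus upper matrix bandwidth.
static ZoneSplit split_one_block(int zone_start, int pure_end, int next_block_start,
                                 int next_pure_end, int total_bw) {
    int black_start = std::min(std::max(zone_start + total_bw, pure_end), next_block_start);
    int red_end     = std::max(std::min(next_pure_end - total_bw, next_block_start), pure_end);
    ZoneSplit s{pure_end, 0, 0};
    if (black_start <= red_end) {
        // the bandwidth bands do not overlap: collapse the transition to a point
        int median = (black_start + red_end) / 2;
        s.trans1_end = s.trans2_end = median;
    } else {
        // rows in [red_end, black_start) need the red/black transition sweeps
        s.trans1_end = red_end;
        s.trans2_end = black_start;
    }
    return s;
}

int main() {
    ZoneSplit s = split_one_block(0, 40, 100, 140, 80);
    std::printf("pure ends at %d, transition spans [%d, %d)\n",
                s.pure_end, s.trans1_end, s.trans2_end);
    return 0;
}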
begin_declare_variant_no_end_for_matching_selector.c
// RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp -x c -std=c99 -fms-extensions -Wno-pragma-pack %s
// RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp-simd -x c -std=c99 -fms-extensions -Wno-pragma-pack %s

#pragma omp begin declare variant match(device={kind(cpu)})
int also_before(void) { return 0; }
#pragma omp end declare variant

#pragma omp begin declare variant match(device={kind(gpu)}) // expected-note {{to match this '#pragma omp begin declare variant'}}
// The matching end is missing. Since the device clause does not match, this
// will cause us to elide the rest of the file and emit an error.
int also_after(void) { return 2; }
int also_before(void) { return 2; }
#pragma omp begin declare variant match(device={kind(fpga)})
This text is never parsed!
#pragma omp end declare variant

int also_after(void) { return 0; }
int test() { return also_after() + also_before(); }
// expected-error {{expected '#pragma omp end declare variant'}}
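// A minimal well-formed counterpart to the test above, given as a sketch (it
// is not part of the LLVM test suite): every '#pragma omp begin declare
// variant' is closed by a matching end, so the whole file parses and the CPU
// specialisation is chosen on CPU hosts. The function names are hypothetical.
int foo(void) { return 1; } // base version

#pragma omp begin declare variant match(device = {kind(cpu)})
int foo(void) { return 0; } // specialisation preferred when the device kind matches
#pragma omp end declare variant

int use_foo(void) { return foo(); }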
GB_binop__first_fc32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__first_fc32) // A.*B function (eWiseMult): GB (_AemultB_08__first_fc32) // A.*B function (eWiseMult): GB (_AemultB_02__first_fc32) // A.*B function (eWiseMult): GB (_AemultB_04__first_fc32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__first_fc32) // A*D function (colscale): GB (_AxD__first_fc32) // D*A function (rowscale): GB (_DxB__first_fc32) // C+=B function (dense accum): GB (_Cdense_accumB__first_fc32) // C+=b function (dense accum): GB (_Cdense_accumb__first_fc32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_fc32) // C=scalar+B GB ((none)) // C=scalar+B' GB ((none)) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: GxB_FC32_t // A type: GxB_FC32_t // A pattern? 0 // B type: GxB_FC32_t // B pattern? 1 // BinaryOp: cij = aij #define GB_ATYPE \ GxB_FC32_t #define GB_BTYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ GxB_FC32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ ; // true if values of B are not used #define GB_B_IS_PATTERN \ 1 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = x ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_FIRST || GxB_NO_FC32 || GxB_NO_FIRST_FC32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__first_fc32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__first_fc32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__first_fc32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type GxB_FC32_t GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__first_fc32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__first_fc32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__first_fc32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; GxB_FC32_t alpha_scalar ; GxB_FC32_t beta_scalar ; if (is_eWiseUnion) { 
alpha_scalar = (*((GxB_FC32_t *) alpha_scalar_in)) ; beta_scalar = (*((GxB_FC32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__first_fc32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__first_fc32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__first_fc32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__first_fc32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ; GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; ; ; Cx [p] = x ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ; GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC32_t aij = GBX (Ax, p, false) ; Cx [p] = aij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = x ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 
2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = aij ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
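// A small self-contained sketch (not GraphBLAS internals) of what the kernels
// generated above compute elementwise: GB_BINOP expands to "z = x", i.e. the
// GrB_FIRST operator, so an eWiseMult C = A.*B keeps A's value wherever both A
// and B have an entry and never reads B's values (GB_B_IS_PATTERN is 1). The
// dense vectors below are illustrative only.
#include <complex>
#include <cstdio>
#include <vector>

using fc32 = std::complex<float>; // plays the role of GxB_FC32_t

int main() {
    std::vector<fc32> A = {{1.0f, 2.0f}, {3.0f, 4.0f}, {5.0f, 6.0f}};
    std::vector<bool> B_pattern = {true, false, true}; // only B's structure matters
    std::vector<fc32> C(A.size(), fc32(0.0f, 0.0f));
    for (std::size_t p = 0; p < A.size(); ++p)
        if (B_pattern[p])
            C[p] = A[p]; // cij = first(aij, bij) = aij
    std::printf("C[0] = %g%+gi, C[2] = %g%+gi\n",
                C[0].real(), C[0].imag(), C[2].real(), C[2].imag());
    return 0;
}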
texture.c
#include "mana/graphics/utilities/texture.h" #define STB_IMAGE_IMPLEMENTATION #include <stb_image.h> int texture_init(struct Texture *texture, struct GPUAPI *gpu_api, struct TextureSettings texture_settings) { VkFilter filter = (texture_settings.filter_type == FILTER_NEAREST) ? VK_FILTER_NEAREST : VK_FILTER_LINEAR; VkSamplerAddressMode mode; switch (texture_settings.mode_type) { case (MODE_REPEAT): mode = VK_SAMPLER_ADDRESS_MODE_REPEAT; break; case (MODE_MIRRORED_REPEAT): mode = VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT; break; case (MODE_CLAMP_TO_EDGE): mode = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; break; case (MODE_CLAMP_TO_BORDER): mode = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER; break; } texture->path = strdup(texture_settings.path); char *name_location = strrchr(texture_settings.path, '/'); if (!name_location) texture->name = strdup(name_location); else texture->name = strdup(name_location + 1); char *type_location = strrchr(texture_settings.path, '.'); if (!type_location) texture->type = strdup(type_location); else texture->type = strdup(type_location + 1); // Todo: Detect pixel bit // Note: Something like this could be useful for optimizing but not needed as stbi will correctly convert up/down bits //int pixel_bit = 16; //int tex_width, tex_height, tex_channels; //void *pixels; //VkDeviceSize image_size; //if (pixel_bit == 8) { // pixels = (void *)stbi_load(texture->path, &tex_width, &tex_height, &tex_channels, STBI_rgb_alpha); // image_size = tex_width * tex_height * 4; //} else if (pixel_bit == 16) { // pixels = (void *)stbi_load_16(texture->path, &tex_width, &tex_height, &tex_channels, STBI_rgb_alpha); // image_size = tex_width * tex_height * 4 * 2; //} else if (pixel_bit == 32) { // pixels = (void *)stbi_load_32(texture->path, &tex_width, &tex_height, &tex_channels, STBI_rgb_alpha); // image_size = tex_width * tex_height * 4 * 2 * 2; //} else // return -1; int tex_width, tex_height, tex_channels; stbi_us *pixels = stbi_load_16(texture->path, &tex_width, &tex_height, &tex_channels, STBI_rgb_alpha); VkDeviceSize image_size = tex_width * tex_height * 4 * 2; texture->width = tex_width; texture->height = tex_height; if (!pixels) { printf("Failed to load texture image!\n"); return 1; } // Guaranteed to be multiple of 4 // ifndef avx512 -> ifndef avx2 -> ifndef avx -> ifndef sse2 -> ifndef neon -> fallback if (texture_settings.premultiplied_alpha == 0) { // Note: Let openmp take care of simd for now, outside of texture uses parallel for inside used parallel simd //https://stackoverflow.com/questions/14674049/parallel-for-vs-omp-simd-when-to-use-each #pragma omp simd for (int pixel_group_num = 0; pixel_group_num < tex_width * tex_height * tex_channels; pixel_group_num += 4) { unsigned short alpha_value = pixels[pixel_group_num + 3]; pixels[pixel_group_num] *= ((float)alpha_value / USHRT_MAX); pixels[pixel_group_num + 1] *= ((float)alpha_value / USHRT_MAX); pixels[pixel_group_num + 2] *= ((float)alpha_value / USHRT_MAX); } } // int total_pixels = tex_width * tex_height * tex_channels; //#ifdef __AVX__ // for (int iterations = total_pixels / 4;;) // ; // if (total_pixels % 4 != 0) { // unsigned short alpha_value = pixels[total_pixels - 1]; // pixels[total_pixels - 4] *= ((float)alpha_value / USHRT_MAX); // pixels[total_pixels - 3] *= ((float)alpha_value / USHRT_MAX); // pixels[total_pixels - 2] *= ((float)alpha_value / USHRT_MAX); // } //#else // if (texture_settings.premultiplied_alpha == 0) { //#pragma omp parallel for // for (int pixel_group_num = 0; pixel_group_num < tex_width * 
tex_height * tex_channels; pixel_group_num += 4) { // unsigned short alpha_value = pixels[pixel_group_num + 3]; // pixels[pixel_group_num] *= ((float)alpha_value / USHRT_MAX); // pixels[pixel_group_num + 1] *= ((float)alpha_value / USHRT_MAX); // pixels[pixel_group_num + 2] *= ((float)alpha_value / USHRT_MAX); // } // } //#endif VkBuffer staging_buffer = {0}; VkDeviceMemory staging_buffer_memory = {0}; graphics_utils_create_buffer(gpu_api->vulkan_state->device, gpu_api->vulkan_state->physical_device, image_size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, &staging_buffer, &staging_buffer_memory); void *data; vkMapMemory(gpu_api->vulkan_state->device, staging_buffer_memory, 0, image_size, 0, &data); memcpy(data, pixels, image_size); vkUnmapMemory(gpu_api->vulkan_state->device, staging_buffer_memory); stbi_image_free(pixels); uint32_t mip_levels = (uint32_t)(floor(log2(MAX(tex_width, tex_height)))); if (texture_settings.mip_maps_enabled == 0) mip_levels = 1; graphics_utils_create_image(gpu_api->vulkan_state->device, gpu_api->vulkan_state->physical_device, tex_width, tex_height, mip_levels, VK_SAMPLE_COUNT_1_BIT, VK_FORMAT_R16G16B16A16_UNORM, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &texture->texture_image, &texture->texture_image_memory); graphics_utils_transition_image_layout(gpu_api->vulkan_state->device, gpu_api->vulkan_state->graphics_queue, gpu_api->vulkan_state->command_pool, texture->texture_image, VK_FORMAT_R16G16B16A16_UNORM, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, mip_levels); graphics_utils_copy_buffer_to_image(gpu_api->vulkan_state->device, gpu_api->vulkan_state->graphics_queue, gpu_api->vulkan_state->command_pool, &staging_buffer, &texture->texture_image, tex_width, tex_height); vkDestroyBuffer(gpu_api->vulkan_state->device, staging_buffer, NULL); vkFreeMemory(gpu_api->vulkan_state->device, staging_buffer_memory, NULL); graphics_utils_generate_mipmaps(gpu_api->vulkan_state->device, gpu_api->vulkan_state->physical_device, gpu_api->vulkan_state->graphics_queue, gpu_api->vulkan_state->command_pool, texture->texture_image, VK_FORMAT_R16G16B16A16_UNORM, tex_width, tex_height, mip_levels); graphics_utils_create_image_view(gpu_api->vulkan_state->device, texture->texture_image, VK_FORMAT_R16G16B16A16_UNORM, VK_IMAGE_ASPECT_COLOR_BIT, mip_levels, &texture->texture_image_view); graphics_utils_create_sampler(gpu_api->vulkan_state->device, &texture->texture_sampler, (struct SamplerSettings){.mip_levels = mip_levels, .filter = filter, .address_mode = mode}); return 0; } void texture_delete(struct Texture *texture, struct GPUAPI *gpu_api) { vkDestroySampler(gpu_api->vulkan_state->device, texture->texture_sampler, NULL); vkDestroyImageView(gpu_api->vulkan_state->device, texture->texture_image_view, NULL); vkDestroyImage(gpu_api->vulkan_state->device, texture->texture_image, NULL); vkFreeMemory(gpu_api->vulkan_state->device, texture->texture_image_memory, NULL); free(texture->path); free(texture->name); free(texture->type); }
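// A minimal sketch of the premultiplied-alpha step performed above on the
// 16-bit RGBA buffer returned by stbi_load_16: every colour channel is scaled
// by alpha / USHRT_MAX before the staging-buffer copy, so the sampler can
// blend with a premultiplied source. The two hard-coded pixels are example
// data only; the real loop runs over the whole decoded image.
#include <climits>
#include <cstdio>

int main() {
    unsigned short pixels[8] = {65535, 32768, 0,    32768,   // pixel 0: R G B A
                                1000,  2000,  3000, 65535};  // pixel 1: R G B A
    for (int p = 0; p < 8; p += 4) {
        float alpha = (float)pixels[p + 3] / USHRT_MAX;
        pixels[p + 0] = (unsigned short)(pixels[p + 0] * alpha);
        pixels[p + 1] = (unsigned short)(pixels[p + 1] * alpha);
        pixels[p + 2] = (unsigned short)(pixels[p + 2] * alpha);
    }
    std::printf("premultiplied pixel 0: %hu %hu %hu %hu\n",
                pixels[0], pixels[1], pixels[2], pixels[3]);
    return 0;
}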
dct_lee_cpu.h
/** * @file dct_lee_cpu.h * @author Yibo Lin * @date Oct 2018 */ #ifndef DREAMPLACE_DCT_LEE_CPU_H #define DREAMPLACE_DCT_LEE_CPU_H #include <vector> #include <cmath> #include <stdexcept> #include "utility/src/Msg.h" DREAMPLACE_BEGIN_NAMESPACE namespace lee { constexpr double PI = 3.14159265358979323846; /// Return true if a number is power of 2 template <typename T = unsigned> inline bool isPowerOf2(T val) { return val && (val & (val - 1)) == 0; } /// Transpose a row-major matrix with M rows and N columns using block transpose method template <typename TValue, typename TIndex = unsigned> inline void transpose(const TValue *in, TValue *out, TIndex M, TIndex N, TIndex blockSize = 16) { //#pragma omp parallel for collapse(2) schedule(static) for (TIndex j = 0; j < N; j += blockSize) { for (TIndex i = 0; i < M; i += blockSize) { // Transpose the block beginning at [i, j] TIndex xend = std::min(M, i + blockSize); TIndex yend = std::min(N, j + blockSize); for (TIndex y = j; y < yend; ++y) { for (TIndex x = i; x < xend; ++x) { out[x + y * M] = in[y + x * N]; } } } } } /// Negate values in odd position of a vector template <typename TValue, typename TIndex = unsigned> inline void negateOddEntries(TValue *vec, TIndex N) { for (TIndex i = 1; i < N; i += 2) { vec[i] = -vec[i]; } } /// Precompute cosine values needed for N-point dct /// @param cos size N - 1 buffer, contains the result after function call /// @param N the length of target dct, must be power of 2 template <typename TValue, typename TIndex = unsigned> void precompute_dct_cos(TValue *cos, TIndex N) { // The input length must be power of 2 if (! isPowerOf2<TIndex>(N)) { throw std::domain_error("Input length is not power of 2."); } TIndex offset = 0; TIndex halfLen = N / 2; while (halfLen) { TValue phaseStep = 0.5 * PI / halfLen; TValue phase = 0.5 * phaseStep; for (TIndex i = 0; i < halfLen; ++i) { cos[offset + i] = 0.5 / std::cos(phase); phase += phaseStep; } offset += halfLen; halfLen /= 2; } } /// Precompute cosine values needed for N-point idct /// @param cos size N - 1 buffer, contains the result after function call /// @param N the length of target idct, must be power of 2 template <typename TValue, typename TIndex = unsigned> void precompute_idct_cos(TValue *cos, TIndex N) { // The input length must be power of 2 if (! isPowerOf2<TIndex>(N)) { throw std::domain_error("Input length is not power of 2."); } TIndex offset = 0; TIndex halfLen = 1; while(halfLen < N) { TValue phaseStep = 0.5 * PI / halfLen; TValue phase = 0.5 * phaseStep; for (TIndex i = 0; i < halfLen; ++i) { cos[offset + i] = 0.5 / std::cos(phase); phase += phaseStep; } offset += halfLen; halfLen *= 2; } } /// The implementation of fast Discrete Cosine Transform (DCT) algorithm and its inverse (IDCT) are Lee's algorithms /// Algorithm reference: A New Algorithm to Compute the Discrete Cosine Transform, by Byeong Gi Lee, 1984 /// /// Lee's algorithm has a recursive structure in nature. /// Here is a sample recursive implementation: https://www.nayuki.io/page/fast-discrete-cosine-transform-algorithms /// /// My implementation here is iterative, which is more efficient than the recursive version. 
/// Here is a sample iterative implementation: https://www.codeproject.com/Articles/151043/Iterative-Fast-1D-Forvard-DCT /// Compute y[k] = sum_n=0..N-1 (x[n] * cos((n + 0.5) * k * PI / N)), for k = 0..N-1 /// /// @param vec length N sequence to be transformed /// @param temp length 2 * N helping buffer /// @param cos length N - 1, stores cosine values precomputed by function 'precompute_dct_cos' /// @param N length of vec, must be power of 2 template <typename TValue, typename TIndex = unsigned> inline void dct(TValue *vec, TValue *out, TValue *buf, const TValue *cos, TIndex N) { // The input length must be power of 2 if (! isPowerOf2<TIndex>(N)) { throw std::domain_error("Input length is not power of 2."); } // Pointers point to the beginning indices of two adjacent iterations TValue *curr = out; TValue *next = buf; // 'temp' is used to store data of two adjacent iterations // Copy 'vec' to the first N element in 'temp' std::copy(vec, vec + N, curr); // Current bufferfly length and half length TIndex len = N; TIndex halfLen = len / 2; // Iteratively bi-partition sequences into sub-sequences TIndex cosOffset = 0; while (halfLen) { TIndex offset = 0; TIndex steps = N / len; for (TIndex k = 0; k < steps; ++k) { for (TIndex i = 0; i < halfLen; ++i) { next[offset + i] = curr[offset + i] + curr[offset + len - i - 1]; next[offset + halfLen + i] = (curr[offset + i] - curr[offset + len -i - 1]) * cos[cosOffset + i]; } offset += len; } std::swap(curr, next); cosOffset += halfLen; len = halfLen; halfLen /= 2; } // Bottom-up form the final DCT solution // Note that the case len = 2 will do nothing, so we start from len = 4 len = 4; halfLen = 2; while(halfLen < N) { TIndex offset = 0; TIndex steps = N / len; for(TIndex k = 0; k < steps; ++k) { for(TIndex i = 0; i < halfLen - 1; ++i) { next[offset + i * 2] = curr[offset + i]; next[offset + i * 2 + 1] = curr[offset + halfLen + i] + curr[offset + halfLen + i + 1]; } next[offset + len - 2] = curr[offset + halfLen - 1]; next[offset + len - 1] = curr[offset + len - 1]; offset += len; } std::swap(curr, next); halfLen = len; len *= 2; } // Populate the final results into 'out' if (curr != out) { std::copy(curr, curr+N, out); } } /// Compute y[k] = 0.5 * x[0] + sum_n=1..N-1 (x[n] * cos(n * (k + 0.5) * PI / N)), for k = 0..N-1 /// @param vec length N sequence to be transformed /// @param temp length 2 * N helping buffer /// @param cos length N - 1, stores cosine values precomputed by function 'precompute_idct_cos' /// @param N length of vec, must be power of 2 template <typename TValue, typename TIndex = unsigned> inline void idct(TValue *vec, TValue *out, TValue* buf, const TValue *cos, TIndex N) { // The input length must be power of 2 if (! 
isPowerOf2<TIndex>(N)) { throw std::domain_error("Input length is not power of 2."); } // Pointers point to the beginning indices of two adjacent iterations TValue *curr = out; TValue *next = buf; // This array is used to store date of two adjacent iterations // Copy 'vec' to the first N element in 'temp' std::copy(vec, vec + N, curr); curr[0] /= 2; // Current bufferfly length and half length TIndex len = N; TIndex halfLen = len / 2; // Iteratively bi-partition sequences into sub-sequences while (halfLen) { TIndex offset = 0; TIndex steps = N / len; for (TIndex k = 0; k < steps; ++k) { next[offset] = curr[offset]; next[offset + halfLen] = curr[offset + 1]; for (TIndex i = 1; i < halfLen; ++i) { next[offset + i] = curr[offset + i * 2]; next[offset + halfLen + i] = curr[offset + i * 2 - 1] + curr[offset + i * 2 + 1]; } offset += len; } std::swap(curr, next); len = halfLen; halfLen /= 2; } // Bottom-up form the final IDCT solution len = 2; halfLen = 1; TIndex cosOffset = 0; while(halfLen < N) { TIndex offset = 0; TIndex steps = N / len; for(TIndex k = 0; k < steps; ++k) { for(TIndex i = 0; i < halfLen; ++i) { TValue g = curr[offset + i]; TValue h = curr[offset + halfLen + i] * cos[cosOffset + i]; next[offset + i] = g + h; next[offset + len - 1 - i] = g - h; } offset += len; } std::swap(curr, next); cosOffset += halfLen; halfLen = len; len *= 2; } // Populate the final results into 'out' if (curr != out) { std::copy(curr, curr+N, out); } } /// Compute batch dct /// @param mtx size M * N row-major matrix to be transformed /// @param temp length 3 * M * N helping buffer, first 2 * M * N is for dct, the last M * N is for matrix transpose /// @param cosM length M - 1, stores cosine values precomputed by function 'precompute_dct_cos' for M-point dct /// @param cosN length N - 1, stores cosine values precomputed by function 'precompute_dct_cos' for N-point dct /// @param M number of rows /// @param N number of columns template <typename TValue, typename TIndex = unsigned> inline void dct(TValue *mtx, TValue *out, TValue* buf, const TValue *cos, TIndex M, TIndex N) { //#pragma omp parallel for schedule(static) for (TIndex i = 0; i < M; ++i) { dct<TValue, TIndex>(mtx + i * N, out + i * N, buf + i*N, cos, N); } } /// Compute batch idct /// @param mtx size M * N row-major matrix to be transformed /// @param temp length 3 * M * N helping buffer, first 2 * M * N is for dct, the last M * N is for matrix transpose /// @param cosM length M - 1, stores cosine values precomputed by function 'precompute_dct_cos' for M-point dct /// @param cosN length N - 1, stores cosine values precomputed by function 'precompute_dct_cos' for N-point dct /// @param M number of rows /// @param N number of columns template <typename TValue, typename TIndex = unsigned> inline void idct(TValue *mtx, TValue *out, TValue* buf, const TValue *cos, TIndex M, TIndex N) { //#pragma omp parallel for schedule(static) for (TIndex i = 0; i < M; ++i) { idct<TValue, TIndex>(mtx + i * N, out + i * N, buf + i*N, cos, N); } } } // End of namespace lee DREAMPLACE_END_NAMESPACE #endif
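// A naive O(N^2) reference (sketch, independent of the DREAMPLACE headers) for
// the transform computed by the iterative lee::dct above:
//   y[k] = sum_{n=0..N-1} x[n] * cos((n + 0.5) * k * PI / N)
// Such a direct evaluation is handy as a unit-test oracle for the fast
// power-of-two implementation. The input vector is example data only.
#include <cmath>
#include <cstdio>
#include <vector>

std::vector<double> dct_reference(const std::vector<double>& x) {
    const double PI = 3.14159265358979323846;
    const std::size_t N = x.size();
    std::vector<double> y(N, 0.0);
    for (std::size_t k = 0; k < N; ++k)
        for (std::size_t n = 0; n < N; ++n)
            y[k] += x[n] * std::cos((n + 0.5) * k * PI / N);
    return y;
}

int main() {
    std::vector<double> x = {1, 2, 3, 4, 4, 3, 2, 1}; // N = 8, a power of 2
    for (double v : dct_reference(x)) std::printf("%8.4f ", v);
    std::printf("\n");
    return 0;
}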
J2OrbitalSoA.h
////////////////////////////////////////////////////////////////////////////////////// // This file is distributed under the University of Illinois/NCSA Open Source License. // See LICENSE file in top directory for details. // // Copyright (c) 2016 Jeongnim Kim and QMCPACK developers. // // File developed by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp. // Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp. // Ye Luo, yeluo@anl.gov, Argonne National Laboratory // // File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp. ////////////////////////////////////////////////////////////////////////////////////// // -*- C++ -*- #ifndef QMCPLUSPLUS_TWOBODYJASTROW_OPTIMIZED_SOA_H #define QMCPLUSPLUS_TWOBODYJASTROW_OPTIMIZED_SOA_H #include <map> #include <numeric> #include "Configuration.h" #if !defined(QMC_BUILD_SANDBOX_ONLY) #include "QMCWaveFunctions/WaveFunctionComponent.h" #include "QMCWaveFunctions/Jastrow/DiffTwoBodyJastrowOrbital.h" #endif #include "Particle/DistanceTableData.h" #include "LongRange/StructFact.h" #include <simd/allocator.hpp> #include <simd/algorithm.hpp> namespace qmcplusplus { // helper class to activate KEcorr during optimizing Jastrow template<typename RT, class FT> class J2KECorrection { size_t num_groups_; std::vector<size_t> num_elec_in_groups_; RT num_elecs_; RT vol; RT G0mag; const std::vector<FT*>& F_; bool SK_enabled; public: J2KECorrection(const ParticleSet& targetPtcl, const std::vector<FT*>& F) : num_groups_(targetPtcl.groups()), num_elecs_(targetPtcl.getTotalNum()), vol(targetPtcl.Lattice.Volume), F_(F), SK_enabled(targetPtcl.SK != nullptr) { // compute num_elec_in_groups_ num_elec_in_groups_.reserve(3); for (int i = 0; i < num_groups_; i++) num_elec_in_groups_.push_back(targetPtcl.last(i) - targetPtcl.first(i)); if (SK_enabled) G0mag = std::sqrt(targetPtcl.SK->KLists.ksq[0]); } RT computeKEcorr() { if (!SK_enabled) return 0; const int numPoints = 1000; RT uk = 0.0; RT a = 1.0; for (int i = 0; i < num_groups_; i++) { int Ni = num_elec_in_groups_[i]; for (int j = 0; j < num_groups_; j++) { int Nj = num_elec_in_groups_[j]; if (F_[i * num_groups_ + j]) { FT& ufunc = *(F_[i * num_groups_ + j]); RT radius = ufunc.cutoff_radius; RT k = G0mag; RT dr = radius / (RT)(numPoints - 1); for (int ir = 0; ir < numPoints; ir++) { RT r = dr * (RT)ir; RT u = ufunc.evaluate(r); uk += 0.5 * 4.0 * M_PI * r * std::sin(k * r) / k * u * dr * (RT)Nj / (RT)(Ni + Nj); } } } } for (int iter = 0; iter < 20; iter++) a = uk / (4.0 * M_PI * (1.0 / (G0mag * G0mag) - 1.0 / (G0mag * G0mag + 1.0 / a))); return 4.0 * M_PI * a / (4.0 * vol) * num_elecs_; } }; /** @ingroup WaveFunctionComponent * @brief Specialization for two-body Jastrow function using multiple functors * * Each pair-type can have distinct function \f$u(r_{ij})\f$. * For electrons, distinct pair correlation functions are used * for spins up-up/down-down and up-down/down-up. * * Based on J2OrbitalSoA.h with these considerations * - DistanceTableData using SoA containers * - support mixed precision: FT::real_type != OHMMS_PRECISION * - loops over the groups: elminated PairID * - support simd function * - double the loop counts * - Memory use is O(N). 
*/ template<class FT> class J2OrbitalSoA : public WaveFunctionComponent { public: ///alias FuncType using FuncType = FT; ///type of each component U, dU, d2U; using valT = typename FT::real_type; ///element position type using posT = TinyVector<valT, OHMMS_DIM>; ///use the same container using RowContainer = DistanceTableData::RowContainer; using gContainer_type = VectorSoaContainer<valT, OHMMS_DIM>; // Ye: leaving this public is bad but currently used by unit tests. ///Container for \f$F[ig*NumGroups+jg]\f$. std::vector<FT*> F; protected: ///number of particles size_t N; ///number of particles + padded size_t N_padded; ///number of groups of the target particleset size_t NumGroups; ///diff value RealType DiffVal; ///Correction RealType KEcorr; ///\f$Uat[i] = sum_(j) u_{i,j}\f$ Vector<valT> Uat; ///\f$dUat[i] = sum_(j) du_{i,j}\f$ gContainer_type dUat; ///\f$d2Uat[i] = sum_(j) d2u_{i,j}\f$ Vector<valT> d2Uat; valT cur_Uat; aligned_vector<valT> cur_u, cur_du, cur_d2u; aligned_vector<valT> old_u, old_du, old_d2u; aligned_vector<valT> DistCompressed; aligned_vector<int> DistIndice; ///Uniquue J2 set for cleanup std::map<std::string, FT*> J2Unique; /// e-e table ID const int my_table_ID_; // helper for compute J2 Chiesa KE correction J2KECorrection<RealType, FT> j2_ke_corr_helper; public: J2OrbitalSoA(ParticleSet& p, int tid); J2OrbitalSoA(const J2OrbitalSoA& rhs) = delete; ~J2OrbitalSoA(); /* initialize storage */ void init(ParticleSet& p); /** add functor for (ia,ib) pair */ void addFunc(int ia, int ib, FT* j); void resetTargetParticleSet(ParticleSet& P) { if (dPsi) dPsi->resetTargetParticleSet(P); } /** check in an optimizable parameter * @param o a super set of optimizable variables */ void checkInVariables(opt_variables_type& active) { myVars.clear(); typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end()); while (it != it_end) { (*it).second->checkInVariables(active); (*it).second->checkInVariables(myVars); ++it; } } /** check out optimizable variables */ void checkOutVariables(const opt_variables_type& active) { myVars.getIndex(active); Optimizable = myVars.is_optimizable(); typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end()); while (it != it_end) { (*it).second->checkOutVariables(active); ++it; } if (dPsi) dPsi->checkOutVariables(active); } ///reset the value of all the unique Two-Body Jastrow functions void resetParameters(const opt_variables_type& active) { if (!Optimizable) return; typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end()); while (it != it_end) { (*it).second->resetParameters(active); ++it; } if (dPsi) dPsi->resetParameters(active); for (int i = 0; i < myVars.size(); ++i) { int ii = myVars.Index[i]; if (ii >= 0) myVars[i] = active[ii]; } } void finalizeOptimization() { KEcorr = j2_ke_corr_helper.computeKEcorr(); } /** print the state, e.g., optimizables */ void reportStatus(std::ostream& os) { typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end()); while (it != it_end) { (*it).second->myVars.print(os); ++it; } } WaveFunctionComponentPtr makeClone(ParticleSet& tqp) const; LogValueType evaluateLog(ParticleSet& P, ParticleSet::ParticleGradient_t& G, ParticleSet::ParticleLaplacian_t& L); void evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi); /** recompute internal data assuming distance table is fully ready */ void recompute(ParticleSet& P); PsiValueType ratio(ParticleSet& P, int iat); void 
evaluateRatios(VirtualParticleSet& VP, std::vector<ValueType>& ratios) { for (int k = 0; k < ratios.size(); ++k) ratios[k] = std::exp(Uat[VP.refPtcl] - computeU(VP.refPS, VP.refPtcl, VP.getDistTable(my_table_ID_).Distances[k])); } void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios); GradType evalGrad(ParticleSet& P, int iat); PsiValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat); void acceptMove(ParticleSet& P, int iat); inline void restore(int iat) {} /** compute G and L after the sweep */ void evaluateGL(ParticleSet& P, ParticleSet::ParticleGradient_t& G, ParticleSet::ParticleLaplacian_t& L, bool fromscratch = false); inline void registerData(ParticleSet& P, WFBufferType& buf) { if (Bytes_in_WFBuffer == 0) { Bytes_in_WFBuffer = buf.current(); buf.add(Uat.begin(), Uat.end()); buf.add(dUat.data(), dUat.end()); buf.add(d2Uat.begin(), d2Uat.end()); Bytes_in_WFBuffer = buf.current() - Bytes_in_WFBuffer; // free local space Uat.free(); dUat.free(); d2Uat.free(); } else { buf.forward(Bytes_in_WFBuffer); } } inline void copyFromBuffer(ParticleSet& P, WFBufferType& buf) { Uat.attachReference(buf.lendReference<valT>(N), N); dUat.attachReference(N, N_padded, buf.lendReference<valT>(N_padded * OHMMS_DIM)); d2Uat.attachReference(buf.lendReference<valT>(N), N); } LogValueType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch = false) { evaluateGL(P, P.G, P.L, false); buf.forward(Bytes_in_WFBuffer); return LogValue; } /*@{ internal compute engines*/ inline valT computeU(const ParticleSet& P, int iat, const RealType* restrict dist) { valT curUat(0); const int igt = P.GroupID[iat] * NumGroups; for (int jg = 0; jg < NumGroups; ++jg) { const FuncType& f2(*F[igt + jg]); int iStart = P.first(jg); int iEnd = P.last(jg); curUat += f2.evaluateV(iat, iStart, iEnd, dist, DistCompressed.data()); } return curUat; } inline void computeU3(const ParticleSet& P, int iat, const RealType* restrict dist, RealType* restrict u, RealType* restrict du, RealType* restrict d2u, bool triangle = false); /** compute gradient */ inline posT accumulateG(const valT* restrict du, const RowContainer& displ) const { posT grad; for (int idim = 0; idim < OHMMS_DIM; ++idim) { const valT* restrict dX = displ.data(idim); valT s = valT(); #pragma omp simd reduction(+ : s) aligned(du, dX) for (int jat = 0; jat < N; ++jat) s += du[jat] * dX[jat]; grad[idim] = s; } return grad; } /**@} */ RealType ChiesaKEcorrection() { return KEcorr = j2_ke_corr_helper.computeKEcorr(); } RealType KECorrection() { return KEcorr; } }; template<typename FT> J2OrbitalSoA<FT>::J2OrbitalSoA(ParticleSet& p, int tid) : my_table_ID_(p.addTable(p, DT_SOA)), j2_ke_corr_helper(p, F) { init(p); KEcorr = 0.0; ClassName = "J2OrbitalSoA"; } template<typename FT> J2OrbitalSoA<FT>::~J2OrbitalSoA() { auto it = J2Unique.begin(); while (it != J2Unique.end()) { delete ((*it).second); ++it; } } //need to clean up J2Unique template<typename FT> void J2OrbitalSoA<FT>::init(ParticleSet& p) { N = p.getTotalNum(); N_padded = getAlignedSize<valT>(N); NumGroups = p.groups(); Uat.resize(N); dUat.resize(N); d2Uat.resize(N); cur_u.resize(N); cur_du.resize(N); cur_d2u.resize(N); old_u.resize(N); old_du.resize(N); old_d2u.resize(N); F.resize(NumGroups * NumGroups, nullptr); DistCompressed.resize(N); DistIndice.resize(N); } template<typename FT> void J2OrbitalSoA<FT>::addFunc(int ia, int ib, FT* j) { if (ia == ib) { if (ia == 0) //first time, assign everything { int ij = 0; for (int ig = 0; ig < NumGroups; ++ig) for (int jg = 0; jg < NumGroups; 
++jg, ++ij) if (F[ij] == nullptr) F[ij] = j; } else F[ia * NumGroups + ib] = j; } else { if (N == 2) { // a very special case, 1 up + 1 down // uu/dd was prevented by the builder for (int ig = 0; ig < NumGroups; ++ig) for (int jg = 0; jg < NumGroups; ++jg) F[ig * NumGroups + jg] = j; } else { // generic case F[ia * NumGroups + ib] = j; F[ib * NumGroups + ia] = j; } } std::stringstream aname; aname << ia << ib; J2Unique[aname.str()] = j; } template<typename FT> WaveFunctionComponentPtr J2OrbitalSoA<FT>::makeClone(ParticleSet& tqp) const { J2OrbitalSoA<FT>* j2copy = new J2OrbitalSoA<FT>(tqp, -1); if (dPsi) j2copy->dPsi = dPsi->makeClone(tqp); std::map<const FT*, FT*> fcmap; for (int ig = 0; ig < NumGroups; ++ig) for (int jg = ig; jg < NumGroups; ++jg) { int ij = ig * NumGroups + jg; if (F[ij] == 0) continue; typename std::map<const FT*, FT*>::iterator fit = fcmap.find(F[ij]); if (fit == fcmap.end()) { FT* fc = new FT(*F[ij]); j2copy->addFunc(ig, jg, fc); //if (dPsi) (j2copy->dPsi)->addFunc(aname.str(),ig,jg,fc); fcmap[F[ij]] = fc; } } j2copy->Optimizable = Optimizable; return j2copy; } /** intenal function to compute \f$\sum_j u(r_j), du/dr, d2u/dr2\f$ * @param P particleset * @param iat particle index * @param dist starting distance * @param u starting value * @param du starting first deriv * @param d2u starting second deriv */ template<typename FT> inline void J2OrbitalSoA<FT>::computeU3(const ParticleSet& P, int iat, const RealType* restrict dist, RealType* restrict u, RealType* restrict du, RealType* restrict d2u, bool triangle) { const int jelmax = triangle ? iat : N; constexpr valT czero(0); std::fill_n(u, jelmax, czero); std::fill_n(du, jelmax, czero); std::fill_n(d2u, jelmax, czero); const int igt = P.GroupID[iat] * NumGroups; for (int jg = 0; jg < NumGroups; ++jg) { const FuncType& f2(*F[igt + jg]); int iStart = P.first(jg); int iEnd = std::min(jelmax, P.last(jg)); f2.evaluateVGL(iat, iStart, iEnd, dist, u, du, d2u, DistCompressed.data(), DistIndice.data()); } //u[iat]=czero; //du[iat]=czero; //d2u[iat]=czero; } template<typename FT> typename J2OrbitalSoA<FT>::PsiValueType J2OrbitalSoA<FT>::ratio(ParticleSet& P, int iat) { //only ratio, ready to compute it again UpdateMode = ORB_PBYP_RATIO; cur_Uat = computeU(P, iat, P.getDistTable(my_table_ID_).Temp_r.data()); return std::exp(static_cast<PsiValueType>(Uat[iat] - cur_Uat)); } template<typename FT> inline void J2OrbitalSoA<FT>::evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios) { const auto& d_table = P.getDistTable(my_table_ID_); const auto* restrict dist = d_table.Temp_r.data(); for (int ig = 0; ig < NumGroups; ++ig) { const int igt = ig * NumGroups; valT sumU(0); for (int jg = 0; jg < NumGroups; ++jg) { const FuncType& f2(*F[igt + jg]); int iStart = P.first(jg); int iEnd = P.last(jg); sumU += f2.evaluateV(-1, iStart, iEnd, dist, DistCompressed.data()); } for (int i = P.first(ig); i < P.last(ig); ++i) { // remove self-interaction const valT Uself = F[igt + ig]->evaluate(dist[i]); ratios[i] = std::exp(Uat[i] + Uself - sumU); } } } template<typename FT> typename J2OrbitalSoA<FT>::GradType J2OrbitalSoA<FT>::evalGrad(ParticleSet& P, int iat) { return GradType(dUat[iat]); } template<typename FT> typename J2OrbitalSoA<FT>::PsiValueType J2OrbitalSoA<FT>::ratioGrad(ParticleSet& P, int iat, GradType& grad_iat) { UpdateMode = ORB_PBYP_PARTIAL; computeU3(P, iat, P.getDistTable(my_table_ID_).Temp_r.data(), cur_u.data(), cur_du.data(), cur_d2u.data()); cur_Uat = simd::accumulate_n(cur_u.data(), N, valT()); DiffVal = Uat[iat] 
- cur_Uat; grad_iat += accumulateG(cur_du.data(), P.getDistTable(my_table_ID_).Temp_dr); return std::exp(static_cast<PsiValueType>(DiffVal)); } template<typename FT> void J2OrbitalSoA<FT>::acceptMove(ParticleSet& P, int iat) { // get the old u, du, d2u const auto& d_table = P.getDistTable(my_table_ID_); computeU3(P, iat, d_table.Distances[iat], old_u.data(), old_du.data(), old_d2u.data()); if (UpdateMode == ORB_PBYP_RATIO) { //ratio-only during the move; need to compute derivatives const auto* restrict dist = d_table.Temp_r.data(); computeU3(P, iat, dist, cur_u.data(), cur_du.data(), cur_d2u.data()); } valT cur_d2Uat(0); const auto& new_dr = d_table.Temp_dr; const auto& old_dr = d_table.Displacements[iat]; constexpr valT lapfac = OHMMS_DIM - RealType(1); #pragma omp simd reduction(+ : cur_d2Uat) for (int jat = 0; jat < N; jat++) { const valT du = cur_u[jat] - old_u[jat]; const valT newl = cur_d2u[jat] + lapfac * cur_du[jat]; const valT dl = old_d2u[jat] + lapfac * old_du[jat] - newl; Uat[jat] += du; d2Uat[jat] += dl; cur_d2Uat -= newl; } posT cur_dUat; for (int idim = 0; idim < OHMMS_DIM; ++idim) { const valT* restrict new_dX = new_dr.data(idim); const valT* restrict old_dX = old_dr.data(idim); const valT* restrict cur_du_pt = cur_du.data(); const valT* restrict old_du_pt = old_du.data(); valT* restrict save_g = dUat.data(idim); valT cur_g = cur_dUat[idim]; #pragma omp simd reduction(+ : cur_g) aligned(old_dX, new_dX, save_g, cur_du_pt, old_du_pt) for (int jat = 0; jat < N; jat++) { const valT newg = cur_du_pt[jat] * new_dX[jat]; const valT dg = newg - old_du_pt[jat] * old_dX[jat]; save_g[jat] -= dg; cur_g += newg; } cur_dUat[idim] = cur_g; } LogValue += Uat[iat] - cur_Uat; Uat[iat] = cur_Uat; dUat(iat) = cur_dUat; d2Uat[iat] = cur_d2Uat; } template<typename FT> void J2OrbitalSoA<FT>::recompute(ParticleSet& P) { const auto& d_table = P.getDistTable(my_table_ID_); for (int ig = 0; ig < NumGroups; ++ig) { for (int iat = P.first(ig), last = P.last(ig); iat < last; ++iat) { computeU3(P, iat, d_table.Distances[iat], cur_u.data(), cur_du.data(), cur_d2u.data(), true); Uat[iat] = simd::accumulate_n(cur_u.data(), iat, valT()); posT grad; valT lap(0); const valT* restrict u = cur_u.data(); const valT* restrict du = cur_du.data(); const valT* restrict d2u = cur_d2u.data(); const RowContainer& displ = d_table.Displacements[iat]; constexpr valT lapfac = OHMMS_DIM - RealType(1); #pragma omp simd reduction(+ : lap) aligned(du, d2u) for (int jat = 0; jat < iat; ++jat) lap += d2u[jat] + lapfac * du[jat]; for (int idim = 0; idim < OHMMS_DIM; ++idim) { const valT* restrict dX = displ.data(idim); valT s = valT(); #pragma omp simd reduction(+ : s) aligned(du, dX) for (int jat = 0; jat < iat; ++jat) s += du[jat] * dX[jat]; grad[idim] = s; } dUat(iat) = grad; d2Uat[iat] = -lap; // add the contribution from the upper triangle #pragma omp simd aligned(u, du, d2u) for (int jat = 0; jat < iat; jat++) { Uat[jat] += u[jat]; d2Uat[jat] -= d2u[jat] + lapfac * du[jat]; } for (int idim = 0; idim < OHMMS_DIM; ++idim) { valT* restrict save_g = dUat.data(idim); const valT* restrict dX = displ.data(idim); #pragma omp simd aligned(save_g, du, dX) for (int jat = 0; jat < iat; jat++) save_g[jat] -= du[jat] * dX[jat]; } } } } template<typename FT> typename J2OrbitalSoA<FT>::LogValueType J2OrbitalSoA<FT>::evaluateLog(ParticleSet& P, ParticleSet::ParticleGradient_t& G, ParticleSet::ParticleLaplacian_t& L) { evaluateGL(P, G, L, true); return LogValue; } template<typename FT> void J2OrbitalSoA<FT>::evaluateGL(ParticleSet& P, 
ParticleSet::ParticleGradient_t& G, ParticleSet::ParticleLaplacian_t& L, bool fromscratch) { if (fromscratch) recompute(P); LogValue = valT(0); for (int iat = 0; iat < N; ++iat) { LogValue += Uat[iat]; G[iat] += dUat[iat]; L[iat] += d2Uat[iat]; } LogValue = -LogValue * 0.5; } template<typename FT> void J2OrbitalSoA<FT>::evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi) { LogValue = 0.0; const DistanceTableData& d_ee(P.getDistTable(my_table_ID_)); valT dudr, d2udr2; Tensor<valT, DIM> ident; grad_grad_psi = 0.0; ident.diagonal(1.0); for (int i = 1; i < N; ++i) { const valT* dist = d_ee.Distances[i]; const RowContainer& displ = d_ee.Displacements[i]; auto ig = P.GroupID[i]; const int igt = ig * NumGroups; for (int j = 0; j < i; ++j) { auto r = dist[j]; auto rinv = 1.0 / r; auto dr = displ[j]; auto jg = P.GroupID[j]; auto uij = F[igt + jg]->evaluate(r, dudr, d2udr2); LogValue -= uij; auto hess = rinv * rinv * outerProduct(dr, dr) * (d2udr2 - dudr * rinv) + ident * dudr * rinv; grad_grad_psi[i] -= hess; grad_grad_psi[j] -= hess; } } } } // namespace qmcplusplus #endif
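// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original J2OrbitalSoA source): the
// per-particle accumulators above satisfy Uat[i] = sum_{j != i} u(r_ij), so
// summing them counts every pair twice, and evaluateGL() above ends with
// LogValue = -0.5 * sum_i Uat[i].  The standalone toy below reproduces that
// bookkeeping with an assumed pair function u(r) = A/r in one dimension; all
// names in it are hypothetical and exist only to illustrate the accounting.
#include <cmath>
#include <vector>

struct ToyPairFunction
{
  double A = 1.0;
  double operator()(double r) const { return A / r; } // assumed toy u(r), for illustration only
};

inline double toy_j2_log(const std::vector<double>& x, const ToyPairFunction& u)
{
  const int n = static_cast<int>(x.size());
  std::vector<double> Uat(n, 0.0);
  for (int i = 0; i < n; ++i)
    for (int j = 0; j < n; ++j)
      if (j != i)
        Uat[i] += u(std::abs(x[i] - x[j])); // each pair lands in both Uat[i] and Uat[j]
  double sum = 0.0;
  for (int i = 0; i < n; ++i)
    sum += Uat[i];
  return -0.5 * sum; // halve the double-counted pair sum, mirroring evaluateGL
}
// ---------------------------------------------------------------------------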
task-two.c
/*
 * task-two.c -- Archer testcase
 */
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
//
// See tools/archer/LICENSE.txt for details.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

// RUN: %libarcher-compile-and-run-race | FileCheck %s
// REQUIRES: tsan
#include <omp.h>
#include <stdio.h>
#include <unistd.h>

#define NUM_THREADS 2

int main(int argc, char *argv[]) {
  int var = 0;
  int i;

#pragma omp parallel for num_threads(NUM_THREADS) shared(var) \
    schedule(static, 1)
  for (i = 0; i < NUM_THREADS; i++) {
#pragma omp task shared(var) if (0) // the task is inlined and executed locally
    {
      var++;
    }
  }

  int error = (var != 2);
  fprintf(stderr, "DONE\n");
  return error;
}

// CHECK: WARNING: ThreadSanitizer: data race
// CHECK-NEXT: {{(Write|Read)}} of size 4
// CHECK-NEXT: #0 {{.*}}task-two.c:30
// CHECK: Previous write of size 4
// CHECK-NEXT: #0 {{.*}}task-two.c:30
// CHECK: DONE
// CHECK: ThreadSanitizer: reported 1 warnings
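/*
 * Illustrative note (not part of the Archer test above): the race is
 * intentional -- `if (0)` makes every task undeferred, so each increment runs
 * immediately on the encountering thread, and two threads then update the
 * shared counter without synchronization.  The sketch below shows one assumed
 * race-free variant that protects the update with an atomic; the helper name
 * is hypothetical and only illustrates the fix, it is not part of the test.
 */
#include <omp.h>

static int count_without_race(void) {
  int var = 0;
#pragma omp parallel for num_threads(2) shared(var) schedule(static, 1)
  for (int i = 0; i < 2; i++) {
#pragma omp task shared(var) if (0)
    {
#pragma omp atomic
      var++; /* atomic update removes the data race reported by TSan */
    }
  }
  return var; /* deterministically 2 */
}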
convolution_3x3_pack4.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd64_transform_kernel_pack4_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt) { // winograd63 transform kernel Mat kernel_tm; kernel_tm.create(8 * 8, inch, outch); const float ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j = 0; j < 8; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 64-inch-outch // dst = 4b-4a-inch/4a-64-outch/4b; #if __aarch64__ kernel_tm_pack4.create(2 * inch / 4, 64, (outch / 4) / 2 + (outch / 4) % 2, (size_t)4u * 16, 16); #else kernel_tm_pack4.create(inch / 4, 64, outch / 4, (size_t)4u * 16, 16); #endif int q = 0; #if __aarch64__ for (; q + 7 < outch; q += 8) { const Mat k0 = kernel_tm.channel(q); const Mat k1 = kernel_tm.channel(q + 1); const Mat k2 = kernel_tm.channel(q + 2); const Mat k3 = kernel_tm.channel(q + 3); const Mat k4 = kernel_tm.channel(q + 4); const Mat k5 = kernel_tm.channel(q + 5); const Mat k6 = kernel_tm.channel(q + 6); const Mat k7 = kernel_tm.channel(q + 7); Mat g0 = kernel_tm_pack4.channel(q / 8); for (int k = 0; k < 64; k++) { float* g00 = g0.row(k); for (int p = 0; p + 3 < inch; p += 4) { const float* k00 = k0.row(p); const float* k01 = k0.row(p + 1); const float* k02 = k0.row(p + 2); const float* k03 = k0.row(p + 3); const float* k10 = k1.row(p); const float* k11 = k1.row(p + 1); const float* k12 = k1.row(p + 2); const float* k13 = k1.row(p + 3); const float* k20 = k2.row(p); const float* k21 = k2.row(p + 1); const float* k22 = k2.row(p + 2); const float* k23 = k2.row(p + 3); const float* k30 = k3.row(p); const float* k31 = k3.row(p + 1); const float* k32 = k3.row(p + 2); const float* k33 = k3.row(p + 3); const float* k40 = k4.row(p); const float* k41 = k4.row(p + 1); const float* k42 = k4.row(p + 2); const float* k43 = k4.row(p + 3); const float* k50 = k5.row(p); const float* k51 = k5.row(p + 1); const 
float* k52 = k5.row(p + 2); const float* k53 = k5.row(p + 3); const float* k60 = k6.row(p); const float* k61 = k6.row(p + 1); const float* k62 = k6.row(p + 2); const float* k63 = k6.row(p + 3); const float* k70 = k7.row(p); const float* k71 = k7.row(p + 1); const float* k72 = k7.row(p + 2); const float* k73 = k7.row(p + 3); g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k]; g00[4] = k40[k]; g00[5] = k50[k]; g00[6] = k60[k]; g00[7] = k70[k]; g00[8] = k01[k]; g00[9] = k11[k]; g00[10] = k21[k]; g00[11] = k31[k]; g00[12] = k41[k]; g00[13] = k51[k]; g00[14] = k61[k]; g00[15] = k71[k]; g00[16] = k02[k]; g00[17] = k12[k]; g00[18] = k22[k]; g00[19] = k32[k]; g00[20] = k42[k]; g00[21] = k52[k]; g00[22] = k62[k]; g00[23] = k72[k]; g00[24] = k03[k]; g00[25] = k13[k]; g00[26] = k23[k]; g00[27] = k33[k]; g00[28] = k43[k]; g00[29] = k53[k]; g00[30] = k63[k]; g00[31] = k73[k]; g00 += 32; } } } #endif // __aarch64__ for (; q + 3 < outch; q += 4) { const Mat k0 = kernel_tm.channel(q); const Mat k1 = kernel_tm.channel(q + 1); const Mat k2 = kernel_tm.channel(q + 2); const Mat k3 = kernel_tm.channel(q + 3); #if __aarch64__ Mat g0 = kernel_tm_pack4.channel(q / 8 + (q % 8) / 4); #else Mat g0 = kernel_tm_pack4.channel(q / 4); #endif for (int k = 0; k < 64; k++) { float* g00 = g0.row(k); for (int p = 0; p + 3 < inch; p += 4) { const float* k00 = k0.row(p); const float* k01 = k0.row(p + 1); const float* k02 = k0.row(p + 2); const float* k03 = k0.row(p + 3); const float* k10 = k1.row(p); const float* k11 = k1.row(p + 1); const float* k12 = k1.row(p + 2); const float* k13 = k1.row(p + 3); const float* k20 = k2.row(p); const float* k21 = k2.row(p + 1); const float* k22 = k2.row(p + 2); const float* k23 = k2.row(p + 3); const float* k30 = k3.row(p); const float* k31 = k3.row(p + 1); const float* k32 = k3.row(p + 2); const float* k33 = k3.row(p + 3); g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k]; g00[4] = k01[k]; g00[5] = k11[k]; g00[6] = k21[k]; g00[7] = k31[k]; g00[8] = k02[k]; g00[9] = k12[k]; g00[10] = k22[k]; g00[11] = k32[k]; g00[12] = k03[k]; g00[13] = k13[k]; g00[14] = k23[k]; g00[15] = k33[k]; g00 += 16; } } } } static void conv3x3s1_winograd64_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); const float* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} 
// }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8][4]; // tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) { const float* r0 = img0.row(i * 6) + (j * 6) * 4; for (int m = 0; m < 8; m++) { float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r03 = vld1q_f32(r0 + 12); float32x4_t _r04 = vld1q_f32(r0 + 16); float32x4_t _r05 = vld1q_f32(r0 + 20); float32x4_t _r06 = vld1q_f32(r0 + 24); float32x4_t _r07 = vld1q_f32(r0 + 28); float32x4_t _tmp0m = vmlaq_n_f32(vsubq_f32(_r00, _r06), vsubq_f32(_r04, _r02), 5.25f); float32x4_t _tmp7m = vmlaq_n_f32(vsubq_f32(_r07, _r01), vsubq_f32(_r03, _r05), 5.25f); vst1q_f32(tmp[0][m], _tmp0m); vst1q_f32(tmp[7][m], _tmp7m); // tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; // tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_r02, _r06), _r04, 4.25f); float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_r01, _r05), _r03, 4.25f); // float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); // float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); float32x4_t _tmp1m = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _tmp2m = vsubq_f32(_tmp12a, _tmp12b); vst1q_f32(tmp[1][m], _tmp1m); vst1q_f32(tmp[2][m], _tmp2m); // tmp[1][m] = tmp12a + tmp12b; // tmp[2][m] = tmp12a - tmp12b; float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_r06, _r02, 0.25f), _r04, 1.25f); float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 0.5f), _r03, 2.5f), _r05, 2.f); // float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); // float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); float32x4_t _tmp3m = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _tmp4m = vsubq_f32(_tmp34a, _tmp34b); vst1q_f32(tmp[3][m], _tmp3m); vst1q_f32(tmp[4][m], _tmp4m); // tmp[3][m] = tmp34a + tmp34b; // tmp[4][m] = tmp34a - tmp34b; float32x4_t _tmp56a = vmlaq_n_f32(_r06, vmlsq_n_f32(_r02, _r04, 1.25f), 4.f); float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 2.f), _r03, 2.5f), _r05, 0.5f); // float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); // float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); float32x4_t _tmp5m = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _tmp6m = vsubq_f32(_tmp56a, _tmp56b); vst1q_f32(tmp[5][m], _tmp5m); vst1q_f32(tmp[6][m], _tmp6m); // tmp[5][m] = tmp56a + tmp56b; // tmp[6][m] = tmp56a - tmp56b; r0 += w * 4; } float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 8 + j) * 4; float* r0_tm_1 = r0_tm_0 + tiles * 4; float* r0_tm_2 = r0_tm_0 + tiles * 8; float* r0_tm_3 = r0_tm_0 + tiles * 12; float* r0_tm_4 = r0_tm_0 + tiles * 16; float* r0_tm_5 = r0_tm_0 + tiles * 20; float* r0_tm_6 = r0_tm_0 + tiles * 24; float* r0_tm_7 = r0_tm_0 + tiles * 28; for (int m = 0; m < 8; m++) { float32x4_t _tmp00 = vld1q_f32(tmp[m][0]); float32x4_t _tmp01 = vld1q_f32(tmp[m][1]); float32x4_t _tmp02 = vld1q_f32(tmp[m][2]); 
float32x4_t _tmp03 = vld1q_f32(tmp[m][3]); float32x4_t _tmp04 = vld1q_f32(tmp[m][4]); float32x4_t _tmp05 = vld1q_f32(tmp[m][5]); float32x4_t _tmp06 = vld1q_f32(tmp[m][6]); float32x4_t _tmp07 = vld1q_f32(tmp[m][7]); float32x4_t _r0tm0 = vmlaq_n_f32(vsubq_f32(_tmp00, _tmp06), vsubq_f32(_tmp04, _tmp02), 5.25f); float32x4_t _r0tm7 = vmlaq_n_f32(vsubq_f32(_tmp07, _tmp01), vsubq_f32(_tmp03, _tmp05), 5.25f); // r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; // r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_tmp02, _tmp06), _tmp04, 4.25f); float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_tmp01, _tmp05), _tmp03, 4.25f); // float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); // float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25); float32x4_t _r0tm1 = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _r0tm2 = vsubq_f32(_tmp12a, _tmp12b); // r0_tm[1] = tmp12a + tmp12b; // r0_tm[2] = tmp12a - tmp12b; float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f); float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f); // float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); // float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); float32x4_t _r0tm3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _r0tm4 = vsubq_f32(_tmp34a, _tmp34b); // r0_tm[3] = tmp34a + tmp34b; // r0_tm[4] = tmp34a - tmp34b; float32x4_t _tmp56a = vmlaq_n_f32(_tmp06, vmlsq_n_f32(_tmp02, _tmp04, 1.25f), 4.f); float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f); // float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); // float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); float32x4_t _r0tm5 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _r0tm6 = vsubq_f32(_tmp56a, _tmp56b); // r0_tm[5] = tmp56a + tmp56b; // r0_tm[6] = tmp56a - tmp56b; vst1q_f32(r0_tm_0, _r0tm0); vst1q_f32(r0_tm_1, _r0tm1); vst1q_f32(r0_tm_2, _r0tm2); vst1q_f32(r0_tm_3, _r0tm3); vst1q_f32(r0_tm_4, _r0tm4); vst1q_f32(r0_tm_5, _r0tm5); vst1q_f32(r0_tm_6, _r0tm6); vst1q_f32(r0_tm_7, _r0tm7); r0_tm_0 += tiles * 32; r0_tm_1 += tiles * 32; r0_tm_2 += tiles * 32; r0_tm_3 += tiles * 32; r0_tm_4 += tiles * 32; r0_tm_5 += tiles * 32; r0_tm_6 += tiles * 32; r0_tm_7 += tiles * 32; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; #if __aarch64__ if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator); #else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else if 
(tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { float* tm2p = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v8.4s, v9.4s, v10.4s, v11.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" "st1 {v4.4s}, [%1], #16 \n" "st1 {v8.4s}, [%1], #16 \n" "sub %0, %0, #128 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v5.4s}, [%1], #16 \n" "st1 {v9.4s}, [%1], #16 \n" "st1 {v2.4s}, [%1], #16 \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v10.4s}, [%1], #16 \n" "st1 {v3.4s}, [%1], #16 \n" "st1 {v7.4s}, [%1], #16 \n" "st1 {v11.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); r0 += bottom_blob_tm.cstep * 4; } } #endif for (; i + 7 < tiles; i += 8) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8); #else float* tm2p = tm2.row(i / 8); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" "sub %0, %0, #64 \n" "st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); #else asm volatile( "pld [%0, #512] \n" "vldm %0!, {d0-d7} \n" "pld [%0, #512] \n" "vldm %0, {d16-d23} \n" // transpose 8x4 "vtrn.32 q0, q1 \n" "vtrn.32 q2, q3 \n" "vtrn.32 q8, q9 \n" "vtrn.32 q10, q11 \n" "vswp d1, d4 \n" "vswp d3, d6 \n" "vswp d17, d20 \n" "vswp d19, d22 \n" "vswp q1, q8 \n" "vswp q3, q10 \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "sub %0, %0, #64 \n" "vst1.f32 {d4-d7}, [%1 :128]! \n" "vst1.f32 {d20-d23}, [%1 :128]! 
\n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"); #endif r0 += bottom_blob_tm.cstep * 4; } } for (; i + 3 < tiles; i += 4) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); #else asm volatile( "pld [%0, #512] \n" "vldm %0, {d0-d7} \n" "vstm %1!, {d0-d7} \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } for (; i + 1 < tiles; i += 2) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.4s, v1.4s}, [%0] \n" "st1 {v0.4s, v1.4s}, [%1], #32 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1"); #else asm volatile( "pld [%0, #256] \n" "vld1.f32 {d0-d3}, [%0 :128] \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } for (; i < tiles; i++) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0"); #else asm volatile( "pld [%0, #128] \n" "vld1.f32 {d0-d1}, [%0 :128] \n" "vst1.f32 {d0-d1}, [%1 :128]! 
\n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, elemsize, elempack, opt.workspace_allocator); int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ int nn_outch = 0; nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; float* output0_tm = top_blob_tm.channel(p); float* output1_tm = top_blob_tm.channel(p + 1); const Mat kernel01_tm = kernel_tm.channel(pp); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w0011_01 "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "fmla v20.4s, v5.4s, v0.s[0] \n" "fmla v21.4s, v5.4s, v0.s[1] \n" "fmla v22.4s, v5.4s, v0.s[2] \n" "fmla v23.4s, v5.4s, v0.s[3] \n" "fmla v24.4s, v5.4s, v1.s[0] \n" "fmla v25.4s, v5.4s, v1.s[1] \n" "fmla v26.4s, v5.4s, v1.s[2] \n" "fmla v27.4s, v5.4s, v1.s[3] \n" "fmla v28.4s, v5.4s, v2.s[0] \n" "fmla v29.4s, v5.4s, v2.s[1] \n" "fmla v30.4s, v5.4s, v2.s[2] \n" "fmla v31.4s, v5.4s, v2.s[3] \n" "fmla v8.4s, v6.4s, v3.s[0] \n" "fmla v9.4s, v6.4s, v3.s[1] \n" "fmla v10.4s, v6.4s, v3.s[2] \n" "fmla v11.4s, v6.4s, v3.s[3] \n" "fmla v20.4s, v7.4s, v3.s[0] \n" "fmla v21.4s, v7.4s, v3.s[1] \n" "fmla v22.4s, v7.4s, v3.s[2] \n" "fmla v23.4s, v7.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "fmla v12.4s, v6.4s, v0.s[0] \n" "fmla v13.4s, v6.4s, v0.s[1] \n" "fmla v14.4s, v6.4s, v0.s[2] \n" "fmla v15.4s, v6.4s, v0.s[3] \n" "fmla v16.4s, v6.4s, v1.s[0] \n" "fmla v17.4s, v6.4s, v1.s[1] \n" "fmla v18.4s, v6.4s, v1.s[2] \n" "fmla v19.4s, v6.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v0.s[0] \n" "fmla v25.4s, v7.4s, v0.s[1] \n" "fmla v26.4s, v7.4s, v0.s[2] \n" "fmla v27.4s, v7.4s, v0.s[3] \n" "fmla v28.4s, v7.4s, v1.s[0] \n" "fmla v29.4s, v7.4s, v1.s[1] \n" "fmla v30.4s, v7.4s, v1.s[2] \n" "fmla v31.4s, v7.4s, 
v1.s[3] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w2233_01 "fmla v8.4s, v4.4s, v2.s[0] \n" "fmla v9.4s, v4.4s, v2.s[1] \n" "fmla v10.4s, v4.4s, v2.s[2] \n" "fmla v11.4s, v4.4s, v2.s[3] \n" "fmla v12.4s, v4.4s, v3.s[0] \n" "fmla v13.4s, v4.4s, v3.s[1] \n" "fmla v14.4s, v4.4s, v3.s[2] \n" "fmla v15.4s, v4.4s, v3.s[3] \n" "fmla v20.4s, v5.4s, v2.s[0] \n" "fmla v21.4s, v5.4s, v2.s[1] \n" "fmla v22.4s, v5.4s, v2.s[2] \n" "fmla v23.4s, v5.4s, v2.s[3] \n" "fmla v24.4s, v5.4s, v3.s[0] \n" "fmla v25.4s, v5.4s, v3.s[1] \n" "fmla v26.4s, v5.4s, v3.s[2] \n" "fmla v27.4s, v5.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "fmla v16.4s, v4.4s, v0.s[0] \n" "fmla v17.4s, v4.4s, v0.s[1] \n" "fmla v18.4s, v4.4s, v0.s[2] \n" "fmla v19.4s, v4.4s, v0.s[3] \n" "fmla v28.4s, v5.4s, v0.s[0] \n" "fmla v29.4s, v5.4s, v0.s[1] \n" "fmla v30.4s, v5.4s, v0.s[2] \n" "fmla v31.4s, v5.4s, v0.s[3] \n" "fmla v8.4s, v6.4s, v1.s[0] \n" "fmla v9.4s, v6.4s, v1.s[1] \n" "fmla v10.4s, v6.4s, v1.s[2] \n" "fmla v11.4s, v6.4s, v1.s[3] \n" "fmla v12.4s, v6.4s, v2.s[0] \n" "fmla v13.4s, v6.4s, v2.s[1] \n" "fmla v14.4s, v6.4s, v2.s[2] \n" "fmla v15.4s, v6.4s, v2.s[3] \n" "fmla v16.4s, v6.4s, v3.s[0] \n" "fmla v17.4s, v6.4s, v3.s[1] \n" "fmla v18.4s, v6.4s, v3.s[2] \n" "fmla v19.4s, v6.4s, v3.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v20.4s, v7.4s, v1.s[0] \n" "fmla v21.4s, v7.4s, v1.s[1] \n" "fmla v22.4s, v7.4s, v1.s[2] \n" "fmla v23.4s, v7.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v2.s[0] \n" "fmla v25.4s, v7.4s, v2.s[1] \n" "fmla v26.4s, v7.4s, v2.s[2] \n" "fmla v27.4s, v7.4s, v2.s[3] \n" "fmla v28.4s, v7.4s, v3.s[0] \n" "fmla v29.4s, v7.4s, v3.s[1] \n" "fmla v30.4s, v7.4s, v3.s[2] \n" "fmla v31.4s, v7.4s, v3.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // r4 r5 r6 r7 "fmla v16.4s, 
v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v20.4s, v8.4s, v4.s[0] \n" "fmla v21.4s, v8.4s, v5.s[0] \n" "fmla v22.4s, v8.4s, v6.s[0] \n" "fmla v23.4s, v8.4s, v7.s[0] \n" "fmla v24.4s, v9.4s, v0.s[0] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v26.4s, v9.4s, v2.s[0] \n" "fmla v27.4s, v9.4s, v3.s[0] \n" "fmla v28.4s, v9.4s, v4.s[0] \n" "fmla v29.4s, v9.4s, v5.s[0] \n" "fmla v30.4s, v9.4s, v6.s[0] \n" "fmla v31.4s, v9.4s, v7.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v10.4s, v3.s[1] \n" "fmla v20.4s, v10.4s, v4.s[1] \n" "fmla v21.4s, v10.4s, v5.s[1] \n" "fmla v22.4s, v10.4s, v6.s[1] \n" "fmla v23.4s, v10.4s, v7.s[1] \n" "fmla v24.4s, v11.4s, v0.s[1] \n" "fmla v25.4s, v11.4s, v1.s[1] \n" "fmla v26.4s, v11.4s, v2.s[1] \n" "fmla v27.4s, v11.4s, v3.s[1] \n" "fmla v28.4s, v11.4s, v4.s[1] \n" "fmla v29.4s, v11.4s, v5.s[1] \n" "fmla v30.4s, v11.4s, v6.s[1] \n" "fmla v31.4s, v11.4s, v7.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v12.4s, v2.s[2] \n" "fmla v19.4s, v12.4s, v3.s[2] \n" "fmla v20.4s, v12.4s, v4.s[2] \n" "fmla v21.4s, v12.4s, v5.s[2] \n" "fmla v22.4s, v12.4s, v6.s[2] \n" "fmla v23.4s, v12.4s, v7.s[2] \n" "fmla v24.4s, v13.4s, v0.s[2] \n" "fmla v25.4s, v13.4s, v1.s[2] \n" "fmla v26.4s, v13.4s, v2.s[2] \n" "fmla v27.4s, v13.4s, v3.s[2] \n" "fmla v28.4s, v13.4s, v4.s[2] \n" "fmla v29.4s, v13.4s, v5.s[2] \n" "fmla v30.4s, v13.4s, v6.s[2] \n" "fmla v31.4s, v13.4s, v7.s[2] \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v14.4s, v2.s[3] \n" "fmla v19.4s, v14.4s, v3.s[3] \n" "fmla v20.4s, v14.4s, v4.s[3] \n" "fmla v21.4s, v14.4s, v5.s[3] \n" "fmla v22.4s, v14.4s, v6.s[3] \n" "fmla v23.4s, v14.4s, v7.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v24.4s, v15.4s, v0.s[3] \n" "fmla v25.4s, v15.4s, v1.s[3] \n" "fmla v26.4s, v15.4s, v2.s[3] \n" "fmla v27.4s, v15.4s, v3.s[3] \n" "fmla v28.4s, v15.4s, v4.s[3] \n" "fmla v29.4s, v15.4s, v5.s[3] \n" "fmla v30.4s, v15.4s, v6.s[3] \n" "fmla v31.4s, v15.4s, v7.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 
"fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v20.4s, v9.4s, v0.s[0] \n" "fmla v21.4s, v9.4s, v1.s[0] \n" "fmla v22.4s, v9.4s, v2.s[0] \n" "fmla v23.4s, v9.4s, v3.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v10.4s, v3.s[1] \n" "fmla v20.4s, v11.4s, v0.s[1] \n" "fmla v21.4s, v11.4s, v1.s[1] \n" "fmla v22.4s, v11.4s, v2.s[1] \n" "fmla v23.4s, v11.4s, v3.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v12.4s, v2.s[2] \n" "fmla v19.4s, v12.4s, v3.s[2] \n" "fmla v20.4s, v13.4s, v0.s[2] \n" "fmla v21.4s, v13.4s, v1.s[2] \n" "fmla v22.4s, v13.4s, v2.s[2] \n" "fmla v23.4s, v13.4s, v3.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v14.4s, v2.s[3] \n" "fmla v19.4s, v14.4s, v3.s[3] \n" "fmla v20.4s, v15.4s, v0.s[3] \n" "fmla v21.4s, v15.4s, v1.s[3] \n" "fmla v22.4s, v15.4s, v2.s[3] \n" "fmla v23.4s, v15.4s, v3.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } for (; i + 1 < tiles; i += 2) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4s, v1.4s}, [%3], #32 \n" // r0 r1 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v9.4s, v0.s[0] \n" "fmla v19.4s, v9.4s, v1.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v11.4s, v0.s[1] \n" "fmla v19.4s, v11.4s, v1.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v13.4s, v0.s[2] \n" "fmla v19.4s, v13.4s, v1.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v15.4s, v0.s[3] \n" "fmla v19.4s, v15.4s, v1.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" "st1 {v18.4s, v19.4s}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); } for (; i < tiles; i++) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "0: \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4s}, [%3], #16 \n" // r0 "prfm 
pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v11.4s, v0.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v13.4s, v0.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v15.4s, v0.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" "st1 {v17.4s}, [%2], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17"); } } } #endif // __ARM_NEON && __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); #if __aarch64__ const Mat kernel0_tm = kernel_tm.channel(p / 2 + p % 2); #else const Mat kernel0_tm = kernel_tm.channel(p); #endif for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // w0123_0 "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" "fmla v8.4s, v5.4s, v3.s[0] \n" "fmla v9.4s, v5.4s, v3.s[1] \n" "fmla v10.4s, v5.4s, v3.s[2] \n" "fmla v11.4s, v5.4s, v3.s[3] \n" "fmla v12.4s, v5.4s, v20.s[0] \n" "fmla v13.4s, v5.4s, v20.s[1] \n" "fmla v14.4s, v5.4s, v20.s[2] \n" "fmla v15.4s, v5.4s, v20.s[3] \n" "fmla v16.4s, v5.4s, v21.s[0] \n" "fmla v17.4s, v5.4s, v21.s[1] \n" "fmla v18.4s, v5.4s, v21.s[2] \n" "fmla v19.4s, v5.4s, v21.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "fmla v8.4s, v6.4s, v22.s[0] \n" "fmla v9.4s, v6.4s, v22.s[1] \n" "fmla v10.4s, v6.4s, v22.s[2] \n" "fmla v11.4s, v6.4s, v22.s[3] \n" "fmla v12.4s, v6.4s, v23.s[0] \n" "fmla v13.4s, v6.4s, v23.s[1] \n" "fmla v14.4s, v6.4s, v23.s[2] \n" "fmla v15.4s, v6.4s, v23.s[3] \n" "fmla v16.4s, v6.4s, v24.s[0] \n" "fmla v17.4s, v6.4s, v24.s[1] \n" "fmla v18.4s, v6.4s, v24.s[2] \n" "fmla v19.4s, v6.4s, v24.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v7.4s, v25.s[0] \n" "fmla v9.4s, v7.4s, v25.s[1] \n" "fmla v10.4s, v7.4s, v25.s[2] \n" "fmla v11.4s, v7.4s, v25.s[3] \n" "fmla v12.4s, v7.4s, v26.s[0] \n" "fmla v13.4s, 
v7.4s, v26.s[1] \n" "fmla v14.4s, v7.4s, v26.s[2] \n" "fmla v15.4s, v7.4s, v26.s[3] \n" "fmla v16.4s, v7.4s, v27.s[0] \n" "fmla v17.4s, v7.4s, v27.s[1] \n" "fmla v18.4s, v7.4s, v27.s[2] \n" "fmla v19.4s, v7.4s, v27.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } #endif for (; i + 7 < tiles; i += 8) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8); #else const float* r0 = bb2.row(i / 8); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n" // r4 r5 r6 r7 "fmla v20.4s, v8.4s, v4.s[0] \n" "fmla v21.4s, v8.4s, v5.s[0] \n" "fmla v22.4s, v8.4s, v6.s[0] \n" "fmla v23.4s, v8.4s, v7.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v20.4s, v9.4s, v4.s[1] \n" "fmla v21.4s, v9.4s, v5.s[1] \n" "fmla v22.4s, v9.4s, v6.s[1] \n" "fmla v23.4s, v9.4s, v7.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "fmla v20.4s, v10.4s, v4.s[2] \n" "fmla v21.4s, v10.4s, v5.s[2] \n" "fmla v22.4s, v10.4s, v6.s[2] \n" "fmla v23.4s, v10.4s, v7.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "fmla v20.4s, v11.4s, v4.s[3] \n" "fmla v21.4s, v11.4s, v5.s[3] \n" "fmla v22.4s, v11.4s, v6.s[3] \n" "fmla v23.4s, v11.4s, v7.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "veor q12, q12 \n" "veor q13, q13 \n" "veor q14, q14 \n" "veor q15, q15 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "vmla.f32 q12, q4, d2[0] \n" "vmla.f32 q13, q4, d2[1] \n" "vmla.f32 q14, q4, d3[0] \n" "vmla.f32 q15, q4, d3[1] \n" "vmla.f32 q8, q5, d4[0] \n" "vmla.f32 
q9, q5, d4[1] \n" "vmla.f32 q10, q5, d5[0] \n" "vmla.f32 q11, q5, d5[1] \n" "vmla.f32 q12, q5, d6[0] \n" "vmla.f32 q13, q5, d6[1] \n" "vmla.f32 q14, q5, d7[0] \n" "vmla.f32 q15, q5, d7[1] \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "vmla.f32 q8, q6, d0[0] \n" "vmla.f32 q9, q6, d0[1] \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q6, d1[1] \n" "vmla.f32 q12, q6, d2[0] \n" "vmla.f32 q13, q6, d2[1] \n" "vmla.f32 q14, q6, d3[0] \n" "vmla.f32 q15, q6, d3[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d4[0] \n" "vmla.f32 q9, q7, d4[1] \n" "vmla.f32 q10, q7, d5[0] \n" "vmla.f32 q11, q7, d5[1] \n" "vmla.f32 q12, q7, d6[0] \n" "vmla.f32 q13, q7, d6[1] \n" "vmla.f32 q14, q7, d7[0] \n" "vmla.f32 q15, q7, d7[1] \n" "bne 0b \n" "vstm %1!, {d16-d23} \n" "vstm %1!, {d24-d31} \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif } for (; i + 3 < tiles; i += 4) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19"); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d2[0] \n" "vmla.f32 q10, q4, d4[0] \n" "vmla.f32 q11, q4, d6[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q10, q5, d4[1] \n" "vmla.f32 q11, q5, d6[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q9, q6, d3[0] \n" "vmla.f32 q10, q6, d5[0] \n" "vmla.f32 q11, q6, d7[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d1[1] \n" "vmla.f32 q9, q7, d3[1] \n" "vmla.f32 q10, q7, d5[1] \n" "vmla.f32 q11, q7, d7[1] \n" "bne 0b \n" "vstm %1!, {d16-d23} \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif } for (; i + 1 < tiles; i += 2) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); #else const 
float* r0 = bb2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4s, v1.4s}, [%2], #32 \n" // r0 r1 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v16", "v17"); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "0: \n" "pld [%2, #256] \n" "vld1.f32 {d0-d3}, [%2 :128]! \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d2[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q9, q6, d3[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d1[1] \n" "vmla.f32 q9, q7, d3[1] \n" "bne 0b \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9"); #endif } for (; i < tiles; i++) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "0: \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n" // r0 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v16"); #else asm volatile( "veor q8, q8 \n" "0: \n" "pld [%2, #128] \n" "vld1.f32 {d0-d1}, [%2 :128]! \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q8, q5, d0[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q8, q7, d1[1] \n" "bne 0b \n" "vst1.f32 {d16-d17}, [%1 :128]! 
\n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8"); #endif } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f); float tmp[6][8][4]; // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { // top_blob_tm.create(tiles, 64, outch, elemsize, elempack); const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 8 + j) * 4; const float* output0_tm_1 = output0_tm_0 + tiles * 4; const float* output0_tm_2 = output0_tm_0 + tiles * 8; const float* output0_tm_3 = output0_tm_0 + tiles * 12; const float* output0_tm_4 = output0_tm_0 + tiles * 16; const float* output0_tm_5 = output0_tm_0 + tiles * 20; const float* output0_tm_6 = output0_tm_0 + tiles * 24; const float* output0_tm_7 = output0_tm_0 + tiles * 28; float* output0 = out0.row(i * 6) + (j * 6) * 4; // TODO neon optimize for (int m = 0; m < 8; m++) { float32x4_t _out0tm0 = vld1q_f32(output0_tm_0); float32x4_t _out0tm1 = vld1q_f32(output0_tm_1); float32x4_t _out0tm2 = vld1q_f32(output0_tm_2); float32x4_t _out0tm3 = vld1q_f32(output0_tm_3); float32x4_t _out0tm4 = vld1q_f32(output0_tm_4); float32x4_t _out0tm5 = vld1q_f32(output0_tm_5); float32x4_t _out0tm6 = vld1q_f32(output0_tm_6); float32x4_t _out0tm7 = vld1q_f32(output0_tm_7); float32x4_t _tmp024a = vaddq_f32(_out0tm1, _out0tm2); float32x4_t _tmp135a = vsubq_f32(_out0tm1, _out0tm2); // float tmp024a = output0_tm[1] + output0_tm[2]; // float tmp135a = output0_tm[1] - output0_tm[2]; float32x4_t _tmp024b = vaddq_f32(_out0tm3, _out0tm4); float32x4_t _tmp135b = vsubq_f32(_out0tm3, _out0tm4); // float tmp024b = output0_tm[3] + output0_tm[4]; // float tmp135b = output0_tm[3] - output0_tm[4]; float32x4_t _tmp024c = vaddq_f32(_out0tm5, _out0tm6); float32x4_t _tmp135c = vsubq_f32(_out0tm5, _out0tm6); // float tmp024c = output0_tm[5] + output0_tm[6]; // float tmp135c = output0_tm[5] - output0_tm[6]; float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f)); float32x4_t _tmp2m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f); float32x4_t _tmp4m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f); vst1q_f32(tmp[0][m], _tmp0m); 
vst1q_f32(tmp[2][m], _tmp2m); vst1q_f32(tmp[4][m], _tmp4m); // tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32; // tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; // tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; float32x4_t _tmp1m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f); float32x4_t _tmp3m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f); float32x4_t _tmp5m = vaddq_f32(vaddq_f32(_out0tm7, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f)); vst1q_f32(tmp[1][m], _tmp1m); vst1q_f32(tmp[3][m], _tmp3m); vst1q_f32(tmp[5][m], _tmp5m); // tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; // tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; // tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += tiles * 32; output0_tm_1 += tiles * 32; output0_tm_2 += tiles * 32; output0_tm_3 += tiles * 32; output0_tm_4 += tiles * 32; output0_tm_5 += tiles * 32; output0_tm_6 += tiles * 32; output0_tm_7 += tiles * 32; } for (int m = 0; m < 6; m++) { float32x4_t _tmp00 = vld1q_f32(tmp[m][0]); float32x4_t _tmp01 = vld1q_f32(tmp[m][1]); float32x4_t _tmp02 = vld1q_f32(tmp[m][2]); float32x4_t _tmp03 = vld1q_f32(tmp[m][3]); float32x4_t _tmp04 = vld1q_f32(tmp[m][4]); float32x4_t _tmp05 = vld1q_f32(tmp[m][5]); float32x4_t _tmp06 = vld1q_f32(tmp[m][6]); float32x4_t _tmp07 = vld1q_f32(tmp[m][7]); float32x4_t _tmp024a = vaddq_f32(_tmp01, _tmp02); float32x4_t _tmp135a = vsubq_f32(_tmp01, _tmp02); // float tmp024a = tmp0[1] + tmp0[2]; // float tmp135a = tmp0[1] - tmp0[2]; float32x4_t _tmp024b = vaddq_f32(_tmp03, _tmp04); float32x4_t _tmp135b = vsubq_f32(_tmp03, _tmp04); // float tmp024b = tmp0[3] + tmp0[4]; // float tmp135b = tmp0[3] - tmp0[4]; float32x4_t _tmp024c = vaddq_f32(_tmp05, _tmp06); float32x4_t _tmp135c = vsubq_f32(_tmp05, _tmp06); // float tmp024c = tmp0[5] + tmp0[6]; // float tmp135c = tmp0[5] - tmp0[6]; float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f))); float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f)); float32x4_t _out04 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f)); vst1q_f32(output0, _out00); vst1q_f32(output0 + 8, _out02); vst1q_f32(output0 + 16, _out04); // output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; // output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; // output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f)); float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f)); float32x4_t _out05 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp07, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f))); vst1q_f32(output0 + 4, _out01); vst1q_f32(output0 + 12, _out03); vst1q_f32(output0 + 20, _out05); // output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; // output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; // output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw * 4; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void conv3x3s1_winograd42_transform_kernel_pack4_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt) { // winograd43 transform kernel Mat kernel_tm(6 * 6, 
inch, outch); const float ktm[6][3] = { {1.0f / 4, 0.0f, 0.0f}, {-1.0f / 6, -1.0f / 6, -1.0f / 6}, {-1.0f / 6, 1.0f / 6, -1.0f / 6}, {1.0f / 24, 1.0f / 12, 1.0f / 6}, {1.0f / 24, -1.0f / 12, 1.0f / 6}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[6][3]; for (int i = 0; i < 6; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 6; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 6; i++) { kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 36-inch-outch // dst = 4b-4a-inch/4a-36-outch/4b; #if __aarch64__ kernel_tm_pack4.create(2 * inch / 4, 36, (outch / 4) / 2 + (outch / 4) % 2, (size_t)4u * 16, 16); #else kernel_tm_pack4.create(inch / 4, 36, outch / 4, (size_t)4u * 16, 16); #endif int q = 0; #if __aarch64__ for (; q + 7 < outch; q += 8) { const Mat k0 = kernel_tm.channel(q); const Mat k1 = kernel_tm.channel(q + 1); const Mat k2 = kernel_tm.channel(q + 2); const Mat k3 = kernel_tm.channel(q + 3); const Mat k4 = kernel_tm.channel(q + 4); const Mat k5 = kernel_tm.channel(q + 5); const Mat k6 = kernel_tm.channel(q + 6); const Mat k7 = kernel_tm.channel(q + 7); Mat g0 = kernel_tm_pack4.channel(q / 8); for (int k = 0; k < 36; k++) { float* g00 = g0.row(k); for (int p = 0; p + 3 < inch; p += 4) { const float* k00 = k0.row(p); const float* k01 = k0.row(p + 1); const float* k02 = k0.row(p + 2); const float* k03 = k0.row(p + 3); const float* k10 = k1.row(p); const float* k11 = k1.row(p + 1); const float* k12 = k1.row(p + 2); const float* k13 = k1.row(p + 3); const float* k20 = k2.row(p); const float* k21 = k2.row(p + 1); const float* k22 = k2.row(p + 2); const float* k23 = k2.row(p + 3); const float* k30 = k3.row(p); const float* k31 = k3.row(p + 1); const float* k32 = k3.row(p + 2); const float* k33 = k3.row(p + 3); const float* k40 = k4.row(p); const float* k41 = k4.row(p + 1); const float* k42 = k4.row(p + 2); const float* k43 = k4.row(p + 3); const float* k50 = k5.row(p); const float* k51 = k5.row(p + 1); const float* k52 = k5.row(p + 2); const float* k53 = k5.row(p + 3); const float* k60 = k6.row(p); const float* k61 = k6.row(p + 1); const float* k62 = k6.row(p + 2); const float* k63 = k6.row(p + 3); const float* k70 = k7.row(p); const float* k71 = k7.row(p + 1); const float* k72 = k7.row(p + 2); const float* k73 = k7.row(p + 3); g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k]; g00[4] = k40[k]; g00[5] = k50[k]; g00[6] = k60[k]; g00[7] = k70[k]; g00[8] = k01[k]; g00[9] = k11[k]; g00[10] = k21[k]; g00[11] = k31[k]; g00[12] = k41[k]; g00[13] = k51[k]; g00[14] = k61[k]; g00[15] = k71[k]; g00[16] = k02[k]; g00[17] = k12[k]; g00[18] = k22[k]; g00[19] = k32[k]; g00[20] = k42[k]; g00[21] = k52[k]; g00[22] = k62[k]; g00[23] = k72[k]; g00[24] = k03[k]; g00[25] = k13[k]; g00[26] = k23[k]; g00[27] = k33[k]; g00[28] = k43[k]; g00[29] = k53[k]; g00[30] = k63[k]; g00[31] = k73[k]; g00 += 32; } } } #endif // __aarch64__ for (; q + 3 < outch; q += 4) { const Mat k0 = kernel_tm.channel(q); const Mat 
        k1 = kernel_tm.channel(q + 1);
        const Mat k2 = kernel_tm.channel(q + 2);
        const Mat k3 = kernel_tm.channel(q + 3);

#if __aarch64__
        Mat g0 = kernel_tm_pack4.channel(q / 8 + (q % 8) / 4);
#else
        Mat g0 = kernel_tm_pack4.channel(q / 4);
#endif

        for (int k = 0; k < 36; k++)
        {
            float* g00 = g0.row(k);

            for (int p = 0; p + 3 < inch; p += 4)
            {
                const float* k00 = k0.row(p);
                const float* k01 = k0.row(p + 1);
                const float* k02 = k0.row(p + 2);
                const float* k03 = k0.row(p + 3);
                const float* k10 = k1.row(p);
                const float* k11 = k1.row(p + 1);
                const float* k12 = k1.row(p + 2);
                const float* k13 = k1.row(p + 3);
                const float* k20 = k2.row(p);
                const float* k21 = k2.row(p + 1);
                const float* k22 = k2.row(p + 2);
                const float* k23 = k2.row(p + 3);
                const float* k30 = k3.row(p);
                const float* k31 = k3.row(p + 1);
                const float* k32 = k3.row(p + 2);
                const float* k33 = k3.row(p + 3);

                g00[0] = k00[k];
                g00[1] = k10[k];
                g00[2] = k20[k];
                g00[3] = k30[k];
                g00[4] = k01[k];
                g00[5] = k11[k];
                g00[6] = k21[k];
                g00[7] = k31[k];
                g00[8] = k02[k];
                g00[9] = k12[k];
                g00[10] = k22[k];
                g00[11] = k32[k];
                g00[12] = k03[k];
                g00[13] = k13[k];
                g00[14] = k23[k];
                g00[15] = k33[k];

                g00 += 16;
            }
        }
    }
}

static void conv3x3s1_winograd42_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 4n+2
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    const float* bias = _bias;

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = w_tm / 6 * h_tm / 6;

        bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);

        // const float itm[4][4] = {
        //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
        //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
        //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
        //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
        // };

        // 0 = 4 * r00 - 5 * r02 + r04
        // 1 = -4 * (r01 + r02) + r04 + r03
        // 2 = 4 * (r01 - r02) + r04 - r03
        // 3 = -2 * (r01 - r03) + r04 - r02
        // 4 = 2 * (r01 - r03) + r04 - r02
        // 5 = 4 * r01 - 5 * r03 + r05

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const Mat img0 = bottom_blob_bordered.channel(q);
            Mat img0_tm = bottom_blob_tm.channel(q);

            float tmp[6][6][4];

            // tile
            for (int i = 0; i < h_tm / 6; i++)
            {
                for (int j = 0; j < w_tm / 6; j++)
                {
                    const float* r0 = img0.row(i * 4) + (j * 4) * 4;

                    for (int m = 0; m < 6; m++)
                    {
                        float32x4_t _r00 = vld1q_f32(r0);
                        float32x4_t _r01 = vld1q_f32(r0 + 4);
                        float32x4_t _r02 = vld1q_f32(r0 + 8);
                        float32x4_t _r03 = vld1q_f32(r0 + 12);
                        float32x4_t _r04 = vld1q_f32(r0 + 16);
                        float32x4_t _r05 = vld1q_f32(r0 + 20);

                        float32x4_t _tmp0m = vmlsq_n_f32(vmlaq_n_f32(_r04, _r00, 4.f), _r02, 5.f);
                        float32x4_t _tmp1m = vmlsq_n_f32(vaddq_f32(_r04, _r03), vaddq_f32(_r01, _r02), 4.f);
                        float32x4_t _tmp2m = vmlaq_n_f32(vsubq_f32(_r04, _r03), vsubq_f32(_r01, _r02), 4.f);
                        float32x4_t _tmp3m = vmlsq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f);
                        float32x4_t _tmp4m = vmlaq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f);
                        float32x4_t _tmp5m =
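                        /* Winograd F(4x4,3x3) input transform: each 6x6 input tile d is mapped to
                           B^T * d * B using the coefficients listed in the commented matrix above.
                           This first pass over m applies the transform along the rows into
                           tmp[6][6][4]; the second pass below applies it along the columns and
                           scatters the 36 coefficients into bottom_blob_tm, one plane per
                           coefficient. Each float32x4_t carries the 4 packed channels
                           (elempack = 4) of one pixel. */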
                        vmlsq_n_f32(vmlaq_n_f32(_r05, _r01, 4.f), _r03, 5.f);

                        vst1q_f32(tmp[0][m], _tmp0m);
                        vst1q_f32(tmp[1][m], _tmp1m);
                        vst1q_f32(tmp[2][m], _tmp2m);
                        vst1q_f32(tmp[3][m], _tmp3m);
                        vst1q_f32(tmp[4][m], _tmp4m);
                        vst1q_f32(tmp[5][m], _tmp5m);

                        r0 += w * 4;
                    }

                    float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 6 + j) * 4;
                    float* r0_tm_1 = r0_tm_0 + tiles * 4;
                    float* r0_tm_2 = r0_tm_0 + tiles * 8;
                    float* r0_tm_3 = r0_tm_0 + tiles * 12;
                    float* r0_tm_4 = r0_tm_0 + tiles * 16;
                    float* r0_tm_5 = r0_tm_0 + tiles * 20;

                    for (int m = 0; m < 6; m++)
                    {
                        float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                        float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                        float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                        float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                        float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                        float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);

                        float32x4_t _r0tm0 = vmlsq_n_f32(vmlaq_n_f32(_tmp04, _tmp00, 4.f), _tmp02, 5.f);
                        float32x4_t _r0tm1 = vmlsq_n_f32(vaddq_f32(_tmp04, _tmp03), vaddq_f32(_tmp01, _tmp02), 4.f);
                        float32x4_t _r0tm2 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp03), vsubq_f32(_tmp01, _tmp02), 4.f);
                        float32x4_t _r0tm3 = vmlsq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f);
                        float32x4_t _r0tm4 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f);
                        float32x4_t _r0tm5 = vmlsq_n_f32(vmlaq_n_f32(_tmp05, _tmp01, 4.f), _tmp03, 5.f);

                        vst1q_f32(r0_tm_0, _r0tm0);
                        vst1q_f32(r0_tm_1, _r0tm1);
                        vst1q_f32(r0_tm_2, _r0tm2);
                        vst1q_f32(r0_tm_3, _r0tm3);
                        vst1q_f32(r0_tm_4, _r0tm4);
                        vst1q_f32(r0_tm_5, _r0tm5);

                        r0_tm_0 += tiles * 24;
                        r0_tm_1 += tiles * 24;
                        r0_tm_2 += tiles * 24;
                        r0_tm_3 += tiles * 24;
                        r0_tm_4 += tiles * 24;
                        r0_tm_5 += tiles * 24;
                    }
                }
            }
        }
    }
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = h_tm / 6 * w_tm / 6;

        // permute
        // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
        Mat bottom_blob_tm2;
#if __aarch64__
        if (tiles >= 12)
            bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 36, elemsize, elempack, opt.workspace_allocator);
        else if (tiles >= 8)
            bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, elemsize, elempack, opt.workspace_allocator);
        else if (tiles >= 4)
            bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, elemsize, elempack, opt.workspace_allocator);
        else if (tiles >= 2)
            bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, elemsize, elempack, opt.workspace_allocator);
        else // if (tiles >= 1)
            bottom_blob_tm2.create(1 * inch, tiles, 36, elemsize, elempack, opt.workspace_allocator);
#else
        if (tiles >= 8)
            bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, elemsize, elempack, opt.workspace_allocator);
        else if (tiles >= 4)
            bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, elemsize, elempack, opt.workspace_allocator);
        else if (tiles >= 2)
            bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, elemsize, elempack, opt.workspace_allocator);
        else // if (tiles >= 1)
            bottom_blob_tm2.create(1 * inch, tiles, 36, elemsize, elempack, opt.workspace_allocator);
#endif

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r = 0; r < 36; r++)
        {
            Mat tm2 = bottom_blob_tm2.channel(r);

            // tile
            int i = 0;
#if __aarch64__
            for (; i + 11 < tiles; i += 12)
            {
                float* tm2p = tm2.row(i / 12);

                const float* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * 4;

                for (int q = 0; q <
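                /* The permute gathers the transformed tiles into contiguous blocks of 12/8/4/2/1
                   tiles (8/4/2/1 on armv7) so that the multiply-add kernels further down can stream
                   both operands sequentially; the inline assembly that follows uses ld4/st1 to
                   transpose the 4-channel groups while copying. Row-index expressions such as
                   i / 12 + (i % 12) / 8 + ... locate a tile inside this blocked layout. */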
inch; q++) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v8.4s, v9.4s, v10.4s, v11.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" "st1 {v4.4s}, [%1], #16 \n" "st1 {v8.4s}, [%1], #16 \n" "sub %0, %0, #128 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v5.4s}, [%1], #16 \n" "st1 {v9.4s}, [%1], #16 \n" "st1 {v2.4s}, [%1], #16 \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v10.4s}, [%1], #16 \n" "st1 {v3.4s}, [%1], #16 \n" "st1 {v7.4s}, [%1], #16 \n" "st1 {v11.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); r0 += bottom_blob_tm.cstep * 4; } } #endif for (; i + 7 < tiles; i += 8) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8); #else float* tm2p = tm2.row(i / 8); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" "sub %0, %0, #64 \n" "st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); #else asm volatile( "pld [%0, #512] \n" "vldm %0!, {d0-d7} \n" "pld [%0, #512] \n" "vldm %0, {d16-d23} \n" // transpose 8x4 "vtrn.32 q0, q1 \n" "vtrn.32 q2, q3 \n" "vtrn.32 q8, q9 \n" "vtrn.32 q10, q11 \n" "vswp d1, d4 \n" "vswp d3, d6 \n" "vswp d17, d20 \n" "vswp d19, d22 \n" "vswp q1, q8 \n" "vswp q3, q10 \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "sub %0, %0, #64 \n" "vst1.f32 {d4-d7}, [%1 :128]! \n" "vst1.f32 {d20-d23}, [%1 :128]! \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"); #endif r0 += bottom_blob_tm.cstep * 4; } } for (; i + 3 < tiles; i += 4) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); #else asm volatile( "pld [%0, #512] \n" "vldm %0, {d0-d7} \n" "vstm %1!, {d0-d7} \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } for (; i + 1 < tiles; i += 2) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.4s, v1.4s}, [%0] \n" "st1 {v0.4s, v1.4s}, [%1], #32 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1"); #else asm volatile( "pld [%0, #256] \n" "vld1.f32 {d0-d3}, [%0 :128] \n" "vst1.f32 {d0-d3}, [%1 :128]! 
\n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } for (; i < tiles; i++) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0"); #else asm volatile( "pld [%0, #128] \n" "vld1.f32 {d0-d1}, [%0 :128] \n" "vst1.f32 {d0-d1}, [%1 :128]! \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 36, outch, elemsize, elempack, opt.workspace_allocator); int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ int nn_outch = 0; nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; float* output0_tm = top_blob_tm.channel(p); float* output1_tm = top_blob_tm.channel(p + 1); const Mat kernel01_tm = kernel_tm.channel(pp); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w0011_01 "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "fmla v20.4s, v5.4s, v0.s[0] \n" "fmla v21.4s, v5.4s, v0.s[1] \n" "fmla v22.4s, v5.4s, v0.s[2] \n" "fmla v23.4s, v5.4s, v0.s[3] \n" "fmla v24.4s, v5.4s, v1.s[0] \n" "fmla v25.4s, v5.4s, v1.s[1] \n" "fmla v26.4s, v5.4s, v1.s[2] \n" "fmla v27.4s, v5.4s, v1.s[3] \n" "fmla v28.4s, v5.4s, v2.s[0] \n" "fmla v29.4s, v5.4s, v2.s[1] \n" "fmla v30.4s, v5.4s, v2.s[2] \n" "fmla v31.4s, v5.4s, v2.s[3] \n" "fmla v8.4s, v6.4s, v3.s[0] \n" "fmla v9.4s, v6.4s, v3.s[1] \n" "fmla v10.4s, v6.4s, v3.s[2] \n" "fmla v11.4s, 
v6.4s, v3.s[3] \n" "fmla v20.4s, v7.4s, v3.s[0] \n" "fmla v21.4s, v7.4s, v3.s[1] \n" "fmla v22.4s, v7.4s, v3.s[2] \n" "fmla v23.4s, v7.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "fmla v12.4s, v6.4s, v0.s[0] \n" "fmla v13.4s, v6.4s, v0.s[1] \n" "fmla v14.4s, v6.4s, v0.s[2] \n" "fmla v15.4s, v6.4s, v0.s[3] \n" "fmla v16.4s, v6.4s, v1.s[0] \n" "fmla v17.4s, v6.4s, v1.s[1] \n" "fmla v18.4s, v6.4s, v1.s[2] \n" "fmla v19.4s, v6.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v0.s[0] \n" "fmla v25.4s, v7.4s, v0.s[1] \n" "fmla v26.4s, v7.4s, v0.s[2] \n" "fmla v27.4s, v7.4s, v0.s[3] \n" "fmla v28.4s, v7.4s, v1.s[0] \n" "fmla v29.4s, v7.4s, v1.s[1] \n" "fmla v30.4s, v7.4s, v1.s[2] \n" "fmla v31.4s, v7.4s, v1.s[3] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w2233_01 "fmla v8.4s, v4.4s, v2.s[0] \n" "fmla v9.4s, v4.4s, v2.s[1] \n" "fmla v10.4s, v4.4s, v2.s[2] \n" "fmla v11.4s, v4.4s, v2.s[3] \n" "fmla v12.4s, v4.4s, v3.s[0] \n" "fmla v13.4s, v4.4s, v3.s[1] \n" "fmla v14.4s, v4.4s, v3.s[2] \n" "fmla v15.4s, v4.4s, v3.s[3] \n" "fmla v20.4s, v5.4s, v2.s[0] \n" "fmla v21.4s, v5.4s, v2.s[1] \n" "fmla v22.4s, v5.4s, v2.s[2] \n" "fmla v23.4s, v5.4s, v2.s[3] \n" "fmla v24.4s, v5.4s, v3.s[0] \n" "fmla v25.4s, v5.4s, v3.s[1] \n" "fmla v26.4s, v5.4s, v3.s[2] \n" "fmla v27.4s, v5.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "fmla v16.4s, v4.4s, v0.s[0] \n" "fmla v17.4s, v4.4s, v0.s[1] \n" "fmla v18.4s, v4.4s, v0.s[2] \n" "fmla v19.4s, v4.4s, v0.s[3] \n" "fmla v28.4s, v5.4s, v0.s[0] \n" "fmla v29.4s, v5.4s, v0.s[1] \n" "fmla v30.4s, v5.4s, v0.s[2] \n" "fmla v31.4s, v5.4s, v0.s[3] \n" "fmla v8.4s, v6.4s, v1.s[0] \n" "fmla v9.4s, v6.4s, v1.s[1] \n" "fmla v10.4s, v6.4s, v1.s[2] \n" "fmla v11.4s, v6.4s, v1.s[3] \n" "fmla v12.4s, v6.4s, v2.s[0] \n" "fmla v13.4s, v6.4s, v2.s[1] \n" "fmla v14.4s, v6.4s, v2.s[2] \n" "fmla v15.4s, v6.4s, v2.s[3] \n" "fmla v16.4s, v6.4s, v3.s[0] \n" "fmla v17.4s, v6.4s, v3.s[1] \n" "fmla v18.4s, v6.4s, v3.s[2] \n" "fmla v19.4s, v6.4s, v3.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v20.4s, v7.4s, v1.s[0] \n" "fmla v21.4s, v7.4s, v1.s[1] \n" "fmla v22.4s, v7.4s, v1.s[2] \n" "fmla v23.4s, v7.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v2.s[0] \n" "fmla v25.4s, v7.4s, v2.s[1] \n" "fmla v26.4s, v7.4s, v2.s[2] \n" "fmla v27.4s, v7.4s, v2.s[3] \n" "fmla v28.4s, v7.4s, v3.s[0] \n" "fmla v29.4s, v7.4s, v3.s[1] \n" "fmla v30.4s, v7.4s, v3.s[2] \n" "fmla v31.4s, v7.4s, v3.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, 
v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // r4 r5 r6 r7 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v20.4s, v8.4s, v4.s[0] \n" "fmla v21.4s, v8.4s, v5.s[0] \n" "fmla v22.4s, v8.4s, v6.s[0] \n" "fmla v23.4s, v8.4s, v7.s[0] \n" "fmla v24.4s, v9.4s, v0.s[0] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v26.4s, v9.4s, v2.s[0] \n" "fmla v27.4s, v9.4s, v3.s[0] \n" "fmla v28.4s, v9.4s, v4.s[0] \n" "fmla v29.4s, v9.4s, v5.s[0] \n" "fmla v30.4s, v9.4s, v6.s[0] \n" "fmla v31.4s, v9.4s, v7.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v10.4s, v3.s[1] \n" "fmla v20.4s, v10.4s, v4.s[1] \n" "fmla v21.4s, v10.4s, v5.s[1] \n" "fmla v22.4s, v10.4s, v6.s[1] \n" "fmla v23.4s, v10.4s, v7.s[1] \n" "fmla v24.4s, v11.4s, v0.s[1] \n" "fmla v25.4s, v11.4s, v1.s[1] \n" "fmla v26.4s, v11.4s, v2.s[1] \n" "fmla v27.4s, v11.4s, v3.s[1] \n" "fmla v28.4s, v11.4s, v4.s[1] \n" "fmla v29.4s, v11.4s, v5.s[1] \n" "fmla v30.4s, v11.4s, v6.s[1] \n" "fmla v31.4s, v11.4s, v7.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v12.4s, v2.s[2] \n" "fmla v19.4s, v12.4s, v3.s[2] \n" "fmla v20.4s, v12.4s, v4.s[2] \n" "fmla v21.4s, v12.4s, v5.s[2] \n" "fmla v22.4s, v12.4s, v6.s[2] \n" "fmla v23.4s, v12.4s, v7.s[2] \n" "fmla v24.4s, v13.4s, v0.s[2] \n" "fmla v25.4s, v13.4s, v1.s[2] \n" "fmla v26.4s, v13.4s, v2.s[2] \n" "fmla v27.4s, v13.4s, v3.s[2] \n" "fmla v28.4s, v13.4s, v4.s[2] \n" "fmla v29.4s, v13.4s, v5.s[2] \n" "fmla v30.4s, v13.4s, v6.s[2] \n" "fmla v31.4s, v13.4s, v7.s[2] \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v14.4s, v2.s[3] \n" "fmla v19.4s, v14.4s, v3.s[3] \n" "fmla v20.4s, v14.4s, v4.s[3] \n" "fmla v21.4s, v14.4s, v5.s[3] \n" "fmla v22.4s, v14.4s, v6.s[3] \n" "fmla v23.4s, v14.4s, v7.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v24.4s, v15.4s, v0.s[3] \n" "fmla v25.4s, v15.4s, v1.s[3] \n" "fmla v26.4s, v15.4s, v2.s[3] \n" "fmla v27.4s, v15.4s, v3.s[3] \n" "fmla v28.4s, v15.4s, v4.s[3] \n" "fmla v29.4s, v15.4s, v5.s[3] \n" "fmla v30.4s, v15.4s, v6.s[3] \n" "fmla v31.4s, v15.4s, v7.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", 
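                    /* Micro-kernel pattern used throughout this dot stage: accumulators are zeroed
                       with eor, then for every group of 4 input channels the packed weights are
                       loaded once and multiplied into all tiles with fmla per-lane broadcasts
                       (v0.s[0] ... v7.s[3]); subs/bne loops over inch, and the accumulated tiles
                       are stored with st1 once the channel loop finishes. */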
"v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v20.4s, v9.4s, v0.s[0] \n" "fmla v21.4s, v9.4s, v1.s[0] \n" "fmla v22.4s, v9.4s, v2.s[0] \n" "fmla v23.4s, v9.4s, v3.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v10.4s, v3.s[1] \n" "fmla v20.4s, v11.4s, v0.s[1] \n" "fmla v21.4s, v11.4s, v1.s[1] \n" "fmla v22.4s, v11.4s, v2.s[1] \n" "fmla v23.4s, v11.4s, v3.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v12.4s, v2.s[2] \n" "fmla v19.4s, v12.4s, v3.s[2] \n" "fmla v20.4s, v13.4s, v0.s[2] \n" "fmla v21.4s, v13.4s, v1.s[2] \n" "fmla v22.4s, v13.4s, v2.s[2] \n" "fmla v23.4s, v13.4s, v3.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v14.4s, v2.s[3] \n" "fmla v19.4s, v14.4s, v3.s[3] \n" "fmla v20.4s, v15.4s, v0.s[3] \n" "fmla v21.4s, v15.4s, v1.s[3] \n" "fmla v22.4s, v15.4s, v2.s[3] \n" "fmla v23.4s, v15.4s, v3.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } for (; i + 1 < tiles; i += 2) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4s, v1.4s}, [%3], #32 \n" // r0 r1 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v9.4s, v0.s[0] \n" "fmla v19.4s, v9.4s, v1.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v11.4s, v0.s[1] \n" "fmla v19.4s, v11.4s, v1.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v13.4s, v0.s[2] \n" "fmla v19.4s, v13.4s, v1.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v15.4s, v0.s[3] \n" "fmla v19.4s, 
v15.4s, v1.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" "st1 {v18.4s, v19.4s}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); } for (; i < tiles; i++) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* k01 = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "0: \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4s}, [%3], #16 \n" // r0 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v11.4s, v0.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v13.4s, v0.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v15.4s, v0.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" "st1 {v17.4s}, [%2], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(k01) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(k01) : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17"); } } } #endif // __ARM_NEON && __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); #if __aarch64__ const Mat kernel0_tm = kernel_tm.channel(p / 2 + p % 2); #else const Mat kernel0_tm = kernel_tm.channel(p); #endif for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // w0123_0 "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" "fmla v8.4s, v5.4s, v3.s[0] \n" "fmla v9.4s, v5.4s, v3.s[1] \n" "fmla v10.4s, v5.4s, v3.s[2] \n" "fmla v11.4s, v5.4s, v3.s[3] \n" "fmla v12.4s, v5.4s, v20.s[0] \n" "fmla v13.4s, v5.4s, v20.s[1] \n" "fmla v14.4s, v5.4s, v20.s[2] \n" "fmla v15.4s, v5.4s, v20.s[3] \n" "fmla v16.4s, v5.4s, v21.s[0] \n" "fmla v17.4s, v5.4s, v21.s[1] \n" "fmla v18.4s, v5.4s, 
v21.s[2] \n" "fmla v19.4s, v5.4s, v21.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "fmla v8.4s, v6.4s, v22.s[0] \n" "fmla v9.4s, v6.4s, v22.s[1] \n" "fmla v10.4s, v6.4s, v22.s[2] \n" "fmla v11.4s, v6.4s, v22.s[3] \n" "fmla v12.4s, v6.4s, v23.s[0] \n" "fmla v13.4s, v6.4s, v23.s[1] \n" "fmla v14.4s, v6.4s, v23.s[2] \n" "fmla v15.4s, v6.4s, v23.s[3] \n" "fmla v16.4s, v6.4s, v24.s[0] \n" "fmla v17.4s, v6.4s, v24.s[1] \n" "fmla v18.4s, v6.4s, v24.s[2] \n" "fmla v19.4s, v6.4s, v24.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v7.4s, v25.s[0] \n" "fmla v9.4s, v7.4s, v25.s[1] \n" "fmla v10.4s, v7.4s, v25.s[2] \n" "fmla v11.4s, v7.4s, v25.s[3] \n" "fmla v12.4s, v7.4s, v26.s[0] \n" "fmla v13.4s, v7.4s, v26.s[1] \n" "fmla v14.4s, v7.4s, v26.s[2] \n" "fmla v15.4s, v7.4s, v26.s[3] \n" "fmla v16.4s, v7.4s, v27.s[0] \n" "fmla v17.4s, v7.4s, v27.s[1] \n" "fmla v18.4s, v7.4s, v27.s[2] \n" "fmla v19.4s, v7.4s, v27.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } #endif for (; i + 7 < tiles; i += 8) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8); #else const float* r0 = bb2.row(i / 8); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n" // r4 r5 r6 r7 "fmla v20.4s, v8.4s, v4.s[0] \n" "fmla v21.4s, v8.4s, v5.s[0] \n" "fmla v22.4s, v8.4s, v6.s[0] \n" "fmla v23.4s, v8.4s, v7.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v20.4s, v9.4s, v4.s[1] \n" "fmla v21.4s, v9.4s, v5.s[1] \n" "fmla v22.4s, v9.4s, v6.s[1] \n" "fmla v23.4s, v9.4s, v7.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "fmla v20.4s, v10.4s, v4.s[2] \n" "fmla v21.4s, v10.4s, v5.s[2] \n" "fmla v22.4s, v10.4s, v6.s[2] \n" "fmla v23.4s, v10.4s, v7.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "fmla v20.4s, v11.4s, v4.s[3] \n" "fmla v21.4s, v11.4s, v5.s[3] \n" "fmla v22.4s, v11.4s, v6.s[3] \n" "fmla v23.4s, v11.4s, v7.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // 
%2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "veor q12, q12 \n" "veor q13, q13 \n" "veor q14, q14 \n" "veor q15, q15 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "vmla.f32 q12, q4, d2[0] \n" "vmla.f32 q13, q4, d2[1] \n" "vmla.f32 q14, q4, d3[0] \n" "vmla.f32 q15, q4, d3[1] \n" "vmla.f32 q8, q5, d4[0] \n" "vmla.f32 q9, q5, d4[1] \n" "vmla.f32 q10, q5, d5[0] \n" "vmla.f32 q11, q5, d5[1] \n" "vmla.f32 q12, q5, d6[0] \n" "vmla.f32 q13, q5, d6[1] \n" "vmla.f32 q14, q5, d7[0] \n" "vmla.f32 q15, q5, d7[1] \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "vmla.f32 q8, q6, d0[0] \n" "vmla.f32 q9, q6, d0[1] \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q6, d1[1] \n" "vmla.f32 q12, q6, d2[0] \n" "vmla.f32 q13, q6, d2[1] \n" "vmla.f32 q14, q6, d3[0] \n" "vmla.f32 q15, q6, d3[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d4[0] \n" "vmla.f32 q9, q7, d4[1] \n" "vmla.f32 q10, q7, d5[0] \n" "vmla.f32 q11, q7, d5[1] \n" "vmla.f32 q12, q7, d6[0] \n" "vmla.f32 q13, q7, d6[1] \n" "vmla.f32 q14, q7, d7[0] \n" "vmla.f32 q15, q7, d7[1] \n" "bne 0b \n" "vstm %1!, {d16-d23} \n" "vstm %1!, {d24-d31} \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif } for (; i + 3 < tiles; i += 4) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19"); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d2[0] \n" "vmla.f32 q10, q4, d4[0] \n" "vmla.f32 q11, q4, d6[0] \n" "vmla.f32 q8, q5, d0[1] \n" 
"vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q10, q5, d4[1] \n" "vmla.f32 q11, q5, d6[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q9, q6, d3[0] \n" "vmla.f32 q10, q6, d5[0] \n" "vmla.f32 q11, q6, d7[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d1[1] \n" "vmla.f32 q9, q7, d3[1] \n" "vmla.f32 q10, q7, d5[1] \n" "vmla.f32 q11, q7, d7[1] \n" "bne 0b \n" "vstm %1!, {d16-d23} \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif } for (; i + 1 < tiles; i += 2) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4s, v1.4s}, [%2], #32 \n" // r0 r1 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v16", "v17"); #else asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "0: \n" "pld [%2, #256] \n" "vld1.f32 {d0-d3}, [%2 :128]! \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d2[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q9, q6, d3[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d1[1] \n" "vmla.f32 q9, q7, d3[1] \n" "bne 0b \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9"); #endif } for (; i < tiles; i++) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #endif const float* k0 = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "0: \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n" // r0 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v16"); #else asm volatile( "veor q8, q8 \n" "0: \n" "pld [%2, #128] \n" "vld1.f32 {d0-d1}, [%2 :128]! \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q8, q5, d0[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q8, q7, d1[1] \n" "bne 0b \n" "vst1.f32 {d16-d17}, [%1 :128]! 
\n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(k0) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(k0) : "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8"); #endif } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { // const float otm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f); float tmp[4][6][4]; // tile for (int i = 0; i < outh / 4; i++) { for (int j = 0; j < outw / 4; j++) { // top_blob_tm.create(tiles, 36, outch, elemsize, elempack); const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 6 + j) * 4; const float* output0_tm_1 = output0_tm_0 + tiles * 4; const float* output0_tm_2 = output0_tm_0 + tiles * 8; const float* output0_tm_3 = output0_tm_0 + tiles * 12; const float* output0_tm_4 = output0_tm_0 + tiles * 16; const float* output0_tm_5 = output0_tm_0 + tiles * 20; float* output0 = out0.row(i * 4) + (j * 4) * 4; // TODO neon optimize for (int m = 0; m < 6; m++) { float32x4_t _out0tm0 = vld1q_f32(output0_tm_0); float32x4_t _out0tm1 = vld1q_f32(output0_tm_1); float32x4_t _out0tm2 = vld1q_f32(output0_tm_2); float32x4_t _out0tm3 = vld1q_f32(output0_tm_3); float32x4_t _out0tm4 = vld1q_f32(output0_tm_4); float32x4_t _out0tm5 = vld1q_f32(output0_tm_5); float32x4_t _tmp02a = vaddq_f32(_out0tm1, _out0tm2); float32x4_t _tmp13a = vsubq_f32(_out0tm1, _out0tm2); float32x4_t _tmp02b = vaddq_f32(_out0tm3, _out0tm4); float32x4_t _tmp13b = vsubq_f32(_out0tm3, _out0tm4); float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp02a), _tmp02b); float32x4_t _tmp1m = vmlaq_n_f32(_tmp13a, _tmp13b, 2.f); float32x4_t _tmp2m = vmlaq_n_f32(_tmp02a, _tmp02b, 4.f); float32x4_t _tmp3m = vmlaq_n_f32(vaddq_f32(_out0tm5, _tmp13a), _tmp13b, 8.f); vst1q_f32(tmp[0][m], _tmp0m); vst1q_f32(tmp[1][m], _tmp1m); vst1q_f32(tmp[2][m], _tmp2m); vst1q_f32(tmp[3][m], _tmp3m); output0_tm_0 += tiles * 24; output0_tm_1 += tiles * 24; output0_tm_2 += tiles * 24; output0_tm_3 += tiles * 24; output0_tm_4 += tiles * 24; output0_tm_5 += tiles * 24; } for (int m = 0; m < 4; m++) { float32x4_t _tmp00 = vld1q_f32(tmp[m][0]); float32x4_t _tmp01 = vld1q_f32(tmp[m][1]); float32x4_t _tmp02 = vld1q_f32(tmp[m][2]); float32x4_t _tmp03 = vld1q_f32(tmp[m][3]); float32x4_t _tmp04 = vld1q_f32(tmp[m][4]); float32x4_t _tmp05 = vld1q_f32(tmp[m][5]); float32x4_t _tmp02a = vaddq_f32(_tmp01, _tmp02); float32x4_t _tmp13a = vsubq_f32(_tmp01, _tmp02); float32x4_t _tmp02b = vaddq_f32(_tmp03, _tmp04); float32x4_t _tmp13b = vsubq_f32(_tmp03, _tmp04); float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp02a), _tmp02b)); float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp13a, _tmp13b, 2.f)); float32x4_t _out02 = 
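                    /* Winograd output transform: the 6x6 accumulated tile is reduced to the 4x4
                       output block A^T * m * A using the otm coefficients in the comment above, and
                       the per-channel bias (broadcast into _bias0) is added to each of the four
                       output rows before they are written back into top_blob_bordered. */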
vaddq_f32(_bias0, vmlaq_n_f32(_tmp02a, _tmp02b, 4.f)); float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vaddq_f32(_tmp05, _tmp13a), _tmp13b, 8.f)); vst1q_f32(output0, _out00); vst1q_f32(output0 + 4, _out01); vst1q_f32(output0 + 8, _out02); vst1q_f32(output0 + 12, _out03); output0 += outw * 4; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void conv3x3s2_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = (w - 2 * outw + w) * 4; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out0 = top_blob.channel(p); float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f); out0.fill(_bias0); for (int q = 0; q < inch; q++) { float* outptr0 = out0.row(0); const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); const float* kptr = (const float*)kernel.channel(p).row(q); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0] \n" // sum0 sum1 sum2 sum3 "prfm pldl1keep, [%1, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" // r00 r01 r02 r03 "prfm pldl1keep, [%1, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" // r04 r05 r06 r07 "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v20.4s, v16.4s, v0.s[0] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "fmla v23.4s, v17.4s, v6.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v20.4s, v18.4s, v0.s[2] \n" "fmla v21.4s, v18.4s, v2.s[2] \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "fmla v22.4s, v19.4s, v4.s[3] \n" "fmla v23.4s, v19.4s, v6.s[3] \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v28.4s}, [%1] \n" // r08 "fmla v20.4s, v24.4s, v1.s[0] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v24.4s, v5.s[0] \n" "fmla v23.4s, v24.4s, v7.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "fmla v22.4s, v25.4s, v5.s[1] \n" "fmla v23.4s, v25.4s, v7.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v20.4s, v26.4s, v1.s[2] \n" "fmla v21.4s, v26.4s, v3.s[2] \n" "fmla v22.4s, v26.4s, v5.s[2] \n" "fmla v23.4s, v26.4s, v7.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v27.4s, v5.s[3] \n" "fmla v23.4s, v27.4s, v7.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%2], #64 \n" // r10 r11 r12 r13 "fmla v20.4s, v16.4s, v2.s[0] \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v16.4s, v6.s[0] \n" "fmla v23.4s, v16.4s, v28.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "fmla v22.4s, v17.4s, v6.s[1] \n" "fmla v23.4s, v17.4s, v28.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" 
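                    /* conv3x3s2_pack4: direct 3x3 stride-2 convolution on pack4 data. For four
                       output pixels the kernel loads r00..r08 / r10..r18 / r20..r28 (every second
                       4-float group starts a new output column), accumulates the 3x3 taps per input
                       channel with fmla lane broadcasts, and rewinds kptr by 8 * 16 floats
                       (sub %4, %4, #512) before moving to the next input channel. */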
"fmla v20.4s, v18.4s, v2.s[2] \n" "fmla v21.4s, v18.4s, v4.s[2] \n" "fmla v22.4s, v18.4s, v6.s[2] \n" "fmla v23.4s, v18.4s, v28.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "fmla v22.4s, v19.4s, v6.s[3] \n" "fmla v23.4s, v19.4s, v28.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2], #64 \n" // r14 r15 r16 r17 "fmla v20.4s, v24.4s, v8.s[0] \n" "fmla v21.4s, v24.4s, v10.s[0] \n" "fmla v22.4s, v24.4s, v12.s[0] \n" "fmla v23.4s, v24.4s, v14.s[0] \n" "fmla v20.4s, v25.4s, v8.s[1] \n" "fmla v21.4s, v25.4s, v10.s[1] \n" "fmla v22.4s, v25.4s, v12.s[1] \n" "fmla v23.4s, v25.4s, v14.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v20.4s, v26.4s, v8.s[2] \n" "fmla v21.4s, v26.4s, v10.s[2] \n" "fmla v22.4s, v26.4s, v12.s[2] \n" "fmla v23.4s, v26.4s, v14.s[2] \n" "fmla v20.4s, v27.4s, v8.s[3] \n" "fmla v21.4s, v27.4s, v10.s[3] \n" "fmla v22.4s, v27.4s, v12.s[3] \n" "fmla v23.4s, v27.4s, v14.s[3] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v28.4s}, [%2] \n" // r18 "fmla v20.4s, v16.4s, v9.s[0] \n" "fmla v21.4s, v16.4s, v11.s[0] \n" "fmla v22.4s, v16.4s, v13.s[0] \n" "fmla v23.4s, v16.4s, v15.s[0] \n" "fmla v20.4s, v17.4s, v9.s[1] \n" "fmla v21.4s, v17.4s, v11.s[1] \n" "fmla v22.4s, v17.4s, v13.s[1] \n" "fmla v23.4s, v17.4s, v15.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v20.4s, v18.4s, v9.s[2] \n" "fmla v21.4s, v18.4s, v11.s[2] \n" "fmla v22.4s, v18.4s, v13.s[2] \n" "fmla v23.4s, v18.4s, v15.s[2] \n" "fmla v20.4s, v19.4s, v9.s[3] \n" "fmla v21.4s, v19.4s, v11.s[3] \n" "fmla v22.4s, v19.4s, v13.s[3] \n" "fmla v23.4s, v19.4s, v15.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r20 r21 r22 r23 "fmla v20.4s, v24.4s, v10.s[0] \n" "fmla v21.4s, v24.4s, v12.s[0] \n" "fmla v22.4s, v24.4s, v14.s[0] \n" "fmla v23.4s, v24.4s, v28.s[0] \n" "fmla v20.4s, v25.4s, v10.s[1] \n" "fmla v21.4s, v25.4s, v12.s[1] \n" "fmla v22.4s, v25.4s, v14.s[1] \n" "fmla v23.4s, v25.4s, v28.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v20.4s, v26.4s, v10.s[2] \n" "fmla v21.4s, v26.4s, v12.s[2] \n" "fmla v22.4s, v26.4s, v14.s[2] \n" "fmla v23.4s, v26.4s, v28.s[2] \n" "fmla v20.4s, v27.4s, v10.s[3] \n" "fmla v21.4s, v27.4s, v12.s[3] \n" "fmla v22.4s, v27.4s, v14.s[3] \n" "fmla v23.4s, v27.4s, v28.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // r24 r25 r26 r27 "fmla v20.4s, v16.4s, v0.s[0] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v16.4s, v4.s[0] \n" "fmla v23.4s, v16.4s, v6.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "fmla v23.4s, v17.4s, v6.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v20.4s, v18.4s, v0.s[2] \n" "fmla v21.4s, v18.4s, v2.s[2] \n" "fmla v22.4s, v18.4s, v4.s[2] \n" "fmla v23.4s, v18.4s, v6.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "fmla v22.4s, v19.4s, v4.s[3] \n" "fmla v23.4s, v19.4s, v6.s[3] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v28.4s}, [%3] \n" // r28 "fmla v20.4s, v24.4s, v1.s[0] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v24.4s, v5.s[0] \n" "fmla v23.4s, v24.4s, v7.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "fmla v22.4s, v25.4s, v5.s[1] \n" "fmla v23.4s, v25.4s, v7.s[1] \n" // "prfm pldl1keep, 
[%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4] \n" "fmla v20.4s, v26.4s, v1.s[2] \n" "fmla v21.4s, v26.4s, v3.s[2] \n" "fmla v22.4s, v26.4s, v5.s[2] \n" "fmla v23.4s, v26.4s, v7.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v27.4s, v5.s[3] \n" "fmla v23.4s, v27.4s, v7.s[3] \n" "fmla v20.4s, v16.4s, v2.s[0] \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v16.4s, v6.s[0] \n" "fmla v23.4s, v16.4s, v28.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "fmla v22.4s, v17.4s, v6.s[1] \n" "fmla v23.4s, v17.4s, v28.s[1] \n" "fmla v20.4s, v18.4s, v2.s[2] \n" "fmla v21.4s, v18.4s, v4.s[2] \n" "fmla v22.4s, v18.4s, v6.s[2] \n" "fmla v23.4s, v18.4s, v28.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "fmla v22.4s, v19.4s, v6.s[3] \n" "fmla v23.4s, v19.4s, v28.s[3] \n" "sub %4, %4, #512 \n" // kptr -= 8 * 16; "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(kptr) // %4 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28"); #else // __aarch64__ asm volatile( "pld [%0, #512] \n" "vldm %0, {d24-d31} \n" // sum0 sum1 sum2 sum3 "pld [%1, #512] \n" "vldm %1!, {d0-d7} \n" // r00 r01 r02 r03 "pld [%1, #512] \n" "vldm %1!, {d8-d15} \n" // r04 r05 r06 r07 "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "pld [%1, #128] \n" "vld1.f32 {d0-d1}, [%1 :128] \n" // r08 "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q8, d10[0] \n" "vmla.f32 q15, q8, d14[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q9, d10[1] \n" "vmla.f32 q15, q9, d14[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q10, d11[0] \n" "vmla.f32 q15, q10, d15[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vmla.f32 q14, q11, d11[1] \n" "vmla.f32 q15, q11, d15[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d0[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d0[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d1[1] \n" "pld [%2, #512] \n" "vldm %2!, {d8-d15} \n" // r10 r11 r12 r13 "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" // r14 r15 r16 r17 "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q12, q8, d8[0] \n" "vmla.f32 q13, q8, d12[0] \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d8[1] \n" "vmla.f32 q13, q9, d12[1] \n" "vmla.f32 q14, q9, d0[1] \n" 
"vmla.f32 q15, q9, d4[1] \n" "vmla.f32 q12, q10, d9[0] \n" "vmla.f32 q13, q10, d13[0] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d9[1] \n" "vmla.f32 q13, q11, d13[1] \n" "vmla.f32 q14, q11, d1[1] \n" "vmla.f32 q15, q11, d5[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "pld [%2, #128] \n" "vld1.f32 {d8-d9}, [%2 :128] \n" // r18 "vmla.f32 q12, q8, d10[0] \n" "vmla.f32 q13, q8, d14[0] \n" "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d10[1] \n" "vmla.f32 q13, q9, d14[1] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q9, d6[1] \n" "vmla.f32 q12, q10, d11[0] \n" "vmla.f32 q13, q10, d15[0] \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d11[1] \n" "vmla.f32 q13, q11, d15[1] \n" "vmla.f32 q14, q11, d3[1] \n" "vmla.f32 q15, q11, d7[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q12, q8, d12[0] \n" "vmla.f32 q13, q8, d0[0] \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d12[1] \n" "vmla.f32 q13, q9, d0[1] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q9, d8[1] \n" "vmla.f32 q12, q10, d13[0] \n" "vmla.f32 q13, q10, d1[0] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d13[1] \n" "vmla.f32 q13, q11, d1[1] \n" "vmla.f32 q14, q11, d5[1] \n" "vmla.f32 q15, q11, d9[1] \n" "pld [%3, #512] \n" "vldm %3!, {d0-d7} \n" // r20 r21 r22 r23 "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" // r24 r25 r26 r27 "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q12, q8, d0[0] \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q8, d8[0] \n" "vmla.f32 q15, q8, d12[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q9, d8[1] \n" "vmla.f32 q15, q9, d12[1] \n" "vmla.f32 q12, q10, d1[0] \n" "vmla.f32 q13, q10, d5[0] \n" "vmla.f32 q14, q10, d9[0] \n" "vmla.f32 q15, q10, d13[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "vmla.f32 q14, q11, d9[1] \n" "vmla.f32 q15, q11, d13[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "pld [%3, #128] \n" "vld1.f32 {d0-d1}, [%3 :128] \n" // r28 "vmla.f32 q12, q8, d2[0] \n" "vmla.f32 q13, q8, d6[0] \n" "vmla.f32 q14, q8, d10[0] \n" "vmla.f32 q15, q8, d14[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q9, d10[1] \n" "vmla.f32 q15, q9, d14[1] \n" "vmla.f32 q12, q10, d3[0] \n" "vmla.f32 q13, q10, d7[0] \n" "vmla.f32 q14, q10, d11[0] \n" "vmla.f32 q15, q10, d15[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "vmla.f32 q14, q11, d11[1] \n" "vmla.f32 q15, q11, d15[1] \n" // "pld [%4, #512] \n" "vldm %4, {d16-d23} \n" "vmla.f32 q12, q8, d4[0] \n" "vmla.f32 q13, q8, d8[0] \n" "vmla.f32 q14, q8, d12[0] \n" "vmla.f32 q15, q8, d0[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q9, d12[1] \n" "vmla.f32 q15, q9, d0[1] \n" "vmla.f32 q12, q10, d5[0] \n" "vmla.f32 q13, q10, d9[0] \n" "vmla.f32 q14, q10, d13[0] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vmla.f32 q14, q11, d13[1] \n" "vmla.f32 q15, q11, d1[1] \n" "sub %4, %4, #512 \n" // kptr -= 8 * 16; "vstm %0!, {d24-d31} \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(kptr) // %4 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; j + 1 < outw; j += 2) { #if __aarch64__ asm volatile( "prfm pldl1keep, 
[%0, #256] \n" "ld1 {v20.4s, v21.4s}, [%0] \n" // sum0 sum1 "prfm pldl1keep, [%1, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" // r00 r01 r02 r03 "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmul v22.4s, v16.4s, v0.s[0] \n" "fmul v23.4s, v16.4s, v2.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v22.4s, v18.4s, v0.s[2] \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v4.4s}, [%1] \n" // r04 "fmla v22.4s, v24.4s, v1.s[0] \n" "fmla v23.4s, v24.4s, v3.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v22.4s, v26.4s, v1.s[2] \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "fmla v23.4s, v16.4s, v4.s[0] \n" "fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r10 r11 r12 r13 "fmla v22.4s, v24.4s, v0.s[0] \n" "fmla v23.4s, v24.4s, v2.s[0] \n" "fmla v20.4s, v25.4s, v0.s[1] \n" "fmla v21.4s, v25.4s, v2.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v22.4s, v26.4s, v0.s[2] \n" "fmla v23.4s, v26.4s, v2.s[2] \n" "fmla v20.4s, v27.4s, v0.s[3] \n" "fmla v21.4s, v27.4s, v2.s[3] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v4.4s}, [%2] \n" // r14 "fmla v22.4s, v16.4s, v1.s[0] \n" "fmla v23.4s, v16.4s, v3.s[0] \n" "fmla v20.4s, v17.4s, v1.s[1] \n" "fmla v21.4s, v17.4s, v3.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v22.4s, v18.4s, v1.s[2] \n" "fmla v23.4s, v18.4s, v3.s[2] \n" "fmla v20.4s, v19.4s, v1.s[3] \n" "fmla v21.4s, v19.4s, v3.s[3] \n" "fmla v22.4s, v24.4s, v2.s[0] \n" "fmla v23.4s, v24.4s, v4.s[0] \n" "fmla v20.4s, v25.4s, v2.s[1] \n" "fmla v21.4s, v25.4s, v4.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v22.4s, v26.4s, v2.s[2] \n" "fmla v23.4s, v26.4s, v4.s[2] \n" "fmla v20.4s, v27.4s, v2.s[3] \n" "fmla v21.4s, v27.4s, v4.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r20 r21 r22 r23 "fmla v22.4s, v16.4s, v0.s[0] \n" "fmla v23.4s, v16.4s, v2.s[0] \n" "fmla v20.4s, v17.4s, v0.s[1] \n" "fmla v21.4s, v17.4s, v2.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v22.4s, v18.4s, v0.s[2] \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v19.4s, v2.s[3] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v4.4s}, [%3] \n" // r24 "fmla v22.4s, v24.4s, v1.s[0] \n" "fmla v23.4s, v24.4s, v3.s[0] \n" "fmla v20.4s, v25.4s, v1.s[1] \n" "fmla v21.4s, v25.4s, v3.s[1] \n" // "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4] \n" "fmla v22.4s, v26.4s, v1.s[2] \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v27.4s, v3.s[3] \n" "fmla v22.4s, v16.4s, v2.s[0] \n" "fmla v23.4s, v16.4s, v4.s[0] \n" 
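                    /* Narrower tails of the stride-2 kernel: the same 9-tap accumulation is repeated
                       for 2 output pixels and then for 1, splitting the sums across two accumulator
                       sets that are added together (fadd / vadd.f32) just before the store. */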
"fmla v20.4s, v17.4s, v2.s[1] \n" "fmla v21.4s, v17.4s, v4.s[1] \n" "fmla v22.4s, v18.4s, v2.s[2] \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v19.4s, v4.s[3] \n" "fadd v20.4s, v20.4s, v22.4s \n" "fadd v21.4s, v21.4s, v23.4s \n" "sub %4, %4, #512 \n" // kptr -= 8 * 16; "st1 {v20.4s, v21.4s}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(kptr) // %4 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); #else // __aarch64__ asm volatile( "pld [%0, #256] \n" "vld1.f32 {d24-d27}, [%0 :128] \n" // sum0 sum1 "pld [%1, #512] \n" "vldm %1!, {d0-d7} \n" // r00 r01 r02 r03 "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmul.f32 q14, q8, d0[0] \n" "vmul.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "pld [%1, #128] \n" "vld1.f32 {d8-d9}, [%1 :128] \n" // r04 "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" // r10 r11 r12 r13 "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "pld [%2, #128] \n" "vld1.f32 {d8-d9}, [%2 :128] \n" // r14 "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, q10, d9[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "pld [%3, #512] \n" "vldm %3!, {d0-d7} \n" // r20 r21 r22 r23 "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q14, q8, d0[0] \n" "vmla.f32 q15, q8, d4[0] \n" "vmla.f32 q12, q9, d0[1] \n" "vmla.f32 q13, q9, d4[1] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d1[1] \n" "vmla.f32 q13, q11, d5[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "pld [%3, #128] \n" "vld1.f32 {d8-d9}, [%3 :128] \n" // r24 "vmla.f32 q14, q8, d2[0] \n" "vmla.f32 q15, q8, d6[0] \n" "vmla.f32 q12, q9, d2[1] \n" "vmla.f32 q13, q9, d6[1] \n" "vmla.f32 q14, q10, d3[0] \n" "vmla.f32 q15, q10, d7[0] \n" "vmla.f32 q12, q11, d3[1] \n" "vmla.f32 q13, q11, d7[1] \n" // "pld [%4, #512] \n" "vldm %4, {d16-d23} \n" "vmla.f32 q14, q8, d4[0] \n" "vmla.f32 q15, q8, d8[0] \n" "vmla.f32 q12, q9, d4[1] \n" "vmla.f32 q13, q9, d8[1] \n" "vmla.f32 q14, q10, d5[0] \n" "vmla.f32 q15, 
q10, d9[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vmla.f32 q13, q11, d9[1] \n" "vadd.f32 q12, q12, q14 \n" "vadd.f32 q13, q13, q15 \n" "sub %4, %4, #512 \n" // kptr -= 8 * 16; "vst1.f32 {d24-d27}, [%0 :128]! \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(kptr) // %4 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; j < outw; j++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v20.4s}, [%0] \n" // sum0 "prfm pldl1keep, [%1, #384] \n" "ld1 {v0.4s, v1.4s, v2.4s}, [%1] \n" // r00 r01 r02 "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmul v21.4s, v16.4s, v0.s[0] \n" "fmul v22.4s, v17.4s, v0.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmul v23.4s, v18.4s, v0.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v24.4s, v1.s[0] \n" "fmla v22.4s, v25.4s, v1.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "prfm pldl1keep, [%2, #384] \n" "ld1 {v3.4s, v4.4s, v5.4s}, [%2] \n" // r10 r11 r12 "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "fmla v21.4s, v24.4s, v3.s[0] \n" "fmla v22.4s, v25.4s, v3.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v23.4s, v26.4s, v3.s[2] \n" "fmla v20.4s, v27.4s, v3.s[3] \n" "fmla v21.4s, v16.4s, v4.s[0] \n" "fmla v22.4s, v17.4s, v4.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v23.4s, v18.4s, v4.s[2] \n" "fmla v20.4s, v19.4s, v4.s[3] \n" "prfm pldl1keep, [%3, #384] \n" "ld1 {v0.4s, v1.4s, v2.4s}, [%3] \n" // r20 r21 r22 "fmla v21.4s, v24.4s, v5.s[0] \n" "fmla v22.4s, v25.4s, v5.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v23.4s, v26.4s, v5.s[2] \n" "fmla v20.4s, v27.4s, v5.s[3] \n" "fmla v21.4s, v16.4s, v0.s[0] \n" "fmla v22.4s, v17.4s, v0.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%4], #64 \n" "fmla v23.4s, v18.4s, v0.s[2] \n" "fmla v20.4s, v19.4s, v0.s[3] \n" "fmla v21.4s, v24.4s, v1.s[0] \n" "fmla v22.4s, v25.4s, v1.s[1] \n" // "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4] \n" "fmla v23.4s, v26.4s, v1.s[2] \n" "fmla v20.4s, v27.4s, v1.s[3] \n" "fmla v21.4s, v16.4s, v2.s[0] \n" "fmla v22.4s, v17.4s, v2.s[1] \n" "fmla v23.4s, v18.4s, v2.s[2] \n" "fmla v20.4s, v19.4s, v2.s[3] \n" "add %1, %1, #32 \n" "fadd v22.4s, v21.4s, v22.4s \n" "add %2, %2, #32 \n" "fadd v23.4s, v23.4s, v22.4s \n" "add %3, %3, #32 \n" "fadd v20.4s, v20.4s, v23.4s \n" "sub %4, %4, #512 \n" // kptr -= 8 * 16; "st1 {v20.4s}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(kptr) // %4 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); #else // __aarch64__ asm volatile( "pld [%0, #128] \n" "vld1.f32 {d24-d25}, [%0 :128] \n" // sum0 "pld [%1, #384] \n" "vldm %1, {d0-d5} \n" // r00 r01 r02 "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" 
"vmul.f32 q13, q8, d0[0] \n" "vmul.f32 q14, q9, d0[1] \n" "vmul.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q10, d3[0] \n" "vmla.f32 q12, q11, d3[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d5[1] \n" "pld [%2, #384] \n" "vldm %2, {d0-d5} \n" // r10 r11 r12 "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q13, q8, d0[0] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q10, d3[0] \n" "vmla.f32 q12, q11, d3[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d5[1] \n" "pld [%3, #384] \n" "vldm %3, {d0-d5} \n" // r20 r21 r22 "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q13, q8, d0[0] \n" "vmla.f32 q14, q9, d0[1] \n" "vmla.f32 q15, q10, d1[0] \n" "vmla.f32 q12, q11, d1[1] \n" "pld [%4, #512] \n" "vldm %4!, {d16-d23} \n" "vmla.f32 q13, q8, d2[0] \n" "vmla.f32 q14, q9, d2[1] \n" "vmla.f32 q15, q10, d3[0] \n" "vmla.f32 q12, q11, d3[1] \n" // "pld [%4, #512] \n" "vldm %4, {d16-d23} \n" "vmla.f32 q13, q8, d4[0] \n" "vmla.f32 q14, q9, d4[1] \n" "vmla.f32 q15, q10, d5[0] \n" "vmla.f32 q12, q11, d5[1] \n" "vadd.f32 q14, q14, q13 \n" "add %1, %1, #32 \n" "vadd.f32 q15, q15, q14 \n" "add %2, %2, #32 \n" "vadd.f32 q12, q12, q15 \n" "add %3, %3, #32 \n" "sub %4, %4, #512 \n" // kptr -= 8 * 16; "vst1.f32 {d24-d25}, [%0 :128]! \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(kptr) // %4 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(kptr) : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } } } static void conv3x3s2_im2col_sgemm_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; const int size = outw * outh; // im2col Mat bottom_im2col(size, 9, inch, 16u, 4, opt.workspace_allocator); { const int gap = (w * 2 - outw * 2) * 4; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < inch; p++) { const Mat img = bottom_blob.channel(p); Mat out = bottom_im2col.channel(p); float* ptr0 = out.row(0); float* ptr1 = out.row(1); float* ptr2 = out.row(2); float* ptr3 = out.row(3); float* ptr4 = out.row(4); float* ptr5 = out.row(5); float* ptr6 = out.row(6); float* ptr7 = out.row(7); float* ptr8 = out.row(8); const float* r0 = img.row(0); const float* r1 = img.row(1); const float* r2 = img.row(2); for (int i = 0; i < outh; i++) { int j = 0; for (; j + 1 < outw; j += 2) { float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r03 = vld1q_f32(r0 + 12); float32x4_t _r04 = vld1q_f32(r0 + 16); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r11 = vld1q_f32(r1 + 4); float32x4_t _r12 = vld1q_f32(r1 + 8); float32x4_t _r13 = vld1q_f32(r1 + 12); float32x4_t _r14 = vld1q_f32(r1 + 16); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r21 = vld1q_f32(r2 + 4); float32x4_t _r22 = vld1q_f32(r2 + 8); float32x4_t _r23 = 
vld1q_f32(r2 + 12); float32x4_t _r24 = vld1q_f32(r2 + 16); vst1q_f32(ptr0, _r00); vst1q_f32(ptr0 + 4, _r02); vst1q_f32(ptr1, _r01); vst1q_f32(ptr1 + 4, _r03); vst1q_f32(ptr2, _r02); vst1q_f32(ptr2 + 4, _r04); vst1q_f32(ptr3, _r10); vst1q_f32(ptr3 + 4, _r12); vst1q_f32(ptr4, _r11); vst1q_f32(ptr4 + 4, _r13); vst1q_f32(ptr5, _r12); vst1q_f32(ptr5 + 4, _r14); vst1q_f32(ptr6, _r20); vst1q_f32(ptr6 + 4, _r22); vst1q_f32(ptr7, _r21); vst1q_f32(ptr7 + 4, _r23); vst1q_f32(ptr8, _r22); vst1q_f32(ptr8 + 4, _r24); r0 += 16; r1 += 16; r2 += 16; ptr0 += 8; ptr1 += 8; ptr2 += 8; ptr3 += 8; ptr4 += 8; ptr5 += 8; ptr6 += 8; ptr7 += 8; ptr8 += 8; } for (; j < outw; j++) { float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r11 = vld1q_f32(r1 + 4); float32x4_t _r12 = vld1q_f32(r1 + 8); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r21 = vld1q_f32(r2 + 4); float32x4_t _r22 = vld1q_f32(r2 + 8); vst1q_f32(ptr0, _r00); vst1q_f32(ptr1, _r01); vst1q_f32(ptr2, _r02); vst1q_f32(ptr3, _r10); vst1q_f32(ptr4, _r11); vst1q_f32(ptr5, _r12); vst1q_f32(ptr6, _r20); vst1q_f32(ptr7, _r21); vst1q_f32(ptr8, _r22); r0 += 8; r1 += 8; r2 += 8; ptr0 += 4; ptr1 += 4; ptr2 += 4; ptr3 += 4; ptr4 += 4; ptr5 += 4; ptr6 += 4; ptr7 += 4; ptr8 += 4; } r0 += gap; r1 += gap; r2 += gap; } } } im2col_sgemm_pack4_neon(bottom_im2col, top_blob, kernel, _bias, opt); }
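/*
 * A minimal scalar sketch of the im2col layout that
 * conv3x3s2_im2col_sgemm_pack4_neon builds above, written for a single
 * unpacked float channel so the indexing stays visible: row k of the
 * column matrix holds the input sample under kernel tap k for every
 * output position. The function name and the unpacked layout are
 * illustrative assumptions, not part of the NEON implementation.
 */
static void im2col_3x3s2_scalar(const float* img, int w, int outw, int outh, float* col)
{
    // col holds 9 rows of (outw * outh) entries, one row per kernel tap
    for (int ky = 0; ky < 3; ky++)
    {
        for (int kx = 0; kx < 3; kx++)
        {
            float* dst = col + (ky * 3 + kx) * outw * outh;
            for (int oy = 0; oy < outh; oy++)
            {
                // stride-2 convolution: output row oy reads input row oy*2+ky
                const float* src = img + (oy * 2 + ky) * w + kx;
                for (int ox = 0; ox < outw; ox++)
                {
                    dst[oy * outw + ox] = src[ox * 2]; // stride-2 along x
                }
            }
        }
    }
}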
heated_plate_openmp.c
# include <stdlib.h> # include <stdio.h> # include <math.h> # include <omp.h> int main ( int argc, char *argv[] ); /******************************************************************************/ int main ( int argc, char *argv[] ) /******************************************************************************/ /* Purpose: MAIN is the main program for HEATED_PLATE_OPENMP. Discussion: This code solves the steady state heat equation on a rectangular region. The sequential version of this program needs approximately 18/epsilon iterations to complete. The physical region, and the boundary conditions, are suggested by this diagram; W = 0 +------------------+ | | W = 100 | | W = 100 | | +------------------+ W = 100 The region is covered with a grid of M by N nodes, and an N by N array W is used to record the temperature. The correspondence between array indices and locations in the region is suggested by giving the indices of the four corners: I = 0 [0][0]-------------[0][N-1] | | J = 0 | | J = N-1 | | [M-1][0]-----------[M-1][N-1] I = M-1 The steady state solution to the discrete heat equation satisfies the following condition at an interior grid point: W[Central] = (1/4) * ( W[North] + W[South] + W[East] + W[West] ) where "Central" is the index of the grid point, "North" is the index of its immediate neighbor to the "north", and so on. Given an approximate solution of the steady state heat equation, a "better" solution is given by replacing each interior point by the average of its 4 neighbors - in other words, by using the condition as an ASSIGNMENT statement: W[Central] <= (1/4) * ( W[North] + W[South] + W[East] + W[West] ) If this process is repeated often enough, the difference between successive estimates of the solution will go to zero. This program carries out such an iteration, using a tolerance specified by the user, and writes the final estimate of the solution to a file that can be used for graphic processing. Licensing: This code is distributed under the GNU LGPL license. Modified: 18 October 2011 Author: Original C version by Michael Quinn. This C version by John Burkardt. Reference: Michael Quinn, Parallel Programming in C with MPI and OpenMP, McGraw-Hill, 2004, ISBN13: 978-0071232654, LC: QA76.73.C15.Q55. Local parameters: Local, double DIFF, the norm of the change in the solution from one iteration to the next. Local, double MEAN, the average of the boundary values, used to initialize the values of the solution in the interior. Local, double U[M][N], the solution at the previous iteration. Local, double W[M][N], the solution computed at the latest iteration. */ { # define M 500 # define N 500 double diff; double epsilon = 0.001; int i; int iterations; int iterations_print; int j; double mean; double my_diff; double u[M][N]; double w[M][N]; double wtime; printf ( "\n" ); printf ( "HEATED_PLATE_OPENMP\n" ); printf ( " C/OpenMP version\n" ); printf ( " A program to solve for the steady state temperature distribution\n" ); printf ( " over a rectangular plate.\n" ); printf ( "\n" ); printf ( " Spatial grid of %d by %d points.\n", M, N ); printf ( " The iteration will be repeated until the change is <= %e\n", epsilon ); printf ( " Number of processors available = %d\n", omp_get_num_procs ( ) ); printf ( " Number of threads = %d\n", omp_get_max_threads ( ) ); /* Set the boundary values, which don't change. 
*/ mean = 0.0; #pragma omp parallel shared ( w ) private ( i, j ) { #pragma omp for for ( i = 1; i < M - 1; i++ ) { w[i][0] = 100.0; } #pragma omp for for ( i = 1; i < M - 1; i++ ) { w[i][N-1] = 100.0; } #pragma omp for for ( j = 0; j < N; j++ ) { w[M-1][j] = 100.0; } #pragma omp for for ( j = 0; j < N; j++ ) { w[0][j] = 0.0; } /* Average the boundary values, to come up with a reasonable initial value for the interior. */ #pragma omp for reduction ( + : mean ) for ( i = 1; i < M - 1; i++ ) { mean = mean + w[i][0] + w[i][N-1]; } #pragma omp for reduction ( + : mean ) for ( j = 0; j < N; j++ ) { mean = mean + w[M-1][j] + w[0][j]; } } /* OpenMP note: You cannot normalize MEAN inside the parallel region. It only gets its correct value once you leave the parallel region. So we interrupt the parallel region, set MEAN, and go back in. */ mean = mean / ( double ) ( 2 * M + 2 * N - 4 ); printf ( "\n" ); printf ( " MEAN = %f\n", mean ); /* Initialize the interior solution to the mean value. */ #pragma omp parallel shared ( mean, w ) private ( i, j ) { #pragma omp for for ( i = 1; i < M - 1; i++ ) { for ( j = 1; j < N - 1; j++ ) { w[i][j] = mean; } } } /* iterate until the new solution W differs from the old solution U by no more than EPSILON. */ iterations = 0; iterations_print = 1; printf ( "\n" ); printf ( " Iteration Change\n" ); printf ( "\n" ); wtime = omp_get_wtime ( ); diff = epsilon; while ( epsilon <= diff ) { # pragma omp parallel shared ( u, w ) private ( i, j ) { /* Save the old solution in U. */ # pragma omp for for ( i = 0; i < M; i++ ) { for ( j = 0; j < N; j++ ) { u[i][j] = w[i][j]; } } /* Determine the new estimate of the solution at the interior points. The new solution W is the average of north, south, east and west neighbors. */ # pragma omp for for ( i = 1; i < M - 1; i++ ) { for ( j = 1; j < N - 1; j++ ) { w[i][j] = ( u[i-1][j] + u[i+1][j] + u[i][j-1] + u[i][j+1] ) / 4.0; } } } /* C and C++ cannot compute a maximum as a reduction operation. Therefore, we define a private variable MY_DIFF for each thread. Once they have all computed their values, we use a CRITICAL section to update DIFF. */ diff = 0.0; # pragma omp parallel shared ( diff, u, w ) private ( i, j, my_diff ) { my_diff = 0.0; # pragma omp for for ( i = 1; i < M - 1; i++ ) { for ( j = 1; j < N - 1; j++ ) { if ( my_diff < fabs ( w[i][j] - u[i][j] ) ) { my_diff = fabs ( w[i][j] - u[i][j] ); } } } # pragma omp critical { if ( diff < my_diff ) { diff = my_diff; } } } iterations++; if ( iterations == iterations_print ) { printf ( " %8d %f\n", iterations, diff ); iterations_print = 2 * iterations_print; } } wtime = omp_get_wtime ( ) - wtime; printf ( "\n" ); printf ( " %8d %f\n", iterations, diff ); printf ( "\n" ); printf ( " Error tolerance achieved.\n" ); printf ( " Wallclock time = %f\n", wtime ); /* Terminate. */ printf ( "\n" ); printf ( "HEATED_PLATE_OPENMP:\n" ); printf ( " Normal end of execution.\n" ); return 0; # undef M # undef N }
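/*
  A minimal sketch of the same error norm written with the max reduction
  that OpenMP 3.1 and later provides for C, assuming an OpenMP 3.1-capable
  compiler; it replaces the private MY_DIFF / critical-section pattern used
  above. The helper name and the variably-sized array parameters are
  illustrative only.
*/
#include <math.h>

static double max_abs_diff ( int m, int n, double u[m][n], double w[m][n] )
{
  double diff = 0.0;
  int i, j;
/*
  Each thread keeps a private running maximum; OpenMP combines the thread
  results with the built-in max operation when the parallel loop finishes.
*/
# pragma omp parallel for reduction ( max : diff ) private ( j )
  for ( i = 1; i < m - 1; i++ )
  {
    for ( j = 1; j < n - 1; j++ )
    {
      if ( diff < fabs ( w[i][j] - u[i][j] ) )
      {
        diff = fabs ( w[i][j] - u[i][j] );
      }
    }
  }
  return diff;
}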
SuperRayGenerator.h
/* * Copyright(c) 2016, Youngsun Kwon, Donghyuk Kim, and Sung-eui Yoon, KAIST * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met : * * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and / or other materials provided with the distribution. * * Neither the name of SuperRay nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #ifndef OCTOMAP_SUPERRAY_SUPERRAY_GENERATOR_H #define OCTOMAP_SUPERRAY_SUPERRAY_GENERATOR_H #include <octomap/octomap_types.h> #include <octomap/OcTreeKey.h> #include <octomap/Pointcloud.h> #include <octomap_superray/SuperRayCloud.h> #ifdef _OPENMP #include <omp.h> #pragma omp declare reduction (merge : std::vector<octomap::SuperRay> : omp_out.insert(omp_out.end(), omp_in.begin(), omp_in.end())) #endif namespace octomap{ class SuperRayGenerator{ public: SuperRayGenerator(const double _resolution, const unsigned int _tree_max_val, const int _threshold = 0); ~SuperRayGenerator() {}; void GenerateSuperRay(const octomap::Pointcloud& _pc, const octomap::point3d& _origin, SuperRayCloud& _srcloud); protected: struct VoxelInfo; struct Axis3D; octomap::point3d originW; // origin point in World Space octomap::OcTreeKey originKey; // origin key // constants for generating super rays double RESOLUTION; // resolution double RESOLUTION_FACTOR; // 1.0 / resolution unsigned int TREE_MAX_VAL; // offset unsigned int THRESHOLD; // threshold for limiting to generate super rays for each voxel // Functions for generating super rays void GenerateSuperRay(const point3d_collection& _pointlist, std::vector<SuperRay>& _srcloud); void GenerateSuperRay2D(const point3d_collection& _pointlist, Axis3D& _axis, VoxelInfo& _voxelinfo, std::vector<SuperRay>& _srcloud); void GenerateSuperRay3D(const point3d_collection& _pointlist, Axis3D& _axis, VoxelInfo& _voxelinfo, std::vector<SuperRay>& _srcloud); // Function for generating mapping line in 2-D double GenerateMappingLine(VoxelInfo& _voxelinfo, const unsigned int& _axisX, const unsigned int& _axisY, std::vector<double>& _mappingPlane); // Utility functions typedef unordered_ns::unordered_map<octomap::OcTreeKey, std::vector<octomap::point3d>, octomap::OcTreeKey::KeyHash> Voxelized_Pointclouds; void ComputeAxis(const octomap::point3d& _min, const octomap::point3d& _max, Axis3D& _axis); // Re-implmentation for Key / coordinate conversion 
functions inline octomap::OcTreeKey coordToKey(const octomap::point3d& coord) const { return octomap::OcTreeKey(coordToKey(coord(0)), coordToKey(coord(1)), coordToKey(coord(2))); } inline octomap::key_type coordToKey(double coordinate) const { return ((int)floor(RESOLUTION_FACTOR * coordinate)) + TREE_MAX_VAL; } // Structures that represent the traversal information struct VoxelInfo{ VoxelInfo(void) {}; // Voxel Info. octomap::point3d minW; // min position of voxel octomap::point3d maxW; // max position of voxel octomap::OcTreeKey voxelKey; // key of voxel }; struct Axis3D{ Axis3D(void) : axisU(0), axisV(1), axisK(2) {}; unsigned int axisU; // Nearest Axis unsigned int axisV; // unsigned int axisK; // Farthest Axis }; }; } #endif
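/*
 * A minimal sketch of the coordinate-to-key discretization declared above,
 * assuming resolution = 0.05 and tree_max_val = 32768; both constants are
 * illustrative defaults, not values read from this header, and the helper
 * names are hypothetical.
 */
#include <math.h>

static unsigned short coord_to_key(double coordinate, double resolution_factor,
                                   unsigned int tree_max_val)
{
    /* Discretize the metric coordinate to a voxel index, then shift it
       into the unsigned key range centered on tree_max_val. */
    return (unsigned short)(((int)floor(resolution_factor * coordinate)) + tree_max_val);
}

static double key_to_coord(unsigned short key, double resolution,
                           unsigned int tree_max_val)
{
    /* Inverse mapping: voxel index back to the metric center of that voxel. */
    return ((double)((int)key - (int)tree_max_val) + 0.5) * resolution;
}

/* Worked example: x = 1.23 with resolution 0.05 gives floor(24.6) = 24,
   so key = 24 + 32768 = 32792 and the recovered voxel center is 1.225. */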
t.c
/*************************************************************************** * * (C) Copyright 2007 The Board of Trustees of the * University of Illinois * All Rights Reserved * * MRI-Q: Magnetic Resonance Imaging * Computes a matrix Q, representing the scanner configuration for * calibration, used in a 3D magnetic resonance image reconstruction * algorithms in non-Cartesian space. * ***************************************************************************/ /*************************************************************************** * * This benchmark was adapted to run on GPUs with OpenMP 4.0 pragmas * and OpenCL driver implemented in gpuclang 2.1 (based on clang 3.5) * * Marcio M Pereira <mpereira@ic.unicamp.br> * ***************************************************************************/ /* * C code for creating the Q data structure for fast convolution-based * Hessian multiplication for arbitrary k-space trajectories. * * Inputs: * kx - VECTOR of kx values, same length as ky and kz * ky - VECTOR of ky values, same length as kx and kz * kz - VECTOR of kz values, same length as kx and ky * x - VECTOR of x values, same length as y and z * y - VECTOR of y values, same length as x and z * z - VECTOR of z values, same length as x and y * phi - VECTOR of the Fourier transform of the spatial basis * function, evaluated at [kx, ky, kz]. Same length as kx, ky, and kz. */ /* * === NOTE === * * The Polyhedral optmization used in gpuclang restricts the class of loops it * can manipulate to sequences of imperfectly nested loops with particular * constraints on the loop bound and array subscript expressions. * * To allow this optimization we fixed the problem size with __STATIC__ tag * Comment this tag to use the original version. * * Recommended gpuclang options: * -O3 -lm -ffast-math -opt-poly=tile */ #ifndef __STATIC__ #define __STATIC__ #endif #include <stdio.h> #include <math.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <inttypes.h> #include <sys/time.h> #ifdef __APPLE__ #include <sys/malloc.h> #include <machine/endian.h> #else #include <endian.h> #include <malloc.h> #endif #if _POSIX_VERSION >= 200112L # include <sys/time.h> #endif #if __BYTE_ORDER != __LITTLE_ENDIAN # error "File I/O is not implemented for this system: wrong endianness." #endif #define SMALL_FLOAT_VAL 0.00000001f #define ERROR_THRESHOLD 0.5 #define GPU_DEVICE 1 #define PI 3.1415926535897932384626433832795029f #define PIx2 6.2831853071795864769252867665590058f #define MIN(X,Y) ((X) < (Y) ? (X) : (Y)) #ifdef __STATIC__ // Define statically the problem size #define NK 2048 // K_ELEMS_PER_GRID #define NX 262144 #else int NK, NX; #endif double t_start, t_end, t_start_GPU, t_end_GPU; double rtclock() { struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday (&Tp, &Tzp); if (stat != 0) printf("Error return from gettimeofday: %d",stat); return(Tp.tv_sec + Tp.tv_usec*1.0e-6); } float absVal(float a) { if(a < 0) return (a * -1); else return a; } float percentDiff(double val1, double val2) { if ((absVal(val1) < 0.01) && (absVal(val2) < 0.01)) return 0.0f; else return 100.0f * (absVal(absVal(val1 - val2) / absVal(val1 + SMALL_FLOAT_VAL))); } /* Command line parameters for benchmark */ struct pb_Parameters { char *outFile; /* If not NULL, the raw output of the * computation should be saved to this * file. The string is owned. */ char **inpFiles; /* A NULL-terminated array of strings * holding the input file(s) for the * computation. The array and strings * are owned. */ }; /* A time or duration. 
*/ #if _POSIX_VERSION >= 200112L typedef unsigned long long pb_Timestamp; /* time in microseconds */ #else # error "Timestamps not implemented" #endif enum pb_TimerState { pb_Timer_STOPPED, pb_Timer_RUNNING, }; struct pb_Timer { enum pb_TimerState state; pb_Timestamp elapsed; /* Amount of time elapsed so far */ pb_Timestamp init; /* Beginning of the current time interval, * if state is RUNNING. End of the last * recorded time interfal otherwise. */ }; /* Execution time is assigned to one of these categories. */ enum pb_TimerID { pb_TimerID_NONE = 0, pb_TimerID_IO, /* Time spent in input/output */ pb_TimerID_KERNEL, /* Time spent computing on the device, * recorded asynchronously */ pb_TimerID_COPY, /* Time spent synchronously moving data * to/from device and allocating/freeing * memory on the device */ pb_TimerID_DRIVER, /* Time spent in the host interacting with the * driver, primarily for recording the time * spent queueing asynchronous operations */ pb_TimerID_COPY_ASYNC, /* Time spent in asynchronous transfers */ pb_TimerID_COMPUTE, /* Time for all program execution other * than parsing command line arguments, * I/O, kernel, and copy */ pb_TimerID_OVERLAP, /* Time double-counted in asynchronous and * host activity: automatically filled in, * not intended for direct usage */ pb_TimerID_LAST /* Number of timer IDs */ }; /* Dynamic list of asynchronously tracked times between events */ struct pb_async_time_marker_list { char *label; // actually just a pointer to a string enum pb_TimerID timerID; /* The ID to which the interval beginning * with this marker should be attributed */ void * marker; //cudaEvent_t marker; /* The driver event for this marker */ struct pb_async_time_marker_list *next; }; struct pb_SubTimer { char *label; struct pb_Timer timer; struct pb_SubTimer *next; }; struct pb_SubTimerList { struct pb_SubTimer *current; struct pb_SubTimer *subtimer_list; }; /* A set of timers for recording execution times. */ struct pb_TimerSet { enum pb_TimerID current; struct pb_async_time_marker_list* async_markers; pb_Timestamp async_begin; pb_Timestamp wall_begin; struct pb_Timer timers[pb_TimerID_LAST]; struct pb_SubTimerList *sub_timer_list[pb_TimerID_LAST]; }; /* Free an array of owned strings. */ static void free_string_array(char **string_array) { char **p; if (!string_array) return; for (p = string_array; *p; p++) free(*p); free(string_array); } /* Parse a comma-delimited list of strings into an * array of strings. */ static char ** read_string_array(char *in) { char **ret; int i; int count; /* Number of items in the input */ char *substring; /* Current substring within 'in' */ /* Count the number of items in the string */ count = 1; for (i = 0; in[i]; i++) if (in[i] == ',') count++; /* Allocate storage */ ret = (char **)malloc((count + 1) * sizeof(char *)); /* Create copies of the strings from the list */ substring = in; for (i = 0; i < count; i++) { char *substring_end; int substring_length; /* Find length of substring */ for (substring_end = substring; (*substring_end != ',') && (*substring_end != 0); substring_end++); substring_length = substring_end - substring; /* Allocate memory and copy the substring */ ret[i] = (char *)malloc(substring_length + 1); memcpy(ret[i], substring, substring_length); ret[i][substring_length] = 0; /* go to next substring */ substring = substring_end + 1; } ret[i] = NULL; /* Write the sentinel value */ return ret; } struct argparse { int argc; /* Number of arguments. Mutable. */ char **argv; /* Argument values. Immutable. 
*/ int argn; /* Current argument number. */ char **argv_get; /* Argument value being read. */ char **argv_put; /* Argument value being written. * argv_put <= argv_get. */ }; static void initialize_argparse(struct argparse *ap, int argc, char **argv) { ap->argc = argc; ap->argn = 0; ap->argv_get = ap->argv_put = ap->argv = argv; } static void finalize_argparse(struct argparse *ap) { /* Move the remaining arguments */ for(; ap->argn < ap->argc; ap->argn++) *ap->argv_put++ = *ap->argv_get++; } /* Delete the current argument. */ static void delete_argument(struct argparse *ap) { if (ap->argn >= ap->argc) { fprintf(stderr, "delete_argument\n"); } ap->argc--; ap->argv_get++; } /* Go to the next argument. Also, move the current argument to its * final location in argv. */ static void next_argument(struct argparse *ap) { if (ap->argn >= ap->argc) { fprintf(stderr, "next_argument\n"); } /* Move argument to its new location. */ *ap->argv_put++ = *ap->argv_get++; ap->argn++; } static int is_end_of_arguments(struct argparse *ap) { return ap->argn == ap->argc; } static char * get_argument(struct argparse *ap) { return *ap->argv_get; } static char * consume_argument(struct argparse *ap) { char *ret = get_argument(ap); delete_argument(ap); return ret; } void pb_FreeParameters(struct pb_Parameters *p) { char **cpp; free(p->outFile); free_string_array(p->inpFiles); free(p); } struct pb_Parameters * pb_ReadParameters(int *_argc, char **argv) { char *err_message; struct argparse ap; struct pb_Parameters *ret = (struct pb_Parameters *)malloc(sizeof(struct pb_Parameters)); /* Initialize the parameters structure */ ret->outFile = NULL; ret->inpFiles = (char **)malloc(sizeof(char *)); ret->inpFiles[0] = NULL; /* Each argument */ initialize_argparse(&ap, *_argc, argv); while(!is_end_of_arguments(&ap)) { char *arg = get_argument(&ap); /* Single-character flag */ if ((arg[0] == '-') && (arg[1] != 0) && (arg[2] == 0)) { delete_argument(&ap); /* This argument is consumed here */ switch(arg[1]) { case 'o': /* Output file name */ if (is_end_of_arguments(&ap)) { err_message = "Expecting file name after '-o'\n"; goto error; } free(ret->outFile); ret->outFile = strdup(consume_argument(&ap)); break; case 'i': /* Input file name */ if (is_end_of_arguments(&ap)) { err_message = "Expecting file name after '-i'\n"; goto error; } ret->inpFiles = read_string_array(consume_argument(&ap)); break; case '-': /* End of options */ goto end_of_options; default: err_message = "Unexpected command-line parameter\n"; goto error; } } else { /* Other parameters are ignored */ next_argument(&ap); } } /* end for each argument */ end_of_options: *_argc = ap.argc; /* Save the modified argc value */ finalize_argparse(&ap); return ret; error: fputs(err_message, stderr); pb_FreeParameters(ret); return NULL; } int pb_Parameters_CountInputs(struct pb_Parameters *p) { int n; for (n = 0; p->inpFiles[n]; n++); return n; } /*****************************************************************************/ /* Timer routines */ static void accumulate_time(pb_Timestamp *accum, pb_Timestamp start, pb_Timestamp end) { #if _POSIX_VERSION >= 200112L *accum += end - start; #else # error "Timestamps not implemented for this system" #endif } #if _POSIX_VERSION >= 200112L static pb_Timestamp get_time() { struct timeval tv; gettimeofday(&tv, NULL); return (pb_Timestamp) (tv.tv_sec * 1000000LL + tv.tv_usec); } #else # error "no supported time libraries are available on this platform" #endif void pb_ResetTimer(struct pb_Timer *timer) { timer->state = pb_Timer_STOPPED; #if 
_POSIX_VERSION >= 200112L timer->elapsed = 0; #else # error "pb_ResetTimer: not implemented for this system" #endif } void pb_StartTimer(struct pb_Timer *timer) { if (timer->state != pb_Timer_STOPPED) { fputs("Ignoring attempt to start a running timer\n", stderr); return; } timer->state = pb_Timer_RUNNING; #if _POSIX_VERSION >= 200112L { struct timeval tv; gettimeofday(&tv, NULL); timer->init = tv.tv_sec * 1000000LL + tv.tv_usec; } #else # error "pb_StartTimer: not implemented for this system" #endif } void pb_StartTimerAndSubTimer(struct pb_Timer *timer, struct pb_Timer *subtimer) { unsigned int numNotStopped = 0x3; // 11 if (timer->state != pb_Timer_STOPPED) { fputs("Warning: Timer was not stopped\n", stderr); numNotStopped &= 0x1; // Zero out 2^1 } if (subtimer->state != pb_Timer_STOPPED) { fputs("Warning: Subtimer was not stopped\n", stderr); numNotStopped &= 0x2; // Zero out 2^0 } if (numNotStopped == 0x0) { fputs("Ignoring attempt to start running timer and subtimer\n", stderr); return; } timer->state = pb_Timer_RUNNING; subtimer->state = pb_Timer_RUNNING; #if _POSIX_VERSION >= 200112L { struct timeval tv; gettimeofday(&tv, NULL); if (numNotStopped & 0x2) { timer->init = tv.tv_sec * 1000000LL + tv.tv_usec; } if (numNotStopped & 0x1) { subtimer->init = tv.tv_sec * 1000000LL + tv.tv_usec; } } #else # error "pb_StartTimer: not implemented for this system" #endif } void pb_StopTimer(struct pb_Timer *timer) { pb_Timestamp fini; if (timer->state != pb_Timer_RUNNING) { fputs("Ignoring attempt to stop a stopped timer\n", stderr); return; } timer->state = pb_Timer_STOPPED; #if _POSIX_VERSION >= 200112L { struct timeval tv; gettimeofday(&tv, NULL); fini = tv.tv_sec * 1000000LL + tv.tv_usec; } #else # error "pb_StopTimer: not implemented for this system" #endif accumulate_time(&timer->elapsed, timer->init, fini); timer->init = fini; } void pb_StopTimerAndSubTimer(struct pb_Timer *timer, struct pb_Timer *subtimer) { pb_Timestamp fini; unsigned int numNotRunning = 0x3; // 0b11 if (timer->state != pb_Timer_RUNNING) { fputs("Warning: Timer was not running\n", stderr); numNotRunning &= 0x1; // Zero out 2^1 } if (subtimer->state != pb_Timer_RUNNING) { fputs("Warning: Subtimer was not running\n", stderr); numNotRunning &= 0x2; // Zero out 2^0 } if (numNotRunning == 0x0) { fputs("Ignoring attempt to stop stopped timer and subtimer\n", stderr); return; } timer->state = pb_Timer_STOPPED; subtimer->state = pb_Timer_STOPPED; #if _POSIX_VERSION >= 200112L { struct timeval tv; gettimeofday(&tv, NULL); fini = tv.tv_sec * 1000000LL + tv.tv_usec; } #else # error "pb_StopTimer: not implemented for this system" #endif if (numNotRunning & 0x2) { accumulate_time(&timer->elapsed, timer->init, fini); timer->init = fini; } if (numNotRunning & 0x1) { accumulate_time(&subtimer->elapsed, subtimer->init, fini); subtimer->init = fini; } } /* Get the elapsed time in seconds. */ double pb_GetElapsedTime(struct pb_Timer *timer) { double ret; if (timer->state != pb_Timer_STOPPED) { fputs("Elapsed time from a running timer is inaccurate\n", stderr); } #if _POSIX_VERSION >= 200112L ret = timer->elapsed / 1e6; #else # error "pb_GetElapsedTime: not implemented for this system" #endif return ret; } void pb_InitializeTimerSet(struct pb_TimerSet *timers) { int n; timers->wall_begin = get_time(); timers->current = pb_TimerID_NONE; timers->async_markers = NULL; for (n = 0; n < pb_TimerID_LAST; n++) { pb_ResetTimer(&timers->timers[n]); timers->sub_timer_list[n] = NULL; // free first? 
} } void pb_AddSubTimer(struct pb_TimerSet *timers, char *label, enum pb_TimerID pb_Category) { struct pb_SubTimer *subtimer = (struct pb_SubTimer *) malloc (sizeof(struct pb_SubTimer)); int len = strlen(label); subtimer->label = (char *) malloc (sizeof(char)*(len+1)); sprintf(subtimer->label, "%s\n", label); pb_ResetTimer(&subtimer->timer); subtimer->next = NULL; struct pb_SubTimerList *subtimerlist = timers->sub_timer_list[pb_Category]; if (subtimerlist == NULL) { subtimerlist = (struct pb_SubTimerList *) malloc (sizeof(struct pb_SubTimerList)); subtimerlist->subtimer_list = subtimer; timers->sub_timer_list[pb_Category] = subtimerlist; } else { // Append to list struct pb_SubTimer *element = subtimerlist->subtimer_list; while (element->next != NULL) { element = element->next; } element->next = subtimer; } } void pb_SwitchToSubTimer(struct pb_TimerSet *timers, char *label, enum pb_TimerID category) { // switchToSub( NULL, NONE // switchToSub( NULL, some // switchToSub( some, some // switchToSub( some, NONE -- tries to find "some" in NONE's sublist, which won't be printed struct pb_Timer *topLevelToStop = NULL; if (timers->current != category && timers->current != pb_TimerID_NONE) { // Switching to subtimer in a different category needs to stop the top-level current, different categoried timer. // NONE shouldn't have a timer associated with it, so exclude from branch topLevelToStop = &timers->timers[timers->current]; } struct pb_SubTimerList *subtimerlist = timers->sub_timer_list[timers->current]; struct pb_SubTimer *curr = (subtimerlist == NULL) ? NULL : subtimerlist->current; if (timers->current != pb_TimerID_NONE) { if (curr != NULL && topLevelToStop != NULL) { pb_StopTimerAndSubTimer(topLevelToStop, &curr->timer); } else if (curr != NULL) { pb_StopTimer(&curr->timer); } else { pb_StopTimer(topLevelToStop); } } subtimerlist = timers->sub_timer_list[category]; struct pb_SubTimer *subtimer = NULL; if (label != NULL) { subtimer = subtimerlist->subtimer_list; while (subtimer != NULL) { if (strcmp(subtimer->label, label) == 0) { break; } else { subtimer = subtimer->next; } } } if (category != pb_TimerID_NONE) { if (subtimerlist != NULL) { subtimerlist->current = subtimer; } if (category != timers->current && subtimer != NULL) { pb_StartTimerAndSubTimer(&timers->timers[category], &subtimer->timer); } else if (subtimer != NULL) { // Same category, different non-NULL subtimer pb_StartTimer(&subtimer->timer); } else{ // Different category, but no subtimer (not found or specified as NULL) -- unprefered way of setting topLevel timer pb_StartTimer(&timers->timers[category]); } } timers->current = category; } void pb_SwitchToTimer(struct pb_TimerSet *timers, enum pb_TimerID timer) { /* Stop the currently running timer */ if (timers->current != pb_TimerID_NONE) { struct pb_SubTimer *currSubTimer = NULL; struct pb_SubTimerList *subtimerlist = timers->sub_timer_list[timers->current]; if ( subtimerlist != NULL) { currSubTimer = timers->sub_timer_list[timers->current]->current; } if ( currSubTimer!= NULL) { pb_StopTimerAndSubTimer(&timers->timers[timers->current], &currSubTimer->timer); } else { pb_StopTimer(&timers->timers[timers->current]); } } timers->current = timer; if (timer != pb_TimerID_NONE) { pb_StartTimer(&timers->timers[timer]); } } void pb_PrintTimerSet(struct pb_TimerSet *timers) { pb_Timestamp wall_end = get_time(); struct pb_Timer *t = timers->timers; struct pb_SubTimer* sub = NULL; int maxSubLength; const char *categories[] = { "IO", "Kernel", "Copy", "Driver", "Copy Async", "Compute" }; 
const int maxCategoryLength = 10; int i; for(i = 1; i < pb_TimerID_LAST-1; ++i) { // exclude NONE and OVRELAP from this format if(pb_GetElapsedTime(&t[i]) != 0) { // Print Category Timer printf("%-*s: %f\n", maxCategoryLength, categories[i-1], pb_GetElapsedTime(&t[i])); if (timers->sub_timer_list[i] != NULL) { sub = timers->sub_timer_list[i]->subtimer_list; maxSubLength = 0; while (sub != NULL) { // Find longest SubTimer label if (strlen(sub->label) > maxSubLength) { maxSubLength = strlen(sub->label); } sub = sub->next; } // Fit to Categories if (maxSubLength <= maxCategoryLength) { maxSubLength = maxCategoryLength; } sub = timers->sub_timer_list[i]->subtimer_list; // Print SubTimers while (sub != NULL) { printf(" -%-*s: %f\n", maxSubLength, sub->label, pb_GetElapsedTime(&sub->timer)); sub = sub->next; } } } } if(pb_GetElapsedTime(&t[pb_TimerID_OVERLAP]) != 0) printf("CPU/Kernel Overlap: %f\n", pb_GetElapsedTime(&t[pb_TimerID_OVERLAP])); float walltime = (wall_end - timers->wall_begin)/ 1e6; printf("Timer Wall Time: %f\n", walltime); } void pb_DestroyTimerSet(struct pb_TimerSet * timers) { /* clean up all of the async event markers */ struct pb_async_time_marker_list ** event = &(timers->async_markers); while( *event != NULL) { struct pb_async_time_marker_list ** next = &((*event)->next); free(*event); (*event) = NULL; event = next; } int i = 0; for(i = 0; i < pb_TimerID_LAST; ++i) { if (timers->sub_timer_list[i] != NULL) { struct pb_SubTimer *subtimer = timers->sub_timer_list[i]->subtimer_list; struct pb_SubTimer *prev = NULL; while (subtimer != NULL) { free(subtimer->label); prev = subtimer; subtimer = subtimer->next; free(prev); } free(timers->sub_timer_list[i]); } } } float *Qr_GPU, *Qi_GPU; /* Q signal (complex) */ float *Qr_CPU, *Qi_CPU; /* Q signal (complex) */ struct kValues { float Kx; float Ky; float Kz; float PhiMag; }; void ComputePhiMagCPU(float* phiR, float* phiI, float* phiMag) { int indexK = 0; for (indexK = 0; indexK < NK; indexK++) { float real = phiR[indexK]; float imag = phiI[indexK]; phiMag[indexK] = real*real + imag*imag; } } void ComputeQGPU(struct kValues *kVals, float* x, float* y, float* z, float *Qr, float *Qi) { int indexK, indexX; #pragma omp target device(1) #pragma omp target map(to: kVals[:NK], x[:NX], y[:NX], z[:NX]) map(tofrom: Qr[:NX], Qi[:NX]) #pragma omp parallel for for (indexK = 0; indexK < NK; indexK++) { for (indexX = 0; indexX < NX; indexX++) { float expArg = PIx2 * (kVals[indexK].Kx * x[indexX] + kVals[indexK].Ky * y[indexX] + kVals[indexK].Kz * z[indexX]); float cosArg = cos(expArg); float sinArg = sin(expArg); float phi = kVals[indexK].PhiMag; Qr[indexX] += phi * cosArg; Qi[indexX] += phi * sinArg; } } } void ComputeQCPU(struct kValues *kVals, float* x, float* y, float* z, float *Qr, float *Qi) { int indexK, indexX; for (indexK = 0; indexK < NK; indexK++) { for (indexX = 0; indexX < NX; indexX++) { float expArg = PIx2 * (kVals[indexK].Kx * x[indexX] + kVals[indexK].Ky * y[indexX] + kVals[indexK].Kz * z[indexX]); float cosArg = cos(expArg); float sinArg = sin(expArg); float phi = kVals[indexK].PhiMag; Qr[indexX] += phi * cosArg; Qi[indexX] += phi * sinArg; } } } void createDataStructsCPU(float** phiMag, float** Qr, float** Qi) { *phiMag = (float* ) malloc(NK * sizeof(float)); *Qr = (float*) malloc(NX * sizeof (float)); memset((void *)*Qr, 0, NX * sizeof(float)); *Qi = (float*) malloc(NX * sizeof (float)); memset((void *)*Qi, 0, NX * sizeof(float)); } void inputData(char* fName, int* _numK, int* _numX, float** kx, float** ky, float** kz, float** 
x, float** y, float** z, float** phiR, float** phiI) { int numK, numX; FILE* fid = fopen(fName, "r"); if (fid == NULL) { fprintf(stderr, "Cannot open input file\n"); exit(-1); } fread (&numK, sizeof (int), 1, fid); *_numK = numK; fread (&numX, sizeof (int), 1, fid); *_numX = numX; *kx = (float *) malloc(numK * sizeof (float)); fread (*kx, sizeof (float), numK, fid); *ky = (float *) malloc(numK * sizeof (float)); fread (*ky, sizeof (float), numK, fid); *kz = (float *) malloc(numK * sizeof (float)); fread (*kz, sizeof (float), numK, fid); *x = (float *) malloc(numX * sizeof (float)); fread (*x, sizeof (float), numX, fid); *y = (float *) malloc(numX * sizeof (float)); fread (*y, sizeof (float), numX, fid); *z = (float *) malloc(numX * sizeof (float)); fread (*z, sizeof (float), numX, fid); *phiR = (float *) malloc(numK * sizeof (float)); fread (*phiR, sizeof (float), numK, fid); *phiI = (float *) malloc(numK * sizeof (float)); fread (*phiI, sizeof (float), numK, fid); fclose (fid); } void compareResults(float *A, float *A_GPU, float *B, float *B_GPU) { int i,fail=0; for (i=0; i < NX; i++) { if (percentDiff(A[i], A_GPU[i]) > ERROR_THRESHOLD) { fail++; } } for (i=0; i < NX; i++) { if (percentDiff(B[i], B_GPU[i]) > ERROR_THRESHOLD) { fail++; } } // print results printf(">>\n Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f%s: %d\n", ERROR_THRESHOLD, "%", fail); } double mriqGPU(int argc, char *argv[]) { int numX, numK; /* Number of X and K values */ int original_numK; /* Number of K values in input file */ float *kx, *ky, *kz; /* K trajectory (3D vectors) */ float *x, *y, *z; /* X coordinates (3D vectors) */ float *phiR, *phiI; /* Phi values (complex) */ float *phiMag; /* Magnitude of Phi */ struct kValues* kVals; struct pb_Parameters *params; /* Read command line */ params = pb_ReadParameters(&argc, argv); if ((params->inpFiles[0] == NULL) || (params->inpFiles[1] != NULL)) { fprintf(stderr, "Expecting one input filename\n"); exit(-1); } /* Read in data */ fprintf(stdout, "<< Reading data ... "); inputData(params->inpFiles[0], &original_numK, &numX, &kx, &ky, &kz, &x, &y, &z, &phiR, &phiI); /* Reduce the number of k-space samples if a number is given * on the command line */ if (argc < 2) numK = original_numK; else { int inputK; char *end; inputK = strtol(argv[1], &end, 10); if (end == argv[1]) { fprintf(stderr, "Expecting an integer parameter\n"); exit(-1); } numK = MIN(inputK, original_numK); } #ifndef __STATIC__ NK = numK; NX = numX; #endif /* Create CPU data structures */ createDataStructsCPU(&phiMag, &Qr_GPU, &Qi_GPU); ComputePhiMagCPU(phiR, phiI, phiMag); kVals = (struct kValues*)calloc(numK, sizeof (struct kValues)); int k; for (k = 0; k < numK; k++) { kVals[k].Kx = kx[k]; kVals[k].Ky = ky[k]; kVals[k].Kz = kz[k]; kVals[k].PhiMag = phiMag[k]; } fprintf(stdout, ">>\n<< Start computation on GPU... 
"); t_start_GPU = rtclock(); ComputeQGPU(kVals, x, y, z, Qr_GPU, Qi_GPU); t_end_GPU = rtclock(); free (kx); free (ky); free (kz); free (x); free (y); free (z); free (phiR); free (phiI); free (phiMag); free (kVals); return t_end_GPU - t_start_GPU; } double mriqCPU(int argc, char *argv[]) { int numX, numK; /* Number of X and K values */ int original_numK; /* Number of K values in input file */ float *kx, *ky, *kz; /* K trajectory (3D vectors) */ float *x, *y, *z; /* X coordinates (3D vectors) */ float *phiR, *phiI; /* Phi values (complex) */ float *phiMag; /* Magnitude of Phi */ struct kValues* kVals; struct pb_Parameters *params; /* Read command line */ params = pb_ReadParameters(&argc, argv); if ((params->inpFiles[0] == NULL) || (params->inpFiles[1] != NULL)) { fprintf(stderr, "Expecting one input filename\n"); exit(-1); } /* Read in data */ inputData(params->inpFiles[0], &original_numK, &numX, &kx, &ky, &kz, &x, &y, &z, &phiR, &phiI); /* Reduce the number of k-space samples if a number is given * on the command line */ if (argc < 2) numK = original_numK; else { int inputK; char *end; inputK = strtol(argv[1], &end, 10); if (end == argv[1]) { fprintf(stderr, "Expecting an integer parameter\n"); exit(-1); } numK = MIN(inputK, original_numK); } #ifndef __STATIC__ NK = numK; NX = numX; #endif /* Create CPU data structures */ createDataStructsCPU(&phiMag, &Qr_CPU, &Qi_CPU); ComputePhiMagCPU(phiR, phiI, phiMag); kVals = (struct kValues*)calloc(numK, sizeof (struct kValues)); int k; for (k = 0; k < numK; k++) { kVals[k].Kx = kx[k]; kVals[k].Ky = ky[k]; kVals[k].Kz = kz[k]; kVals[k].PhiMag = phiMag[k]; } fprintf(stdout, "\n<< Start computation on CPU... "); t_start = rtclock(); ComputeQCPU(kVals, x, y, z, Qr_CPU, Qi_CPU); t_end = rtclock(); free (kx); free (ky); free (kz); free (x); free (y); free (z); free (phiR); free (phiI); free (phiMag); free (kVals); return t_end - t_start; } int main (int argc, char *argv[]) { double t_GPU, t_CPU; fprintf(stdout, "<< Creating the Q data structure for fast convolution-based\n"); fprintf(stdout, " Hessian multiplication for arbitrary k-space trajectories.>>\n"); fprintf(stdout, "<< Elements per Grid: 2048 >>\n\n"); fprintf(stdout, " for (indexK = 0; indexK < 2048; indexK++) \n"); fprintf(stdout, " for (indexX = 0; indexX < 262144; indexX++) { \n"); fprintf(stdout, " float expArg = PIx2 * (kVals[indexK].Kx * x[indexX] +\n"); fprintf(stdout, " kVals[indexK].Ky * y[indexX] + \n"); fprintf(stdout, " kVals[indexK].Kz * z[indexX]);\n"); fprintf(stdout, " float cosArg = cos(expArg);\n"); fprintf(stdout, " float sinArg = sin(expArg);\n"); fprintf(stdout, " float phi = kVals[indexK].PhiMag;\n"); fprintf(stdout, " Qr[indexX] += phi * cosArg;\n"); fprintf(stdout, " Qi[indexX] += phi * sinArg;\n"); fprintf(stdout, " } \n\n"); t_GPU = mriqGPU(argc, argv); fprintf(stdout, ">>\n GPU Runtime: %0.6lfs\n", t_GPU); t_CPU = mriqCPU(argc, argv); fprintf(stdout, ">>\n CPU Runtime: %0.6lfs\n", t_CPU); fprintf(stdout, "\n<< Comparing Results..."); compareResults(Qr_CPU, Qr_GPU, Qi_CPU, Qi_GPU); free(Qr_GPU); free(Qi_GPU); free(Qr_CPU); free(Qi_CPU); return 0; }
main.c
/// /// @copyright Copyright (c) 2013-2016, Université Pierre et Marie Curie /// All rights reserved. /// /// <b>hiCL</b> is owned by Université Pierre et Marie Curie (UPMC), /// funded by TOTAL, and written by Issam SAID <said.issam@gmail.com>. /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions /// are met: /// /// 1. Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// 2. Redistributions in binary form must reproduce the above copyright /// notice, this list of conditions and the following disclaimer in the /// documentation and/or other materials provided with the distribution. /// 3. Neither the name of the UPMC nor the names of its contributors /// may be used to endorse or promote products derived from this software /// without specific prior written permission. /// /// THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, /// INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY AND FITNESS /// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE UPMC OR /// ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, /// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, /// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR /// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF /// LIABILITY, WETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING /// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS /// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /// /// @file sgemm/main.c /// @author Issam SAID /// @brief An example of matrix to matrix multiplication code based on /// the hiCL C/C++ interface. /// #include <stdio.h> #include <time.h> #include <string.h> #include <hiCL/hiCL.h> #define N 1024 /// /// @brief The main program of the hiCL based sgemm C/C++ example. /// /// This is the main routine that shows how to use the hiCL C/C++ interface /// to implement a simple matrix to matrix multiplication. /// Note that the OpenCL kernel is implemented in a seperate file (sgemm.cl). /// @return Error code if any. /// int main(void) { unsigned int i; float *a; float *b; float *c; size_t global[3] = {N, N, 1}; size_t local[3] = {16, 16, 1}; hidev_t device; fprintf(stdout, "... start of the hiCL sgemm C/C++ example\n"); /// ///< Initialize hiCL with selecting the default GPU. /// hicl_init(GPU); /// ///< Load and build the OpenCL kernel that runs the multiplication. ///< Note the '-cl-kernel-arg-info' that is added for NVIDIA GPUs to ///< use some OpenCL 1.2 features. /// hicl_load(PREFIX"/sgemm.cl", NULL); /// ///< Get a pointer to the desired device (in this case the default GPU). /// device = hicl_dev_find(DEFAULT); a = (float*)malloc(N*N*sizeof(float)); b = (float*)malloc(N*N*sizeof(float)); c = (float*)malloc(N*N*sizeof(float)); memset(c, 0, N*N*sizeof(float)); srand (time(NULL)); #pragma omp parallel for private(i) for (i = 0; i< N*N; ++i) a[i] = i%2 == 0 ? -rand()%10 : rand()%10; #pragma omp parallel for private(i) for (i = 0; i< N*N; ++i) b[i] = 1; /// ///< Wrap the matrices into hiCL memory objects. /// hicl_mem_wrap(device, a, N*N, FLOAT | READ_ONLY | HWA); hicl_mem_wrap(device, b, N*N, FLOAT | READ_ONLY | HWA); hicl_mem_wrap(device, c, N*N, FLOAT | READ_WRITE | HWA); /// ///< Set the work size and the dimensions of the kernel. 
/// hicl_knl_set_wrk("sgemm", 2, global, local); /// ///< Run the kernel on the default GPU. /// hicl_knl_run("sgemm", device, a, b, c, N); /// ///< Update the C matrix on the CPU side so that the results can be seen ///< on the host side. /// hicl_mem_update(c, READ_ONLY); free(a); free(b); free(c); /// ///< Release hiCL resources. /// hicl_release(); fprintf(stdout, "... end of the hiCL sgemm C/C++ example\n"); return EXIT_SUCCESS; }
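/*
 * A minimal host-side verification sketch for the example above, assuming
 * row-major N x N matrices and that sgemm.cl computes c = a * b; because b
 * is filled with ones, every c[i][j] should equal the sum of row i of a.
 * The helper name and tolerance are illustrative; it would be called after
 * hicl_mem_update(c, READ_ONLY) and before the buffers are freed.
 */
#include <math.h>
#include <stdio.h>

static int check_sgemm_with_ones(const float *a, const float *c, unsigned int n)
{
    unsigned int i, j;
    for (i = 0; i < n; ++i) {
        float row_sum = 0.0f;
        for (j = 0; j < n; ++j) row_sum += a[i*n + j];
        for (j = 0; j < n; ++j) {
            /* relative plus absolute tolerance for single-precision sums */
            if (fabsf(c[i*n + j] - row_sum) > 1e-3f * fabsf(row_sum) + 1e-3f) {
                fprintf(stderr, "mismatch at (%u, %u)\n", i, j);
                return 1;
            }
        }
    }
    return 0;
}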
selu_ref.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2021, OPEN AI LAB * Author: hhchen@openailab.com */ #include "selu_param.h" #include "graph/tensor.h" #include "graph/node.h" #include "graph/graph.h" #include "utility/sys_port.h" #include "utility/float.h" #include "utility/log.h" #include "device/cpu/cpu_node.h" #include "device/cpu/cpu_graph.h" #include "device/cpu/cpu_module.h" #include <math.h> int ref_selu_fp32(struct tensor* output_tensor, struct tensor* input_tensor, struct selu_param* selu_param, int num_thread) { float* data = ( float* )input_tensor->data; float* out_data = ( float* )output_tensor->data; float alpha = selu_param->alpha; float lambda = selu_param->lambda; float alpha_lambda = alpha * lambda; int chan_num = input_tensor->dims[0] * input_tensor->dims[1]; int chan_size = input_tensor->dims[2] * input_tensor->dims[3]; #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < chan_num; i++) { int offset = i * chan_size; float* input_data = ( float* )input_tensor->data + i * chan_size; float* output_data = ( float* )output_tensor->data + i * chan_size; for (int i = 0; i < chan_size; i++) { if (input_data[i] < 0.f) output_data[i] = (exp(input_data[i]) - 1.f) * alpha_lambda; else output_data[i] = input_data[i] * lambda; } } return 0; } int ref_selu_uint8(struct tensor* output_tensor, struct tensor* input_tensor, struct selu_param* selu_param, int num_thread) { /* dequant */ uint8_t* input_uint8 = input_tensor->data; uint8_t* output_uint8 = output_tensor->data; float input_scale = input_tensor->scale; float output_scale = output_tensor->scale; int32_t input_zero = input_tensor->zero_point; int32_t output_zero = output_tensor->zero_point; int input_size = input_tensor->elem_num; int output_size = output_tensor->elem_num; float* input_data = ( float* )sys_malloc(input_size * sizeof(float)); float* output_data = ( float* )sys_malloc(output_size * sizeof(float)); for (int i = 0; i < input_size; i++) { input_data[i] = (( float )input_uint8[i] - ( float )input_zero) * input_scale; } float alpha = selu_param->alpha; float lambda = selu_param->lambda; float alpha_lambda = alpha * lambda; int chan_num = input_tensor->dims[0] * input_tensor->dims[1]; int chan_size = input_tensor->dims[2] * input_tensor->dims[3]; #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < chan_num; i++) { int offset = i * chan_size; float* input_data = ( float* )input_tensor->data + i * chan_size; float* output_data = ( float* )output_tensor->data + i * chan_size; for (int i = 0; i < chan_size; i++) { if (input_data[i] < 0.f) output_data[i] = (exp(input_data[i]) - 1.f) * alpha_lambda; else output_data[i] = input_data[i] * lambda; } } /* quant */ for (int i = 0; i < output_size; i++) { int udata = round(output_data[i] / output_scale + 
output_zero); if (udata > 255) udata = 255; else if (udata < 0) udata = 0; output_uint8[i] = udata; } sys_free(input_data); sys_free(output_data); return 0; } static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct node* ir_node = exec_node->ir_node; struct graph* ir_graph = ir_node->graph; struct tensor* input_tensor; struct tensor* output_tensor; input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); struct selu_param* selu_param = ( struct selu_param* )ir_node->op.param_mem; int num_thread = exec_graph->num_thread; int ret = -1; if (input_tensor->data_type == TENGINE_DT_FP32) ret = ref_selu_fp32(output_tensor, input_tensor, selu_param, num_thread); else if(input_tensor->data_type == TENGINE_DT_UINT8) ret = ref_selu_uint8(output_tensor, input_tensor, selu_param, num_thread); return ret; } static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node) { struct node* ir_node = exec_node; struct graph* ir_graph = ir_node->graph; struct tensor* input_tensor; input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); if (input_tensor->data_type != TENGINE_DT_FP32 || input_tensor->layout != TENGINE_LAYOUT_NCHW) return 0; return OPS_SCORE_CANDO; } static struct node_ops hcl_node_ops = {.prerun = prerun, .run = run, .reshape = NULL, .postrun = NULL, .init_node = init_node, .release_node = release_node, .score = score}; int register_selu_ref_op(void* arg) { return register_builtin_node_ops(OP_SELU, &hcl_node_ops); } int unregister_selu_ref_op(void* arg) { return unregister_builtin_node_ops(OP_SELU, &hcl_node_ops); }
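The fp32 and uint8 paths above share the same SELU arithmetic; the uint8 path just wraps it in a dequantize/requantize pair driven by each tensor's scale and zero point. Below is a minimal standalone sketch of that flow, independent of Tengine's tensor and parameter structs; the function and buffer names are illustrative only, not part of the Tengine API.

/* Sketch: SELU over a uint8-quantized buffer, assuming the usual
 * asymmetric quantization q = round(x / scale) + zero_point. */
#include <math.h>
#include <stdint.h>
#include <stddef.h>

void selu_uint8_sketch(const uint8_t* in, uint8_t* out, size_t count,
                       float in_scale, int32_t in_zero,
                       float out_scale, int32_t out_zero,
                       float alpha, float lambda)
{
    for (size_t i = 0; i < count; i++)
    {
        /* dequantize to float */
        float x = ((float)in[i] - (float)in_zero) * in_scale;

        /* SELU: lambda*x for x >= 0, lambda*alpha*(exp(x)-1) otherwise */
        float y = (x < 0.f) ? (expf(x) - 1.f) * alpha * lambda : x * lambda;

        /* requantize and clamp to the uint8 range */
        int q = (int)roundf(y / out_scale + (float)out_zero);
        if (q < 0) q = 0;
        if (q > 255) q = 255;
        out[i] = (uint8_t)q;
    }
}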
GB_binop__ge_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__ge_int16 // A.*B function (eWiseMult): GB_AemultB__ge_int16 // A*D function (colscale): GB_AxD__ge_int16 // D*A function (rowscale): GB_DxB__ge_int16 // C+=B function (dense accum): GB_Cdense_accumB__ge_int16 // C+=b function (dense accum): GB_Cdense_accumb__ge_int16 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ge_int16 // C=scalar+B GB_bind1st__ge_int16 // C=scalar+B' GB_bind1st_tran__ge_int16 // C=A+scalar GB_bind2nd__ge_int16 // C=A'+scalar GB_bind2nd_tran__ge_int16 // C type: bool // A type: int16_t // B,b type: int16_t // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x >= y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GE || GxB_NO_INT16 || GxB_NO_GE_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__ge_int16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__ge_int16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__ge_int16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__ge_int16 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__ge_int16 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__ge_int16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__ge_int16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__ge_int16 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int16_t bij = Bx [p] ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__ge_int16 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int16_t aij = Ax [p] ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB_bind1st_tran__ge_int16 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB_bind2nd_tran__ge_int16 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
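Most of this generated file is macro plumbing; once GB_GETA, GB_GETB and GB_BINOP are expanded for this operator, the scalar-bound kernels reduce to a plain comparison loop. A minimal sketch of what GB_bind2nd boils down to for GE_INT16, with the OpenMP pragma and the GB_DISABLE guard stripped out (array names are illustrative):

/* Cx [p] = (Ax [p] >= y) over a dense array, the 2nd operand bound to y */
#include <stdint.h>
#include <stdbool.h>

void ge_int16_bind2nd_sketch(bool *Cx, const int16_t *Ax,
                             int16_t y, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        int16_t aij = Ax [p] ;      /* what GB_GETA expands to */
        Cx [p] = (aij >= y) ;       /* what GB_BINOP expands to */
    }
}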
project.c
//----------------------------------------------------------------------------- // project.c // // Project: EPA SWMM5 // Version: 5.1 // Date: 03/19/14 (Build 5.1.000) // 04/14/14 (Build 5.1.004) // 09/15/14 (Build 5.1.007) // 03/19/15 (Build 5.1.008) // 04/30/15 (Build 5.1.009) // 08/01/16 (Build 5.1.011) // 03/14/17 (Build 5.1.012) // 05/10/18 (Build 5.1.013) // Author: L. Rossman // // Project management functions. // // This module provides project-related services such as: // o opening a new project and reading its input data // o allocating and freeing memory for project objects // o setting default values for object properties and options // o initializing the internal state of all objects // o managing hash tables for identifying objects by ID name // // Build 5.1.004: // - Ignore RDII option added. // // Build 5.1.007: // - Default monthly adjustments for climate variables included. // - User-supplied GW flow equations initialized to NULL. // - Storage node exfiltration object initialized to NULL. // - Freeing of memory used for storage node exfiltration included. // // Build 5.1.008: // - Constants used for dynamic wave routing moved to dynwave.c. // - Input processing of minimum time step & number of // parallel threads for dynamic wave routing added. // - Default values of hyd. conductivity adjustments added. // - Freeing of memory used for outfall pollutant load added. // // Build 5.1.009: // - Fixed bug in computing total duration introduced in 5.1.008. // // Build 5.1.011: // - Memory management of hydraulic event dates array added. // // Build 5.1.012: // - Minimum conduit slope option initialized to 0 (none). // - NO/YES no longer accepted as options for NORMAL_FLOW_LIMITED. // // Build 5.1.013: // - omp_get_num_threads function protected against lack of compiler // support for OpenMP. // - Rain gage validation now performed after subcatchment validation. // - More robust parsing of MinSurfarea option provided. // - Support added for new RuleStep analysis option. 
// //----------------------------------------------------------------------------- #define _CRT_SECURE_NO_DEPRECATE #include <stdlib.h> #include <string.h> #include <stdlib.h> #include <math.h> #if defined(_OPENMP) //(5.1.013) #include <omp.h> // #else // int omp_get_num_threads(void) { return 1;} // #endif // #include "headers.h" #include "lid.h" #include "hash.h" #include "mempool.h" //----------------------------------------------------------------------------- // Shared variables //----------------------------------------------------------------------------- static HTtable* Htable[MAX_OBJ_TYPES]; // Hash tables for object ID names static char MemPoolAllocated; // TRUE if memory pool allocated //----------------------------------------------------------------------------- // External Functions (declared in funcs.h) //----------------------------------------------------------------------------- // project_open (called from swmm_open in swmm5.c) // project_close (called from swmm_close in swmm5.c) // project_readInput (called from swmm_open in swmm5.c) // project_readOption (called from readOption in input.c) // project_validate (called from swmm_open in swmm5.c) // project_init (called from swmm_start in swmm5.c) // project_addObject (called from addObject in input.c) // project_createMatrix (called from openFileForInput in iface.c) // project_freeMatrix (called from iface_closeRoutingFiles) // project_findObject // project_findID //----------------------------------------------------------------------------- // Function declarations //----------------------------------------------------------------------------- static void initPointers(void); static void setDefaults(void); static void openFiles(char *f1, char *f2, char *f3); static void createObjects(void); static void deleteObjects(void); static void createHashTables(void); static void deleteHashTables(void); //============================================================================= void project_open(char *f1, char *f2, char *f3) // // Input: f1 = pointer to name of input file // f2 = pointer to name of report file // f3 = pointer to name of binary output file // Output: none // Purpose: opens a new SWMM project. // { initPointers(); setDefaults(); openFiles(f1, f2, f3); } //============================================================================= void project_readInput() // // Input: none // Output: none // Purpose: retrieves project data from input file. 
// { // --- create hash tables for fast retrieval of objects by ID names createHashTables(); // --- count number of objects in input file and create them input_countObjects(); createObjects(); // --- read project data from input file input_readData(); if ( ErrorCode ) return; // --- establish starting & ending date/time StartDateTime = StartDate + StartTime; EndDateTime = EndDate + EndTime; ReportStart = ReportStartDate + ReportStartTime; ReportStart = MAX(ReportStart, StartDateTime); // --- check for valid starting & ending date/times if ( EndDateTime <= StartDateTime ) { report_writeErrorMsg(ERR_START_DATE, ""); } else if ( EndDateTime <= ReportStart ) { report_writeErrorMsg(ERR_REPORT_DATE, ""); } else { // --- compute total duration of simulation in seconds double durationDate = EndDate - StartDate; double durationTime = EndTime - StartTime; TotalDuration = floor(durationDate * SECperDAY + durationTime * SECperDAY); // --- reporting step must be <= total duration if ( (double)ReportStep > TotalDuration ) { ReportStep = (int)(TotalDuration); } // --- reporting step can't be < routing step if ( (double)ReportStep < RouteStep ) { report_writeErrorMsg(ERR_REPORT_STEP, ""); } // --- convert total duration to milliseconds TotalDuration *= 1000.0; } } //============================================================================= void project_validate() // // Input: none // Output: none // Purpose: checks validity of project data. // { int i; int j; int err; // --- validate Curves and TimeSeries for ( i=0; i<Nobjects[CURVE]; i++ ) { err = table_validate(&Curve[i]); if ( err ) report_writeErrorMsg(ERR_CURVE_SEQUENCE, Curve[i].ID); } for ( i=0; i<Nobjects[TSERIES]; i++ ) { err = table_validate(&Tseries[i]); if ( err ) report_writeTseriesErrorMsg(err, &Tseries[i]); } // --- validate hydrology objects // (NOTE: order is important !!!!) 
climate_validate(); lid_validate(); if ( Nobjects[SNOWMELT] == 0 ) IgnoreSnowmelt = TRUE; if ( Nobjects[AQUIFER] == 0 ) IgnoreGwater = TRUE; for ( i=0; i<Nobjects[AQUIFER]; i++ ) gwater_validateAquifer(i); for ( i=0; i<Nobjects[SUBCATCH]; i++ ) subcatch_validate(i); for ( i=0; i<Nobjects[GAGE]; i++ ) gage_validate(i); //(5.1.013) for ( i=0; i<Nobjects[SNOWMELT]; i++ ) snow_validateSnowmelt(i); // --- compute geometry tables for each shape curve j = 0; for ( i=0; i<Nobjects[CURVE]; i++ ) { if ( Curve[i].curveType == SHAPE_CURVE ) { Curve[i].refersTo = j; Shape[j].curve = i; if ( !shape_validate(&Shape[j], &Curve[i]) ) report_writeErrorMsg(ERR_CURVE_SEQUENCE, Curve[i].ID); j++; } } // --- validate links before nodes, since the latter can // result in adjustment of node depths for ( i=0; i<Nobjects[NODE]; i++) Node[i].oldDepth = Node[i].fullDepth; for ( i=0; i<Nobjects[LINK]; i++) link_validate(i); for ( i=0; i<Nobjects[NODE]; i++) node_validate(i); // --- adjust time steps if necessary if ( DryStep < WetStep ) { report_writeWarningMsg(WARN06, ""); DryStep = WetStep; } if ( RouteStep > (double)WetStep ) { report_writeWarningMsg(WARN07, ""); RouteStep = WetStep; } // --- adjust individual reporting flags to match global reporting flag if ( RptFlags.subcatchments == ALL ) for (i=0; i<Nobjects[SUBCATCH]; i++) Subcatch[i].rptFlag = TRUE; if ( RptFlags.nodes == ALL ) for (i=0; i<Nobjects[NODE]; i++) Node[i].rptFlag = TRUE; if ( RptFlags.links == ALL ) for (i=0; i<Nobjects[LINK]; i++) Link[i].rptFlag = TRUE; // --- validate dynamic wave options if ( RouteModel == DW ) dynwave_validate(); // --- adjust number of parallel threads to be used //(5.1.013) #pragma omp parallel //(5.1.008) { if ( NumThreads == 0 ) NumThreads = omp_get_num_threads(); //(5.1.008) else NumThreads = MIN(NumThreads, omp_get_num_threads()); //(5.1.008) } if ( Nobjects[LINK] < 4 * NumThreads ) NumThreads = 1; //(5.1.008) } //============================================================================= void project_close() // // Input: none // Output: none // Purpose: closes a SWMM project. // { deleteObjects(); deleteHashTables(); } //============================================================================= int project_init(void) // // Input: none // Output: returns an error code // Purpose: initializes the internal state of all objects. 
// { int j; climate_initState(); lid_initState(); for (j=0; j<Nobjects[TSERIES]; j++) table_tseriesInit(&Tseries[j]); for (j=0; j<Nobjects[GAGE]; j++) gage_initState(j); for (j=0; j<Nobjects[SUBCATCH]; j++) subcatch_initState(j); for (j=0; j<Nobjects[NODE]; j++) node_initState(j); for (j=0; j<Nobjects[LINK]; j++) link_initState(j); return ErrorCode; } //============================================================================= int project_addObject(int type, char *id, int n) // // Input: type = object type // id = object ID string // n = object index // Output: returns 0 if object already added, 1 if not, -1 if hashing fails // Purpose: adds an object ID to a hash table // { int result; int len; char *newID; // --- do nothing if object already placed in hash table if ( project_findObject(type, id) >= 0 ) return 0; // --- use memory from the hash tables' common memory pool to store // a copy of the object's ID string len = strlen(id) + 1; newID = (char *) Alloc(len*sizeof(char)); strcpy(newID, id); // --- insert object's ID into the hash table for that type of object result = HTinsert(Htable[type], newID, n); if ( result == 0 ) result = -1; return result; } //============================================================================= int project_findObject(int type, char *id) // // Input: type = object type // id = object ID // Output: returns index of object with given ID, or -1 if ID not found // Purpose: uses hash table to find index of an object with a given ID. // { return HTfind(Htable[type], id); } //============================================================================= char *project_findID(int type, char *id) // // Input: type = object type // id = ID name being sought // Output: returns pointer to location where object's ID string is stored // Purpose: uses hash table to find address of given string entry. // { return HTfindKey(Htable[type], id); } //============================================================================= double ** project_createMatrix(int nrows, int ncols) // // Input: nrows = number of rows (0-based) // ncols = number of columns (0-based) // Output: returns a pointer to a matrix // Purpose: allocates memory for a matrix of doubles. // { int i,j; double **a; // --- allocate pointers to rows a = (double **) malloc(nrows * sizeof(double *)); if ( !a ) return NULL; // --- allocate rows and set pointers to them a[0] = (double *) malloc (nrows * ncols * sizeof(double)); if ( !a[0] ) return NULL; for ( i = 1; i < nrows; i++ ) a[i] = a[i-1] + ncols; for ( i = 0; i < nrows; i++) { for ( j = 0; j < ncols; j++) a[i][j] = 0.0; } // --- return pointer to array of pointers to rows return a; } //============================================================================= void project_freeMatrix(double **a) // // Input: a = matrix of floats // Output: none // Purpose: frees memory allocated for a matrix of doubles. // { if ( a != NULL ) { if ( a[0] != NULL ) free( a[0] ); free( a ); } } //============================================================================= int project_readOption(char* s1, char* s2) // // Input: s1 = option keyword // s2 = string representation of option's value // Output: returns error code // Purpose: reads a project option from a pair of string tokens. // // NOTE: all project options have default values assigned in setDefaults(). 
// { int k, m, h, s; double tStep; char strDate[25]; DateTime aTime; DateTime aDate; // --- determine which option is being read k = findmatch(s1, OptionWords); if ( k < 0 ) return error_setInpError(ERR_KEYWORD, s1); switch ( k ) { // --- choice of flow units case FLOW_UNITS: m = findmatch(s2, FlowUnitWords); if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2); FlowUnits = m; if ( FlowUnits <= MGD ) UnitSystem = US; else UnitSystem = SI; break; // --- choice of infiltration modeling method case INFIL_MODEL: m = findmatch(s2, InfilModelWords); if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2); InfilModel = m; break; // --- choice of flow routing method case ROUTE_MODEL: m = findmatch(s2, RouteModelWords); if ( m < 0 ) m = findmatch(s2, OldRouteModelWords); if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2); if ( m == NO_ROUTING ) IgnoreRouting = TRUE; else RouteModel = m; if ( RouteModel == EKW ) RouteModel = KW; break; // --- simulation start date case START_DATE: if ( !datetime_strToDate(s2, &StartDate) ) { return error_setInpError(ERR_DATETIME, s2); } break; // --- simulation start time of day case START_TIME: if ( !datetime_strToTime(s2, &StartTime) ) { return error_setInpError(ERR_DATETIME, s2); } break; // --- simulation ending date case END_DATE: if ( !datetime_strToDate(s2, &EndDate) ) { return error_setInpError(ERR_DATETIME, s2); } break; // --- simulation ending time of day case END_TIME: if ( !datetime_strToTime(s2, &EndTime) ) { return error_setInpError(ERR_DATETIME, s2); } break; // --- reporting start date case REPORT_START_DATE: if ( !datetime_strToDate(s2, &ReportStartDate) ) { return error_setInpError(ERR_DATETIME, s2); } break; // --- reporting start time of day case REPORT_START_TIME: if ( !datetime_strToTime(s2, &ReportStartTime) ) { return error_setInpError(ERR_DATETIME, s2); } break; // --- day of year when street sweeping begins or when it ends // (year is arbitrarily set to 1947 so that the dayOfYear // function can be applied) case SWEEP_START: case SWEEP_END: strcpy(strDate, s2); strcat(strDate, "/1947"); if ( !datetime_strToDate(strDate, &aDate) ) { return error_setInpError(ERR_DATETIME, s2); } m = datetime_dayOfYear(aDate); if ( k == SWEEP_START ) SweepStart = m; else SweepEnd = m; break; // --- number of antecedent dry days case START_DRY_DAYS: StartDryDays = atof(s2); if ( StartDryDays < 0.0 ) { return error_setInpError(ERR_NUMBER, s2); } break; // --- runoff or reporting time steps // (input is in hrs:min:sec format, time step saved as seconds) case WET_STEP: case DRY_STEP: case REPORT_STEP: case RULE_STEP: //(5.1.013) if ( !datetime_strToTime(s2, &aTime) ) { return error_setInpError(ERR_DATETIME, s2); } datetime_decodeTime(aTime, &h, &m, &s); h += 24*(int)aTime; s = s + 60*m + 3600*h; // --- RuleStep allowed to be 0 while other time steps must be > 0 //(5.1.013) if (k == RULE_STEP) // { // if (s < 0) return error_setInpError(ERR_NUMBER, s2); // } // else if ( s <= 0 ) return error_setInpError(ERR_NUMBER, s2); // switch ( k ) { case WET_STEP: WetStep = s; break; case DRY_STEP: DryStep = s; break; case REPORT_STEP: ReportStep = s; break; case RULE_STEP: RuleStep = s; break; //(5.1.013) } break; // --- type of damping applied to inertial terms of dynamic wave routing case INERT_DAMPING: m = findmatch(s2, InertDampingWords); if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2); else InertDamping = m; break; // --- Yes/No options (NO = 0, YES = 1) case ALLOW_PONDING: case SLOPE_WEIGHTING: case SKIP_STEADY_STATE: case IGNORE_RAINFALL: case 
IGNORE_SNOWMELT: case IGNORE_GWATER: case IGNORE_ROUTING: case IGNORE_QUALITY: case IGNORE_RDII: m = findmatch(s2, NoYesWords); if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2); switch ( k ) { case ALLOW_PONDING: AllowPonding = m; break; case SLOPE_WEIGHTING: SlopeWeighting = m; break; case SKIP_STEADY_STATE: SkipSteadyState = m; break; case IGNORE_RAINFALL: IgnoreRainfall = m; break; case IGNORE_SNOWMELT: IgnoreSnowmelt = m; break; case IGNORE_GWATER: IgnoreGwater = m; break; case IGNORE_ROUTING: IgnoreRouting = m; break; case IGNORE_QUALITY: IgnoreQuality = m; break; case IGNORE_RDII: IgnoreRDII = m; break; } break; case NORMAL_FLOW_LTD: m = findmatch(s2, NormalFlowWords); if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2); NormalFlowLtd = m; break; case FORCE_MAIN_EQN: m = findmatch(s2, ForceMainEqnWords); if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2); ForceMainEqn = m; break; case LINK_OFFSETS: m = findmatch(s2, LinkOffsetWords); if ( m < 0 ) return error_setInpError(ERR_KEYWORD, s2); LinkOffsets = m; break; // --- compatibility option for selecting solution method for // dynamic wave flow routing (NOT CURRENTLY USED) case COMPATIBILITY: if ( strcomp(s2, "3") ) Compatibility = SWMM3; else if ( strcomp(s2, "4") ) Compatibility = SWMM4; else if ( strcomp(s2, "5") ) Compatibility = SWMM5; else return error_setInpError(ERR_KEYWORD, s2); break; // --- routing or lengthening time step (in decimal seconds) // (lengthening time step is used in Courant stability formula // to artificially lengthen conduits for dynamic wave flow routing // (a value of 0 means that no lengthening is used)) case ROUTE_STEP: case LENGTHENING_STEP: if ( !getDouble(s2, &tStep) ) { if ( !datetime_strToTime(s2, &aTime) ) { return error_setInpError(ERR_NUMBER, s2); } else { datetime_decodeTime(aTime, &h, &m, &s); h += 24*(int)aTime; s = s + 60*m + 3600*h; tStep = s; } } if ( k == ROUTE_STEP ) { if ( tStep <= 0.0 ) return error_setInpError(ERR_NUMBER, s2); RouteStep = tStep; } else LengtheningStep = MAX(0.0, tStep); break; // --- minimum variable time step for dynamic wave routing case MIN_ROUTE_STEP: if ( !getDouble(s2, &MinRouteStep) || MinRouteStep < 0.0 ) return error_setInpError(ERR_NUMBER, s2); break; case NUM_THREADS: m = atoi(s2); if ( m < 0 ) return error_setInpError(ERR_NUMBER, s2); NumThreads = m; break; // --- safety factor applied to variable time step estimates under // dynamic wave flow routing (value of 0 indicates that variable // time step option not used) case VARIABLE_STEP: if ( !getDouble(s2, &CourantFactor) ) return error_setInpError(ERR_NUMBER, s2); if ( CourantFactor < 0.0 || CourantFactor > 2.0 ) return error_setInpError(ERR_NUMBER, s2); break; // --- minimum surface area (ft2 or sq. 
meters) associated with nodes // under dynamic wave flow routing case MIN_SURFAREA: if (!getDouble(s2, &MinSurfArea)) //(5.1.013) return error_setInpError(ERR_NUMBER, s2); //(5.1.013) if (MinSurfArea < 0.0) //(5.1.013) return error_setInpError(ERR_NUMBER, s2); //(5.1.013) break; // --- minimum conduit slope (%) case MIN_SLOPE: if ( !getDouble(s2, &MinSlope) ) return error_setInpError(ERR_NUMBER, s2); if ( MinSlope < 0.0 || MinSlope >= 100 ) return error_setInpError(ERR_NUMBER, s2); MinSlope /= 100.0; break; // --- maximum trials / time step for dynamic wave routing case MAX_TRIALS: m = atoi(s2); if ( m < 0 ) return error_setInpError(ERR_NUMBER, s2); MaxTrials = m; break; // --- head convergence tolerance for dynamic wave routing case HEAD_TOL: if ( !getDouble(s2, &HeadTol) ) { return error_setInpError(ERR_NUMBER, s2); } break; // --- steady state tolerance on system inflow - outflow case SYS_FLOW_TOL: if ( !getDouble(s2, &SysFlowTol) ) { return error_setInpError(ERR_NUMBER, s2); } SysFlowTol /= 100.0; break; // --- steady state tolerance on nodal lateral inflow case LAT_FLOW_TOL: if ( !getDouble(s2, &LatFlowTol) ) { return error_setInpError(ERR_NUMBER, s2); } LatFlowTol /= 100.0; break; // --- method used for surcharging in dynamic wave flow routing //(5.1.013) case SURCHARGE_METHOD: m = findmatch(s2, SurchargeWords); if (m < 0) return error_setInpError(ERR_KEYWORD, s2); SurchargeMethod = m; break; case TEMPDIR: // Temporary Directory sstrncpy(TempDir, s2, MAXFNAME); break; } return 0; } //============================================================================= void initPointers() // // Input: none // Output: none // Purpose: assigns NULL to all dynamic arrays for a new project. // { Gage = NULL; Subcatch = NULL; Node = NULL; Outfall = NULL; Divider = NULL; Storage = NULL; Link = NULL; Conduit = NULL; Pump = NULL; Orifice = NULL; Weir = NULL; Outlet = NULL; Pollut = NULL; Landuse = NULL; Pattern = NULL; Curve = NULL; Tseries = NULL; Transect = NULL; Shape = NULL; Aquifer = NULL; UnitHyd = NULL; Snowmelt = NULL; Event = NULL; MemPoolAllocated = FALSE; } //============================================================================= void setDefaults() // // Input: none // Output: none // Purpose: assigns default values to project variables. // { int i, j; // Project title & temp. file path for (i = 0; i < MAXTITLE; i++) strcpy(Title[i], ""); strcpy(TempDir, ""); // Interface files Frain.mode = SCRATCH_FILE; // Use scratch rainfall file Fclimate.mode = NO_FILE; Frunoff.mode = NO_FILE; Frdii.mode = NO_FILE; Fhotstart1.mode = NO_FILE; Fhotstart2.mode = NO_FILE; Finflows.mode = NO_FILE; Foutflows.mode = NO_FILE; Frain.file = NULL; Fclimate.file = NULL; Frunoff.file = NULL; Frdii.file = NULL; Fhotstart1.file = NULL; Fhotstart2.file = NULL; Finflows.file = NULL; Foutflows.file = NULL; Fout.file = NULL; Fout.mode = NO_FILE; // Analysis options UnitSystem = US; // US unit system FlowUnits = CFS; // CFS flow units InfilModel = HORTON; // Horton infiltration method RouteModel = KW; // Kin. wave flow routing method SurchargeMethod = EXTRAN; // Use EXTRAN method for surcharging //(5.1.013) CrownCutoff = 0.96; //(5.1.013) AllowPonding = FALSE; // No ponding at nodes InertDamping = SOME; // Partial inertial damping NormalFlowLtd = BOTH; // Default normal flow limitation ForceMainEqn = H_W; // Hazen-Williams eqn. 
for force mains LinkOffsets = DEPTH_OFFSET; // Use depth for link offsets LengtheningStep = 0; // No lengthening of conduits CourantFactor = 0.0; // No variable time step MinSurfArea = 0.0; // Force use of default min. surface area MinSlope = 0.0; // No user supplied minimum conduit slope SkipSteadyState = FALSE; // Do flow routing in steady state periods IgnoreRainfall = FALSE; // Analyze rainfall/runoff IgnoreRDII = FALSE; // Analyze RDII IgnoreSnowmelt = FALSE; // Analyze snowmelt IgnoreGwater = FALSE; // Analyze groundwater IgnoreRouting = FALSE; // Analyze flow routing IgnoreQuality = FALSE; // Analyze water quality WetStep = 300; // Runoff wet time step (secs) DryStep = 3600; // Runoff dry time step (secs) RuleStep = 0; // Rules evaluated at each routing step RouteStep = 300.0; // Routing time step (secs) MinRouteStep = 0.5; // Minimum variable time step (sec) ReportStep = 900; // Reporting time step (secs) StartDryDays = 0.0; // Antecedent dry days MaxTrials = 0; // Force use of default max. trials HeadTol = 0.0; // Force use of default head tolerance SysFlowTol = 0.05; // System flow tolerance for steady state LatFlowTol = 0.05; // Lateral flow tolerance for steady state NumThreads = 0; // Number of parallel threads to use NumEvents = 0; // Number of detailed routing events // Deprecated options SlopeWeighting = TRUE; // Use slope weighting Compatibility = SWMM4; // Use SWMM 4 up/dn weighting method // Starting & ending date/time StartDate = datetime_encodeDate(2004, 1, 1); StartTime = datetime_encodeTime(0,0,0); StartDateTime = StartDate + StartTime; EndDate = StartDate; EndTime = 0.0; ReportStartDate = NO_DATE; ReportStartTime = NO_DATE; SweepStart = 1; SweepEnd = 365; // Reporting options RptFlags.input = FALSE; RptFlags.continuity = TRUE; RptFlags.flowStats = TRUE; RptFlags.controls = FALSE; RptFlags.subcatchments = FALSE; RptFlags.nodes = FALSE; RptFlags.links = FALSE; RptFlags.nodeStats = FALSE; RptFlags.averages = FALSE; // Temperature data Temp.dataSource = NO_TEMP; Temp.tSeries = -1; Temp.ta = 70.0; Temp.elev = 0.0; Temp.anglat = 40.0; Temp.dtlong = 0.0; Temp.tmax = MISSING; // Wind speed data Wind.type = MONTHLY_WIND; for ( i=0; i<12; i++ ) Wind.aws[i] = 0.0; // Snowmelt parameters Snow.snotmp = 34.0; Snow.tipm = 0.5; Snow.rnm = 0.6; // Snow areal depletion curves for pervious and impervious surfaces for ( i=0; i<2; i++ ) { for ( j=0; j<10; j++) Snow.adc[i][j] = 1.0; } // Evaporation rates Evap.type = CONSTANT_EVAP; for (i=0; i<12; i++) { Evap.monthlyEvap[i] = 0.0; Evap.panCoeff[i] = 1.0; } Evap.recoveryPattern = -1; Evap.recoveryFactor = 1.0; Evap.tSeries = -1; Evap.dryOnly = FALSE; // Climate adjustments for (i = 0; i < 12; i++) { Adjust.temp[i] = 0.0; // additive adjustments Adjust.evap[i] = 0.0; // additive adjustments Adjust.rain[i] = 1.0; // multiplicative adjustments Adjust.hydcon[i] = 1.0; // hyd. conductivity adjustments } Adjust.rainFactor = 1.0; Adjust.hydconFactor = 1.0; } //============================================================================= void openFiles(char *f1, char *f2, char *f3) // // Input: f1 = name of input file // f2 = name of report file // f3 = name of binary output file // Output: none // Purpose: opens a project's input and report files. 
// { // --- initialize file pointers to NULL Finp.file = NULL; Frpt.file = NULL; Fout.file = NULL; // --- save file names sstrncpy(Finp.name, f1, MAXFNAME); sstrncpy(Frpt.name, f2, MAXFNAME); sstrncpy(Fout.name, f3, MAXFNAME); // --- check that file names are not identical if (strcomp(f1, f2) || strcomp(f1, f3) || strcomp(f2, f3)) { writecon(FMT11); ErrorCode = ERR_FILE_NAME; return; } // --- open input and report files if ((Finp.file = fopen(f1,"rt")) == NULL) { writecon(FMT12); writecon(f1); ErrorCode = ERR_INP_FILE; return; } if ((Frpt.file = fopen(f2,"wt")) == NULL) { writecon(FMT13); ErrorCode = ERR_RPT_FILE; return; } } //============================================================================= void createObjects() // // Input: none // Output: none // Purpose: allocates memory for project's objects. // // NOTE: number of each type of object has already been determined in // project_readInput(). // { int j, k; // --- allocate memory for each category of object if ( ErrorCode ) return; Gage = (TGage *) calloc(Nobjects[GAGE], sizeof(TGage)); Subcatch = (TSubcatch *) calloc(Nobjects[SUBCATCH], sizeof(TSubcatch)); Node = (TNode *) calloc(Nobjects[NODE], sizeof(TNode)); Outfall = (TOutfall *) calloc(Nnodes[OUTFALL], sizeof(TOutfall)); Divider = (TDivider *) calloc(Nnodes[DIVIDER], sizeof(TDivider)); Storage = (TStorage *) calloc(Nnodes[STORAGE], sizeof(TStorage)); Link = (TLink *) calloc(Nobjects[LINK], sizeof(TLink)); Conduit = (TConduit *) calloc(Nlinks[CONDUIT], sizeof(TConduit)); Pump = (TPump *) calloc(Nlinks[PUMP], sizeof(TPump)); Orifice = (TOrifice *) calloc(Nlinks[ORIFICE], sizeof(TOrifice)); Weir = (TWeir *) calloc(Nlinks[WEIR], sizeof(TWeir)); Outlet = (TOutlet *) calloc(Nlinks[OUTLET], sizeof(TOutlet)); Pollut = (TPollut *) calloc(Nobjects[POLLUT], sizeof(TPollut)); Landuse = (TLanduse *) calloc(Nobjects[LANDUSE], sizeof(TLanduse)); Pattern = (TPattern *) calloc(Nobjects[TIMEPATTERN], sizeof(TPattern)); Curve = (TTable *) calloc(Nobjects[CURVE], sizeof(TTable)); Tseries = (TTable *) calloc(Nobjects[TSERIES], sizeof(TTable)); Aquifer = (TAquifer *) calloc(Nobjects[AQUIFER], sizeof(TAquifer)); UnitHyd = (TUnitHyd *) calloc(Nobjects[UNITHYD], sizeof(TUnitHyd)); Snowmelt = (TSnowmelt *) calloc(Nobjects[SNOWMELT], sizeof(TSnowmelt)); Shape = (TShape *) calloc(Nobjects[SHAPE], sizeof(TShape)); // --- create array of detailed routing event periods Event = (TEvent *) calloc(NumEvents+1, sizeof(TEvent)); Event[NumEvents].start = BIG; Event[NumEvents].end = BIG + 1.0; // --- create LID objects lid_create(Nobjects[LID], Nobjects[SUBCATCH]); // --- create control rules ErrorCode = controls_create(Nobjects[CONTROL]); if ( ErrorCode ) return; // --- create cross section transects ErrorCode = transect_create(Nobjects[TRANSECT]); if ( ErrorCode ) return; // --- allocate memory for infiltration data infil_create(Nobjects[SUBCATCH], InfilModel); // --- allocate memory for water quality state variables for (j = 0; j < Nobjects[SUBCATCH]; j++) { Subcatch[j].initBuildup = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Subcatch[j].oldQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Subcatch[j].newQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Subcatch[j].pondedQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Subcatch[j].concPonded = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Subcatch[j].totalLoad = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Subcatch[j].surfaceBuildup = (double *) calloc(Nobjects[POLLUT], sizeof(double)); } for (j = 0; 
j < Nobjects[NODE]; j++) { Node[j].oldQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Node[j].newQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Node[j].extInflow = NULL; Node[j].dwfInflow = NULL; Node[j].rdiiInflow = NULL; Node[j].treatment = NULL; } for (j = 0; j < Nobjects[LINK]; j++) { Link[j].oldQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Link[j].newQual = (double *) calloc(Nobjects[POLLUT], sizeof(double)); Link[j].totalLoad = (double *) calloc(Nobjects[POLLUT], sizeof(double)); } // --- allocate memory for land use buildup/washoff functions for (j = 0; j < Nobjects[LANDUSE]; j++) { Landuse[j].buildupFunc = (TBuildup *) calloc(Nobjects[POLLUT], sizeof(TBuildup)); Landuse[j].washoffFunc = (TWashoff *) calloc(Nobjects[POLLUT], sizeof(TWashoff)); } // --- allocate memory for subcatchment landuse factors for (j = 0; j < Nobjects[SUBCATCH]; j++) { Subcatch[j].landFactor = (TLandFactor *) calloc(Nobjects[LANDUSE], sizeof(TLandFactor)); for (k = 0; k < Nobjects[LANDUSE]; k++) { Subcatch[j].landFactor[k].buildup = (double *) calloc(Nobjects[POLLUT], sizeof(double)); } } // --- initialize buildup & washoff functions for (j = 0; j < Nobjects[LANDUSE]; j++) { for (k = 0; k < Nobjects[POLLUT]; k++) { Landuse[j].buildupFunc[k].funcType = NO_BUILDUP; Landuse[j].buildupFunc[k].normalizer = PER_AREA; Landuse[j].washoffFunc[k].funcType = NO_WASHOFF; } } // --- initialize rain gage properties for (j = 0; j < Nobjects[GAGE]; j++) { Gage[j].tSeries = -1; strcpy(Gage[j].fname, ""); } // --- initialize subcatchment properties for (j = 0; j < Nobjects[SUBCATCH]; j++) { Subcatch[j].outSubcatch = -1; Subcatch[j].outNode = -1; Subcatch[j].infil = -1; Subcatch[j].groundwater = NULL; Subcatch[j].gwLatFlowExpr = NULL; Subcatch[j].gwDeepFlowExpr = NULL; Subcatch[j].snowpack = NULL; Subcatch[j].lidArea = 0.0; for (k = 0; k < Nobjects[POLLUT]; k++) { Subcatch[j].initBuildup[k] = 0.0; } } // --- initialize RDII unit hydrograph properties for ( j = 0; j < Nobjects[UNITHYD]; j++ ) rdii_initUnitHyd(j); // --- initialize snowmelt properties for ( j = 0; j < Nobjects[SNOWMELT]; j++ ) snow_initSnowmelt(j); // --- initialize storage node exfiltration for (j = 0; j < Nnodes[STORAGE]; j++) Storage[j].exfil = NULL; // --- initialize link properties for (j = 0; j < Nobjects[LINK]; j++) { Link[j].xsect.type = -1; Link[j].cLossInlet = 0.0; Link[j].cLossOutlet = 0.0; Link[j].cLossAvg = 0.0; Link[j].hasFlapGate = FALSE; } for (j = 0; j < Nlinks[PUMP]; j++) Pump[j].pumpCurve = -1; // --- initialize reporting flags for (j = 0; j < Nobjects[SUBCATCH]; j++) Subcatch[j].rptFlag = FALSE; for (j = 0; j < Nobjects[NODE]; j++) Node[j].rptFlag = FALSE; for (j = 0; j < Nobjects[LINK]; j++) Link[j].rptFlag = FALSE; // --- initialize curves, time series, and time patterns for (j = 0; j < Nobjects[CURVE]; j++) table_init(&Curve[j]); for (j = 0; j < Nobjects[TSERIES]; j++) table_init(&Tseries[j]); for (j = 0; j < Nobjects[TIMEPATTERN]; j++) inflow_initDwfPattern(j); } //============================================================================= void deleteObjects() // // Input: none // Output: none // Purpose: frees memory allocated for a project's objects. // // NOTE: care is taken to first free objects that are properties of another // object before the latter is freed (e.g., we must free a // subcatchment's land use factors before freeing the subcatchment). 
// { int j, k; // --- free memory for landuse factors & groundwater if ( Subcatch ) for (j = 0; j < Nobjects[SUBCATCH]; j++) { for (k = 0; k < Nobjects[LANDUSE]; k++) { FREE(Subcatch[j].landFactor[k].buildup); } FREE(Subcatch[j].landFactor); FREE(Subcatch[j].groundwater); gwater_deleteFlowExpression(j); FREE(Subcatch[j].snowpack); } // --- free memory for buildup/washoff functions if ( Landuse ) for (j = 0; j < Nobjects[LANDUSE]; j++) { FREE(Landuse[j].buildupFunc); FREE(Landuse[j].washoffFunc) } // --- free memory for water quality state variables if ( Subcatch ) for (j = 0; j < Nobjects[SUBCATCH]; j++) { FREE(Subcatch[j].initBuildup); FREE(Subcatch[j].oldQual); FREE(Subcatch[j].newQual); FREE(Subcatch[j].pondedQual); FREE(Subcatch[j].totalLoad); } if ( Node ) for (j = 0; j < Nobjects[NODE]; j++) { FREE(Node[j].oldQual); FREE(Node[j].newQual); } if ( Link ) for (j = 0; j < Nobjects[LINK]; j++) { FREE(Link[j].oldQual); FREE(Link[j].newQual); FREE(Link[j].totalLoad); } // --- free memory used for rainfall infiltration infil_delete(); // --- free memory used for storage exfiltration if ( Node ) for (j = 0; j < Nnodes[STORAGE]; j++) { if ( Storage[j].exfil ) { FREE(Storage[j].exfil->btmExfil); FREE(Storage[j].exfil->bankExfil); FREE(Storage[j].exfil); } } // --- free memory used for outfall pollutants loads if ( Node ) for (j = 0; j < Nnodes[OUTFALL]; j++) FREE(Outfall[j].wRouted); // --- free memory used for nodal inflows & treatment functions if ( Node ) for (j = 0; j < Nobjects[NODE]; j++) { inflow_deleteExtInflows(j); inflow_deleteDwfInflows(j); rdii_deleteRdiiInflow(j); treatmnt_delete(j); } // --- delete table entries for curves and time series if ( Tseries ) for (j = 0; j < Nobjects[TSERIES]; j++) table_deleteEntries(&Tseries[j]); if ( Curve ) for (j = 0; j < Nobjects[CURVE]; j++) table_deleteEntries(&Curve[j]); // --- delete cross section transects transect_delete(); // --- delete control rules controls_delete(); // --- delete LIDs lid_delete(); // --- now free each major category of object FREE(Gage); FREE(Subcatch); FREE(Node); FREE(Outfall); FREE(Divider); FREE(Storage); FREE(Link); FREE(Conduit); FREE(Pump); FREE(Orifice); FREE(Weir); FREE(Outlet); FREE(Pollut); FREE(Landuse); FREE(Pattern); FREE(Curve); FREE(Tseries); FREE(Aquifer); FREE(UnitHyd); FREE(Snowmelt); FREE(Shape); FREE(Event); } //============================================================================= void createHashTables() // // Input: none // Output: returns error code // Purpose: allocates memory for object ID hash tables // { int j; MemPoolAllocated = FALSE; for (j = 0; j < MAX_OBJ_TYPES ; j++) { Htable[j] = HTcreate(); if ( Htable[j] == NULL ) report_writeErrorMsg(ERR_MEMORY, ""); } // --- initialize memory pool used to store object ID's if ( AllocInit() == NULL ) report_writeErrorMsg(ERR_MEMORY, ""); else MemPoolAllocated = TRUE; } //============================================================================= void deleteHashTables() // // Input: none // Output: none // Purpose: frees memory allocated for object ID hash tables // { int j; for (j = 0; j < MAX_OBJ_TYPES; j++) { if ( Htable[j] != NULL ) HTfree(Htable[j]); } // --- free object ID memory pool if ( MemPoolAllocated ) AllocFreePool(); } //=============================================================================
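project_createMatrix allocates one array of row pointers plus a single contiguous block for all elements, so a[i][j] indexing works while project_freeMatrix only has to free a[0] and then a itself. A small standalone usage sketch of that layout follows; the 3x4 size is chosen only for illustration.

/* Sketch of the contiguous row-pointer matrix layout used by
 * project_createMatrix / project_freeMatrix. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    int nrows = 3, ncols = 4, i, j;

    /* one array of row pointers, one contiguous block for all elements */
    double **a = (double **) malloc(nrows * sizeof(double *));
    a[0] = (double *) malloc(nrows * ncols * sizeof(double));
    for (i = 1; i < nrows; i++) a[i] = a[i-1] + ncols;

    for (i = 0; i < nrows; i++)
        for (j = 0; j < ncols; j++)
            a[i][j] = i * ncols + j;          /* normal 2-D indexing */

    printf("a[2][3] = %g\n", a[2][3]);

    /* mirror project_freeMatrix: free the element block, then the pointers */
    free(a[0]);
    free(a);
    return 0;
}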
pagerank.c
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <unistd.h>
#include <omp.h>
#include "mt19937p.h"

#define g(x, y) (g[y*n+x])

/**
 * Pr(x) = (1-d)/n + d*sum_{n in g(n,x)}(Pr(n)/(outdegree n))
 * Runs 1 iteration of pagerank
 * Returns 1 if done, 0 otherwise
 */
int run_iteration(int n, double d, int* restrict g, double* restrict w,
                  double* restrict wnew, int* restrict degree)
{
    int done = 1;
    #pragma omp parallel for shared(g, w, wnew) reduction(&& : done)
    for (int i=0; i<n; ++i) {
        double sum = 0.0;
        for (int j=0; j<n; ++j) {
            //find edges pointing toward i
            if (g(j,i)) {
                //count out degree of j
                sum += w[j]/(double)degree[j];
            }
        }
        wnew[i] = ((1.0 - d)/(double)n) + (d*sum);
        //stay "done" only while every updated entry has converged
        done = done && (fabs(wnew[i] - w[i]) < 1.0/(1000000.0 * (double)n));
    }
    memcpy(w, wnew, n * sizeof(double));
    return done;
}

/**
 * Iterates run_iteration until the weights stop changing;
 * returns the number of iterations taken.
 */
int pagerank(int n, double d, int* restrict g, double* restrict w)
{
    int iterations = 0;
    double* restrict wnew = (double*) calloc(n, sizeof(double));
    //compute degree of each item prior (if degree = 0, it should be n)
    int* restrict degree = (int*) calloc(n, sizeof(int));
    for (int i=0; i<n; ++i) {
        int count = 0;
        for (int j=0; j<n; ++j) {
            count += g(i,j);
        }
        if (count == 0) {
            count = n;
        }
        degree[i] = count;
    }
    for (int done = 0; !done; ) {
        done = run_iteration(n, d, g, w, wnew, degree);
        iterations++;
    }
    free(wnew);
    free(degree);
    return iterations;
}

/**
 * # The random graph model
 *
 * Of course, we need to run the PageRank iteration on something!
 * For the sake of keeping things interesting, let's use a simple random graph
 * model to generate the input data. The $G(n,p)$ model simply includes each
 * possible edge with probability $p$, drops it otherwise -- doesn't get much
 * simpler than that. We use a thread-safe version of the Mersenne twister
 * random number generator in lieu of coin flips.
 */
int* gen_graph(int n, double p)
{
    int* g = calloc(n*n, sizeof(int));
    struct mt19937p state;
    struct timeval time;
    gettimeofday(&time, NULL);
    sgenrand((unsigned long)time.tv_usec, &state);
    for (int j = 0; j < n; ++j) {
        for (int i = 0; i < n; ++i)
            g(i, j) = (genrand(&state) < p);
        g(j, j) = 0; //no self edges
    }
    return g;
}

void write_matrix(const char* fname, int n, int* g)
{
    FILE* fp = fopen(fname, "w+");
    if (fp == NULL) {
        fprintf(stderr, "Could not open output file: %s\n", fname);
        exit(-1);
    }
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j)
            fprintf(fp, "%d ", g(i,j));
        fprintf(fp, "\n");
    }
    fclose(fp);
}

void write_weights(const char* fname, int n, double* w)
{
    FILE* fp = fopen(fname, "w+");
    if (fp == NULL) {
        fprintf(stderr, "Could not open output file: %s\n", fname);
        exit(-1);
    }
    for (int i = 0; i < n; ++i) {
        fprintf(fp, "%g ", w[i]);
    }
    fprintf(fp, "\n");
    fclose(fp);
}

double checksum(const double* restrict w, int n)
{
    double sum = 0.0;
    for (int i=0; i<n; ++i) {
        sum += w[i];
    }
    return sum;
}

/**
 * # The `main` event
 */
const char* usage =
    "pagerank.x -- Compute pagerank on a random graph\n"
    "Flags:\n"
    " - n -- number of nodes (200)\n"
    " - p -- probability of including edges (0.05)\n"
    " - d -- probability that a user follows a link (0.85)\n"
    " - i -- file name where adjacency matrix should be stored (none)\n"
    " - o -- file name where output weights should be stored (none)\n";

int main(int argc, char** argv)
{
    int n = 200;               // Number of nodes
    double p = 0.05;           // Edge probability
    double d = 0.85;           // Probability a link is followed
    const char* ifname = NULL; // Adjacency matrix file name
    const char* ofname = NULL; // Output weights file name

    // Option processing
    extern char* optarg;
    const char* optstring = "hn:d:p:o:i:";
    int c;
    while ((c = getopt(argc, argv, optstring)) != -1) {
        switch (c) {
        case 'h':
            fprintf(stderr, "%s", usage);
            return -1;
        case 'n': n = atoi(optarg); break;
        case 'p': p = atof(optarg); break;
        case 'd': d = atof(optarg); break;
        case 'o': ofname = optarg; break;
        case 'i': ifname = optarg; break;
        }
    }

    // Graph generation + output
    int* g = gen_graph(n, p);
    if (ifname) write_matrix(ifname, n, g);

    // Generate initial weights
    double* w = calloc(n, sizeof(double));
    for (int i = 0; i < n; ++i) {
        w[i] = 1.0/(double)n;
    }

    // Time the pagerank code
    double t0 = omp_get_wtime();
    int iterations = pagerank(n, d, g, w);
    double t1 = omp_get_wtime();

    //openmp, cores, time, n, iterations, p, d, checksum
    printf("openmp, %d, %g, %d, %d, %g, %g, %g\n",
           omp_get_max_threads(), (t1-t0), n, iterations, p, d,
           checksum(w, n));

    // Generate output file
    if (ofname) write_weights(ofname, n, w);

    // Clean up
    free(g);
    free(w);
    return 0;
}
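The update in run_iteration is the usual damped formula, Pr(i) = (1-d)/n + d * sum over incoming edges j->i of Pr(j)/outdeg(j). The following dependency-free sketch runs a single serial sweep of that update on a 3-node graph chosen only for illustration, which can serve as a sanity check against the parallel kernel.

/* Sketch: one serial PageRank sweep of the run_iteration update rule
 * on a tiny hand-checkable graph (0->1, 0->2, 1->2, 2->0). */
#include <stdio.h>

int main(void)
{
    enum { n = 3 };
    double d = 0.85;
    /* edge[j][i] = 1 means there is an edge j -> i */
    int edge[n][n] = { {0, 1, 1}, {0, 0, 1}, {1, 0, 0} };
    int degree[n]  = { 2, 1, 1 };              /* out-degrees of nodes 0, 1, 2 */
    double w[n]    = { 1.0/n, 1.0/n, 1.0/n };  /* uniform starting weights */
    double wnew[n];

    for (int i = 0; i < n; i++)
    {
        double sum = 0.0;
        for (int j = 0; j < n; j++)
            if (edge[j][i]) sum += w[j] / degree[j];
        wnew[i] = (1.0 - d)/n + d*sum;
    }
    /* prints roughly 0.3333 0.1917 0.4750 (they still sum to 1) */
    printf("%.4f %.4f %.4f\n", wnew[0], wnew[1], wnew[2]);
    return 0;
}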
attribute.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE % % A A T T R R I B B U U T E % % AAAAA T T RRRR I BBBB U U T EEE % % A A T T R R I B B U U T E % % A A T T R R IIIII BBBB UUU T EEEEE % % % % % % MagickCore Get / Set Image Attributes % % % % Software Design % % Cristy % % October 2002 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/identify.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/magick.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/segment.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e B o u n d i n g B o x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageBoundingBox() returns the bounding box of an image canvas. 
% % The format of the GetImageBoundingBox method is: % % RectangleInfo GetImageBoundingBox(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o bounds: Method GetImageBoundingBox returns the bounding box of an % image canvas. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ typedef struct _EdgeInfo { double left, right, top, bottom; } EdgeInfo; static double GetEdgeBackgroundFactor(const Image *image, const CacheView *image_view,const GravityType gravity,const size_t width, const size_t height,const ssize_t x_offset,const ssize_t y_offset, ExceptionInfo *exception) { CacheView *edge_view; const char *artifact; double factor; Image *edge_image; PixelInfo background, pixel; RectangleInfo edge_geometry; register const Quantum *p; ssize_t y; /* Determine the percent of image background for this edge. */ switch (gravity) { case NorthWestGravity: case NorthGravity: default: { p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); break; } case NorthEastGravity: case EastGravity: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); break; } case SouthEastGravity: case SouthGravity: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1, (ssize_t) image->rows-1,1,1,exception); break; } case SouthWestGravity: case WestGravity: { p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); break; } } GetPixelInfoPixel(image,p,&background); artifact=GetImageArtifact(image,"trim:background-color"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&background,exception); edge_geometry.width=width; edge_geometry.height=height; edge_geometry.x=x_offset; edge_geometry.y=y_offset; GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry); edge_image=CropImage(image,&edge_geometry,exception); if (edge_image == (Image *) NULL) return(0.0); factor=0.0; edge_view=AcquireVirtualCacheView(edge_image,exception); for (y=0; y < (ssize_t) edge_image->rows; y++) { register ssize_t x; p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) edge_image->columns; x++) { GetPixelInfoPixel(edge_image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse) factor++; p+=GetPixelChannels(edge_image); } } factor/=((double) edge_image->columns*edge_image->rows); edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); return(factor); } static inline double GetMinEdgeBackgroundFactor(const EdgeInfo *edge) { double factor; factor=MagickMin(MagickMin(MagickMin(edge->left,edge->right),edge->top), edge->bottom); return(factor); } static RectangleInfo GetEdgeBoundingBox(const Image *image, ExceptionInfo *exception) { CacheView *edge_view; const char *artifact; double background_factor, percent_background; EdgeInfo edge, vertex; Image *edge_image; RectangleInfo bounds; /* Get the image bounding box. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); SetGeometry(image,&bounds); edge_image=CloneImage(image,0,0,MagickTrue,exception); if (edge_image == (Image *) NULL) return(bounds); (void) ParseAbsoluteGeometry("0x0+0+0",&edge_image->page); memset(&vertex,0,sizeof(vertex)); edge_view=AcquireVirtualCacheView(edge_image,exception); edge.left=GetEdgeBackgroundFactor(edge_image,edge_view,WestGravity, 1,0,0,0,exception); edge.right=GetEdgeBackgroundFactor(edge_image,edge_view,EastGravity, 1,0,0,0,exception); edge.top=GetEdgeBackgroundFactor(edge_image,edge_view,NorthGravity, 0,1,0,0,exception); edge.bottom=GetEdgeBackgroundFactor(edge_image,edge_view,SouthGravity, 0,1,0,0,exception); percent_background=1.0; artifact=GetImageArtifact(edge_image,"trim:percent-background"); if (artifact != (const char *) NULL) percent_background=StringToDouble(artifact,(char **) NULL)/100.0; percent_background=MagickMin(MagickMax(1.0-percent_background,MagickEpsilon), 1.0); background_factor=GetMinEdgeBackgroundFactor(&edge); for ( ; background_factor < percent_background; background_factor=GetMinEdgeBackgroundFactor(&edge)) { if ((bounds.width == 0) || (bounds.height == 0)) break; if (fabs(edge.left-background_factor) < MagickEpsilon) { /* Trim left edge. */ vertex.left++; bounds.width--; edge.left=GetEdgeBackgroundFactor(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundFactor(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundFactor(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } if (fabs(edge.right-background_factor) < MagickEpsilon) { /* Trim right edge. */ vertex.right++; bounds.width--; edge.right=GetEdgeBackgroundFactor(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundFactor(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundFactor(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } if (fabs(edge.top-background_factor) < MagickEpsilon) { /* Trim top edge. */ vertex.top++; bounds.height--; edge.left=GetEdgeBackgroundFactor(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.right=GetEdgeBackgroundFactor(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundFactor(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); continue; } if (fabs(edge.bottom-background_factor) < MagickEpsilon) { /* Trim bottom edge. 
*/ vertex.bottom++; bounds.height--; edge.left=GetEdgeBackgroundFactor(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.right=GetEdgeBackgroundFactor(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundFactor(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } } edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); bounds.x=(ssize_t) vertex.left; bounds.y=(ssize_t) vertex.top; if ((bounds.width == 0) || (bounds.height == 0)) (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); return(bounds); } MagickExport RectangleInfo GetImageBoundingBox(const Image *image, ExceptionInfo *exception) { CacheView *image_view; const char *artifact; MagickBooleanType status; PixelInfo target[3], zero; RectangleInfo bounds; register const Quantum *p; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); artifact=GetImageArtifact(image,"trim:percent-background"); if (artifact != (const char *) NULL) return(GetEdgeBoundingBox(image,exception)); bounds.width=0; bounds.height=0; bounds.x=(ssize_t) image->columns; bounds.y=(ssize_t) image->rows; GetPixelInfo(image,&target[0]); image_view=AcquireVirtualCacheView(image,exception); p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); if (p == (const Quantum *) NULL) { image_view=DestroyCacheView(image_view); return(bounds); } GetPixelInfoPixel(image,p,&target[0]); GetPixelInfo(image,&target[1]); p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); if (p != (const Quantum *) NULL) GetPixelInfoPixel(image,p,&target[1]); GetPixelInfo(image,&target[2]); p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); if (p != (const Quantum *) NULL) GetPixelInfoPixel(image,p,&target[2]); status=MagickTrue; GetPixelInfo(image,&zero); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; RectangleInfo bounding_box; register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif bounding_box=bounds; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,p,&pixel); if ((x < bounding_box.x) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse)) bounding_box.x=x; if ((x > (ssize_t) bounding_box.width) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[1]) == MagickFalse)) bounding_box.width=(size_t) x; if ((y < bounding_box.y) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse)) bounding_box.y=y; if ((y > (ssize_t) bounding_box.height) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[2]) == MagickFalse)) bounding_box.height=(size_t) y; p+=GetPixelChannels(image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif { if (bounding_box.x < 
bounds.x) bounds.x=bounding_box.x; if (bounding_box.y < bounds.y) bounds.y=bounding_box.y; if (bounding_box.width > bounds.width) bounds.width=bounding_box.width; if (bounding_box.height > bounds.height) bounds.height=bounding_box.height; } } image_view=DestroyCacheView(image_view); if ((bounds.width == 0) || (bounds.height == 0)) (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); else { bounds.width-=(bounds.x-1); bounds.height-=(bounds.y-1); } return(bounds); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageDepth() returns the depth of a particular image channel. % % The format of the GetImageDepth method is: % % size_t GetImageDepth(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; register ssize_t i; size_t *current_depth, depth, number_threads; ssize_t y; /* Compute image depth. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); number_threads=(size_t) GetMagickResourceLimit(ThreadResource); current_depth=(size_t *) AcquireQuantumMemory(number_threads, sizeof(*current_depth)); if (current_depth == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); status=MagickTrue; for (i=0; i < (ssize_t) number_threads; i++) current_depth[i]=1; if ((image->storage_class == PseudoClass) && (image->alpha_trait == UndefinedPixelTrait)) { for (i=0; i < (ssize_t) image->colors; i++) { const int id = GetOpenMPThreadId(); while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { MagickBooleanType atDepth; QuantumAny range; atDepth=MagickTrue; range=GetQuantumRange(current_depth[id]); if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].red),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && (GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].green),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && (GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].blue),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse)) break; current_depth[id]++; } } depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } image_view=AcquireVirtualCacheView(image,exception); #if !defined(MAGICKCORE_HDRI_SUPPORT) if ((1UL*QuantumRange) <= MaxMap) { size_t *depth_map; /* Scale pixels to desired (optimized with depth map). 
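  For non-HDRI builds where QuantumRange fits in MaxMap, precompute, for
  every possible pixel value, the smallest depth at which the value survives
  a ScaleQuantumToAny()/ScaleAnyToQuantum() round trip; the per-pixel work in
  the loop below then reduces to a single depth_map[] lookup.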
*/ depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map)); if (depth_map == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (i=0; i <= (ssize_t) MaxMap; i++) { unsigned int depth; for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++) { Quantum pixel; QuantumAny range; range=GetQuantumRange(depth); pixel=(Quantum) i; if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range)) break; } depth_map[i]=depth; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) continue; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (depth_map[ScaleQuantumToMap(p[i])] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(p[i])]; } p+=GetPixelChannels(image); } if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH) status=MagickFalse; } image_view=DestroyCacheView(image_view); depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; depth_map=(size_t *) RelinquishMagickMemory(depth_map); current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } #endif /* Compute pixel depth. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) continue; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel; PixelTrait traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { QuantumAny range; range=GetQuantumRange(current_depth[id]); if (p[i] == ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),range)) break; current_depth[id]++; } } p+=GetPixelChannels(image); } if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH) status=MagickFalse; } image_view=DestroyCacheView(image_view); depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e Q u a n t u m D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageQuantumDepth() returns the depth of the image rounded to a legal % quantum depth: 8, 16, or 32. 
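%
% For example, an image depth of 12 is reported as 16; a depth of 32 is
% reported as 32, or as MAGICKCORE_QUANTUM_DEPTH when constrain is set and
% the build uses a smaller quantum depth.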
% % The format of the GetImageQuantumDepth method is: % % size_t GetImageQuantumDepth(const Image *image, % const MagickBooleanType constrain) % % A description of each parameter follows: % % o image: the image. % % o constrain: A value other than MagickFalse, constrains the depth to % a maximum of MAGICKCORE_QUANTUM_DEPTH. % */ MagickExport size_t GetImageQuantumDepth(const Image *image, const MagickBooleanType constrain) { size_t depth; depth=image->depth; if (depth <= 8) depth=8; else if (depth <= 16) depth=16; else if (depth <= 32) depth=32; else if (depth <= 64) depth=64; if (constrain != MagickFalse) depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH); return(depth); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageType() returns the type of image: % % Bilevel Grayscale GrayscaleMatte % Palette PaletteMatte TrueColor % TrueColorMatte ColorSeparation ColorSeparationMatte % % The format of the GetImageType method is: % % ImageType GetImageType(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport ImageType GetImageType(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->colorspace == CMYKColorspace) { if (image->alpha_trait == UndefinedPixelTrait) return(ColorSeparationType); return(ColorSeparationAlphaType); } if (IsImageMonochrome(image) != MagickFalse) return(BilevelType); if (IsImageGray(image) != MagickFalse) { if (image->alpha_trait != UndefinedPixelTrait) return(GrayscaleAlphaType); return(GrayscaleType); } if (IsPaletteImage(image) != MagickFalse) { if (image->alpha_trait != UndefinedPixelTrait) return(PaletteAlphaType); return(PaletteType); } if (image->alpha_trait != UndefinedPixelTrait) return(TrueColorAlphaType); return(TrueColorType); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I d e n t i f y I m a g e G r a y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IdentifyImageGray() returns grayscale if all the pixels in the image have % the same red, green, and blue intensities, and bi-level is the intensity is % either 0 or QuantumRange. Otherwise undefined is returned. % % The format of the IdentifyImageGray method is: % % ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport ImageType IdentifyImageGray(const Image *image, ExceptionInfo *exception) { CacheView *image_view; ImageType type; register const Quantum *p; register ssize_t x; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->type == BilevelType) || (image->type == GrayscaleType) || (image->type == GrayscaleAlphaType)) return(image->type); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) return(UndefinedType); type=BilevelType; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsPixelGray(image,p) == MagickFalse) { type=UndefinedType; break; } if ((type == BilevelType) && (IsPixelMonochrome(image,p) == MagickFalse)) type=GrayscaleType; p+=GetPixelChannels(image); } if (type == UndefinedType) break; } image_view=DestroyCacheView(image_view); if ((type == GrayscaleType) && (image->alpha_trait != UndefinedPixelTrait)) type=GrayscaleAlphaType; return(type); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I d e n t i f y I m a g e M o n o c h r o m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image % have the same red, green, and blue intensities and the intensity is either % 0 or QuantumRange. % % The format of the IdentifyImageMonochrome method is: % % MagickBooleanType IdentifyImageMonochrome(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType bilevel; register ssize_t x; register const Quantum *p; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->type == BilevelType) return(MagickTrue); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) return(MagickFalse); bilevel=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsPixelMonochrome(image,p) == MagickFalse) { bilevel=MagickFalse; break; } p+=GetPixelChannels(image); } if (bilevel == MagickFalse) break; } image_view=DestroyCacheView(image_view); return(bilevel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I d e n t i f y I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IdentifyImageType() returns the potential type of image: % % Bilevel Grayscale GrayscaleMatte % Palette PaletteMatte TrueColor % TrueColorMatte ColorSeparation ColorSeparationMatte % % To ensure the image type matches its potential, use SetImageType(): % % (void) SetImageType(image,IdentifyImageType(image,exception),exception); % % The format of the IdentifyImageType method is: % % ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport ImageType IdentifyImageType(const Image *image, ExceptionInfo *exception) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->colorspace == CMYKColorspace) { if (image->alpha_trait == UndefinedPixelTrait) return(ColorSeparationType); return(ColorSeparationAlphaType); } if (IdentifyImageMonochrome(image,exception) != MagickFalse) return(BilevelType); if (IdentifyImageGray(image,exception) != UndefinedType) { if (image->alpha_trait != UndefinedPixelTrait) return(GrayscaleAlphaType); return(GrayscaleType); } if (IdentifyPaletteImage(image,exception) != MagickFalse) { if (image->alpha_trait != UndefinedPixelTrait) return(PaletteAlphaType); return(PaletteType); } if (image->alpha_trait != UndefinedPixelTrait) return(TrueColorAlphaType); return(TrueColorType); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e G r a y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageGray() returns MagickTrue if the type of the image is grayscale or % bi-level. % % The format of the IsImageGray method is: % % MagickBooleanType IsImageGray(const Image *image) % % A description of each parameter follows: % % o image: the image. 
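%
% Note that IsImageGray() only tests the image->type member; it does not
% inspect the pixel values.  Use IdentifyImageGray() to classify an image
% from its pixels.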
% */ MagickExport MagickBooleanType IsImageGray(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if ((image->type == BilevelType) || (image->type == GrayscaleType) || (image->type == GrayscaleAlphaType)) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e M o n o c h r o m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageMonochrome() returns MagickTrue if the type of the image is bi-level. % % The format of the IsImageMonochrome method is: % % MagickBooleanType IsImageMonochrome(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType IsImageMonochrome(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->type == BilevelType) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e O p a q u e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageOpaque() returns MagickTrue if none of the pixels in the image have % an alpha value other than OpaqueAlpha (QuantumRange). % % It returns MagickTrue immediately if the alpha channel is not available. % % The format of the IsImageOpaque method is: % % MagickBooleanType IsImageOpaque(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType IsImageOpaque(const Image *image, ExceptionInfo *exception) { CacheView *image_view; register const Quantum *p; register ssize_t x; ssize_t y; /* Determine if image is opaque. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->alpha_trait == UndefinedPixelTrait) return(MagickTrue); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelAlpha(image,p) != OpaqueAlpha) break; p+=GetPixelChannels(image); } if (x < (ssize_t) image->columns) break; } image_view=DestroyCacheView(image_view); return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageDepth() sets the depth of the image. % % The format of the SetImageDepth method is: % % MagickBooleanType SetImageDepth(Image *image,const size_t depth, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o depth: the image depth. % % o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType SetImageDepth(Image *image, const size_t depth,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; QuantumAny range; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (depth >= MAGICKCORE_QUANTUM_DEPTH) { image->depth=depth; return(MagickTrue); } range=GetQuantumRange(depth); if (image->storage_class == PseudoClass) { register ssize_t i; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->colors,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(double) ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel(image->colormap[i].red),range),range); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(double) ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel(image->colormap[i].green),range),range); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(double) ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel(image->colormap[i].blue),range),range); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(double) ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel(image->colormap[i].alpha),range),range); } } status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if !defined(MAGICKCORE_HDRI_SUPPORT) if ((1UL*QuantumRange) <= MaxMap) { Quantum *depth_map; register ssize_t i; /* Scale pixels to desired (optimized with depth map). */ depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map)); if (depth_map == (Quantum *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (i=0; i <= (ssize_t) MaxMap; i++) depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range), range); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel; PixelTrait traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=depth_map[ScaleQuantumToMap(q[i])]; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) { status=MagickFalse; continue; } } image_view=DestroyCacheView(image_view); depth_map=(Quantum *) RelinquishMagickMemory(depth_map); if (status != MagickFalse) image->depth=depth; return(status); } #endif /* Scale pixels to desired depth. 
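  Each updatable channel value is reduced to the requested depth by a
  ScaleQuantumToAny()/ScaleAnyToQuantum() round trip with
  range=GetQuantumRange(depth).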
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel; PixelTrait traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel((MagickRealType) q[i]),range),range); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) { status=MagickFalse; continue; } } image_view=DestroyCacheView(image_view); if (status != MagickFalse) image->depth=depth; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageType() sets the type of image. Choose from these types: % % Bilevel Grayscale GrayscaleMatte % Palette PaletteMatte TrueColor % TrueColorMatte ColorSeparation ColorSeparationMatte % OptimizeType % % The format of the SetImageType method is: % % MagickBooleanType SetImageType(Image *image,const ImageType type, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: Image type. % % o exception: return any errors or warnings in this structure. 
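%
% Passing OptimizeType or UndefinedType performs no pixel conversion; only
% the stored image type is updated.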
% */ MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type, ExceptionInfo *exception) { const char *artifact; ImageInfo *image_info; MagickBooleanType status; QuantizeInfo *quantize_info; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); status=MagickTrue; image_info=AcquireImageInfo(); image_info->dither=image->dither; artifact=GetImageArtifact(image,"dither"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"dither",artifact); switch (type) { case BilevelType: { status=TransformImageColorspace(image,GRAYColorspace,exception); (void) NormalizeImage(image,exception); quantize_info=AcquireQuantizeInfo(image_info); quantize_info->number_colors=2; quantize_info->dither_method=NoDitherMethod; status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); image->alpha_trait=UndefinedPixelTrait; break; } case GrayscaleType: { status=TransformImageColorspace(image,GRAYColorspace,exception); image->alpha_trait=UndefinedPixelTrait; break; } case GrayscaleAlphaType: { status=TransformImageColorspace(image,GRAYColorspace,exception); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); break; } case PaletteType: { status=TransformImageColorspace(image,sRGBColorspace,exception); if ((image->storage_class == DirectClass) || (image->colors > 256)) { quantize_info=AcquireQuantizeInfo(image_info); quantize_info->number_colors=256; status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); } image->alpha_trait=UndefinedPixelTrait; break; } case PaletteBilevelAlphaType: { ChannelType channel_mask; status=TransformImageColorspace(image,sRGBColorspace,exception); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); channel_mask=SetImageChannelMask(image,AlphaChannel); (void) BilevelImage(image,(double) QuantumRange/2.0,exception); (void) SetImageChannelMask(image,channel_mask); quantize_info=AcquireQuantizeInfo(image_info); status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); break; } case PaletteAlphaType: { status=TransformImageColorspace(image,sRGBColorspace,exception); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); quantize_info=AcquireQuantizeInfo(image_info); quantize_info->colorspace=TransparentColorspace; status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); break; } case TrueColorType: { status=TransformImageColorspace(image,sRGBColorspace,exception); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass,exception); image->alpha_trait=UndefinedPixelTrait; break; } case TrueColorAlphaType: { status=TransformImageColorspace(image,sRGBColorspace,exception); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass,exception); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); break; } case ColorSeparationType: { status=TransformImageColorspace(image,CMYKColorspace,exception); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass,exception); image->alpha_trait=UndefinedPixelTrait; break; } case ColorSeparationAlphaType: { 
status=TransformImageColorspace(image,CMYKColorspace,exception); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass,exception); if (image->alpha_trait == UndefinedPixelTrait) status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); break; } case OptimizeType: case UndefinedType: break; } image_info=DestroyImageInfo(image_info); if (status == MagickFalse) return(status); image->type=type; return(MagickTrue); }
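/*
  Usage sketch (illustrative only; not part of the library): reduce an image
  to a 256-color palette before writing and check the returned status:

    if (SetImageType(image,PaletteType,exception) == MagickFalse)
      return(MagickFalse);
*/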
GB_dense_subassign_25_template.c
//------------------------------------------------------------------------------ // GB_dense_subassign_25_template: C<M> = A where C is empty and A is dense //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // C<M> = A where C starts as empty, M is structural, and A is dense. The // pattern of C is an exact copy of M. { //-------------------------------------------------------------------------- // get C, M, and A //-------------------------------------------------------------------------- GB_CTYPE *GB_RESTRICT Cx = C->x ; const int64_t *GB_RESTRICT Mp = M->p ; const int64_t *GB_RESTRICT Mh = M->h ; const int64_t *GB_RESTRICT Mi = M->i ; const GB_CTYPE *GB_RESTRICT Ax = A->x ; const int64_t avlen = A->vlen ; //-------------------------------------------------------------------------- // C<M> = A //-------------------------------------------------------------------------- int taskid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (taskid = 0 ; taskid < ntasks ; taskid++) { // if kfirst > klast then taskid does no work at all int64_t kfirst = kfirst_slice [taskid] ; int64_t klast = klast_slice [taskid] ; //---------------------------------------------------------------------- // C<M(:,kfirst:klast)> = A(:,kfirst:klast) //---------------------------------------------------------------------- for (int64_t k = kfirst ; k <= klast ; k++) { //------------------------------------------------------------------ // find the part of M(:,k) to be operated on by this task //------------------------------------------------------------------ int64_t j = (Mh == NULL) ? k : Mh [k] ; int64_t pM_start, pM_end ; GB_get_pA_and_pC (&pM_start, &pM_end, NULL, taskid, k, kfirst, klast, pstart_slice, NULL, NULL, Mp) ; // pA points to the start of A(:,j) since A is dense int64_t pA = j * avlen ; //------------------------------------------------------------------ // C<M(:,j)> = A(:,j) //------------------------------------------------------------------ GB_PRAGMA_VECTORIZE for (int64_t pM = pM_start ; pM < pM_end ; pM++) { int64_t p = pA + Mi [pM] ; GB_COPY_A_TO_C (Cx, pM, Ax, p) ; // Cx [pM] = Ax [p] } } } }
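//------------------------------------------------------------------------------
// Illustrative sketch (not part of the template): the gather performed above.
// A dense column-major matrix with column length avlen stores entry (i,j) at
// Ax [j*avlen + i], so copying A into the pattern of a mask column whose row
// indices are Mi [pM_start..pM_end-1] reduces to:
//
//      int64_t pA = j * avlen ;               // start of dense column A(:,j)
//      for (int64_t pM = pM_start ; pM < pM_end ; pM++)
//      {
//          Cx [pM] = Ax [pA + Mi [pM]] ;      // Cx [pM] = A (Mi [pM], j)
//      }
//------------------------------------------------------------------------------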
feature.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF EEEEE AAA TTTTT U U RRRR EEEEE % % F E A A T U U R R E % % FFF EEE AAAAA T U U RRRR EEE % % F E A A T U U R R E % % F EEEEE A A T UUU R R EEEEE % % % % % % MagickCore Image Feature Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/animate.h" #include "magick/artifact.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/compress.h" #include "magick/constitute.h" #include "magick/deprecate.h" #include "magick/display.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/feature.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/list.h" #include "magick/image-private.h" #include "magick/magic.h" #include "magick/magick.h" #include "magick/matrix.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/morphology-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel-private.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/random_.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/semaphore.h" #include "magick/signature-private.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/timer.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C a n n y E d g e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CannyEdgeImage() uses a multi-stage algorithm to detect a wide range of % edges in images. % % The format of the CannyEdgeImage method is: % % Image *CannyEdgeImage(const Image *image,const double radius, % const double sigma,const double lower_percent, % const double upper_percent,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the gaussian smoothing filter. % % o sigma: the sigma of the gaussian smoothing filter. % % o lower_percent: percentage of edge pixels in the lower threshold. 
% % o upper_percent: percentage of edge pixels in the upper threshold. % % o exception: return any errors or warnings in this structure. % */ typedef struct _CannyInfo { double magnitude, intensity; int orientation; ssize_t x, y; } CannyInfo; static inline MagickBooleanType IsAuthenticPixel(const Image *image, const ssize_t x,const ssize_t y) { if ((x < 0) || (x >= (ssize_t) image->columns)) return(MagickFalse); if ((y < 0) || (y >= (ssize_t) image->rows)) return(MagickFalse); return(MagickTrue); } static MagickBooleanType TraceEdges(Image *edge_image,CacheView *edge_view, MatrixInfo *canny_cache,const ssize_t x,const ssize_t y, const double lower_threshold,ExceptionInfo *exception) { CannyInfo edge, pixel; MagickBooleanType status; register PixelPacket *q; register ssize_t i; q=GetCacheViewAuthenticPixels(edge_view,x,y,1,1,exception); if (q == (PixelPacket *) NULL) return(MagickFalse); q->red=QuantumRange; q->green=QuantumRange; q->blue=QuantumRange; status=SyncCacheViewAuthenticPixels(edge_view,exception); if (status == MagickFalse) return(MagickFalse); if (GetMatrixElement(canny_cache,0,0,&edge) == MagickFalse) return(MagickFalse); edge.x=x; edge.y=y; if (SetMatrixElement(canny_cache,0,0,&edge) == MagickFalse) return(MagickFalse); for (i=1; i != 0; ) { ssize_t v; i--; status=GetMatrixElement(canny_cache,i,0,&edge); if (status == MagickFalse) return(MagickFalse); for (v=(-1); v <= 1; v++) { ssize_t u; for (u=(-1); u <= 1; u++) { if ((u == 0) && (v == 0)) continue; if (IsAuthenticPixel(edge_image,edge.x+u,edge.y+v) == MagickFalse) continue; /* Not an edge if gradient value is below the lower threshold. */ q=GetCacheViewAuthenticPixels(edge_view,edge.x+u,edge.y+v,1,1, exception); if (q == (PixelPacket *) NULL) return(MagickFalse); status=GetMatrixElement(canny_cache,edge.x+u,edge.y+v,&pixel); if (status == MagickFalse) return(MagickFalse); if ((GetPixelIntensity(edge_image,q) == 0.0) && (pixel.intensity >= lower_threshold)) { q->red=QuantumRange; q->green=QuantumRange; q->blue=QuantumRange; status=SyncCacheViewAuthenticPixels(edge_view,exception); if (status == MagickFalse) return(MagickFalse); edge.x+=u; edge.y+=v; status=SetMatrixElement(canny_cache,i,0,&edge); if (status == MagickFalse) return(MagickFalse); i++; } } } } return(MagickTrue); } MagickExport Image *CannyEdgeImage(const Image *image,const double radius, const double sigma,const double lower_percent,const double upper_percent, ExceptionInfo *exception) { #define CannyEdgeImageTag "CannyEdge/Image" CacheView *edge_view; CannyInfo element; char geometry[MaxTextExtent]; double lower_threshold, max, min, upper_threshold; Image *edge_image; KernelInfo *kernel_info; MagickBooleanType status; MagickOffsetType progress; MatrixInfo *canny_cache; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); /* Filter out noise. 
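  The Gaussian smoothing is applied as two separable one-dimensional blur
  convolutions: the kernel string built below requests a blur of the given
  radius and sigma followed by the same blur rotated 90 degrees.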
*/ (void) FormatLocaleString(geometry,MaxTextExtent, "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma); kernel_info=AcquireKernelInfo(geometry); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); edge_image=MorphologyImageChannel(image,DefaultChannels,ConvolveMorphology,1, kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); if (edge_image == (Image *) NULL) return((Image *) NULL); if (TransformImageColorspace(edge_image,GRAYColorspace) == MagickFalse) { edge_image=DestroyImage(edge_image); return((Image *) NULL); } (void) SetImageAlphaChannel(edge_image,DeactivateAlphaChannel); /* Find the intensity gradient of the image. */ canny_cache=AcquireMatrixInfo(edge_image->columns,edge_image->rows, sizeof(CannyInfo),exception); if (canny_cache == (MatrixInfo *) NULL) { edge_image=DestroyImage(edge_image); return((Image *) NULL); } status=MagickTrue; edge_view=AcquireVirtualCacheView(edge_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(edge_image,edge_image,edge_image->rows,1) #endif for (y=0; y < (ssize_t) edge_image->rows; y++) { register const PixelPacket *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns+1,2, exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) edge_image->columns; x++) { CannyInfo pixel; double dx, dy; register const PixelPacket *magick_restrict kernel_pixels; ssize_t v; static double Gx[2][2] = { { -1.0, +1.0 }, { -1.0, +1.0 } }, Gy[2][2] = { { +1.0, +1.0 }, { -1.0, -1.0 } }; (void) memset(&pixel,0,sizeof(pixel)); dx=0.0; dy=0.0; kernel_pixels=p; for (v=0; v < 2; v++) { ssize_t u; for (u=0; u < 2; u++) { double intensity; intensity=GetPixelIntensity(edge_image,kernel_pixels+u); dx+=0.5*Gx[v][u]*intensity; dy+=0.5*Gy[v][u]*intensity; } kernel_pixels+=edge_image->columns+1; } pixel.magnitude=hypot(dx,dy); pixel.orientation=0; if (fabs(dx) > MagickEpsilon) { double slope; slope=dy/dx; if (slope < 0.0) { if (slope < -2.41421356237) pixel.orientation=0; else if (slope < -0.414213562373) pixel.orientation=1; else pixel.orientation=2; } else { if (slope > 2.41421356237) pixel.orientation=0; else if (slope > 0.414213562373) pixel.orientation=3; else pixel.orientation=2; } } if (SetMatrixElement(canny_cache,x,y,&pixel) == MagickFalse) continue; p++; } } edge_view=DestroyCacheView(edge_view); /* Non-maxima suppression, remove pixels that are not considered to be part of an edge. */ progress=0; (void) GetMatrixElement(canny_cache,0,0,&element); max=element.intensity; min=element.intensity; edge_view=AcquireAuthenticCacheView(edge_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(edge_image,edge_image,edge_image->rows,1) #endif for (y=0; y < (ssize_t) edge_image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(edge_view,0,y,edge_image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) edge_image->columns; x++) { CannyInfo alpha_pixel, beta_pixel, pixel; (void) GetMatrixElement(canny_cache,x,y,&pixel); switch (pixel.orientation) { case 0: default: { /* 0 degrees, north and south. 
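  The gradient at this pixel points mostly north-south, so its magnitude is
  compared against the north and south neighbors.  The orientations were
  quantized above using slope thresholds of roughly 0.414 (tan 22.5 degrees)
  and 2.414 (tan 67.5 degrees).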
*/ (void) GetMatrixElement(canny_cache,x,y-1,&alpha_pixel); (void) GetMatrixElement(canny_cache,x,y+1,&beta_pixel); break; } case 1: { /* 45 degrees, northwest and southeast. */ (void) GetMatrixElement(canny_cache,x-1,y-1,&alpha_pixel); (void) GetMatrixElement(canny_cache,x+1,y+1,&beta_pixel); break; } case 2: { /* 90 degrees, east and west. */ (void) GetMatrixElement(canny_cache,x-1,y,&alpha_pixel); (void) GetMatrixElement(canny_cache,x+1,y,&beta_pixel); break; } case 3: { /* 135 degrees, northeast and southwest. */ (void) GetMatrixElement(canny_cache,x+1,y-1,&beta_pixel); (void) GetMatrixElement(canny_cache,x-1,y+1,&alpha_pixel); break; } } pixel.intensity=pixel.magnitude; if ((pixel.magnitude < alpha_pixel.magnitude) || (pixel.magnitude < beta_pixel.magnitude)) pixel.intensity=0; (void) SetMatrixElement(canny_cache,x,y,&pixel); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CannyEdgeImage) #endif { if (pixel.intensity < min) min=pixel.intensity; if (pixel.intensity > max) max=pixel.intensity; } q->red=0; q->green=0; q->blue=0; q++; } if (SyncCacheViewAuthenticPixels(edge_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CannyEdgeImage) #endif proceed=SetImageProgress(image,CannyEdgeImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } edge_view=DestroyCacheView(edge_view); /* Estimate hysteresis threshold. */ lower_threshold=lower_percent*(max-min)+min; upper_threshold=upper_percent*(max-min)+min; /* Hysteresis threshold. */ edge_view=AcquireAuthenticCacheView(edge_image,exception); for (y=0; y < (ssize_t) edge_image->rows; y++) { register ssize_t x; if (status == MagickFalse) continue; for (x=0; x < (ssize_t) edge_image->columns; x++) { CannyInfo pixel; register const PixelPacket *magick_restrict p; /* Edge if pixel gradient higher than upper threshold. */ p=GetCacheViewVirtualPixels(edge_view,x,y,1,1,exception); if (p == (const PixelPacket *) NULL) continue; status=GetMatrixElement(canny_cache,x,y,&pixel); if (status == MagickFalse) continue; if ((GetPixelIntensity(edge_image,p) == 0.0) && (pixel.intensity >= upper_threshold)) status=TraceEdges(edge_image,edge_view,canny_cache,x,y,lower_threshold, exception); } } edge_view=DestroyCacheView(edge_view); /* Free resources. */ canny_cache=DestroyMatrixInfo(canny_cache); return(edge_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l F e a t u r e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelFeatures() returns features for each channel in the image in % each of four directions (horizontal, vertical, left and right diagonals) % for the specified distance. The features include the angular second % moment, contrast, correlation, sum of squares: variance, inverse difference % moment, sum average, sum varience, sum entropy, entropy, difference variance,% difference entropy, information measures of correlation 1, information % measures of correlation 2, and maximum correlation coefficient. You can % access the red channel contrast, for example, like this: % % channel_features=GetImageChannelFeatures(image,1,exception); % contrast=channel_features[RedChannel].contrast[0]; % % Use MagickRelinquishMemory() to free the features buffer. 
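%
% These are the classical Haralick texture descriptors, computed from
% gray-level co-occurrence matrices accumulated in each of the four
% directions at the requested pixel distance.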
% % The format of the GetImageChannelFeatures method is: % % ChannelFeatures *GetImageChannelFeatures(const Image *image, % const size_t distance,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o distance: the distance. % % o exception: return any errors or warnings in this structure. % */ static inline double MagickLog10(const double x) { #define Log10Epsilon (1.0e-11) if (fabs(x) < Log10Epsilon) return(log10(Log10Epsilon)); return(log10(fabs(x))); } MagickExport ChannelFeatures *GetImageChannelFeatures(const Image *image, const size_t distance,ExceptionInfo *exception) { typedef struct _ChannelStatistics { DoublePixelPacket direction[4]; /* horizontal, vertical, left and right diagonals */ } ChannelStatistics; CacheView *image_view; ChannelFeatures *channel_features; ChannelStatistics **cooccurrence, correlation, *density_x, *density_xy, *density_y, entropy_x, entropy_xy, entropy_xy1, entropy_xy2, entropy_y, mean, **Q, *sum, sum_squares, variance; LongPixelPacket gray, *grays; MagickBooleanType status; register ssize_t i; size_t length; ssize_t y; unsigned int number_grays; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->columns < (distance+1)) || (image->rows < (distance+1))) return((ChannelFeatures *) NULL); length=CompositeChannels+1UL; channel_features=(ChannelFeatures *) AcquireQuantumMemory(length, sizeof(*channel_features)); if (channel_features == (ChannelFeatures *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(channel_features,0,length* sizeof(*channel_features)); /* Form grays. */ grays=(LongPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*grays)); if (grays == (LongPixelPacket *) NULL) { channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } for (i=0; i <= (ssize_t) MaxMap; i++) { grays[i].red=(~0U); grays[i].green=(~0U); grays[i].blue=(~0U); grays[i].opacity=(~0U); grays[i].index=(~0U); } status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { grays[ScaleQuantumToMap(GetPixelRed(p))].red= ScaleQuantumToMap(GetPixelRed(p)); grays[ScaleQuantumToMap(GetPixelGreen(p))].green= ScaleQuantumToMap(GetPixelGreen(p)); grays[ScaleQuantumToMap(GetPixelBlue(p))].blue= ScaleQuantumToMap(GetPixelBlue(p)); if (image->colorspace == CMYKColorspace) grays[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index= ScaleQuantumToMap(GetPixelIndex(indexes+x)); if (image->matte != MagickFalse) grays[ScaleQuantumToMap(GetPixelOpacity(p))].opacity= ScaleQuantumToMap(GetPixelOpacity(p)); p++; } } image_view=DestroyCacheView(image_view); if (status == MagickFalse) { 
grays=(LongPixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); return(channel_features); } (void) memset(&gray,0,sizeof(gray)); for (i=0; i <= (ssize_t) MaxMap; i++) { if (grays[i].red != ~0U) grays[(ssize_t) gray.red++].red=grays[i].red; if (grays[i].green != ~0U) grays[(ssize_t) gray.green++].green=grays[i].green; if (grays[i].blue != ~0U) grays[(ssize_t) gray.blue++].blue=grays[i].blue; if (image->colorspace == CMYKColorspace) if (grays[i].index != ~0U) grays[(ssize_t) gray.index++].index=grays[i].index; if (image->matte != MagickFalse) if (grays[i].opacity != ~0U) grays[(ssize_t) gray.opacity++].opacity=grays[i].opacity; } /* Allocate spatial dependence matrix. */ number_grays=gray.red; if (gray.green > number_grays) number_grays=gray.green; if (gray.blue > number_grays) number_grays=gray.blue; if (image->colorspace == CMYKColorspace) if (gray.index > number_grays) number_grays=gray.index; if (image->matte != MagickFalse) if (gray.opacity > number_grays) number_grays=gray.opacity; cooccurrence=(ChannelStatistics **) AcquireQuantumMemory(number_grays, sizeof(*cooccurrence)); density_x=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1), sizeof(*density_x)); density_xy=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1), sizeof(*density_xy)); density_y=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1), sizeof(*density_y)); Q=(ChannelStatistics **) AcquireQuantumMemory(number_grays,sizeof(*Q)); sum=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(*sum)); if ((cooccurrence == (ChannelStatistics **) NULL) || (density_x == (ChannelStatistics *) NULL) || (density_xy == (ChannelStatistics *) NULL) || (density_y == (ChannelStatistics *) NULL) || (Q == (ChannelStatistics **) NULL) || (sum == (ChannelStatistics *) NULL)) { if (Q != (ChannelStatistics **) NULL) { for (i=0; i < (ssize_t) number_grays; i++) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); Q=(ChannelStatistics **) RelinquishMagickMemory(Q); } if (sum != (ChannelStatistics *) NULL) sum=(ChannelStatistics *) RelinquishMagickMemory(sum); if (density_y != (ChannelStatistics *) NULL) density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); if (density_xy != (ChannelStatistics *) NULL) density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); if (density_x != (ChannelStatistics *) NULL) density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); if (cooccurrence != (ChannelStatistics **) NULL) { for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory( cooccurrence); } grays=(LongPixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } (void) memset(&correlation,0,sizeof(correlation)); (void) memset(density_x,0,2*(number_grays+1)*sizeof(*density_x)); (void) memset(density_xy,0,2*(number_grays+1)*sizeof(*density_xy)); (void) memset(density_y,0,2*(number_grays+1)*sizeof(*density_y)); (void) memset(&mean,0,sizeof(mean)); (void) memset(sum,0,number_grays*sizeof(*sum)); (void) memset(&sum_squares,0,sizeof(sum_squares)); (void) memset(density_xy,0,2*number_grays*sizeof(*density_xy)); (void) memset(&entropy_x,0,sizeof(entropy_x)); (void) 
memset(&entropy_xy,0,sizeof(entropy_xy)); (void) memset(&entropy_xy1,0,sizeof(entropy_xy1)); (void) memset(&entropy_xy2,0,sizeof(entropy_xy2)); (void) memset(&entropy_y,0,sizeof(entropy_y)); (void) memset(&variance,0,sizeof(variance)); for (i=0; i < (ssize_t) number_grays; i++) { cooccurrence[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays, sizeof(**cooccurrence)); Q[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(**Q)); if ((cooccurrence[i] == (ChannelStatistics *) NULL) || (Q[i] == (ChannelStatistics *) NULL)) break; (void) memset(cooccurrence[i],0,number_grays* sizeof(**cooccurrence)); (void) memset(Q[i],0,number_grays*sizeof(**Q)); } if (i < (ssize_t) number_grays) { for (i--; i >= 0; i--) { if (Q[i] != (ChannelStatistics *) NULL) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); if (cooccurrence[i] != (ChannelStatistics *) NULL) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); } Q=(ChannelStatistics **) RelinquishMagickMemory(Q); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); sum=(ChannelStatistics *) RelinquishMagickMemory(sum); density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); grays=(LongPixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } /* Initialize spatial dependence matrix. */ status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register ssize_t x; ssize_t i, offset, u, v; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-(ssize_t) distance,y,image->columns+ 2*distance,distance+2,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); p+=distance; indexes+=distance; for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < 4; i++) { switch (i) { case 0: default: { /* Horizontal adjacency. */ offset=(ssize_t) distance; break; } case 1: { /* Vertical adjacency. */ offset=(ssize_t) (image->columns+2*distance); break; } case 2: { /* Right diagonal adjacency. */ offset=(ssize_t) ((image->columns+2*distance)-distance); break; } case 3: { /* Left diagonal adjacency. 
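  All four offsets are expressed in pixels of the virtual view fetched above,
  whose row stride is image->columns+2*distance.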
*/ offset=(ssize_t) ((image->columns+2*distance)+distance); break; } } u=0; v=0; while (grays[u].red != ScaleQuantumToMap(GetPixelRed(p))) u++; while (grays[v].red != ScaleQuantumToMap(GetPixelRed(p+offset))) v++; cooccurrence[u][v].direction[i].red++; cooccurrence[v][u].direction[i].red++; u=0; v=0; while (grays[u].green != ScaleQuantumToMap(GetPixelGreen(p))) u++; while (grays[v].green != ScaleQuantumToMap(GetPixelGreen(p+offset))) v++; cooccurrence[u][v].direction[i].green++; cooccurrence[v][u].direction[i].green++; u=0; v=0; while (grays[u].blue != ScaleQuantumToMap(GetPixelBlue(p))) u++; while (grays[v].blue != ScaleQuantumToMap((p+offset)->blue)) v++; cooccurrence[u][v].direction[i].blue++; cooccurrence[v][u].direction[i].blue++; if (image->colorspace == CMYKColorspace) { u=0; v=0; while (grays[u].index != ScaleQuantumToMap(GetPixelIndex(indexes+x))) u++; while (grays[v].index != ScaleQuantumToMap(GetPixelIndex(indexes+x+offset))) v++; cooccurrence[u][v].direction[i].index++; cooccurrence[v][u].direction[i].index++; } if (image->matte != MagickFalse) { u=0; v=0; while (grays[u].opacity != ScaleQuantumToMap(GetPixelOpacity(p))) u++; while (grays[v].opacity != ScaleQuantumToMap((p+offset)->opacity)) v++; cooccurrence[u][v].direction[i].opacity++; cooccurrence[v][u].direction[i].opacity++; } } p++; } } grays=(LongPixelPacket *) RelinquishMagickMemory(grays); image_view=DestroyCacheView(image_view); if (status == MagickFalse) { for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } /* Normalize spatial dependence matrix. */ for (i=0; i < 4; i++) { double normalize; register ssize_t y; switch (i) { case 0: default: { /* Horizontal adjacency. */ normalize=2.0*image->rows*(image->columns-distance); break; } case 1: { /* Vertical adjacency. */ normalize=2.0*(image->rows-distance)*image->columns; break; } case 2: { /* Right diagonal adjacency. */ normalize=2.0*(image->rows-distance)*(image->columns-distance); break; } case 3: { /* Left diagonal adjacency. */ normalize=2.0*(image->rows-distance)*(image->columns-distance); break; } } normalize=PerceptibleReciprocal(normalize); for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { cooccurrence[x][y].direction[i].red*=normalize; cooccurrence[x][y].direction[i].green*=normalize; cooccurrence[x][y].direction[i].blue*=normalize; if (image->colorspace == CMYKColorspace) cooccurrence[x][y].direction[i].index*=normalize; if (image->matte != MagickFalse) cooccurrence[x][y].direction[i].opacity*=normalize; } } } /* Compute texture features. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { register ssize_t y; for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Angular second moment: measure of homogeneity of the image. 
*/ channel_features[RedChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].red* cooccurrence[x][y].direction[i].red; channel_features[GreenChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].green* cooccurrence[x][y].direction[i].green; channel_features[BlueChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].blue* cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].index* cooccurrence[x][y].direction[i].index; if (image->matte != MagickFalse) channel_features[OpacityChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].opacity* cooccurrence[x][y].direction[i].opacity; /* Correlation: measure of linear-dependencies in the image. */ sum[y].direction[i].red+=cooccurrence[x][y].direction[i].red; sum[y].direction[i].green+=cooccurrence[x][y].direction[i].green; sum[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) sum[y].direction[i].index+=cooccurrence[x][y].direction[i].index; if (image->matte != MagickFalse) sum[y].direction[i].opacity+=cooccurrence[x][y].direction[i].opacity; correlation.direction[i].red+=x*y*cooccurrence[x][y].direction[i].red; correlation.direction[i].green+=x*y* cooccurrence[x][y].direction[i].green; correlation.direction[i].blue+=x*y* cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) correlation.direction[i].index+=x*y* cooccurrence[x][y].direction[i].index; if (image->matte != MagickFalse) correlation.direction[i].opacity+=x*y* cooccurrence[x][y].direction[i].opacity; /* Inverse Difference Moment. */ channel_features[RedChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].red/((y-x)*(y-x)+1); channel_features[GreenChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].green/((y-x)*(y-x)+1); channel_features[BlueChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].blue/((y-x)*(y-x)+1); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].index/((y-x)*(y-x)+1); if (image->matte != MagickFalse) channel_features[OpacityChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].opacity/((y-x)*(y-x)+1); /* Sum average. */ density_xy[y+x+2].direction[i].red+= cooccurrence[x][y].direction[i].red; density_xy[y+x+2].direction[i].green+= cooccurrence[x][y].direction[i].green; density_xy[y+x+2].direction[i].blue+= cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_xy[y+x+2].direction[i].index+= cooccurrence[x][y].direction[i].index; if (image->matte != MagickFalse) density_xy[y+x+2].direction[i].opacity+= cooccurrence[x][y].direction[i].opacity; /* Entropy. 
*/ channel_features[RedChannel].entropy[i]-= cooccurrence[x][y].direction[i].red* MagickLog10(cooccurrence[x][y].direction[i].red); channel_features[GreenChannel].entropy[i]-= cooccurrence[x][y].direction[i].green* MagickLog10(cooccurrence[x][y].direction[i].green); channel_features[BlueChannel].entropy[i]-= cooccurrence[x][y].direction[i].blue* MagickLog10(cooccurrence[x][y].direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].entropy[i]-= cooccurrence[x][y].direction[i].index* MagickLog10(cooccurrence[x][y].direction[i].index); if (image->matte != MagickFalse) channel_features[OpacityChannel].entropy[i]-= cooccurrence[x][y].direction[i].opacity* MagickLog10(cooccurrence[x][y].direction[i].opacity); /* Information Measures of Correlation. */ density_x[x].direction[i].red+=cooccurrence[x][y].direction[i].red; density_x[x].direction[i].green+=cooccurrence[x][y].direction[i].green; density_x[x].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_x[x].direction[i].index+= cooccurrence[x][y].direction[i].index; if (image->matte != MagickFalse) density_x[x].direction[i].opacity+= cooccurrence[x][y].direction[i].opacity; density_y[y].direction[i].red+=cooccurrence[x][y].direction[i].red; density_y[y].direction[i].green+=cooccurrence[x][y].direction[i].green; density_y[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_y[y].direction[i].index+= cooccurrence[x][y].direction[i].index; if (image->matte != MagickFalse) density_y[y].direction[i].opacity+= cooccurrence[x][y].direction[i].opacity; } mean.direction[i].red+=y*sum[y].direction[i].red; sum_squares.direction[i].red+=y*y*sum[y].direction[i].red; mean.direction[i].green+=y*sum[y].direction[i].green; sum_squares.direction[i].green+=y*y*sum[y].direction[i].green; mean.direction[i].blue+=y*sum[y].direction[i].blue; sum_squares.direction[i].blue+=y*y*sum[y].direction[i].blue; if (image->colorspace == CMYKColorspace) { mean.direction[i].index+=y*sum[y].direction[i].index; sum_squares.direction[i].index+=y*y*sum[y].direction[i].index; } if (image->matte != MagickFalse) { mean.direction[i].opacity+=y*sum[y].direction[i].opacity; sum_squares.direction[i].opacity+=y*y*sum[y].direction[i].opacity; } } /* Correlation: measure of linear-dependencies in the image. 
*/ channel_features[RedChannel].correlation[i]= (correlation.direction[i].red-mean.direction[i].red* mean.direction[i].red)/(sqrt(sum_squares.direction[i].red- (mean.direction[i].red*mean.direction[i].red))*sqrt( sum_squares.direction[i].red-(mean.direction[i].red* mean.direction[i].red))); channel_features[GreenChannel].correlation[i]= (correlation.direction[i].green-mean.direction[i].green* mean.direction[i].green)/(sqrt(sum_squares.direction[i].green- (mean.direction[i].green*mean.direction[i].green))*sqrt( sum_squares.direction[i].green-(mean.direction[i].green* mean.direction[i].green))); channel_features[BlueChannel].correlation[i]= (correlation.direction[i].blue-mean.direction[i].blue* mean.direction[i].blue)/(sqrt(sum_squares.direction[i].blue- (mean.direction[i].blue*mean.direction[i].blue))*sqrt( sum_squares.direction[i].blue-(mean.direction[i].blue* mean.direction[i].blue))); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].correlation[i]= (correlation.direction[i].index-mean.direction[i].index* mean.direction[i].index)/(sqrt(sum_squares.direction[i].index- (mean.direction[i].index*mean.direction[i].index))*sqrt( sum_squares.direction[i].index-(mean.direction[i].index* mean.direction[i].index))); if (image->matte != MagickFalse) channel_features[OpacityChannel].correlation[i]= (correlation.direction[i].opacity-mean.direction[i].opacity* mean.direction[i].opacity)/(sqrt(sum_squares.direction[i].opacity- (mean.direction[i].opacity*mean.direction[i].opacity))*sqrt( sum_squares.direction[i].opacity-(mean.direction[i].opacity* mean.direction[i].opacity))); } /* Compute more texture features. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { register ssize_t x; for (x=2; x < (ssize_t) (2*number_grays); x++) { /* Sum average. */ channel_features[RedChannel].sum_average[i]+= x*density_xy[x].direction[i].red; channel_features[GreenChannel].sum_average[i]+= x*density_xy[x].direction[i].green; channel_features[BlueChannel].sum_average[i]+= x*density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].sum_average[i]+= x*density_xy[x].direction[i].index; if (image->matte != MagickFalse) channel_features[OpacityChannel].sum_average[i]+= x*density_xy[x].direction[i].opacity; /* Sum entropy. */ channel_features[RedChannel].sum_entropy[i]-= density_xy[x].direction[i].red* MagickLog10(density_xy[x].direction[i].red); channel_features[GreenChannel].sum_entropy[i]-= density_xy[x].direction[i].green* MagickLog10(density_xy[x].direction[i].green); channel_features[BlueChannel].sum_entropy[i]-= density_xy[x].direction[i].blue* MagickLog10(density_xy[x].direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].sum_entropy[i]-= density_xy[x].direction[i].index* MagickLog10(density_xy[x].direction[i].index); if (image->matte != MagickFalse) channel_features[OpacityChannel].sum_entropy[i]-= density_xy[x].direction[i].opacity* MagickLog10(density_xy[x].direction[i].opacity); /* Sum variance. 
*/ channel_features[RedChannel].sum_variance[i]+= (x-channel_features[RedChannel].sum_entropy[i])* (x-channel_features[RedChannel].sum_entropy[i])* density_xy[x].direction[i].red; channel_features[GreenChannel].sum_variance[i]+= (x-channel_features[GreenChannel].sum_entropy[i])* (x-channel_features[GreenChannel].sum_entropy[i])* density_xy[x].direction[i].green; channel_features[BlueChannel].sum_variance[i]+= (x-channel_features[BlueChannel].sum_entropy[i])* (x-channel_features[BlueChannel].sum_entropy[i])* density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].sum_variance[i]+= (x-channel_features[IndexChannel].sum_entropy[i])* (x-channel_features[IndexChannel].sum_entropy[i])* density_xy[x].direction[i].index; if (image->matte != MagickFalse) channel_features[OpacityChannel].sum_variance[i]+= (x-channel_features[OpacityChannel].sum_entropy[i])* (x-channel_features[OpacityChannel].sum_entropy[i])* density_xy[x].direction[i].opacity; } } /* Compute more texture features. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { register ssize_t y; for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Sum of Squares: Variance */ variance.direction[i].red+=(y-mean.direction[i].red+1)* (y-mean.direction[i].red+1)*cooccurrence[x][y].direction[i].red; variance.direction[i].green+=(y-mean.direction[i].green+1)* (y-mean.direction[i].green+1)*cooccurrence[x][y].direction[i].green; variance.direction[i].blue+=(y-mean.direction[i].blue+1)* (y-mean.direction[i].blue+1)*cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) variance.direction[i].index+=(y-mean.direction[i].index+1)* (y-mean.direction[i].index+1)*cooccurrence[x][y].direction[i].index; if (image->matte != MagickFalse) variance.direction[i].opacity+=(y-mean.direction[i].opacity+1)* (y-mean.direction[i].opacity+1)* cooccurrence[x][y].direction[i].opacity; /* Sum average / Difference Variance. */ density_xy[MagickAbsoluteValue(y-x)].direction[i].red+= cooccurrence[x][y].direction[i].red; density_xy[MagickAbsoluteValue(y-x)].direction[i].green+= cooccurrence[x][y].direction[i].green; density_xy[MagickAbsoluteValue(y-x)].direction[i].blue+= cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_xy[MagickAbsoluteValue(y-x)].direction[i].index+= cooccurrence[x][y].direction[i].index; if (image->matte != MagickFalse) density_xy[MagickAbsoluteValue(y-x)].direction[i].opacity+= cooccurrence[x][y].direction[i].opacity; /* Information Measures of Correlation. 
*/ entropy_xy.direction[i].red-=cooccurrence[x][y].direction[i].red* MagickLog10(cooccurrence[x][y].direction[i].red); entropy_xy.direction[i].green-=cooccurrence[x][y].direction[i].green* MagickLog10(cooccurrence[x][y].direction[i].green); entropy_xy.direction[i].blue-=cooccurrence[x][y].direction[i].blue* MagickLog10(cooccurrence[x][y].direction[i].blue); if (image->colorspace == CMYKColorspace) entropy_xy.direction[i].index-=cooccurrence[x][y].direction[i].index* MagickLog10(cooccurrence[x][y].direction[i].index); if (image->matte != MagickFalse) entropy_xy.direction[i].opacity-= cooccurrence[x][y].direction[i].opacity*MagickLog10( cooccurrence[x][y].direction[i].opacity); entropy_xy1.direction[i].red-=(cooccurrence[x][y].direction[i].red* MagickLog10(density_x[x].direction[i].red* density_y[y].direction[i].red)); entropy_xy1.direction[i].green-=(cooccurrence[x][y].direction[i].green* MagickLog10(density_x[x].direction[i].green* density_y[y].direction[i].green)); entropy_xy1.direction[i].blue-=(cooccurrence[x][y].direction[i].blue* MagickLog10(density_x[x].direction[i].blue* density_y[y].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_xy1.direction[i].index-=( cooccurrence[x][y].direction[i].index*MagickLog10( density_x[x].direction[i].index*density_y[y].direction[i].index)); if (image->matte != MagickFalse) entropy_xy1.direction[i].opacity-=( cooccurrence[x][y].direction[i].opacity*MagickLog10( density_x[x].direction[i].opacity* density_y[y].direction[i].opacity)); entropy_xy2.direction[i].red-=(density_x[x].direction[i].red* density_y[y].direction[i].red*MagickLog10( density_x[x].direction[i].red*density_y[y].direction[i].red)); entropy_xy2.direction[i].green-=(density_x[x].direction[i].green* density_y[y].direction[i].green*MagickLog10( density_x[x].direction[i].green*density_y[y].direction[i].green)); entropy_xy2.direction[i].blue-=(density_x[x].direction[i].blue* density_y[y].direction[i].blue*MagickLog10( density_x[x].direction[i].blue*density_y[y].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_xy2.direction[i].index-=(density_x[x].direction[i].index* density_y[y].direction[i].index*MagickLog10( density_x[x].direction[i].index*density_y[y].direction[i].index)); if (image->matte != MagickFalse) entropy_xy2.direction[i].opacity-=(density_x[x].direction[i].opacity* density_y[y].direction[i].opacity*MagickLog10( density_x[x].direction[i].opacity* density_y[y].direction[i].opacity)); } } channel_features[RedChannel].variance_sum_of_squares[i]= variance.direction[i].red; channel_features[GreenChannel].variance_sum_of_squares[i]= variance.direction[i].green; channel_features[BlueChannel].variance_sum_of_squares[i]= variance.direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[RedChannel].variance_sum_of_squares[i]= variance.direction[i].index; if (image->matte != MagickFalse) channel_features[RedChannel].variance_sum_of_squares[i]= variance.direction[i].opacity; } /* Compute more texture features. */ (void) memset(&variance,0,sizeof(variance)); (void) memset(&sum_squares,0,sizeof(sum_squares)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Difference variance. 
*/ variance.direction[i].red+=density_xy[x].direction[i].red; variance.direction[i].green+=density_xy[x].direction[i].green; variance.direction[i].blue+=density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) variance.direction[i].index+=density_xy[x].direction[i].index; if (image->matte != MagickFalse) variance.direction[i].opacity+=density_xy[x].direction[i].opacity; sum_squares.direction[i].red+=density_xy[x].direction[i].red* density_xy[x].direction[i].red; sum_squares.direction[i].green+=density_xy[x].direction[i].green* density_xy[x].direction[i].green; sum_squares.direction[i].blue+=density_xy[x].direction[i].blue* density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) sum_squares.direction[i].index+=density_xy[x].direction[i].index* density_xy[x].direction[i].index; if (image->matte != MagickFalse) sum_squares.direction[i].opacity+=density_xy[x].direction[i].opacity* density_xy[x].direction[i].opacity; /* Difference entropy. */ channel_features[RedChannel].difference_entropy[i]-= density_xy[x].direction[i].red* MagickLog10(density_xy[x].direction[i].red); channel_features[GreenChannel].difference_entropy[i]-= density_xy[x].direction[i].green* MagickLog10(density_xy[x].direction[i].green); channel_features[BlueChannel].difference_entropy[i]-= density_xy[x].direction[i].blue* MagickLog10(density_xy[x].direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].difference_entropy[i]-= density_xy[x].direction[i].index* MagickLog10(density_xy[x].direction[i].index); if (image->matte != MagickFalse) channel_features[OpacityChannel].difference_entropy[i]-= density_xy[x].direction[i].opacity* MagickLog10(density_xy[x].direction[i].opacity); /* Information Measures of Correlation. */ entropy_x.direction[i].red-=(density_x[x].direction[i].red* MagickLog10(density_x[x].direction[i].red)); entropy_x.direction[i].green-=(density_x[x].direction[i].green* MagickLog10(density_x[x].direction[i].green)); entropy_x.direction[i].blue-=(density_x[x].direction[i].blue* MagickLog10(density_x[x].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_x.direction[i].index-=(density_x[x].direction[i].index* MagickLog10(density_x[x].direction[i].index)); if (image->matte != MagickFalse) entropy_x.direction[i].opacity-=(density_x[x].direction[i].opacity* MagickLog10(density_x[x].direction[i].opacity)); entropy_y.direction[i].red-=(density_y[x].direction[i].red* MagickLog10(density_y[x].direction[i].red)); entropy_y.direction[i].green-=(density_y[x].direction[i].green* MagickLog10(density_y[x].direction[i].green)); entropy_y.direction[i].blue-=(density_y[x].direction[i].blue* MagickLog10(density_y[x].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_y.direction[i].index-=(density_y[x].direction[i].index* MagickLog10(density_y[x].direction[i].index)); if (image->matte != MagickFalse) entropy_y.direction[i].opacity-=(density_y[x].direction[i].opacity* MagickLog10(density_y[x].direction[i].opacity)); } /* Difference variance. 
*/ channel_features[RedChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].red)- (variance.direction[i].red*variance.direction[i].red))/ ((double) number_grays*number_grays*number_grays*number_grays); channel_features[GreenChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].green)- (variance.direction[i].green*variance.direction[i].green))/ ((double) number_grays*number_grays*number_grays*number_grays); channel_features[BlueChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].blue)- (variance.direction[i].blue*variance.direction[i].blue))/ ((double) number_grays*number_grays*number_grays*number_grays); if (image->matte != MagickFalse) channel_features[OpacityChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].opacity)- (variance.direction[i].opacity*variance.direction[i].opacity))/ ((double) number_grays*number_grays*number_grays*number_grays); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].index)- (variance.direction[i].index*variance.direction[i].index))/ ((double) number_grays*number_grays*number_grays*number_grays); /* Information Measures of Correlation. */ channel_features[RedChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].red-entropy_xy1.direction[i].red)/ (entropy_x.direction[i].red > entropy_y.direction[i].red ? entropy_x.direction[i].red : entropy_y.direction[i].red); channel_features[GreenChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].green-entropy_xy1.direction[i].green)/ (entropy_x.direction[i].green > entropy_y.direction[i].green ? entropy_x.direction[i].green : entropy_y.direction[i].green); channel_features[BlueChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].blue-entropy_xy1.direction[i].blue)/ (entropy_x.direction[i].blue > entropy_y.direction[i].blue ? entropy_x.direction[i].blue : entropy_y.direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].index-entropy_xy1.direction[i].index)/ (entropy_x.direction[i].index > entropy_y.direction[i].index ? entropy_x.direction[i].index : entropy_y.direction[i].index); if (image->matte != MagickFalse) channel_features[OpacityChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].opacity-entropy_xy1.direction[i].opacity)/ (entropy_x.direction[i].opacity > entropy_y.direction[i].opacity ? entropy_x.direction[i].opacity : entropy_y.direction[i].opacity); channel_features[RedChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].red- entropy_xy.direction[i].red))))); channel_features[GreenChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].green- entropy_xy.direction[i].green))))); channel_features[BlueChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].blue- entropy_xy.direction[i].blue))))); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].index- entropy_xy.direction[i].index))))); if (image->matte != MagickFalse) channel_features[OpacityChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(entropy_xy2.direction[i].opacity- entropy_xy.direction[i].opacity))))); } /* Compute more texture features. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { register ssize_t z; for (z=0; z < (ssize_t) number_grays; z++) { register ssize_t y; ChannelStatistics pixel; (void) memset(&pixel,0,sizeof(pixel)); for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Contrast: amount of local variations present in an image. */ if (((y-x) == z) || ((x-y) == z)) { pixel.direction[i].red+=cooccurrence[x][y].direction[i].red; pixel.direction[i].green+=cooccurrence[x][y].direction[i].green; pixel.direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) pixel.direction[i].index+=cooccurrence[x][y].direction[i].index; if (image->matte != MagickFalse) pixel.direction[i].opacity+= cooccurrence[x][y].direction[i].opacity; } /* Maximum Correlation Coefficient. */ Q[z][y].direction[i].red+=cooccurrence[z][x].direction[i].red* cooccurrence[y][x].direction[i].red/density_x[z].direction[i].red/ density_y[x].direction[i].red; Q[z][y].direction[i].green+=cooccurrence[z][x].direction[i].green* cooccurrence[y][x].direction[i].green/ density_x[z].direction[i].green/density_y[x].direction[i].red; Q[z][y].direction[i].blue+=cooccurrence[z][x].direction[i].blue* cooccurrence[y][x].direction[i].blue/density_x[z].direction[i].blue/ density_y[x].direction[i].blue; if (image->colorspace == CMYKColorspace) Q[z][y].direction[i].index+=cooccurrence[z][x].direction[i].index* cooccurrence[y][x].direction[i].index/ density_x[z].direction[i].index/density_y[x].direction[i].index; if (image->matte != MagickFalse) Q[z][y].direction[i].opacity+= cooccurrence[z][x].direction[i].opacity* cooccurrence[y][x].direction[i].opacity/ density_x[z].direction[i].opacity/ density_y[x].direction[i].opacity; } } channel_features[RedChannel].contrast[i]+=z*z*pixel.direction[i].red; channel_features[GreenChannel].contrast[i]+=z*z*pixel.direction[i].green; channel_features[BlueChannel].contrast[i]+=z*z*pixel.direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackChannel].contrast[i]+=z*z* pixel.direction[i].index; if (image->matte != MagickFalse) channel_features[OpacityChannel].contrast[i]+=z*z* pixel.direction[i].opacity; } /* Maximum Correlation Coefficient. Future: return second largest eigenvalue of Q. */ channel_features[RedChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); channel_features[GreenChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); channel_features[BlueChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); if (image->colorspace == CMYKColorspace) channel_features[IndexChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); if (image->matte != MagickFalse) channel_features[OpacityChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); } /* Relinquish resources. 
*/ sum=(ChannelStatistics *) RelinquishMagickMemory(sum); for (i=0; i < (ssize_t) number_grays; i++) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); Q=(ChannelStatistics **) RelinquishMagickMemory(Q); density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); return(channel_features); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % H o u g h L i n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Use HoughLineImage() in conjunction with any binary edge extracted image (we % recommand Canny) to identify lines in the image. The algorithm accumulates % counts for every white pixel for every possible orientation (for angles from % 0 to 179 in 1 degree increments) and distance from the center of the image to % the corner (in 1 px increments) and stores the counts in an accumulator matrix % of angle vs distance. The size of the accumulator is 180x(diagonal/2). Next % it searches this space for peaks in counts and converts the locations of the % peaks to slope and intercept in the normal x,y input image space. Use the % slope/intercepts to find the endpoints clipped to the bounds of the image. The % lines are then drawn. The counts are a measure of the length of the lines % % The format of the HoughLineImage method is: % % Image *HoughLineImage(const Image *image,const size_t width, % const size_t height,const size_t threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width, height: find line pairs as local maxima in this neighborhood. % % o threshold: the line count threshold. % % o exception: return any errors or warnings in this structure. % */ static inline double MagickRound(double x) { /* Round the fraction to nearest integer. */ if ((x-floor(x)) < (ceil(x)-x)) return(floor(x)); return(ceil(x)); } static Image *RenderHoughLines(const ImageInfo *image_info,const size_t columns, const size_t rows,ExceptionInfo *exception) { #define BoundingBox "viewbox" DrawInfo *draw_info; Image *image; MagickBooleanType status; /* Open image. */ image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } image->columns=columns; image->rows=rows; draw_info=CloneDrawInfo(image_info,(DrawInfo *) NULL); draw_info->affine.sx=image->x_resolution == 0.0 ? 1.0 : image->x_resolution/ DefaultResolution; draw_info->affine.sy=image->y_resolution == 0.0 ? 1.0 : image->y_resolution/ DefaultResolution; image->columns=(size_t) (draw_info->affine.sx*image->columns); image->rows=(size_t) (draw_info->affine.sy*image->rows); status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) return(DestroyImageList(image)); if (SetImageBackgroundColor(image) == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Render drawing. 
*/ if (GetBlobStreamData(image) == (unsigned char *) NULL) draw_info->primitive=FileToString(image->filename,~0UL,exception); else { draw_info->primitive=(char *) AcquireMagickMemory((size_t) GetBlobSize(image)+1); if (draw_info->primitive != (char *) NULL) { (void) memcpy(draw_info->primitive,GetBlobStreamData(image), (size_t) GetBlobSize(image)); draw_info->primitive[GetBlobSize(image)]='\0'; } } (void) DrawImage(image,draw_info); draw_info=DestroyDrawInfo(draw_info); (void) CloseBlob(image); return(GetFirstImageInList(image)); } MagickExport Image *HoughLineImage(const Image *image,const size_t width, const size_t height,const size_t threshold,ExceptionInfo *exception) { #define HoughLineImageTag "HoughLine/Image" CacheView *image_view; char message[MaxTextExtent], path[MaxTextExtent]; const char *artifact; double hough_height; Image *lines_image = NULL; ImageInfo *image_info; int file; MagickBooleanType status; MagickOffsetType progress; MatrixInfo *accumulator; PointInfo center; register ssize_t y; size_t accumulator_height, accumulator_width, line_count; /* Create the accumulator. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); accumulator_width=180; hough_height=((sqrt(2.0)*(double) (image->rows > image->columns ? image->rows : image->columns))/2.0); accumulator_height=(size_t) (2.0*hough_height); accumulator=AcquireMatrixInfo(accumulator_width,accumulator_height, sizeof(double),exception); if (accumulator == (MatrixInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); if (NullMatrix(accumulator) == MagickFalse) { accumulator=DestroyMatrixInfo(accumulator); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Populate the accumulator. */ status=MagickTrue; progress=0; center.x=(double) image->columns/2.0; center.y=(double) image->rows/2.0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelIntensity(image,p) > (QuantumRange/2.0)) { register ssize_t i; for (i=0; i < 180; i++) { double count, radius; radius=(((double) x-center.x)*cos(DegreesToRadians((double) i)))+ (((double) y-center.y)*sin(DegreesToRadians((double) i))); (void) GetMatrixElement(accumulator,i,(ssize_t) MagickRound(radius+hough_height),&count); count++; (void) SetMatrixElement(accumulator,i,(ssize_t) MagickRound(radius+hough_height),&count); } } p++; } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_HoughLineImage) #endif proceed=SetImageProgress(image,HoughLineImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); if (status == MagickFalse) { accumulator=DestroyMatrixInfo(accumulator); return((Image *) NULL); } /* Generate line segments from accumulator. 
*/ file=AcquireUniqueFileResource(path); if (file == -1) { accumulator=DestroyMatrixInfo(accumulator); return((Image *) NULL); } (void) FormatLocaleString(message,MaxTextExtent, "# Hough line transform: %.20gx%.20g%+.20g\n",(double) width, (double) height,(double) threshold); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; (void) FormatLocaleString(message,MaxTextExtent,"viewbox 0 0 %.20g %.20g\n", (double) image->columns,(double) image->rows); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; (void) FormatLocaleString(message,MaxTextExtent, "# x1,y1 x2,y2 # count angle distance\n"); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; line_count=image->columns > image->rows ? image->columns/4 : image->rows/4; if (threshold != 0) line_count=threshold; for (y=0; y < (ssize_t) accumulator_height; y++) { register ssize_t x; for (x=0; x < (ssize_t) accumulator_width; x++) { double count; (void) GetMatrixElement(accumulator,x,y,&count); if (count >= (double) line_count) { double maxima; SegmentInfo line; ssize_t v; /* Is point a local maxima? */ maxima=count; for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++) { ssize_t u; for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++) { if ((u != 0) || (v !=0)) { (void) GetMatrixElement(accumulator,x+u,y+v,&count); if (count > maxima) { maxima=count; break; } } } if (u < (ssize_t) (width/2)) break; } (void) GetMatrixElement(accumulator,x,y,&count); if (maxima > count) continue; if ((x >= 45) && (x <= 135)) { /* y = (r-x cos(t))/sin(t) */ line.x1=0.0; line.y1=((double) (y-(accumulator_height/2.0))-((line.x1- (image->columns/2.0))*cos(DegreesToRadians((double) x))))/ sin(DegreesToRadians((double) x))+(image->rows/2.0); line.x2=(double) image->columns; line.y2=((double) (y-(accumulator_height/2.0))-((line.x2- (image->columns/2.0))*cos(DegreesToRadians((double) x))))/ sin(DegreesToRadians((double) x))+(image->rows/2.0); } else { /* x = (r-y cos(t))/sin(t) */ line.y1=0.0; line.x1=((double) (y-(accumulator_height/2.0))-((line.y1- (image->rows/2.0))*sin(DegreesToRadians((double) x))))/ cos(DegreesToRadians((double) x))+(image->columns/2.0); line.y2=(double) image->rows; line.x2=((double) (y-(accumulator_height/2.0))-((line.y2- (image->rows/2.0))*sin(DegreesToRadians((double) x))))/ cos(DegreesToRadians((double) x))+(image->columns/2.0); } (void) FormatLocaleString(message,MaxTextExtent, "line %g,%g %g,%g # %g %g %g\n",line.x1,line.y1,line.x2,line.y2, maxima,(double) x,(double) y); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; } } } (void) close(file); /* Render lines to image canvas. 
*/ image_info=AcquireImageInfo(); image_info->background_color=image->background_color; (void) FormatLocaleString(image_info->filename,MaxTextExtent,"%s",path); artifact=GetImageArtifact(image,"background"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"background",artifact); artifact=GetImageArtifact(image,"fill"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"fill",artifact); artifact=GetImageArtifact(image,"stroke"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"stroke",artifact); artifact=GetImageArtifact(image,"strokewidth"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"strokewidth",artifact); lines_image=RenderHoughLines(image_info,image->columns,image->rows,exception); artifact=GetImageArtifact(image,"hough-lines:accumulator"); if ((lines_image != (Image *) NULL) && (IsMagickTrue(artifact) != MagickFalse)) { Image *accumulator_image; accumulator_image=MatrixToImage(accumulator,exception); if (accumulator_image != (Image *) NULL) AppendImageToList(&lines_image,accumulator_image); } /* Free resources. */ accumulator=DestroyMatrixInfo(accumulator); image_info=DestroyImageInfo(image_info); (void) RelinquishUniqueFileResource(path); return(GetFirstImageInList(lines_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M e a n S h i f t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MeanShiftImage() delineate arbitrarily shaped clusters in the image. For % each pixel, it visits all the pixels in the neighborhood specified by % the window centered at the pixel and excludes those that are outside the % radius=(window-1)/2 surrounding the pixel. From those pixels, it finds those % that are within the specified color distance from the current mean, and % computes a new x,y centroid from those coordinates and a new mean. This new % x,y centroid is used as the center for a new window. This process iterates % until it converges and the final mean is replaces the (original window % center) pixel value. It repeats this process for the next pixel, etc., % until it processes all pixels in the image. Results are typically better with % colorspaces other than sRGB. We recommend YIQ, YUV or YCbCr. % % The format of the MeanShiftImage method is: % % Image *MeanShiftImage(const Image *image,const size_t width, % const size_t height,const double color_distance, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width, height: find pixels in this neighborhood. % % o color_distance: the color distance. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *MeanShiftImage(const Image *image,const size_t width, const size_t height,const double color_distance,ExceptionInfo *exception) { #define MaxMeanShiftIterations 100 #define MeanShiftImageTag "MeanShift/Image" CacheView *image_view, *mean_view, *pixel_view; Image *mean_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); mean_image=CloneImage(image,0,0,MagickTrue,exception); if (mean_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(mean_image,DirectClass) == MagickFalse) { InheritException(exception,&mean_image->exception); mean_image=DestroyImage(mean_image); return((Image *) NULL); } status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); pixel_view=AcquireVirtualCacheView(image,exception); mean_view=AcquireAuthenticCacheView(mean_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status,progress) \ magick_number_threads(mean_image,mean_image,mean_image->rows,1) #endif for (y=0; y < (ssize_t) mean_image->rows; y++) { register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(mean_view,0,y,mean_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) mean_image->columns; x++) { MagickPixelPacket mean_pixel, previous_pixel; PointInfo mean_location, previous_location; register ssize_t i; GetMagickPixelPacket(image,&mean_pixel); SetMagickPixelPacket(image,p,indexes+x,&mean_pixel); mean_location.x=(double) x; mean_location.y=(double) y; for (i=0; i < MaxMeanShiftIterations; i++) { double distance, gamma; MagickPixelPacket sum_pixel; PointInfo sum_location; ssize_t count, v; sum_location.x=0.0; sum_location.y=0.0; GetMagickPixelPacket(image,&sum_pixel); previous_location=mean_location; previous_pixel=mean_pixel; count=0; for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++) { ssize_t u; for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++) { if ((v*v+u*u) <= (ssize_t) ((width/2)*(height/2))) { PixelPacket pixel; status=GetOneCacheViewVirtualPixel(pixel_view,(ssize_t) MagickRound(mean_location.x+u),(ssize_t) MagickRound( mean_location.y+v),&pixel,exception); distance=(mean_pixel.red-pixel.red)*(mean_pixel.red-pixel.red)+ (mean_pixel.green-pixel.green)*(mean_pixel.green-pixel.green)+ (mean_pixel.blue-pixel.blue)*(mean_pixel.blue-pixel.blue); if (distance <= (color_distance*color_distance)) { sum_location.x+=mean_location.x+u; sum_location.y+=mean_location.y+v; sum_pixel.red+=pixel.red; sum_pixel.green+=pixel.green; sum_pixel.blue+=pixel.blue; sum_pixel.opacity+=pixel.opacity; count++; } } } } gamma=1.0/count; mean_location.x=gamma*sum_location.x; mean_location.y=gamma*sum_location.y; mean_pixel.red=gamma*sum_pixel.red; mean_pixel.green=gamma*sum_pixel.green; mean_pixel.blue=gamma*sum_pixel.blue; mean_pixel.opacity=gamma*sum_pixel.opacity; 
distance=(mean_location.x-previous_location.x)* (mean_location.x-previous_location.x)+ (mean_location.y-previous_location.y)* (mean_location.y-previous_location.y)+ 255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)* 255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)+ 255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)* 255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)+ 255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue)* 255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue); if (distance <= 3.0) break; } q->red=ClampToQuantum(mean_pixel.red); q->green=ClampToQuantum(mean_pixel.green); q->blue=ClampToQuantum(mean_pixel.blue); q->opacity=ClampToQuantum(mean_pixel.opacity); p++; q++; } if (SyncCacheViewAuthenticPixels(mean_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_MeanShiftImage) #endif proceed=SetImageProgress(image,MeanShiftImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } mean_view=DestroyCacheView(mean_view); pixel_view=DestroyCacheView(pixel_view); image_view=DestroyCacheView(image_view); return(mean_image); }
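The two public entry points defined above, HoughLineImage() and MeanShiftImage(), are easiest to see in context with a small driver. The sketch below is not part of feature.c; it assumes the standard MagickCore-6 setup and teardown calls (MagickCoreGenesis, ReadImage, WriteImage, and friends) and uses placeholder parameter values (a 5x5 local-maxima neighborhood, a line-count threshold of 40, a 9x9 mean-shift window). As the HoughLineImage() description recommends, the input is expected to be a binary edge image, e.g. produced by Canny.

/*
  Usage sketch only -- not part of feature.c.  Reads an already edge-extracted
  image, detects lines with HoughLineImage(), and writes the rendered lines.
  The neighborhood size and threshold are placeholder values.
*/
#include <stdio.h>
#include <magick/MagickCore.h>

int main(int argc, char **argv)
{
  ExceptionInfo
    *exception;

  Image
    *edge_image,
    *lines_image;

  ImageInfo
    *image_info;

  if (argc != 3)
    {
      (void) fprintf(stderr, "usage: %s edge-image lines-image\n", argv[0]);
      return(1);
    }
  MagickCoreGenesis(*argv, MagickTrue);
  exception=AcquireExceptionInfo();
  image_info=CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(image_info->filename, argv[1], MaxTextExtent);
  edge_image=ReadImage(image_info, exception);
  if (edge_image == (Image *) NULL)
    {
      CatchException(exception);
      return(1);
    }
  /*
    Find lines: local maxima are searched in a 5x5 neighborhood of the
    accumulator, and peaks below 40 counts are ignored (placeholder values).
  */
  lines_image=HoughLineImage(edge_image, 5, 5, 40, exception);
  if (lines_image != (Image *) NULL)
    {
      (void) CopyMagickString(lines_image->filename, argv[2], MaxTextExtent);
      (void) WriteImage(image_info, lines_image);
      lines_image=DestroyImage(lines_image);
    }
  /*
    MeanShiftImage() takes the same kind of arguments but is normally applied
    to the original color image, e.g.:
      mean_image=MeanShiftImage(color_image, 9, 9, 0.10*QuantumRange, exception);
  */
  edge_image=DestroyImage(edge_image);
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}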
3d25pt_var.c
/*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#if defined(_OPENMP)
#include <omp.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive.
   */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 16;
  tile_size[3] = 512;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  //
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] =
              coef[0][i][j][k] * A[(t)%2][i ][j ][k ]
            + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ])
            + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ])
            + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1])
            + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ])
            + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ])
            + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2])
            + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ])
            + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ])
            + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3])
            + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ])
            + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ])
            + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4])
            ;
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
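PRINT_RESULTS() comes from print_utils.h, which is not shown in this listing, so the exact metric it reports is not visible here. For a kernel of this shape it is common to report lattice updates per second and GFLOP/s; the sketch below shows that calculation for this particular stencil, which updates the (Nx-8) x (Ny-8) x (Nz-8) interior points Nt times with 13 multiplications and 24 additions per point. The function name and output format are illustrative, not part of the benchmark harness.

/*
  Sketch only: an example of the kind of summary a harness like print_utils.h
  might compute for this kernel.  The flop count (13 multiplies + 24 adds = 37)
  and the radius-4 halo (8 ghost layers per dimension) follow from the loop
  nest above; the function name is hypothetical.
*/
#include <stdio.h>

static void report_stencil_perf(int Nx, int Ny, int Nz, int Nt, double min_tdiff)
{
  double points  = (double)(Nx-8) * (double)(Ny-8) * (double)(Nz-8);
  double updates = points * (double)Nt;   /* lattice updates over all sweeps */
  double flops   = 37.0 * updates;        /* 13 mults + 24 adds per update   */

  printf("size: %dx%dx%d  steps: %d  best time: %f s\n",
         Nx-8, Ny-8, Nz-8, Nt, min_tdiff);
  printf("MLUP/s: %f  GFLOP/s: %f\n",
         updates/min_tdiff/1e6, flops/min_tdiff/1e9);
}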
gbdt.h
/*! * Original work Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Modified work Copyright (c) 2020 Fabio Sigrist. All rights reserved. * Licensed under the Apache License Version 2.0 See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_BOOSTING_GBDT_H_ #define LIGHTGBM_BOOSTING_GBDT_H_ #include <LightGBM/boosting.h> #include <LightGBM/objective_function.h> #include <LightGBM/prediction_early_stop.h> #include <string> #include <algorithm> #ifndef AVOID_NOT_CRAN_COMPLIANT_CALLS #include <cstdio> #endif #include <fstream> #include <map> #include <memory> #include <mutex> #include <unordered_map> #include <utility> #include <vector> #include <LightGBM/json11.hpp> #include "score_updater.hpp" using namespace json11; namespace LightGBM { /*! * \brief GBDT algorithm implementation. including Training, prediction, bagging. */ class GBDT : public GBDTBase { public: /*! * \brief Constructor */ GBDT(); /*! * \brief Destructor */ ~GBDT(); /*! * \brief Initialization logic * \param gbdt_config Config for boosting * \param train_data Training data * \param objective_function Training objective function * \param training_metrics Training metrics */ void Init(const Config* gbdt_config, const Dataset* train_data, const ObjectiveFunction* objective_function, const std::vector<const Metric*>& training_metrics) override; /*! * \brief Merge model from other boosting object. Will insert to the front of current boosting object * \param other */ void MergeFrom(const Boosting* other) override { auto other_gbdt = reinterpret_cast<const GBDT*>(other); // tmp move to other vector auto original_models = std::move(models_); models_ = std::vector<std::unique_ptr<Tree>>(); // push model from other first for (const auto& tree : other_gbdt->models_) { auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get()))); models_.push_back(std::move(new_tree)); } num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; // push model in current object for (const auto& tree : original_models) { auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get()))); models_.push_back(std::move(new_tree)); } num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; } void ShuffleModels(int start_iter, int end_iter) override { int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_; start_iter = std::max(0, start_iter); if (end_iter <= 0) { end_iter = total_iter; } end_iter = std::min(total_iter, end_iter); auto original_models = std::move(models_); std::vector<int> indices(total_iter); for (int i = 0; i < total_iter; ++i) { indices[i] = i; } Random tmp_rand(17); for (int i = start_iter; i < end_iter - 1; ++i) { int j = tmp_rand.NextShort(i + 1, end_iter); std::swap(indices[i], indices[j]); } models_ = std::vector<std::unique_ptr<Tree>>(); for (int i = 0; i < total_iter; ++i) { for (int j = 0; j < num_tree_per_iteration_; ++j) { int tree_idx = indices[i] * num_tree_per_iteration_ + j; auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get()))); models_.push_back(std::move(new_tree)); } } } /*! * \brief Reset the training data * \param train_data New Training data * \param objective_function Training objective function * \param training_metrics Training metrics */ void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function, const std::vector<const Metric*>& training_metrics) override; /*! 
* \brief Reset Boosting Config * \param gbdt_config Config for boosting */ void ResetConfig(const Config* gbdt_config) override; /*! * \brief Adding a validation dataset * \param valid_data Validation dataset * \param valid_metrics Metrics for validation dataset */ void AddValidDataset(const Dataset* valid_data, const std::vector<const Metric*>& valid_metrics) override; /*! * \brief Perform a full training procedure * \param snapshot_freq frequence of snapshot * \param model_output_path path of model file */ void Train(int snapshot_freq, const std::string& model_output_path) override; void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override; /*! * \brief Training logic * \param gradients nullptr for using default objective, otherwise use self-defined boosting * \param hessians nullptr for using default objective, otherwise use self-defined boosting * \return True if cannot train any more */ bool TrainOneIter(const score_t* gradients, const score_t* hessians) override; /*! * \brief Rollback one iteration */ void RollbackOneIter() override; /*! * \brief Get current iteration */ int GetCurrentIteration() const override { return static_cast<int>(models_.size()) / num_tree_per_iteration_; } /*! * \brief Can use early stopping for prediction or not * \return True if cannot use early stopping for prediction */ bool NeedAccuratePrediction() const override { if (objective_function_ == nullptr) { return true; } else { return objective_function_->NeedAccuratePrediction(); } } /*! * \brief Get evaluation result at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \return evaluation result */ std::vector<double> GetEvalAt(int data_idx) const override; /*! * \brief Get current training score * \param out_len length of returned score * \return training score */ const double* GetTrainingScore(int64_t* out_len) override; /*! * \brief Get size of prediction at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \return The size of prediction */ int64_t GetNumPredictAt(int data_idx) const override { CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size())); data_size_t num_data = train_data_->num_data(); if (data_idx > 0) { num_data = valid_score_updater_[data_idx - 1]->num_data(); } return num_data * num_class_; } /*! * \brief Get prediction result at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \param result used to store prediction result, should allocate memory before call this function * \param out_len length of returned score */ void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override; /*! 
* \brief Get number of prediction for one data * \param num_iteration number of used iterations * \param is_pred_leaf True if predicting leaf index * \param is_pred_contrib True if predicting feature contribution * \return number of prediction */ inline int NumPredictOneRow(int num_iteration, bool is_pred_leaf, bool is_pred_contrib) const override { int num_preb_in_one_row = num_class_; if (is_pred_leaf) { int max_iteration = GetCurrentIteration(); if (num_iteration > 0) { num_preb_in_one_row *= static_cast<int>(std::min(max_iteration, num_iteration)); } else { num_preb_in_one_row *= max_iteration; } } else if (is_pred_contrib) { num_preb_in_one_row = num_tree_per_iteration_ * (max_feature_idx_ + 2); // +1 for 0-based indexing, +1 for baseline } return num_preb_in_one_row; } void PredictRaw(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; void PredictRawByMap(const std::unordered_map<int, double>& features, double* output, const PredictionEarlyStopInstance* early_stop) const override; void Predict(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; void PredictByMap(const std::unordered_map<int, double>& features, double* output, const PredictionEarlyStopInstance* early_stop) const override; void PredictLeafIndex(const double* features, double* output) const override; void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override; void PredictContrib(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; /*! * \brief Dump model to json format string * \param start_iteration The model will be saved start from * \param num_iteration Number of iterations that want to dump, -1 means dump all * \return Json format string of model */ std::string DumpModel(int start_iteration, int num_iteration) const override; /*! * \brief Translate model to if-else statement * \param num_iteration Number of iterations that want to translate, -1 means translate all * \return if-else format codes of model */ std::string ModelToIfElse(int num_iteration) const override; /*! * \brief Translate model to if-else statement * \param num_iteration Number of iterations that want to translate, -1 means translate all * \param filename Filename that want to save to * \return is_finish Is training finished or not */ bool SaveModelToIfElse(int num_iteration, const char* filename) const override; /*! * \brief Save model to file * \param start_iteration The model will be saved start from * \param num_iterations Number of model that want to save, -1 means save all * \param filename Filename that want to save to * \return is_finish Is training finished or not */ bool SaveModelToFile(int start_iteration, int num_iterations, const char* filename) const override; /*! * \brief Save model to string * \param start_iteration The model will be saved start from * \param num_iterations Number of model that want to save, -1 means save all * \return Non-empty string if succeeded */ std::string SaveModelToString(int start_iteration, int num_iterations) const override; /*! * \brief Restore from a serialized buffer */ bool LoadModelFromString(const char* buffer, size_t len) override; /*! 
* \brief Calculate feature importances * \param num_iteration Number of model that want to use for feature importance, -1 means use all * \param importance_type: 0 for split, 1 for gain * \return vector of feature_importance */ std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override; /*! * \brief Get max feature index of this model * \return Max feature index of this model */ inline int MaxFeatureIdx() const override { return max_feature_idx_; } /*! * \brief Get feature names of this model * \return Feature names of this model */ inline std::vector<std::string> FeatureNames() const override { return feature_names_; } /*! * \brief Get index of label column * \return index of label column */ inline int LabelIdx() const override { return label_idx_; } /*! * \brief Get number of weak sub-models * \return Number of weak sub-models */ inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); } /*! * \brief Get number of tree per iteration * \return number of tree per iteration */ inline int NumModelPerIteration() const override { return num_tree_per_iteration_; } /*! * \brief Get number of classes * \return Number of classes */ inline int NumberOfClasses() const override { return num_class_; } inline void InitPredict(int num_iteration, bool is_pred_contrib) override { num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; if (num_iteration > 0) { num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_); } if (is_pred_contrib) { #pragma omp parallel for schedule(static) for (int i = 0; i < static_cast<int>(models_.size()); ++i) { models_[i]->RecomputeMaxDepth(); } } } inline double GetLeafValue(int tree_idx, int leaf_idx) const override { CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size()); CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves()); return models_[tree_idx]->LeafOutput(leaf_idx); } inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override { CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size()); CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves()); models_[tree_idx]->SetLeafOutput(leaf_idx, val); } /*! * \brief Get Type name of this boosting object */ const char* SubModelName() const override { return "tree"; } /*! \brief Nesterov schedule */ inline double NesterovSchedule(int iter, int momentum_schedule_version = 0, double nesterov_acc_rate = 0.5, int momentum_offset = 0) const { if (iter < momentum_offset) { return(0.); } else { if (momentum_schedule_version == 0) { return(nesterov_acc_rate); } else if (momentum_schedule_version == 1) { return(1. - (3. / (6. + iter))); } else { return(0.); } } } protected: /*! * \brief Print eval result and check early stopping */ virtual bool EvalAndCheckEarlyStopping(); /*! * \brief reset config for bagging */ void ResetBaggingConfig(const Config* config, bool is_change_dataset); /*! * \brief Implement bagging logic * \param iter Current interation */ virtual void Bagging(int iter); /*! * \brief Helper function for bagging, used for multi-threading optimization * \param start start indice of bagging * \param cnt count * \param buffer output buffer * \return count of left size */ data_size_t BaggingHelper(Random* cur_rand, data_size_t start, data_size_t cnt, data_size_t* buffer); /*! 
* \brief Helper function for bagging, used for multi-threading optimization, balanced sampling * \param start start indice of bagging * \param cnt count * \param buffer output buffer * \return count of left size */ data_size_t BalancedBaggingHelper(Random* cur_rand, data_size_t start, data_size_t cnt, data_size_t* buffer); /*! * \brief calculate the object function */ virtual void Boosting(); /*! * \brief updating score after tree was trained * \param tree Trained tree of this iteration * \param cur_tree_id Current tree for multiclass training */ virtual void UpdateScore(const Tree* tree, const int cur_tree_id); /*! * \brief eval results for one metric */ virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const; /*! * \brief Print metric result of current iteration * \param iter Current interation * \return best_msg if met early_stopping */ std::string OutputMetric(int iter); double BoostFromAverage(int class_id, bool update_scorer); /*! \brief current iteration */ int iter_; /*! \brief Pointer to training data */ const Dataset* train_data_; /*! \brief Config of gbdt */ std::unique_ptr<Config> config_; /*! \brief Tree learner, will use this class to learn trees */ std::unique_ptr<TreeLearner> tree_learner_; /*! \brief Objective function */ const ObjectiveFunction* objective_function_; /*! \brief Store and update training data's score */ std::unique_ptr<ScoreUpdater> train_score_updater_; /*! \brief Metrics for training data */ std::vector<const Metric*> training_metrics_; /*! \brief Store and update validation data's scores */ std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_; /*! \brief Metric for validation data */ std::vector<std::vector<const Metric*>> valid_metrics_; /*! \brief Number of rounds for early stopping */ int early_stopping_round_; /*! \brief Only use first metric for early stopping */ bool es_first_metric_only_; /*! \brief Best iteration(s) for early stopping */ std::vector<std::vector<int>> best_iter_; /*! \brief Best score(s) for early stopping */ std::vector<std::vector<double>> best_score_; /*! \brief output message of best iteration */ std::vector<std::vector<std::string>> best_msg_; /*! \brief Trained models(trees) */ std::vector<std::unique_ptr<Tree>> models_; /*! \brief Max feature index of training data*/ int max_feature_idx_; /*! \brief First order derivative of training data */ std::vector<score_t> gradients_; /*! \brief Secend order derivative of training data */ std::vector<score_t> hessians_; /*! \brief Store the indices of in-bag data */ std::vector<data_size_t> bag_data_indices_; /*! \brief Number of in-bag data */ data_size_t bag_data_cnt_; /*! \brief Store the indices of in-bag data */ std::vector<data_size_t> tmp_indices_; /*! \brief Number of training data */ data_size_t num_data_; /*! \brief Number of trees per iterations */ int num_tree_per_iteration_; /*! \brief Number of class */ int num_class_; /*! \brief Index of label column */ data_size_t label_idx_; /*! \brief number of used model */ int num_iteration_for_pred_; /*! \brief Shrinkage rate for one iteration */ double shrinkage_rate_; /*! \brief Number of loaded initial models */ int num_init_iteration_; /*! \brief Feature names */ std::vector<std::string> feature_names_; std::vector<std::string> feature_infos_; /*! \brief number of threads */ int num_threads_; /*! \brief Buffer for multi-threading bagging */ std::vector<data_size_t> offsets_buf_; /*! \brief Buffer for multi-threading bagging */ std::vector<data_size_t> left_cnts_buf_; /*! 
\brief Buffer for multi-threading bagging */ std::vector<data_size_t> right_cnts_buf_; /*! \brief Buffer for multi-threading bagging */ std::vector<data_size_t> left_write_pos_buf_; /*! \brief Buffer for multi-threading bagging */ std::vector<data_size_t> right_write_pos_buf_; std::unique_ptr<Dataset> tmp_subset_; bool is_use_subset_; std::vector<bool> class_need_train_; bool is_constant_hessian_; std::unique_ptr<ObjectiveFunction> loaded_objective_; bool average_output_; bool need_re_bagging_; bool balanced_bagging_; std::string loaded_parameter_; std::vector<int8_t> monotone_constraints_; /*! \brief If true, Nesterov acceleration is used for boosting */ bool use_nesterov_acc_ = false; /*! \brief Acceleration rate for momentum step in Nesterov step */ double nesterov_acc_rate_ = 0.5; /*! \brief Choose the acceleration rate schedule */ int momentum_schedule_version_ = 0; /*! \brief Acceleration rate is zero before the offset number */ int momentum_offset_ = 0; /*! \brief If true, a Newton update step is done for the tree leaves after the gradient step (only relevant for GPBoost algorithm, i.e. if objective_function_->HasGPModel()==true) */ bool leaves_newton_update_ = false; Json forced_splits_json_; }; } // namespace LightGBM #endif // LightGBM_BOOSTING_GBDT_H_
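The inline NesterovSchedule member above encodes two momentum schedules: a constant rate, and the increasing rate 1 - 3/(6 + iter). The following standalone C sketch (not part of the LightGBM sources; the function and variable names are illustrative) mirrors that logic so the two schedule versions can be printed side by side.

#include <stdio.h>

/* Mirror of the NesterovSchedule logic above: returns the momentum rate for
 * a given iteration. Before `offset` iterations the rate is zero; version 0
 * uses a constant rate, version 1 uses the increasing schedule 1 - 3/(6+iter). */
static double nesterov_schedule(int iter, int schedule_version,
                                double acc_rate, int offset) {
  if (iter < offset) return 0.0;                /* no acceleration yet */
  if (schedule_version == 0) return acc_rate;   /* constant rate */
  if (schedule_version == 1) return 1.0 - (3.0 / (6.0 + iter)); /* increasing rate */
  return 0.0;                                   /* unknown version: no acceleration */
}

int main(void) {
  /* compare the two schedules for the first few boosting iterations */
  for (int iter = 0; iter < 10; ++iter) {
    printf("iter %d: constant = %.3f, increasing = %.3f\n",
           iter,
           nesterov_schedule(iter, 0, 0.5, 2),
           nesterov_schedule(iter, 1, 0.5, 2));
  }
  return 0;
}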
GB_binop__max_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__max_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__max_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__max_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__max_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__max_uint64) // A*D function (colscale): GB (_AxD__max_uint64) // D*A function (rowscale): GB (_DxB__max_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__max_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__max_uint64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_uint64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_uint64) // C=scalar+B GB (_bind1st__max_uint64) // C=scalar+B' GB (_bind1st_tran__max_uint64) // C=A+scalar GB (_bind2nd__max_uint64) // C=A'+scalar GB (_bind2nd_tran__max_uint64) // C type: uint64_t // A type: uint64_t // A pattern? 0 // B type: uint64_t // B pattern? 0 // BinaryOp: cij = GB_IMAX (aij, bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IMAX (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MAX || GxB_NO_UINT64 || GxB_NO_MAX_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__max_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__max_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__max_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__max_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__max_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__max_uint64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__max_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint64_t alpha_scalar ; uint64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint64_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__max_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__max_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__max_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__max_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__max_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IMAX (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__max_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IMAX (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMAX (x, aij) ; \ } GrB_Info GB (_bind1st_tran__max_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict 
A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMAX (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__max_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
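The _bind2nd kernel above is, at its core, a parallel map of cij = max(aij, y) over the entries of A, where GB_IMAX denotes the usual integer max. Below is a minimal standalone sketch of that pattern for a dense uint64_t array, without the GraphBLAS matrix machinery or the Ab bitmap test; all names here are illustrative, not GraphBLAS APIs.

#include <stdint.h>
#include <stdio.h>

/* z = max(x, y) for uint64_t, the scalar operator used by the kernels above */
static inline uint64_t imax_u64(uint64_t x, uint64_t y) { return (x > y) ? x : y; }

/* Cx [p] = max (Ax [p], y): scalar bound as the second operand (bind2nd) */
static void bind2nd_max_u64(uint64_t *Cx, const uint64_t *Ax, uint64_t y,
                            int64_t anz, int nthreads) {
  int64_t p;
  #pragma omp parallel for num_threads(nthreads) schedule(static)
  for (p = 0; p < anz; p++) {
    Cx[p] = imax_u64(Ax[p], y);
  }
}

int main(void) {
  uint64_t A[] = {3, 9, 1, 7}, C[4];
  bind2nd_max_u64(C, A, 5, 4, 2);
  for (int i = 0; i < 4; i++) printf("%llu ", (unsigned long long) C[i]); /* 5 9 5 7 */
  printf("\n");
  return 0;
}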
omp_simd_linear2.c
//Various examples of using simd directives
void foo (int n, double *a, double* b)
{
  for (int i=0; i<n; i++)
    a[i]=b[i];
}

void foo2 (int n, double *a, double* b)
{
  for (int i=0; i<n; i++)
    a[i]=b[i];
}

void foo3 (int n, double *a, double* b)
{
  int j=0;
  for (int i=0; i<n; i++,j++)
  {
    a[i]=b[i]+j;
  }
}

void foo32 (int n, double *a, double* b)
{
  int j=0, k=0;
  for (int i=0; i<n; i++,j++,k++)
  {
    a[i]=b[i]+j+k;
  }
}

void foo33 (int n, double *a, double* b)
{
  int j=0, k=0;
#pragma omp simd linear(j,k:1)
  for (int i=0; i<n; i++,j++,k++)
  {
    a[i]=b[i]+j+k;
  }
}

void fooAligned (int n, double *a, double* b)
{
  int j=0, k=0;
  for (int i=0; i<n; i++,j++,k++)
  {
    a[i]=b[i]+j+k;
  }
}

void fooAligned2 (int n, double *a, double* b)
{
  int j=0, k=0;
  for (int i=0; i<n; i++,j++,k++)
  {
    a[i]=b[i]+j+k;
  }
}

double work( double *a, double *b, int n )
{
  int i;
  double tmp, sum;
  sum = 0.0;
  for (i = 0; i < n; i++) {
    tmp = a[i] + b[i];
    sum += tmp;
  }
  return sum;
}

#define N 45
int a[N], b[N], c[N];

void foo4(int i, double* P)
{
  int j;
  for (i = 0; i < 999; ++i) {
    j = P[i];
  }
}

void work2( double **a, double **b, double **c, int n )
{
  int i, j;
  double tmp;
  for (i = 0; i < n; i++) {
    for (j = 0; j < n; j++) {
      tmp = a[i][j] + b[i][j];
      c[i][j] = tmp;
    }
  }
}

void work3( double **a, double **b, double **c, int n )
{
  int i, j;
  double tmp;
  for (i = 0; i < n; i++) {
    for (j = 0; j < n; j++) {
      tmp = a[i][j] + b[i][j];
      c[i][j] = tmp;
    }
  }
}

// declare simd can show up several times!
float bar(int * p)
{
  *p = *p +10;
  return *p;
}

// declare simd can show up several times!
float bar2(int * p)
{
  *p = *p +10;
  return *p;
}
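For comparison with foo33 above, the short standalone variant below (illustrative only, not part of the file above) combines the linear clause with an aligned clause; it assumes the arrays are allocated with 64-byte alignment, e.g. via aligned_alloc.

#include <stdlib.h>

/* a and b are assumed to be 64-byte aligned; j advances by 1 per iteration */
void foo_aligned_linear(int n, double *a, double *b)
{
  int j = 0;
#pragma omp simd aligned(a,b:64) linear(j:1)
  for (int i = 0; i < n; i++, j++)
  {
    a[i] = b[i] + j;
  }
}

int main(void)
{
  int n = 64;
  /* 64 * sizeof(double) = 512 bytes, a multiple of the 64-byte alignment */
  double *a = aligned_alloc(64, n * sizeof(double));
  double *b = aligned_alloc(64, n * sizeof(double));
  if (!a || !b) return 1;
  for (int i = 0; i < n; i++) b[i] = 1.0;
  foo_aligned_linear(n, a, b);
  free(a);
  free(b);
  return 0;
}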
GB_binop__gt_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__gt_uint64) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__gt_uint64) // A.*B function (eWiseMult): GB (_AemultB_03__gt_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_uint64) // A*D function (colscale): GB (_AxD__gt_uint64) // D*A function (rowscale): GB (_DxB__gt_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__gt_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__gt_uint64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_uint64) // C=scalar+B GB (_bind1st__gt_uint64) // C=scalar+B' GB (_bind1st_tran__gt_uint64) // C=A+scalar GB (_bind2nd__gt_uint64) // C=A'+scalar GB (_bind2nd_tran__gt_uint64) // C type: bool // A type: uint64_t // B,b type: uint64_t // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x > y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GT || GxB_NO_UINT64 || GxB_NO_GT_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__gt_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__gt_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__gt_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__gt_uint64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__gt_uint64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__gt_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) 
; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__gt_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__gt_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__gt_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__gt_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__gt_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = Bx [p] ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__gt_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = Ax [p] ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = Ax [pA] ; \ Cx [pC] = (x > aij) ; \ } GrB_Info GB (_bind1st_tran__gt_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = Ax [pA] ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB (_bind2nd_tran__gt_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
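Unlike the MAX kernels earlier in this collection, the GT operator returns a bool that differs from its uint64_t inputs, so the output array and the input array have different element types. Below is a standalone sketch of the bind1st pattern for that case (illustrative names only, no GraphBLAS data structures).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Cx [p] = (x > Bx [p]): scalar bound as the first operand, bool output */
static void bind1st_gt_u64(bool *Cx, uint64_t x, const uint64_t *Bx,
                           int64_t bnz, int nthreads) {
  int64_t p;
  #pragma omp parallel for num_threads(nthreads) schedule(static)
  for (p = 0; p < bnz; p++) {
    Cx[p] = (x > Bx[p]);
  }
}

int main(void) {
  uint64_t B[] = {2, 8, 5, 11};
  bool C[4];
  bind1st_gt_u64(C, 6, B, 4, 2);
  for (int i = 0; i < 4; i++) printf("%d ", (int) C[i]); /* 1 0 1 0 */
  printf("\n");
  return 0;
}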
par_strength.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * *****************************************************************************/ /* following should be in a header file */ #include "_hypre_parcsr_ls.h" #include "hypre_hopscotch_hash.h" /*==========================================================================*/ /*==========================================================================*/ /** Generates strength matrix Notes: \begin{itemize} \item The underlying matrix storage scheme is a hypre_ParCSR matrix. \item The routine returns the following: \begin{itemize} \item S - a ParCSR matrix representing the "strength matrix". This is used in the coarsening and interpolation routines. \end{itemize} \item The graph of the "strength matrix" for A is a subgraph of the graph of A, but requires nonsymmetric storage even if A is symmetric. This is because of the directional nature of the "strengh of dependence" notion (see below). Since we are using nonsymmetric storage for A right now, this is not a problem. If we ever add the ability to store A symmetrically, then we could store the strength graph as floats instead of doubles to save space. \item This routine currently "compresses" the strength matrix. We should consider the possibility of defining this matrix to have the same "nonzero structure" as A. To do this, we could use the same A\_i and A\_j arrays, and would need only define the S\_data array. There are several pros and cons to discuss. \end{itemize} Terminology: \begin{itemize} \item Ruge's terminology: A point is "strongly connected to" $j$, or "strongly depends on" $j$, if $-a_ij >= \theta max_{l != j} \{-a_il\}$. \item Here, we retain some of this terminology, but with a more generalized notion of "strength". We also retain the "natural" graph notation for representing the directed graph of a matrix. That is, the nonzero entry $a_ij$ is represented as: i --> j. In the strength matrix, S, the entry $s_ij$ is also graphically denoted as above, and means both of the following: \begin{itemize} \item $i$ "depends on" $j$ with "strength" $s_ij$ \item $j$ "influences" $i$ with "strength" $s_ij$ \end{itemize} \end{itemize} {\bf Input files:} _hypre_parcsr_ls.h @return Error code. 
@param A [IN] coefficient matrix @param strength_threshold [IN] threshold parameter used to define strength @param max_row_sum [IN] parameter used to modify definition of strength for diagonal dominant matrices @param S_ptr [OUT] strength matrix @see */ /*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCreateSHost(hypre_ParCSRMatrix *A, HYPRE_Real strength_threshold, HYPRE_Real max_row_sum, HYPRE_Int num_functions, HYPRE_Int *dof_func, hypre_ParCSRMatrix **S_ptr) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_CREATES] -= hypre_MPI_Wtime(); #endif MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = NULL; HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_Int num_nonzeros_diag; HYPRE_Int num_nonzeros_offd = 0; HYPRE_Int num_cols_offd = 0; hypre_ParCSRMatrix *S; hypre_CSRMatrix *S_diag; HYPRE_Int *S_diag_i; HYPRE_Int *S_diag_j; /* HYPRE_Real *S_diag_data; */ hypre_CSRMatrix *S_offd; HYPRE_Int *S_offd_i = NULL; HYPRE_Int *S_offd_j = NULL; /* HYPRE_Real *S_offd_data; */ HYPRE_Real diag, row_scale, row_sum; HYPRE_Int i, jA, jS; HYPRE_Int ierr = 0; HYPRE_Int *dof_func_offd; HYPRE_Int num_sends; HYPRE_Int *int_buf_data; HYPRE_Int index, start, j; HYPRE_Int *prefix_sum_workspace; /*-------------------------------------------------------------- * Compute a ParCSR strength matrix, S. * * For now, the "strength" of dependence/influence is defined in * the following way: i depends on j if * aij > hypre_max (k != i) aik, aii < 0 * or * aij < hypre_min (k != i) aik, aii >= 0 * Then S_ij = 1, else S_ij = 0. * * NOTE: the entries are negative initially, corresponding * to "unaccounted-for" dependence. 
*----------------------------------------------------------------*/ num_nonzeros_diag = A_diag_i[num_variables]; num_cols_offd = hypre_CSRMatrixNumCols(A_offd); A_offd_i = hypre_CSRMatrixI(A_offd); num_nonzeros_offd = A_offd_i[num_variables]; S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars, row_starts, row_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); /* row_starts is owned by A, col_starts = row_starts */ hypre_ParCSRMatrixSetRowStartsOwner(S,0); S_diag = hypre_ParCSRMatrixDiag(S); hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST); hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST); S_offd = hypre_ParCSRMatrixOffd(S); hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST); S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_temp_diag_j = hypre_CSRMatrixJ(S_diag); S_offd_i = hypre_CSRMatrixI(S_offd); S_diag_j = hypre_TAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST); HYPRE_Int *S_temp_offd_j = NULL; dof_func_offd = NULL; if (num_cols_offd) { A_offd_data = hypre_CSRMatrixData(A_offd); hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST); S_temp_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_BigInt *col_map_offd_S = hypre_TAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixColMapOffd(S) = col_map_offd_S; if (num_functions > 1) { dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } S_offd_j = hypre_TAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_cols_offd; i++) { col_map_offd_S[i] = col_map_offd_A[i]; } } /*------------------------------------------------------------------- * Get the dof_func data for the off-processor columns *-------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (num_functions > 1) { int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); } /*HYPRE_Int prefix_sum_workspace[2*(hypre_NumThreads() + 1)];*/ prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2*(hypre_NumThreads() + 1), HYPRE_MEMORY_HOST); /* give S same nonzero structure as A */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,diag,row_scale,row_sum,jA,jS) #endif { HYPRE_Int start, stop; hypre_GetSimpleThreadPartition(&start, &stop, num_variables); HYPRE_Int jS_diag = 0, jS_offd = 0; for (i = start; i < stop; i++) { S_diag_i[i] = jS_diag; if (num_cols_offd) { S_offd_i[i] = jS_offd; } diag = A_diag_data[A_diag_i[i]]; /* compute scaling factor and row sum */ row_scale = 0.0; row_sum = diag; if (num_functions > 1) { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (dof_func[i] == dof_func[A_diag_j[jA]]) { row_scale = hypre_max(row_scale, 
A_diag_data[jA]); row_sum += A_diag_data[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (dof_func[i] == dof_func_offd[A_offd_j[jA]]) { row_scale = hypre_max(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (dof_func[i] == dof_func[A_diag_j[jA]]) { row_scale = hypre_min(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (dof_func[i] == dof_func_offd[A_offd_j[jA]]) { row_scale = hypre_min(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } /* diag >= 0 */ } /* num_functions > 1 */ else { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { row_scale = hypre_max(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { row_scale = hypre_max(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { row_scale = hypre_min(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { row_scale = hypre_min(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } /* diag >= 0*/ } /* num_functions <= 1 */ jS_diag += A_diag_i[i + 1] - A_diag_i[i] - 1; jS_offd += A_offd_i[i + 1] - A_offd_i[i]; /* compute row entries of S */ S_temp_diag_j[A_diag_i[i]] = -1; if ((fabs(row_sum) > fabs(diag)*max_row_sum) && (max_row_sum < 1.0)) { /* make all dependencies weak */ for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { S_temp_diag_j[jA] = -1; } jS_diag -= A_diag_i[i + 1] - (A_diag_i[i] + 1); for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { S_temp_offd_j[jA] = -1; } jS_offd -= A_offd_i[i + 1] - A_offd_i[i]; } else { if (num_functions > 1) { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (A_diag_data[jA] <= strength_threshold * row_scale || dof_func[i] != dof_func[A_diag_j[jA]]) { S_temp_diag_j[jA] = -1; --jS_diag; } else { S_temp_diag_j[jA] = A_diag_j[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (A_offd_data[jA] <= strength_threshold * row_scale || dof_func[i] != dof_func_offd[A_offd_j[jA]]) { S_temp_offd_j[jA] = -1; --jS_offd; } else { S_temp_offd_j[jA] = A_offd_j[jA]; } } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (A_diag_data[jA] >= strength_threshold * row_scale || dof_func[i] != dof_func[A_diag_j[jA]]) { S_temp_diag_j[jA] = -1; --jS_diag; } else { S_temp_diag_j[jA] = A_diag_j[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (A_offd_data[jA] >= strength_threshold * row_scale || dof_func[i] != dof_func_offd[A_offd_j[jA]]) { S_temp_offd_j[jA] = -1; --jS_offd; } else { S_temp_offd_j[jA] = A_offd_j[jA]; } } } /* diag >= 0 */ } /* num_functions > 1 */ else { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (A_diag_data[jA] <= strength_threshold * row_scale) { S_temp_diag_j[jA] = -1; --jS_diag; } else { S_temp_diag_j[jA] = A_diag_j[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (A_offd_data[jA] <= strength_threshold * row_scale) { S_temp_offd_j[jA] = -1; --jS_offd; } else { S_temp_offd_j[jA] = A_offd_j[jA]; } } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (A_diag_data[jA] >= strength_threshold * row_scale) { S_temp_diag_j[jA] = -1; --jS_diag; } else { S_temp_diag_j[jA] = A_diag_j[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (A_offd_data[jA] >= strength_threshold * row_scale) { S_temp_offd_j[jA] = -1; --jS_offd; } 
else { S_temp_offd_j[jA] = A_offd_j[jA]; } } } /* diag >= 0 */ } /* num_functions <= 1 */ } /* !((row_sum > max_row_sum) && (max_row_sum < 1.0)) */ } /* for each variable */ hypre_prefix_sum_pair(&jS_diag, S_diag_i + num_variables, &jS_offd, S_offd_i + num_variables, prefix_sum_workspace); /*-------------------------------------------------------------- * "Compress" the strength matrix. * * NOTE: S has *NO DIAGONAL ELEMENT* on any row. Caveat Emptor! * * NOTE: This "compression" section of code may be removed, and * coarsening will still be done correctly. However, the routine * that builds interpolation would have to be modified first. *----------------------------------------------------------------*/ for (i = start; i < stop; i++) { S_diag_i[i] += jS_diag; S_offd_i[i] += jS_offd; jS = S_diag_i[i]; for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++) { if (S_temp_diag_j[jA] > -1) { S_diag_j[jS] = S_temp_diag_j[jA]; jS++; } } jS = S_offd_i[i]; for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (S_temp_offd_j[jA] > -1) { S_offd_j[jS] = S_temp_offd_j[jA]; jS++; } } } /* for each variable */ } /* omp parallel */ hypre_CSRMatrixNumNonzeros(S_diag) = S_diag_i[num_variables]; hypre_CSRMatrixNumNonzeros(S_offd) = S_offd_i[num_variables]; hypre_CSRMatrixJ(S_diag) = S_diag_j; hypre_CSRMatrixJ(S_offd) = S_offd_j; hypre_CSRMatrixMemoryLocation(S_diag) = HYPRE_MEMORY_HOST; hypre_CSRMatrixMemoryLocation(S_offd) = HYPRE_MEMORY_HOST; hypre_ParCSRMatrixCommPkg(S) = NULL; *S_ptr = S; hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST); hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_TFree(S_temp_diag_j, HYPRE_MEMORY_HOST); hypre_TFree(S_temp_offd_j, HYPRE_MEMORY_HOST); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_CREATES] += hypre_MPI_Wtime(); #endif return (ierr); } /* ----------------------------------------------------------------------- */ HYPRE_Int hypre_BoomerAMGCreateS(hypre_ParCSRMatrix *A, HYPRE_Real strength_threshold, HYPRE_Real max_row_sum, HYPRE_Int num_functions, HYPRE_Int *dof_func, hypre_ParCSRMatrix **S_ptr) { #if defined(HYPRE_USING_CUDA) hypre_NvtxPushRange("CreateS"); #endif HYPRE_Int ierr = 0; #if defined(HYPRE_USING_CUDA) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(A)) ); if (exec == HYPRE_EXEC_DEVICE) { ierr = hypre_BoomerAMGCreateSDevice(A,strength_threshold,max_row_sum,num_functions,dof_func,S_ptr); } else #endif { ierr = hypre_BoomerAMGCreateSHost(A,strength_threshold,max_row_sum,num_functions,dof_func,S_ptr); } #if defined(HYPRE_USING_CUDA) hypre_NvtxPopRange(); #endif return ierr; } /* ----------------------------------------------------------------------- */ /* Create Strength matrix from CF marker array data. Provides a more general form to build S for specific nodes of the 'global' matrix (for example, F points or A_FF part), given the entire matrix. These nodes have the SMRK tag. Could possibly be merged with BoomerAMGCreateS() to yield a more general function. 
*/ HYPRE_Int hypre_BoomerAMGCreateSFromCFMarker(hypre_ParCSRMatrix *A, HYPRE_Real strength_threshold, HYPRE_Real max_row_sum, HYPRE_Int *CF_marker, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int SMRK, hypre_ParCSRMatrix **S_ptr) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_CREATES] -= hypre_MPI_Wtime(); #endif MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = NULL; HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_Int num_nonzeros_diag; HYPRE_Int num_nonzeros_offd = 0; HYPRE_Int num_cols_offd = 0; hypre_ParCSRMatrix *S; hypre_CSRMatrix *S_diag; HYPRE_Int *S_diag_i; HYPRE_Int *S_diag_j; /* HYPRE_Real *S_diag_data; */ hypre_CSRMatrix *S_offd; HYPRE_Int *S_offd_i = NULL; HYPRE_Int *S_offd_j = NULL; /* HYPRE_Real *S_offd_data; */ HYPRE_Int *dof_func_offd = NULL; HYPRE_Real diag, row_scale, row_sum; HYPRE_Int i, jj, jA, jS; HYPRE_Int num_sends, start, j, index; HYPRE_Int *int_buf_data; HYPRE_Int ierr = 0; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *prefix_sum_workspace; HYPRE_Int my_id; /*-------------------------------------------------------------- * Compute a ParCSR strength matrix, S. * * For now, the "strength" of dependence/influence is defined in * the following way: i depends on j if * aij > hypre_max (k != i) aik, aii < 0 * or * aij < hypre_min (k != i) aik, aii >= 0 * Then S_ij = 1, else S_ij = 0. * * NOTE: the entries are negative initially, corresponding * to "unaccounted-for" dependence. 
*----------------------------------------------------------------*/ hypre_MPI_Comm_rank(comm, &my_id); num_nonzeros_diag = A_diag_i[num_variables]; num_cols_offd = hypre_CSRMatrixNumCols(A_offd); A_offd_i = hypre_CSRMatrixI(A_offd); num_nonzeros_offd = A_offd_i[num_variables]; S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars, row_starts, row_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); /* row_starts is owned by A, col_starts = row_starts */ hypre_ParCSRMatrixSetRowStartsOwner(S,0); S_diag = hypre_ParCSRMatrixDiag(S); hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST); hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST); S_offd = hypre_ParCSRMatrixOffd(S); hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST); S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_temp_diag_j = hypre_CSRMatrixJ(S_diag); S_offd_i = hypre_CSRMatrixI(S_offd); S_diag_j = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST); HYPRE_Int *S_temp_offd_j = NULL; if (num_cols_offd) { A_offd_data = hypre_CSRMatrixData(A_offd); hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST); S_temp_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_BigInt *col_map_offd_S = hypre_TAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixColMapOffd(S) = col_map_offd_S; if (num_functions > 1) { dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } S_offd_j = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_cols_offd; i++) { col_map_offd_S[i] = col_map_offd_A[i]; } } /*------------------------------------------------------------------- * Get the dof_func data for the off-processor columns *-------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (num_functions > 1) { int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); } /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (num_cols_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } 
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); /*HYPRE_Int prefix_sum_workspace[2*(hypre_NumThreads() + 1)];*/ prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2*(hypre_NumThreads() + 1), HYPRE_MEMORY_HOST); /* give S same nonzero structure as A */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,diag,row_scale,row_sum,jA,jS) #endif { HYPRE_Int start, stop; hypre_GetSimpleThreadPartition(&start, &stop, num_variables); HYPRE_Int jS_diag = 0, jS_offd = 0; for (i = start; i < stop; i++) { if (CF_marker[i] == SMRK) { S_diag_i[i] = jS_diag; if (num_cols_offd) { S_offd_i[i] = jS_offd; } diag = A_diag_data[A_diag_i[i]]; /* compute scaling factor and row sum */ row_scale = 0.0; row_sum = diag; if (num_functions > 1) { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; if ((CF_marker[jj] == SMRK) && (dof_func[i] == dof_func[jj])) { row_scale = hypre_max(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { jj = A_offd_j[jA]; if ((CF_marker_offd[jj] == SMRK) && (dof_func[i] == dof_func_offd[jj])) { row_scale = hypre_max(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } /* diag < 0 */ else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; if ((CF_marker[jj] == SMRK) && (dof_func[i] == dof_func[jj])) { row_scale = hypre_min(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { jj = A_offd_j[jA]; if ((CF_marker_offd[jj] == SMRK) && (dof_func[i] == dof_func_offd[A_offd_j[jA]])) { row_scale = hypre_min(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } /* diag >= 0 */ } /* num_functions > 1 */ else { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; if (CF_marker[jj] == SMRK) { row_scale = hypre_max(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { jj = A_offd_j[jA]; if (CF_marker_offd[jj] == SMRK) { row_scale = hypre_max(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } /* diag < 0 */ else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; if (CF_marker[jj] == SMRK) { row_scale = hypre_min(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { jj = A_offd_j[jA]; if (CF_marker_offd[jj] == SMRK) { row_scale = hypre_min(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } /* diag >= 0*/ } /* num_functions <=1 */ /* compute row entries of S */ S_temp_diag_j[A_diag_i[i]] = -1; if ((fabs(row_sum) > fabs(diag)*max_row_sum) && (max_row_sum < 1.0)) { /* make all dependencies weak */ for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { S_temp_diag_j[jA] = -1; } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { S_temp_offd_j[jA] = -1; } } else { if (num_functions > 1) { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; if (CF_marker[jj] == SMRK) { if ((A_diag_data[jA] <= strength_threshold * row_scale) || (dof_func[i] != dof_func[jj])) { S_temp_diag_j[jA] = -1; } else { S_temp_diag_j[jA] = jj; ++jS_diag; } } else { S_temp_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { jj = A_offd_j[jA]; if (CF_marker_offd[jj] == SMRK) { if ((A_offd_data[jA] <= strength_threshold * row_scale) || (dof_func[i] != dof_func_offd[jj])) { 
S_temp_offd_j[jA] = -1; } else { S_temp_offd_j[jA] = jj; ++jS_offd; } } else { S_temp_offd_j[jA] = -1; } } } /* end diag < 0 */ else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; if (CF_marker[jj] == SMRK) { if ((A_diag_data[jA] >= strength_threshold * row_scale) || (dof_func[i] != dof_func[jj])) { S_temp_diag_j[jA] = -1; } else { S_temp_diag_j[jA] = jj; ++jS_diag; } } else { S_temp_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { jj = A_offd_j[jA]; if (CF_marker_offd[jj] == SMRK) { if ((A_offd_data[jA] >= strength_threshold * row_scale) || (dof_func[i] != dof_func_offd[jj])) { S_temp_offd_j[jA] = -1; } else { S_temp_offd_j[jA] = jj; ++jS_offd; } } else { S_temp_offd_j[jA] = -1; } } } /* diag >= 0 */ } /* num_functions > 1 */ else { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; if (CF_marker[jj] == SMRK) { if (A_diag_data[jA] <= strength_threshold * row_scale) { S_temp_diag_j[jA] = -1; } else { S_temp_diag_j[jA] = jj; ++jS_diag; } } else { S_temp_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { jj = A_offd_j[jA]; if (CF_marker_offd[jj] == SMRK) { if (A_offd_data[jA] <= strength_threshold * row_scale) { S_temp_offd_j[jA] = -1; } else { S_temp_offd_j[jA] = jj; ++jS_offd; } } else { S_temp_offd_j[jA] = -1; } } } /* diag < 0 */ else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; if (CF_marker[jj] == SMRK) { if (A_diag_data[jA] >= strength_threshold * row_scale) { S_temp_diag_j[jA] = -1; } else { S_temp_diag_j[jA] = jj; ++jS_diag; } } else { S_temp_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { jj = A_offd_j[jA]; if (CF_marker_offd[jj] == SMRK) { if (A_offd_data[jA] >= strength_threshold * row_scale) { S_temp_offd_j[jA] = -1; } else { S_temp_offd_j[jA] = jj; ++jS_offd; } } else { S_temp_offd_j[jA] = -1; } } } /* diag >= 0 */ } /* num_functions <=1 */ } /* !((row_sum > max_row_sum) && (max_row_sum < 1.0)) */ } /* CF_marker == SMRK */ else { S_diag_i[i] = jS_diag; if (num_cols_offd) { S_offd_i[i] = jS_offd; } for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++) { S_temp_diag_j[jA] = -1; } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { S_temp_offd_j[jA] = -1; } } /* CF_marker != SMRK */ } /* for each variable */ hypre_prefix_sum_pair(&jS_diag, S_diag_i + num_variables, &jS_offd, S_offd_i + num_variables, prefix_sum_workspace); /*-------------------------------------------------------------- * "Compress" the strength matrix. * * NOTE: S has *NO DIAGONAL ELEMENT* on any row. Caveat Emptor! * * NOTE: This "compression" section of code may be removed, and * coarsening will still be done correctly. However, the routine * that builds interpolation would have to be modified first. 
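 * Concretely, the loop below copies, for each row, only the surviving
 * column indices (those not flagged with -1 in S_temp_diag_j and
 * S_temp_offd_j) into S_diag_j and S_offd_j, at the positions given by
 * the prefix-summed row pointers computed above.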
*----------------------------------------------------------------*/ for (i = start; i < stop; i++) { S_diag_i[i] += jS_diag; S_offd_i[i] += jS_offd; jS = S_diag_i[i]; for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++) { if (S_temp_diag_j[jA] > -1) { S_diag_j[jS] = S_temp_diag_j[jA]; jS++; } } jS = S_offd_i[i]; for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (S_temp_offd_j[jA] > -1) { S_offd_j[jS] = S_temp_offd_j[jA]; jS++; } } } /* for each variable */ } /* omp parallel */ hypre_CSRMatrixNumNonzeros(S_diag) = S_diag_i[num_variables]; hypre_CSRMatrixNumNonzeros(S_offd) = S_offd_i[num_variables]; hypre_CSRMatrixJ(S_diag) = S_diag_j; hypre_CSRMatrixJ(S_offd) = S_offd_j; hypre_CSRMatrixMemoryLocation(S_diag) = HYPRE_MEMORY_HOST; hypre_CSRMatrixMemoryLocation(S_offd) = HYPRE_MEMORY_HOST; hypre_ParCSRMatrixCommPkg(S) = NULL; *S_ptr = S; hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST); hypre_TFree(S_temp_diag_j, HYPRE_MEMORY_HOST); hypre_TFree(S_temp_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_CREATES] += hypre_MPI_Wtime(); #endif return (ierr); } /*==========================================================================*/ /*==========================================================================*/ /** Generates strength matrix Notes: \begin{itemize} \item The underlying matrix storage scheme is a hypre_ParCSR matrix. \item The routine returns the following: \begin{itemize} \item S - a ParCSR matrix representing the "strength matrix". This is used in the coarsening and interpolation routines. \end{itemize} \item The graph of the "strength matrix" for A is a subgraph of the graph of A, but requires nonsymmetric storage even if A is symmetric. This is because of the directional nature of the "strengh of dependence" notion (see below). Since we are using nonsymmetric storage for A right now, this is not a problem. If we ever add the ability to store A symmetrically, then we could store the strength graph as floats instead of doubles to save space. \item This routine currently "compresses" the strength matrix. We should consider the possibility of defining this matrix to have the same "nonzero structure" as A. To do this, we could use the same A\_i and A\_j arrays, and would need only define the S\_data array. There are several pros and cons to discuss. \end{itemize} Terminology: \begin{itemize} \item Ruge's terminology: A point is "strongly connected to" $j$, or "strongly depends on" $j$, if $|a_ij| >= \theta max_{l != j} |a_il|}$. \item Here, we retain some of this terminology, but with a more generalized notion of "strength". We also retain the "natural" graph notation for representing the directed graph of a matrix. That is, the nonzero entry $a_ij$ is represented as: i --> j. In the strength matrix, S, the entry $s_ij$ is also graphically denoted as above, and means both of the following: \begin{itemize} \item $i$ "depends on" $j$ with "strength" $s_ij$ \item $j$ "influences" $i$ with "strength" $s_ij$ \end{itemize} \end{itemize} {\bf Input files:} _hypre_parcsr_ls.h @return Error code. 
@param A [IN] coefficient matrix @param strength_threshold [IN] threshold parameter used to define strength @param max_row_sum [IN] parameter used to modify definition of strength for diagonal dominant matrices @param S_ptr [OUT] strength matrix @see */ /*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCreateSabs(hypre_ParCSRMatrix *A, HYPRE_Real strength_threshold, HYPRE_Real max_row_sum, HYPRE_Int num_functions, HYPRE_Int *dof_func, hypre_ParCSRMatrix **S_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = NULL; HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_Int num_nonzeros_diag; HYPRE_Int num_nonzeros_offd = 0; HYPRE_Int num_cols_offd = 0; hypre_ParCSRMatrix *S; hypre_CSRMatrix *S_diag; HYPRE_Int *S_diag_i; HYPRE_Int *S_diag_j; /* HYPRE_Real *S_diag_data; */ hypre_CSRMatrix *S_offd; HYPRE_Int *S_offd_i = NULL; HYPRE_Int *S_offd_j = NULL; /* HYPRE_Real *S_offd_data; */ HYPRE_Real diag, row_scale, row_sum; HYPRE_Int i, jA, jS; HYPRE_Int ierr = 0; HYPRE_Int *dof_func_offd; HYPRE_Int num_sends; HYPRE_Int *int_buf_data; HYPRE_Int index, start, j; /*-------------------------------------------------------------- * Compute a ParCSR strength matrix, S. * * For now, the "strength" of dependence/influence is defined in * the following way: i depends on j if * aij > hypre_max (k != i) aik, aii < 0 * or * aij < hypre_min (k != i) aik, aii >= 0 * Then S_ij = 1, else S_ij = 0. * * NOTE: the entries are negative initially, corresponding * to "unaccounted-for" dependence. 
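 *
 * In this absolute-value variant the test actually applied below is
 *    |a_ij| > strength_threshold * max_{k != i} |a_ik|.
 * Illustrative example (numbers chosen only for illustration): with
 * strength_threshold = 0.25 and off-diagonal entries {-3, -1, 0.5} in a
 * row, the scale is max |a_ik| = 3 and the cutoff is 0.25 * 3 = 0.75,
 * so -3 and -1 are kept as strong connections while 0.5 is dropped.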
*----------------------------------------------------------------*/ num_nonzeros_diag = A_diag_i[num_variables]; num_cols_offd = hypre_CSRMatrixNumCols(A_offd); A_offd_i = hypre_CSRMatrixI(A_offd); num_nonzeros_offd = A_offd_i[num_variables]; S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars, row_starts, row_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); /* row_starts is owned by A, col_starts = row_starts */ hypre_ParCSRMatrixSetRowStartsOwner(S,0); S_diag = hypre_ParCSRMatrixDiag(S); hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST); hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST); S_offd = hypre_ParCSRMatrixOffd(S); hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST); S_diag_i = hypre_CSRMatrixI(S_diag); S_diag_j = hypre_CSRMatrixJ(S_diag); S_offd_i = hypre_CSRMatrixI(S_offd); hypre_CSRMatrixMemoryLocation(S_diag) = HYPRE_MEMORY_HOST; hypre_CSRMatrixMemoryLocation(S_offd) = HYPRE_MEMORY_HOST; dof_func_offd = NULL; if (num_cols_offd) { A_offd_data = hypre_CSRMatrixData(A_offd); hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST); S_offd_j = hypre_CSRMatrixJ(S_offd); hypre_ParCSRMatrixColMapOffd(S) = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); if (num_functions > 1) dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } /*------------------------------------------------------------------- * Get the dof_func data for the off-processor columns *-------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (num_functions > 1) { int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); } /* give S same nonzero structure as A */ hypre_ParCSRMatrixCopy(A,S,0); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,diag,row_scale,row_sum,jA) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_variables; i++) { diag = A_diag_data[A_diag_i[i]]; /* compute scaling factor and row sum */ row_scale = 0.0; row_sum = fabs(diag); if (num_functions > 1) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (dof_func[i] == dof_func[A_diag_j[jA]]) { row_scale = hypre_max(row_scale, fabs(A_diag_data[jA])); row_sum += fabs(A_diag_data[jA]); } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (dof_func[i] == dof_func_offd[A_offd_j[jA]]) { row_scale = hypre_max(row_scale, fabs(A_offd_data[jA])); row_sum += fabs(A_offd_data[jA]); } } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { row_scale = hypre_max(row_scale, fabs(A_diag_data[jA])); row_sum += fabs(A_diag_data[jA]); } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { row_scale = hypre_max(row_scale, fabs(A_offd_data[jA])); row_sum += fabs(A_offd_data[jA]); } } /* compute row entries of S */ S_diag_j[A_diag_i[i]] = -1; /* reject diag entry */ if ( fabs(row_sum) < 
fabs(diag)*(2.0-max_row_sum) && max_row_sum < 1.0 ) { /* make all dependencies weak */ for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { S_diag_j[jA] = -1; } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { S_offd_j[jA] = -1; } } else { if (num_functions > 1) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (fabs(A_diag_data[jA]) <= strength_threshold * row_scale || dof_func[i] != dof_func[A_diag_j[jA]]) { S_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (fabs(A_offd_data[jA]) <= strength_threshold * row_scale || dof_func[i] != dof_func_offd[A_offd_j[jA]]) { S_offd_j[jA] = -1; } } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (fabs(A_diag_data[jA]) <= strength_threshold * row_scale) { S_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (fabs(A_offd_data[jA]) <= strength_threshold * row_scale) { S_offd_j[jA] = -1; } } } } } /*-------------------------------------------------------------- * "Compress" the strength matrix. * * NOTE: S has *NO DIAGONAL ELEMENT* on any row. Caveat Emptor! * * NOTE: This "compression" section of code may be removed, and * coarsening will still be done correctly. However, the routine * that builds interpolation would have to be modified first. *----------------------------------------------------------------*/ /* RDF: not sure if able to thread this loop */ jS = 0; for (i = 0; i < num_variables; i++) { S_diag_i[i] = jS; for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++) { if (S_diag_j[jA] > -1) { S_diag_j[jS] = S_diag_j[jA]; jS++; } } } S_diag_i[num_variables] = jS; hypre_CSRMatrixNumNonzeros(S_diag) = jS; /* RDF: not sure if able to thread this loop */ jS = 0; for (i = 0; i < num_variables; i++) { S_offd_i[i] = jS; for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (S_offd_j[jA] > -1) { S_offd_j[jS] = S_offd_j[jA]; jS++; } } } S_offd_i[num_variables] = jS; hypre_CSRMatrixNumNonzeros(S_offd) = jS; hypre_ParCSRMatrixCommPkg(S) = NULL; *S_ptr = S; hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); return (ierr); } /*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCreateSCommPkg(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *S, HYPRE_Int **col_offd_S_to_A_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_MPI_Status *status; hypre_MPI_Request *requests; hypre_ParCSRCommPkg *comm_pkg_A = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommPkg *comm_pkg_S; hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_BigInt *col_map_offd_S = hypre_ParCSRMatrixColMapOffd(S); HYPRE_Int *recv_procs_A = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A); HYPRE_Int *recv_vec_starts_A = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A); HYPRE_Int *send_procs_A = hypre_ParCSRCommPkgSendProcs(comm_pkg_A); HYPRE_Int *send_map_starts_A = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A); HYPRE_Int *recv_procs_S; HYPRE_Int *recv_vec_starts_S; HYPRE_Int *send_procs_S; HYPRE_Int *send_map_starts_S; HYPRE_Int *send_map_elmts_S = NULL; HYPRE_BigInt *big_send_map_elmts_S = NULL; HYPRE_Int *col_offd_S_to_A; HYPRE_Int *S_marker; HYPRE_Int *send_change; HYPRE_Int *recv_change; HYPRE_Int num_variables = hypre_CSRMatrixNumRows(S_diag); HYPRE_Int num_cols_offd_A = 
hypre_CSRMatrixNumCols(A_offd); HYPRE_Int num_cols_offd_S; HYPRE_Int i, j, jcol; HYPRE_Int proc, cnt, proc_cnt, total_nz; HYPRE_BigInt first_row; HYPRE_Int ierr = 0; HYPRE_Int num_sends_A = hypre_ParCSRCommPkgNumSends(comm_pkg_A); HYPRE_Int num_recvs_A = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A); HYPRE_Int num_sends_S; HYPRE_Int num_recvs_S; HYPRE_Int num_nonzeros; num_nonzeros = S_offd_i[num_variables]; S_marker = NULL; if (num_cols_offd_A) S_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_offd_A; i++) S_marker[i] = -1; for (i=0; i < num_nonzeros; i++) { jcol = S_offd_j[i]; S_marker[jcol] = 0; } proc = 0; proc_cnt = 0; cnt = 0; num_recvs_S = 0; for (i=0; i < num_recvs_A; i++) { for (j=recv_vec_starts_A[i]; j < recv_vec_starts_A[i+1]; j++) { if (!S_marker[j]) { S_marker[j] = cnt; cnt++; proc = 1; } } if (proc) {num_recvs_S++; proc = 0;} } num_cols_offd_S = cnt; recv_change = NULL; recv_procs_S = NULL; send_change = NULL; if (col_map_offd_S) hypre_TFree(col_map_offd_S, HYPRE_MEMORY_HOST); col_map_offd_S = NULL; col_offd_S_to_A = NULL; if (num_recvs_A) recv_change = hypre_CTAlloc(HYPRE_Int, num_recvs_A, HYPRE_MEMORY_HOST); if (num_sends_A) send_change = hypre_CTAlloc(HYPRE_Int, num_sends_A, HYPRE_MEMORY_HOST); if (num_recvs_S) recv_procs_S = hypre_CTAlloc(HYPRE_Int, num_recvs_S, HYPRE_MEMORY_HOST); recv_vec_starts_S = hypre_CTAlloc(HYPRE_Int, num_recvs_S+1, HYPRE_MEMORY_HOST); if (num_cols_offd_S) { col_map_offd_S = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_S, HYPRE_MEMORY_HOST); col_offd_S_to_A = hypre_CTAlloc(HYPRE_Int, num_cols_offd_S, HYPRE_MEMORY_HOST); } if (num_cols_offd_S < num_cols_offd_A) { for (i=0; i < num_nonzeros; i++) { jcol = S_offd_j[i]; S_offd_j[i] = S_marker[jcol]; } proc = 0; proc_cnt = 0; cnt = 0; recv_vec_starts_S[0] = 0; for (i=0; i < num_recvs_A; i++) { for (j=recv_vec_starts_A[i]; j < recv_vec_starts_A[i+1]; j++) { if (S_marker[j] != -1) { col_map_offd_S[cnt] = col_map_offd_A[j]; col_offd_S_to_A[cnt++] = j; proc = 1; } } recv_change[i] = j-cnt-recv_vec_starts_A[i]+recv_vec_starts_S[proc_cnt]; if (proc) { recv_procs_S[proc_cnt++] = recv_procs_A[i]; recv_vec_starts_S[proc_cnt] = cnt; proc = 0; } } } else { for (i=0; i < num_recvs_A; i++) { for (j=recv_vec_starts_A[i]; j < recv_vec_starts_A[i+1]; j++) { col_map_offd_S[j] = col_map_offd_A[j]; col_offd_S_to_A[j] = j; } recv_procs_S[i] = recv_procs_A[i]; recv_vec_starts_S[i] = recv_vec_starts_A[i]; } recv_vec_starts_S[num_recvs_A] = recv_vec_starts_A[num_recvs_A]; } requests = hypre_CTAlloc(hypre_MPI_Request, num_sends_A+num_recvs_A, HYPRE_MEMORY_HOST); j=0; for (i=0; i < num_sends_A; i++) hypre_MPI_Irecv(&send_change[i],1,HYPRE_MPI_INT,send_procs_A[i], 0,comm,&requests[j++]); for (i=0; i < num_recvs_A; i++) hypre_MPI_Isend(&recv_change[i],1,HYPRE_MPI_INT,recv_procs_A[i], 0,comm,&requests[j++]); status = hypre_CTAlloc(hypre_MPI_Status, j, HYPRE_MEMORY_HOST); hypre_MPI_Waitall(j,requests,status); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(requests, HYPRE_MEMORY_HOST); num_sends_S = 0; total_nz = send_map_starts_A[num_sends_A]; for (i=0; i < num_sends_A; i++) { if (send_change[i]) { if ((send_map_starts_A[i+1]-send_map_starts_A[i]) > send_change[i]) num_sends_S++; } else num_sends_S++; total_nz -= send_change[i]; } send_procs_S = NULL; if (num_sends_S) send_procs_S = hypre_CTAlloc(HYPRE_Int, num_sends_S, HYPRE_MEMORY_HOST); send_map_starts_S = hypre_CTAlloc(HYPRE_Int, num_sends_S+1, HYPRE_MEMORY_HOST); send_map_elmts_S = NULL; if (total_nz) { send_map_elmts_S = 
hypre_CTAlloc(HYPRE_Int, total_nz, HYPRE_MEMORY_HOST); big_send_map_elmts_S = hypre_CTAlloc(HYPRE_BigInt, total_nz, HYPRE_MEMORY_HOST); } proc = 0; proc_cnt = 0; for (i=0; i < num_sends_A; i++) { cnt = send_map_starts_A[i+1]-send_map_starts_A[i]-send_change[i]; if (cnt) { send_procs_S[proc_cnt++] = send_procs_A[i]; send_map_starts_S[proc_cnt] = send_map_starts_S[proc_cnt-1]+cnt; } } comm_pkg_S = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(comm_pkg_S) = comm; hypre_ParCSRCommPkgNumRecvs(comm_pkg_S) = num_recvs_S; hypre_ParCSRCommPkgRecvProcs(comm_pkg_S) = recv_procs_S; hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_S) = recv_vec_starts_S; hypre_ParCSRCommPkgNumSends(comm_pkg_S) = num_sends_S; hypre_ParCSRCommPkgSendProcs(comm_pkg_S) = send_procs_S; hypre_ParCSRCommPkgSendMapStarts(comm_pkg_S) = send_map_starts_S; comm_handle = hypre_ParCSRCommHandleCreate(22, comm_pkg_S, col_map_offd_S, big_send_map_elmts_S); hypre_ParCSRCommHandleDestroy(comm_handle); first_row = hypre_ParCSRMatrixFirstRowIndex(A); if (first_row) for (i=0; i < send_map_starts_S[num_sends_S]; i++) send_map_elmts_S[i] = (HYPRE_Int)(big_send_map_elmts_S[i]-first_row); hypre_ParCSRCommPkgSendMapElmts(comm_pkg_S) = send_map_elmts_S; hypre_ParCSRMatrixCommPkg(S) = comm_pkg_S; hypre_ParCSRMatrixColMapOffd(S) = col_map_offd_S; hypre_CSRMatrixNumCols(S_offd) = num_cols_offd_S; hypre_TFree(S_marker, HYPRE_MEMORY_HOST); hypre_TFree(send_change, HYPRE_MEMORY_HOST); hypre_TFree(recv_change, HYPRE_MEMORY_HOST); *col_offd_S_to_A_ptr = col_offd_S_to_A; return ierr; } /*-------------------------------------------------------------------------- * hypre_BoomerAMGCreate2ndS : creates strength matrix on coarse points * for second coarsening pass in aggressive coarsening (S*S+2S) *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCreate2ndSHost( hypre_ParCSRMatrix *S, HYPRE_Int *CF_marker, HYPRE_Int num_paths, HYPRE_BigInt *coarse_row_starts, hypre_ParCSRMatrix **C_ptr) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_CREATE_2NDS] -= hypre_MPI_Wtime(); #endif MPI_Comm comm = hypre_ParCSRMatrixComm(S); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S); hypre_ParCSRCommPkg *tmp_comm_pkg; hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_Int num_cols_diag_S = hypre_CSRMatrixNumCols(S_diag); HYPRE_Int num_cols_offd_S = hypre_CSRMatrixNumCols(S_offd); hypre_ParCSRMatrix *S2; HYPRE_BigInt *col_map_offd_C = NULL; hypre_CSRMatrix *C_diag; /*HYPRE_Int *C_diag_data = NULL;*/ HYPRE_Int *C_diag_i; HYPRE_Int *C_diag_j = NULL; hypre_CSRMatrix *C_offd; /*HYPRE_Int *C_offd_data=NULL;*/ HYPRE_Int *C_offd_i; HYPRE_Int *C_offd_j=NULL; HYPRE_Int num_cols_offd_C = 0; HYPRE_Int *S_ext_diag_i = NULL; HYPRE_Int *S_ext_diag_j = NULL; HYPRE_Int S_ext_diag_size = 0; HYPRE_Int *S_ext_offd_i = NULL; HYPRE_Int *S_ext_offd_j = NULL; HYPRE_Int S_ext_offd_size = 0; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *S_marker = NULL; HYPRE_Int *S_marker_offd = NULL; //HYPRE_Int *temp = NULL; HYPRE_Int *fine_to_coarse = NULL; HYPRE_BigInt *fine_to_coarse_offd = NULL; HYPRE_Int *map_S_to_C = NULL; HYPRE_Int num_sends = 0; HYPRE_Int num_recvs = 0; HYPRE_Int *send_map_starts; HYPRE_Int 
*tmp_send_map_starts = NULL; HYPRE_Int *send_map_elmts; HYPRE_Int *recv_vec_starts; HYPRE_Int *tmp_recv_vec_starts = NULL; HYPRE_Int *int_buf_data = NULL; HYPRE_BigInt *big_int_buf_data = NULL; HYPRE_BigInt *temp = NULL; HYPRE_Int i, j, k; HYPRE_Int i1, i2, i3; HYPRE_BigInt big_i1; HYPRE_Int jj1, jj2, jrow, j_cnt; /*HYPRE_Int cnt, cnt_offd, cnt_diag;*/ HYPRE_Int num_procs, my_id; HYPRE_Int index; /*HYPRE_Int value;*/ HYPRE_Int num_coarse; HYPRE_Int num_nonzeros; HYPRE_BigInt global_num_coarse; HYPRE_BigInt my_first_cpt, my_last_cpt; HYPRE_Int *S_int_i = NULL; HYPRE_BigInt *S_int_j = NULL; HYPRE_Int *S_ext_i = NULL; HYPRE_BigInt *S_ext_j = NULL; /*HYPRE_Int prefix_sum_workspace[2*(hypre_NumThreads() + 1)];*/ HYPRE_Int *prefix_sum_workspace; HYPRE_Int *num_coarse_prefix_sum; prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2*(hypre_NumThreads() + 1), HYPRE_MEMORY_HOST); num_coarse_prefix_sum = hypre_TAlloc(HYPRE_Int, hypre_NumThreads() + 1, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Extract S_ext, i.e. portion of B that is stored on neighbor procs * and needed locally for matrix matrix product *-----------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION my_first_cpt = coarse_row_starts[0]; my_last_cpt = coarse_row_starts[1]-1; if (my_id == (num_procs -1)) global_num_coarse = coarse_row_starts[1]; hypre_MPI_Bcast(&global_num_coarse, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else my_first_cpt = coarse_row_starts[my_id]; my_last_cpt = coarse_row_starts[my_id+1]-1; global_num_coarse = coarse_row_starts[num_procs]; #endif if (num_cols_offd_S) { CF_marker_offd = hypre_TAlloc(HYPRE_Int, num_cols_offd_S, HYPRE_MEMORY_HOST); fine_to_coarse_offd = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_S, HYPRE_MEMORY_HOST); } HYPRE_Int *coarse_to_fine = NULL; if (num_cols_diag_S) { fine_to_coarse = hypre_TAlloc(HYPRE_Int, num_cols_diag_S, HYPRE_MEMORY_HOST); coarse_to_fine = hypre_TAlloc(HYPRE_Int, num_cols_diag_S, HYPRE_MEMORY_HOST); } /*HYPRE_Int num_coarse_prefix_sum[hypre_NumThreads() + 1];*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i) #endif { HYPRE_Int num_coarse_private = 0; HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_diag_S); for (i = i_begin; i < i_end; i++) { if (CF_marker[i] > 0) num_coarse_private++; } hypre_prefix_sum(&num_coarse_private, &num_coarse, num_coarse_prefix_sum); for (i = i_begin; i < i_end; i++) { if (CF_marker[i] > 0) { fine_to_coarse[i] = num_coarse_private; coarse_to_fine[num_coarse_private] = i; num_coarse_private++; } else { fine_to_coarse[i] = -1; } } } /* omp parallel */ if (num_procs > 1) { if (!comm_pkg) { hypre_MatvecCommPkgCreate(S); comm_pkg = hypre_ParCSRMatrixCommPkg(S); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); HYPRE_Int begin = send_map_starts[0]; HYPRE_Int end = send_map_starts[num_sends]; big_int_buf_data = hypre_TAlloc(HYPRE_BigInt, end, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (index = begin; index < end; index++) { big_int_buf_data[index - begin] = (HYPRE_BigInt)fine_to_coarse[send_map_elmts[index]] + my_first_cpt; } comm_handle = 
hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); int_buf_data = hypre_TAlloc(HYPRE_Int, end, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (index = begin; index < end; index++) { int_buf_data[index - begin] = CF_marker[send_map_elmts[index]]; } comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(big_int_buf_data, HYPRE_MEMORY_HOST); S_int_i = hypre_TAlloc(HYPRE_Int, end+1, HYPRE_MEMORY_HOST); S_ext_i = hypre_CTAlloc(HYPRE_Int, recv_vec_starts[num_recvs]+1, HYPRE_MEMORY_HOST); /*-------------------------------------------------------------------------- * generate S_int_i through adding number of coarse row-elements of offd and diag * for corresponding rows. S_int_i[j+1] contains the number of coarse elements of * a row j (which is determined through send_map_elmts) *--------------------------------------------------------------------------*/ S_int_i[0] = 0; num_nonzeros = 0; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j,k) reduction(+:num_nonzeros) HYPRE_SMP_SCHEDULE #endif for (j = begin; j < end; j++) { HYPRE_Int jrow = send_map_elmts[j]; HYPRE_Int index = 0; for (k = S_diag_i[jrow]; k < S_diag_i[jrow+1]; k++) { if (CF_marker[S_diag_j[k]] > 0) index++; } for (k = S_offd_i[jrow]; k < S_offd_i[jrow+1]; k++) { if (CF_marker_offd[S_offd_j[k]] > 0) index++; } S_int_i[j - begin + 1] = index; num_nonzeros += S_int_i[j - begin + 1]; } /*-------------------------------------------------------------------------- * initialize communication *--------------------------------------------------------------------------*/ if (num_procs > 1) comm_handle = hypre_ParCSRCommHandleCreate(11,comm_pkg,&S_int_i[1],&S_ext_i[1]); if (num_nonzeros) S_int_j = hypre_TAlloc(HYPRE_BigInt, num_nonzeros, HYPRE_MEMORY_HOST); tmp_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST); tmp_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST); tmp_send_map_starts[0] = 0; j_cnt = 0; for (i=0; i < num_sends; i++) { for (j = send_map_starts[i]; j < send_map_starts[i+1]; j++) { jrow = send_map_elmts[j]; for (k=S_diag_i[jrow]; k < S_diag_i[jrow+1]; k++) { if (CF_marker[S_diag_j[k]] > 0) S_int_j[j_cnt++] = (HYPRE_BigInt)fine_to_coarse[S_diag_j[k]]+my_first_cpt; } for (k=S_offd_i[jrow]; k < S_offd_i[jrow+1]; k++) { if (CF_marker_offd[S_offd_j[k]] > 0) S_int_j[j_cnt++] = fine_to_coarse_offd[S_offd_j[k]]; } } tmp_send_map_starts[i+1] = j_cnt; } tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm; hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgSendProcs(comm_pkg); hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgRecvProcs(comm_pkg); hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = tmp_send_map_starts; hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; /*-------------------------------------------------------------------------- * after communication exchange S_ext_i[j+1] contains the number of coarse elements * of a row j ! 
* evaluate S_ext_i and compute num_nonzeros for S_ext *--------------------------------------------------------------------------*/ for (i=0; i < recv_vec_starts[num_recvs]; i++) S_ext_i[i+1] += S_ext_i[i]; num_nonzeros = S_ext_i[recv_vec_starts[num_recvs]]; if (num_nonzeros) S_ext_j = hypre_TAlloc(HYPRE_BigInt, num_nonzeros, HYPRE_MEMORY_HOST); tmp_recv_vec_starts[0] = 0; for (i=0; i < num_recvs; i++) tmp_recv_vec_starts[i+1] = S_ext_i[recv_vec_starts[i+1]]; hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = tmp_recv_vec_starts; comm_handle = hypre_ParCSRCommHandleCreate(21,tmp_comm_pkg,S_int_j,S_ext_j); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; hypre_TFree(tmp_send_map_starts, HYPRE_MEMORY_HOST); hypre_TFree(tmp_recv_vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST); hypre_TFree(S_int_i, HYPRE_MEMORY_HOST); hypre_TFree(S_int_j, HYPRE_MEMORY_HOST); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime(); #endif #ifdef HYPRE_CONCURRENT_HOPSCOTCH HYPRE_BigInt *S_big_offd_j = NULL; S_ext_diag_i = hypre_TAlloc(HYPRE_Int, num_cols_offd_S+1, HYPRE_MEMORY_HOST); S_ext_diag_i[0] = 0; S_ext_offd_i = hypre_TAlloc(HYPRE_Int, num_cols_offd_S+1, HYPRE_MEMORY_HOST); S_ext_offd_i[0] = 0; hypre_UnorderedBigIntSet found_set; hypre_UnorderedBigIntSetCreate(&found_set, S_ext_i[num_cols_offd_S] + num_cols_offd_S, 16*hypre_NumThreads()); #pragma omp parallel private(i,j, big_i1) { HYPRE_Int S_ext_offd_size_private = 0; HYPRE_Int S_ext_diag_size_private = 0; HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_S); for (i = i_begin; i < i_end; i++) { if (CF_marker_offd[i] > 0) { hypre_UnorderedBigIntSetPut(&found_set, fine_to_coarse_offd[i]); } for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++) { big_i1 = S_ext_j[j]; if (big_i1 < my_first_cpt || big_i1 > my_last_cpt) { S_ext_offd_size_private++; hypre_UnorderedBigIntSetPut(&found_set, big_i1); } else S_ext_diag_size_private++; } } hypre_prefix_sum_pair( &S_ext_diag_size_private, &S_ext_diag_size, &S_ext_offd_size_private, &S_ext_offd_size, prefix_sum_workspace); #pragma omp master { if (S_ext_diag_size) S_ext_diag_j = hypre_TAlloc(HYPRE_Int, S_ext_diag_size, HYPRE_MEMORY_HOST); if (S_ext_offd_size) { S_ext_offd_j = hypre_TAlloc(HYPRE_Int, S_ext_offd_size, HYPRE_MEMORY_HOST); S_big_offd_j = hypre_TAlloc(HYPRE_BigInt, S_ext_offd_size, HYPRE_MEMORY_HOST); } } #pragma omp barrier for (i = i_begin; i < i_end; i++) { for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++) { big_i1 = S_ext_j[j]; if (big_i1 < my_first_cpt || big_i1 > my_last_cpt) S_big_offd_j[S_ext_offd_size_private++] = big_i1; //S_ext_offd_j[S_ext_offd_size_private++] = big_i1; else S_ext_diag_j[S_ext_diag_size_private++] = (HYPRE_Int)(big_i1 - my_first_cpt); } S_ext_diag_i[i + 1] = S_ext_diag_size_private; S_ext_offd_i[i + 1] = S_ext_offd_size_private; } } // omp parallel temp = hypre_UnorderedBigIntSetCopyToArray(&found_set, &num_cols_offd_C); hypre_UnorderedBigIntSetDestroy(&found_set); hypre_TFree(S_ext_i, HYPRE_MEMORY_HOST); hypre_UnorderedBigIntMap col_map_offd_C_inverse; hypre_big_sort_and_create_inverse_map(temp, num_cols_offd_C, &col_map_offd_C, &col_map_offd_C_inverse); #pragma omp parallel for HYPRE_SMP_SCHEDULE for (i=0 ; i < S_ext_offd_size; i++) S_ext_offd_j[i] = hypre_UnorderedBigIntMapGet(&col_map_offd_C_inverse, S_big_offd_j[i]); //S_ext_offd_j[i] = hypre_UnorderedIntMapGet(&col_map_offd_C_inverse, S_ext_offd_j[i]); hypre_TFree(S_ext_j, HYPRE_MEMORY_HOST); hypre_TFree(S_big_offd_j, 
HYPRE_MEMORY_HOST); if (num_cols_offd_C) hypre_UnorderedBigIntMapDestroy(&col_map_offd_C_inverse); #else /* !HYPRE_CONCURRENT_HOPSCOTCH */ HYPRE_Int cnt_offd, cnt_diag, cnt, value; S_ext_diag_size = 0; S_ext_offd_size = 0; for (i=0; i < num_cols_offd_S; i++) { for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++) { if (S_ext_j[j] < my_first_cpt || S_ext_j[j] > my_last_cpt) S_ext_offd_size++; else S_ext_diag_size++; } } S_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_S+1, HYPRE_MEMORY_HOST); S_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_S+1, HYPRE_MEMORY_HOST); if (S_ext_diag_size) { S_ext_diag_j = hypre_CTAlloc(HYPRE_Int, S_ext_diag_size, HYPRE_MEMORY_HOST); } if (S_ext_offd_size) { S_ext_offd_j = hypre_CTAlloc(HYPRE_Int, S_ext_offd_size, HYPRE_MEMORY_HOST); } cnt_offd = 0; cnt_diag = 0; cnt = 0; HYPRE_Int num_coarse_offd = 0; for (i=0; i < num_cols_offd_S; i++) { if (CF_marker_offd[i] > 0) num_coarse_offd++; for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++) { big_i1 = S_ext_j[j]; if (big_i1 < my_first_cpt || big_i1 > my_last_cpt) S_ext_j[cnt_offd++] = big_i1; else S_ext_diag_j[cnt_diag++] = (HYPRE_Int)(big_i1 - my_first_cpt); } S_ext_diag_i[++cnt] = cnt_diag; S_ext_offd_i[cnt] = cnt_offd; } hypre_TFree(S_ext_i, HYPRE_MEMORY_HOST); cnt = 0; if (S_ext_offd_size || num_coarse_offd) { temp = hypre_CTAlloc(HYPRE_BigInt, S_ext_offd_size+num_coarse_offd, HYPRE_MEMORY_HOST); for (i=0; i < S_ext_offd_size; i++) temp[i] = S_ext_j[i]; cnt = S_ext_offd_size; for (i=0; i < num_cols_offd_S; i++) if (CF_marker_offd[i] > 0) temp[cnt++] = fine_to_coarse_offd[i]; } if (cnt) { hypre_BigQsort0(temp, 0, cnt-1); num_cols_offd_C = 1; value = temp[0]; for (i=1; i < cnt; i++) { if (temp[i] > value) { value = temp[i]; temp[num_cols_offd_C++] = value; } } } if (num_cols_offd_C) col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_offd_C; i++) col_map_offd_C[i] = temp[i]; if (S_ext_offd_size || num_coarse_offd) hypre_TFree(temp, HYPRE_MEMORY_HOST); for (i=0 ; i < S_ext_offd_size; i++) S_ext_offd_j[i] = hypre_BigBinarySearch(col_map_offd_C, S_ext_j[i], num_cols_offd_C); hypre_TFree(S_ext_j, HYPRE_MEMORY_HOST); #endif /* !HYPRE_CONCURRENT_HOPSCOTCH */ if (num_cols_offd_S) { map_S_to_C = hypre_TAlloc(HYPRE_Int, num_cols_offd_S, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i) #endif { HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_S); HYPRE_BigInt cnt = 0; for (i = i_begin; i < i_end; i++) { if (CF_marker_offd[i] > 0) { cnt = hypre_BigLowerBound(col_map_offd_C + cnt, col_map_offd_C + num_cols_offd_C, fine_to_coarse_offd[i]) - col_map_offd_C; map_S_to_C[i] = cnt++; } else map_S_to_C[i] = -1; } } /* omp parallel */ } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime(); #endif } /* num_procs > 1 */ /*----------------------------------------------------------------------- * Allocate and initialize some stuff. 
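 * Specifically: per-thread marker arrays (S_marker_array and
 * S_marker_offd_array), the optional per-thread scratch arrays used when
 * num_paths > 1, and the row-pointer arrays C_diag_i / C_offd_i of the
 * product matrix.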
*-----------------------------------------------------------------------*/ HYPRE_Int *S_marker_array = NULL, *S_marker_offd_array = NULL; if (num_coarse) S_marker_array = hypre_TAlloc(HYPRE_Int, num_coarse*hypre_NumThreads(), HYPRE_MEMORY_HOST); if (num_cols_offd_C) S_marker_offd_array = hypre_TAlloc(HYPRE_Int, num_cols_offd_C*hypre_NumThreads(), HYPRE_MEMORY_HOST); HYPRE_Int *C_temp_offd_j_array = NULL; HYPRE_Int *C_temp_diag_j_array = NULL; HYPRE_Int *C_temp_offd_data_array = NULL; HYPRE_Int *C_temp_diag_data_array = NULL; if (num_paths > 1) { C_temp_diag_j_array = hypre_TAlloc(HYPRE_Int, num_coarse*hypre_NumThreads(), HYPRE_MEMORY_HOST); C_temp_offd_j_array = hypre_TAlloc(HYPRE_Int, num_cols_offd_C*hypre_NumThreads(), HYPRE_MEMORY_HOST); C_temp_diag_data_array = hypre_TAlloc(HYPRE_Int, num_coarse*hypre_NumThreads(), HYPRE_MEMORY_HOST); C_temp_offd_data_array = hypre_TAlloc(HYPRE_Int, num_cols_offd_C*hypre_NumThreads(), HYPRE_MEMORY_HOST); } C_diag_i = hypre_CTAlloc(HYPRE_Int, num_coarse+1, HYPRE_MEMORY_HOST); C_offd_i = hypre_CTAlloc(HYPRE_Int, num_coarse+1, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Loop over rows of S *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i1,i2,i3,jj1,jj2,index) #endif { HYPRE_Int my_thread_num = hypre_GetThreadNum(); HYPRE_Int i1_begin, i1_end; hypre_GetSimpleThreadPartition(&i1_begin, &i1_end, num_cols_diag_S); HYPRE_Int *C_temp_diag_j = NULL, *C_temp_offd_j = NULL; HYPRE_Int *C_temp_diag_data = NULL, *C_temp_offd_data = NULL; if (num_paths > 1) { C_temp_diag_j = C_temp_diag_j_array + num_coarse*my_thread_num; C_temp_offd_j = C_temp_offd_j_array + num_cols_offd_C*my_thread_num; C_temp_diag_data = C_temp_diag_data_array + num_coarse*my_thread_num; C_temp_offd_data = C_temp_offd_data_array + num_cols_offd_C*my_thread_num; } HYPRE_Int *S_marker = NULL, *S_marker_offd = NULL; if (num_coarse) S_marker = S_marker_array + num_coarse*my_thread_num; if (num_cols_offd_C) S_marker_offd = S_marker_offd_array + num_cols_offd_C*my_thread_num; for (i1 = 0; i1 < num_coarse; i1++) { S_marker[i1] = -1; } for (i1 = 0; i1 < num_cols_offd_C; i1++) { S_marker_offd[i1] = -1; } // These two counters are for before filtering by num_paths HYPRE_Int jj_count_diag = 0; HYPRE_Int jj_count_offd = 0; // These two counters are for after filtering by num_paths HYPRE_Int num_nonzeros_diag = 0; HYPRE_Int num_nonzeros_offd = 0; HYPRE_Int ic_begin = num_coarse_prefix_sum[my_thread_num]; HYPRE_Int ic_end = num_coarse_prefix_sum[my_thread_num + 1]; HYPRE_Int ic; if (num_paths == 1) { for (ic = ic_begin; ic < ic_end; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). 
*--------------------------------------------------------------------*/ i1 = coarse_to_fine[ic]; HYPRE_Int jj_row_begin_diag = num_nonzeros_diag; HYPRE_Int jj_row_begin_offd = num_nonzeros_offd; C_diag_i[ic] = num_nonzeros_diag; if (num_cols_offd_C) { C_offd_i[ic] = num_nonzeros_offd; } for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (CF_marker[i2] > 0) { index = fine_to_coarse[i2]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = num_nonzeros_diag; num_nonzeros_diag++; } } for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2+1]; jj2++) { i3 = S_diag_j[jj2]; if (CF_marker[i3] > 0) { index = fine_to_coarse[i3]; if (index != ic && S_marker[index] < jj_row_begin_diag) { S_marker[index] = num_nonzeros_diag; num_nonzeros_diag++; } } } for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2+1]; jj2++) { i3 = S_offd_j[jj2]; if (CF_marker_offd[i3] > 0) { index = map_S_to_C[i3]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = num_nonzeros_offd; num_nonzeros_offd++; } } } } for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (CF_marker_offd[i2] > 0) { index = map_S_to_C[i2]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = num_nonzeros_offd; num_nonzeros_offd++; } } for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2+1]; jj2++) { i3 = S_ext_diag_j[jj2]; if (i3 != ic && S_marker[i3] < jj_row_begin_diag) { S_marker[i3] = num_nonzeros_diag; num_nonzeros_diag++; } } for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2+1]; jj2++) { i3 = S_ext_offd_j[jj2]; if (S_marker_offd[i3] < jj_row_begin_offd) { S_marker_offd[i3] = num_nonzeros_offd; num_nonzeros_offd++; } } } } /* for each row */ } /* num_paths == 1 */ else { for (ic = ic_begin; ic < ic_end; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). 
*--------------------------------------------------------------------*/ i1 = coarse_to_fine[ic]; HYPRE_Int jj_row_begin_diag = jj_count_diag; HYPRE_Int jj_row_begin_offd = jj_count_offd; C_diag_i[ic] = num_nonzeros_diag; if (num_cols_offd_C) { C_offd_i[ic] = num_nonzeros_offd; } for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (CF_marker[i2] > 0) { index = fine_to_coarse[i2]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = jj_count_diag; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 2; jj_count_diag++; } else { C_temp_diag_data[S_marker[index] - jj_row_begin_diag] += 2; } } for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2+1]; jj2++) { i3 = S_diag_j[jj2]; if (CF_marker[i3] > 0 && fine_to_coarse[i3] != ic) { index = fine_to_coarse[i3]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = jj_count_diag; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 1; jj_count_diag++; } else { C_temp_diag_data[S_marker[index] - jj_row_begin_diag]++; } } } for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2+1]; jj2++) { i3 = S_offd_j[jj2]; if (CF_marker_offd[i3] > 0) { index = map_S_to_C[i3]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = jj_count_offd; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 1; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[index] - jj_row_begin_offd]++; } } } } for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (CF_marker_offd[i2] > 0) { index = map_S_to_C[i2]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = jj_count_offd; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 2; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[index] - jj_row_begin_offd] += 2; } } for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2+1]; jj2++) { i3 = S_ext_diag_j[jj2]; if (i3 != ic) { if (S_marker[i3] < jj_row_begin_diag) { S_marker[i3] = jj_count_diag; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 1; jj_count_diag++; } else { C_temp_diag_data[S_marker[i3] - jj_row_begin_diag]++; } } } for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2+1]; jj2++) { i3 = S_ext_offd_j[jj2]; if (S_marker_offd[i3] < jj_row_begin_offd) { S_marker_offd[i3] = jj_count_offd; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 1; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[i3] - jj_row_begin_offd]++; } } } for (jj1 = jj_row_begin_diag; jj1 < jj_count_diag; jj1++) { if (C_temp_diag_data[jj1 - jj_row_begin_diag] >= num_paths) { ++num_nonzeros_diag; } C_temp_diag_data[jj1 - jj_row_begin_diag] = 0; } for (jj1 = jj_row_begin_offd; jj1 < jj_count_offd; jj1++) { if (C_temp_offd_data[jj1 - jj_row_begin_offd] >= num_paths) { ++num_nonzeros_offd; } C_temp_offd_data[jj1 - jj_row_begin_offd] = 0; } } /* for each row */ } /* num_paths > 1 */ hypre_prefix_sum_pair( &num_nonzeros_diag, &C_diag_i[num_coarse], &num_nonzeros_offd, &C_offd_i[num_coarse], prefix_sum_workspace); for (i1 = 0; i1 < num_coarse; i1++) { S_marker[i1] = -1; } for (i1 = 0; i1 < num_cols_offd_C; i1++) { S_marker_offd[i1] = -1; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #pragma omp master #endif { if (C_diag_i[num_coarse]) { C_diag_j = hypre_TAlloc(HYPRE_Int, C_diag_i[num_coarse], HYPRE_MEMORY_HOST); } if (C_offd_i[num_coarse]) { C_offd_j = hypre_TAlloc(HYPRE_Int, C_offd_i[num_coarse], HYPRE_MEMORY_HOST); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (ic = ic_begin; ic < ic_end - 1; ic++) { if (C_diag_i[ic+1] == C_diag_i[ic] && C_offd_i[ic+1] == C_offd_i[ic]) 
CF_marker[coarse_to_fine[ic]] = 2; C_diag_i[ic] += num_nonzeros_diag; C_offd_i[ic] += num_nonzeros_offd; } if (ic_begin < ic_end) { C_diag_i[ic] += num_nonzeros_diag; C_offd_i[ic] += num_nonzeros_offd; HYPRE_Int next_C_diag_i = prefix_sum_workspace[2*(my_thread_num + 1)]; HYPRE_Int next_C_offd_i = prefix_sum_workspace[2*(my_thread_num + 1) + 1]; if (next_C_diag_i == C_diag_i[ic] && next_C_offd_i == C_offd_i[ic]) CF_marker[coarse_to_fine[ic]] = 2; } if (num_paths == 1) { for (ic = ic_begin; ic < ic_end; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). *--------------------------------------------------------------------*/ i1 = coarse_to_fine[ic]; HYPRE_Int jj_row_begin_diag = num_nonzeros_diag; HYPRE_Int jj_row_begin_offd = num_nonzeros_offd; for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (CF_marker[i2] > 0) { index = fine_to_coarse[i2]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = num_nonzeros_diag; C_diag_j[num_nonzeros_diag] = index; num_nonzeros_diag++; } } for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2+1]; jj2++) { i3 = S_diag_j[jj2]; if (CF_marker[i3] > 0) { index = fine_to_coarse[i3]; if (index != ic && S_marker[index] < jj_row_begin_diag) { S_marker[index] = num_nonzeros_diag; C_diag_j[num_nonzeros_diag] = index; num_nonzeros_diag++; } } } for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2+1]; jj2++) { i3 = S_offd_j[jj2]; if (CF_marker_offd[i3] > 0) { index = map_S_to_C[i3]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = num_nonzeros_offd; C_offd_j[num_nonzeros_offd] = index; num_nonzeros_offd++; } } } } for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (CF_marker_offd[i2] > 0) { index = map_S_to_C[i2]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = num_nonzeros_offd; C_offd_j[num_nonzeros_offd] = index; num_nonzeros_offd++; } } for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2+1]; jj2++) { i3 = S_ext_diag_j[jj2]; if (i3 != ic && S_marker[i3] < jj_row_begin_diag) { S_marker[i3] = num_nonzeros_diag; C_diag_j[num_nonzeros_diag] = i3; num_nonzeros_diag++; } } for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2+1]; jj2++) { i3 = S_ext_offd_j[jj2]; if (S_marker_offd[i3] < jj_row_begin_offd) { S_marker_offd[i3] = num_nonzeros_offd; C_offd_j[num_nonzeros_offd] = i3; num_nonzeros_offd++; } } } } /* for each row */ } /* num_paths == 1 */ else { jj_count_diag = num_nonzeros_diag; jj_count_offd = num_nonzeros_offd; for (ic = ic_begin; ic < ic_end; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). 
*--------------------------------------------------------------------*/ i1 = coarse_to_fine[ic]; HYPRE_Int jj_row_begin_diag = jj_count_diag; HYPRE_Int jj_row_begin_offd = jj_count_offd; for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (CF_marker[i2] > 0) { index = fine_to_coarse[i2]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = jj_count_diag; C_temp_diag_j[jj_count_diag - jj_row_begin_diag] = index; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 2; jj_count_diag++; } else { C_temp_diag_data[S_marker[index] - jj_row_begin_diag] += 2; } } for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2+1]; jj2++) { i3 = S_diag_j[jj2]; if (CF_marker[i3] > 0 && fine_to_coarse[i3] != ic) { index = fine_to_coarse[i3]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = jj_count_diag; C_temp_diag_j[jj_count_diag - jj_row_begin_diag] = index; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 1; jj_count_diag++; } else { C_temp_diag_data[S_marker[index] - jj_row_begin_diag]++; } } } for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2+1]; jj2++) { i3 = S_offd_j[jj2]; if (CF_marker_offd[i3] > 0) { index = map_S_to_C[i3]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = jj_count_offd; C_temp_offd_j[jj_count_offd - jj_row_begin_offd] = index; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 1; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[index] - jj_row_begin_offd]++; } } } } for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (CF_marker_offd[i2] > 0) { index = map_S_to_C[i2]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = jj_count_offd; C_temp_offd_j[jj_count_offd - jj_row_begin_offd] = index; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 2; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[index] - jj_row_begin_offd] += 2; } } for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2+1]; jj2++) { i3 = S_ext_diag_j[jj2]; if (i3 != ic) { if (S_marker[i3] < jj_row_begin_diag) { S_marker[i3] = jj_count_diag; C_temp_diag_j[jj_count_diag - jj_row_begin_diag] = i3; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 1; jj_count_diag++; } else { C_temp_diag_data[S_marker[i3] - jj_row_begin_diag]++; } } } for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2+1]; jj2++) { i3 = S_ext_offd_j[jj2]; if (S_marker_offd[i3] < jj_row_begin_offd) { S_marker_offd[i3] = jj_count_offd; C_temp_offd_j[jj_count_offd - jj_row_begin_offd] = i3; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 1; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[i3] - jj_row_begin_offd]++; } } } for (jj1 = jj_row_begin_diag; jj1 < jj_count_diag; jj1++) { if (C_temp_diag_data[jj1 - jj_row_begin_diag] >= num_paths) { C_diag_j[num_nonzeros_diag++] = C_temp_diag_j[jj1 - jj_row_begin_diag]; } C_temp_diag_data[jj1 - jj_row_begin_diag] = 0; } for (jj1 = jj_row_begin_offd; jj1 < jj_count_offd; jj1++) { if (C_temp_offd_data[jj1 - jj_row_begin_offd] >= num_paths) { C_offd_j[num_nonzeros_offd++] = C_temp_offd_j[jj1 - jj_row_begin_offd]; } C_temp_offd_data[jj1 - jj_row_begin_offd] = 0; } } /* for each row */ } /* num_paths > 1 */ } /* omp parallel */ S2 = hypre_ParCSRMatrixCreate(comm, global_num_coarse, global_num_coarse, coarse_row_starts, coarse_row_starts, num_cols_offd_C, C_diag_i[num_coarse], C_offd_i[num_coarse]); hypre_ParCSRMatrixOwnsRowStarts(S2) = 0; C_diag = hypre_ParCSRMatrixDiag(S2); hypre_CSRMatrixI(C_diag) = C_diag_i; if (C_diag_i[num_coarse]) hypre_CSRMatrixJ(C_diag) = C_diag_j; C_offd = 
hypre_ParCSRMatrixOffd(S2); hypre_CSRMatrixI(C_offd) = C_offd_i; hypre_ParCSRMatrixOffd(S2) = C_offd; if (num_cols_offd_C) { if (C_offd_i[num_coarse]) hypre_CSRMatrixJ(C_offd) = C_offd_j; hypre_ParCSRMatrixColMapOffd(S2) = col_map_offd_C; } /*----------------------------------------------------------------------- * Free various arrays *-----------------------------------------------------------------------*/ hypre_TFree(C_temp_diag_j_array, HYPRE_MEMORY_HOST); hypre_TFree(C_temp_diag_data_array, HYPRE_MEMORY_HOST); hypre_TFree(C_temp_offd_j_array, HYPRE_MEMORY_HOST); hypre_TFree(C_temp_offd_data_array, HYPRE_MEMORY_HOST); hypre_TFree(S_marker_array, HYPRE_MEMORY_HOST); hypre_TFree(S_marker_offd_array, HYPRE_MEMORY_HOST); hypre_TFree(S_marker, HYPRE_MEMORY_HOST); hypre_TFree(S_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(S_ext_diag_i, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(coarse_to_fine, HYPRE_MEMORY_HOST); if (S_ext_diag_size) { hypre_TFree(S_ext_diag_j, HYPRE_MEMORY_HOST); } hypre_TFree(S_ext_offd_i, HYPRE_MEMORY_HOST); if (S_ext_offd_size) { hypre_TFree(S_ext_offd_j, HYPRE_MEMORY_HOST); } if (num_cols_offd_S) { hypre_TFree(map_S_to_C, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); } hypre_CSRMatrixMemoryLocation(C_diag) = HYPRE_MEMORY_HOST; hypre_CSRMatrixMemoryLocation(C_offd) = HYPRE_MEMORY_HOST; *C_ptr = S2; #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_CREATE_2NDS] += hypre_MPI_Wtime(); #endif hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST); hypre_TFree(num_coarse_prefix_sum, HYPRE_MEMORY_HOST); return 0; } //----------------------------------------------------------------------- HYPRE_Int hypre_BoomerAMGCreate2ndS( hypre_ParCSRMatrix *S, HYPRE_Int *CF_marker, HYPRE_Int num_paths, HYPRE_BigInt *coarse_row_starts, hypre_ParCSRMatrix **C_ptr) { #if defined(HYPRE_USING_CUDA) hypre_NvtxPushRange("Create2ndS"); #endif HYPRE_Int ierr = 0; #if defined(HYPRE_USING_CUDA) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(S)) ); if (exec == HYPRE_EXEC_DEVICE) { ierr = hypre_BoomerAMGCreate2ndSDevice( S, CF_marker, num_paths, coarse_row_starts, C_ptr ); } else #endif { ierr = hypre_BoomerAMGCreate2ndSHost( S, CF_marker, num_paths, coarse_row_starts, C_ptr ); } #if defined(HYPRE_USING_CUDA) hypre_NvtxPopRange(); #endif return ierr; } /*-------------------------------------------------------------------------- * hypre_BoomerAMGCorrectCFMarker : corrects CF_marker after aggr. coarsening *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCorrectCFMarker(HYPRE_Int *CF_marker, HYPRE_Int num_var, HYPRE_Int *new_CF_marker) { HYPRE_Int i, cnt; cnt = 0; for (i=0; i < num_var; i++) { if (CF_marker[i] > 0 ) { if (CF_marker[i] == 1) CF_marker[i] = new_CF_marker[cnt++]; else { CF_marker[i] = 1; cnt++;} } } return 0; } /*-------------------------------------------------------------------------- * hypre_BoomerAMGCorrectCFMarker2 : corrects CF_marker after aggr. 
coarsening,
 *        but marks new F-points (previous C-points) as -2
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGCorrectCFMarker2(HYPRE_Int *CF_marker, HYPRE_Int num_var, HYPRE_Int *new_CF_marker)
{
   HYPRE_Int i, cnt;

   cnt = 0;
   for (i = 0; i < num_var; i++)
   {
      if (CF_marker[i] > 0)
      {
         if (new_CF_marker[cnt] == -1)
         {
            CF_marker[i] = -2;
         }
         else
         {
            CF_marker[i] = 1;
         }
         cnt++;
      }
   }
   return 0;
}
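The two correction routines above replay the coloring produced by a second (aggressive) coarsening pass onto the original fine grid: new_CF_marker holds one entry per old C-point, consumed in order, and the second variant additionally tags demoted C-points with -2. The following standalone sketch (plain int buffers and a hypothetical toy driver, not hypre's API) illustrates that pass:

#include <stdio.h>

/* Sketch of the CF-marker correction pass: old C-points (CF_marker[i] > 0)
   consume new_CF_marker[cnt] in order; entries equal to -1 demote the point
   to a new F-point, tagged -2 as in hypre_BoomerAMGCorrectCFMarker2. */
static void correct_cf_marker2_sketch(int *CF_marker, int num_var,
                                      const int *new_CF_marker)
{
   int i, cnt = 0;
   for (i = 0; i < num_var; i++)
   {
      if (CF_marker[i] > 0)
      {
         CF_marker[i] = (new_CF_marker[cnt] == -1) ? -2 : 1;
         cnt++;
      }
   }
}

int main(void)
{
   /* Fine-grid coloring C F C C F; its 3 C-points are recolored as C F C. */
   int cf[5]     = { 1, -1, 1, 1, -1 };
   int new_cf[3] = { 1, -1, 1 };
   int i;

   correct_cf_marker2_sketch(cf, 5, new_cf);
   for (i = 0; i < 5; i++) printf("%d ", cf[i]);   /* prints: 1 -1 -2 1 -1 */
   printf("\n");
   return 0;
}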
Example_taskloop_simd_reduction.1.c
/*
 * @@name:       taskloop_simd_reduction.1c
 * @@type:       C
 * @@compilable: yes
 * @@linkable:   yes
 * @@expect:     success
 * @@version:    omp_5.0
 */
#include <stdio.h>
#define N 100

int main(){
   int i, a[N], asum = 0;

   for (i = 0; i < N; i++) a[i] = i;

   // taskloop reductions
   #pragma omp parallel master
   #pragma omp taskloop reduction(+:asum)            //taskloop 1
   for (i = 0; i < N; i++) { asum += a[i]; }

   #pragma omp parallel reduction(task, +:asum)      // parallel reduction a
   {
      #pragma omp master
      #pragma omp task in_reduction(+:asum)          //task 2
      for (i = 0; i < N; i++) { asum += a[i]; }

      #pragma omp master taskloop in_reduction(+:asum)      //taskloop 2
      for (i = 0; i < N; i++) { asum += a[i]; }
   }

   // taskloop simd reductions
   #pragma omp parallel master
   #pragma omp taskloop simd reduction(+:asum)       //taskloop simd 3
   for (i = 0; i < N; i++) { asum += a[i]; }

   #pragma omp parallel reduction(task, +:asum)      // parallel reduction b
   {
      #pragma omp master
      #pragma omp task in_reduction(+:asum)          //task 4
      for (i = 0; i < N; i++) { asum += a[i]; }

      #pragma omp master taskloop simd in_reduction(+:asum) //taskloop simd 4
      for (i = 0; i < N; i++) { asum += a[i]; }
   }

   printf("asum=%d \n", asum);
   // output: asum=29700
}
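The printed value follows from the structure of the example: each of the six reduction loops (taskloop 1, task 2, taskloop 2, taskloop simd 3, task 4, taskloop simd 4) adds the sum 0 + 1 + ... + 99 = 4950 into asum, giving 29700. A trivial serial check of that arithmetic (new code, not part of the OpenMP example):

#include <stdio.h>
#define N 100

int main(void)
{
   int i, one_pass = 0;
   for (i = 0; i < N; i++) one_pass += i;         /* 0 + 1 + ... + 99 = 4950 */
   printf("expected asum = %d\n", 6 * one_pass);  /* six reduction loops -> 29700 */
   return 0;
}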
antidep1-var-yes.c
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.

This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
  notice, this list of conditions and the disclaimer below.

* Redistributions in binary form must reproduce the above copyright
  notice, this list of conditions and the disclaimer (as noted below)
  in the documentation and/or other materials provided with the
  distribution.

* Neither the name of the LLNS/LLNL nor the names of its contributors
  may be used to endorse or promote products derived from this
  software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

// Uses a variable-length array in C99.
// Avoids dynamically allocated arrays, which introduce pointers and are
// hard for static analysis tools to handle.
#include <stdlib.h>

int main(int argc, char *argv[])
{
  int i, j;
  int len = 20;

  if (argc > 1) len = atoi(argv[1]);

  double a[len][len];

  for (i = 0; i < len; i++)
    for (j = 0; j < len; j++)
      a[i][j] = 0.5;

#pragma omp parallel for
  for (i = 0; i < len - 1; i += 1)
  {
    for (j = 0; j < len; j += 1)
    {
      a[i][j] += a[i + 1][j];
    }
  }

  return 0;
}
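The parallel loop above is the racy pattern this benchmark is named for: iteration i reads a[i+1][j] while iteration i+1 may concurrently update that same row (a loop-carried anti-dependence), and j, declared before the construct, is shared by default as written. One conventional way to restore the serial semantics, sketched here as an illustration rather than as the benchmark's prescribed fix, is to privatize j and read from an unmodified snapshot of the array:

#include <stdlib.h>
#include <string.h>

int main(int argc, char *argv[])
{
  int i, j;
  int len = 20;

  if (argc > 1) len = atoi(argv[1]);

  double a[len][len];
  double orig[len][len];          /* hypothetical read-only snapshot, not in the benchmark */

  for (i = 0; i < len; i++)
    for (j = 0; j < len; j++)
      a[i][j] = 0.5;

  memcpy(orig, a, sizeof(a));     /* copy taken before the parallel update */

#pragma omp parallel for private(j)
  for (i = 0; i < len - 1; i += 1)
  {
    for (j = 0; j < len; j += 1)
    {
      a[i][j] += orig[i + 1][j];  /* reads never alias another iteration's writes */
    }
  }

  return 0;
}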
task_largenumber.c
#include <stdio.h>
#include <omp.h>

#define LARGE_NUMBER 10
//#define LARGE_NUMBER 10000000

double item[LARGE_NUMBER];

void process(double input)
{
  printf("processing %f by thread %d\n", input, omp_get_thread_num());
}

int cutoff = 1000;

int main()
{
#pragma omp parallel
  {
#pragma omp single
    {
      int i;
      printf("Using %d threads.\n", omp_get_num_threads());
      for (i = 0; i < LARGE_NUMBER; i++)
#pragma omp task if (i < cutoff)
        process(item[i]);
    }
  }
  return 0;
}
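The if(i < cutoff) clause means that once i reaches cutoff the generated tasks are undeferred: the encountering thread suspends the loop and runs process(item[i]) immediately, which throttles deferred-task creation when LARGE_NUMBER is raised to the commented-out 10000000. The related final clause, sketched below under the assumption of the same item[]/process() definitions, instead marks the generated task as final, so that any tasks it would generate in turn become included tasks; it is the usual cut-off for recursive task generation rather than for a flat loop like this one:

#include <stdio.h>
#include <omp.h>

#define LARGE_NUMBER 10

double item[LARGE_NUMBER];

void process(double input)
{
  printf("processing %f by thread %d\n", input, omp_get_thread_num());
}

int cutoff = 1000;

int main()
{
#pragma omp parallel
  {
#pragma omp single
    {
      int i;
      for (i = 0; i < LARGE_NUMBER; i++)
        /* a final task may itself still be deferred, but every task generated
           inside it is included, i.e. executed immediately in its region */
#pragma omp task final(i >= cutoff)
        process(item[i]);
    }
  }
  return 0;
}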
libsais16.c
/*-- This file is a part of libsais, a library for linear time suffix array and burrows wheeler transform construction. Copyright (c) 2021-2022 Ilya Grebnov <ilya.grebnov@gmail.com> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Please see the file LICENSE for full copyright information. --*/ #include "libsais16.h" #include <stddef.h> #include <stdint.h> #include <stdlib.h> #include <string.h> #include <limits.h> #if defined(_OPENMP) #include <omp.h> #else #define UNUSED(_x) (void)(_x) #endif typedef int32_t sa_sint_t; typedef uint32_t sa_uint_t; typedef ptrdiff_t fast_sint_t; typedef size_t fast_uint_t; #define SAINT_BIT (32) #define SAINT_MAX INT32_MAX #define SAINT_MIN INT32_MIN #define ALPHABET_SIZE (1 << CHAR_BIT << CHAR_BIT) #define UNBWT_FASTBITS (17) #define SUFFIX_GROUP_BIT (SAINT_BIT - 1) #define SUFFIX_GROUP_MARKER (((sa_sint_t)1) << (SUFFIX_GROUP_BIT - 1)) #define BUCKETS_INDEX2(_c, _s) (((_c) << 1) + (_s)) #define BUCKETS_INDEX4(_c, _s) (((_c) << 2) + (_s)) #define LIBSAIS_PER_THREAD_CACHE_SIZE (24576) typedef struct LIBSAIS_THREAD_CACHE { sa_sint_t symbol; sa_sint_t index; } LIBSAIS_THREAD_CACHE; typedef union LIBSAIS_THREAD_STATE { struct { fast_sint_t position; fast_sint_t count; fast_sint_t m; fast_sint_t last_lms_suffix; sa_sint_t * buckets; LIBSAIS_THREAD_CACHE * cache; } state; uint8_t padding[64]; } LIBSAIS_THREAD_STATE; typedef struct LIBSAIS_CONTEXT { sa_sint_t * buckets; LIBSAIS_THREAD_STATE * thread_state; fast_sint_t threads; } LIBSAIS_CONTEXT; typedef struct LIBSAIS_UNBWT_CONTEXT { sa_uint_t * bucket2; uint16_t * fastbits; sa_uint_t * buckets; fast_sint_t threads; } LIBSAIS_UNBWT_CONTEXT; #if defined(__GNUC__) || defined(__clang__) #define RESTRICT __restrict__ #elif defined(_MSC_VER) || defined(__INTEL_COMPILER) #define RESTRICT __restrict #else #error Your compiler, configuration or platform is not supported. #endif #if defined(__has_builtin) #if __has_builtin(__builtin_prefetch) #define HAS_BUILTIN_PREFECTCH #endif #elif defined(__GNUC__) && ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 2)) || (__GNUC__ >= 4) #define HAS_BUILTIN_PREFECTCH #endif #if defined(HAS_BUILTIN_PREFECTCH) #define libsais16_prefetch(address) __builtin_prefetch((const void *)(address), 0, 0) #define libsais16_prefetchw(address) __builtin_prefetch((const void *)(address), 1, 0) #elif defined (_M_IX86) || defined (_M_AMD64) #include <intrin.h> #define libsais16_prefetch(address) _mm_prefetch((const void *)(address), _MM_HINT_NTA) #define libsais16_prefetchw(address) _m_prefetchw((const void *)(address)) #elif defined (_M_ARM) #include <intrin.h> #define libsais16_prefetch(address) __prefetch((const void *)(address)) #define libsais16_prefetchw(address) __prefetchw((const void *)(address)) #elif defined (_M_ARM64) #include <intrin.h> #define libsais16_prefetch(address) __prefetch2((const void *)(address), 1) #define libsais16_prefetchw(address) __prefetch2((const void *)(address), 17) #else #error Your compiler, configuration or platform is not supported. 
#endif #if !defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__) #if defined(_LITTLE_ENDIAN) \ || (defined(BYTE_ORDER) && defined(LITTLE_ENDIAN) && BYTE_ORDER == LITTLE_ENDIAN) \ || (defined(_BYTE_ORDER) && defined(_LITTLE_ENDIAN) && _BYTE_ORDER == _LITTLE_ENDIAN) \ || (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && __BYTE_ORDER == __LITTLE_ENDIAN) \ || (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) #define __LITTLE_ENDIAN__ #elif defined(_BIG_ENDIAN) \ || (defined(BYTE_ORDER) && defined(BIG_ENDIAN) && BYTE_ORDER == BIG_ENDIAN) \ || (defined(_BYTE_ORDER) && defined(_BIG_ENDIAN) && _BYTE_ORDER == _BIG_ENDIAN) \ || (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && __BYTE_ORDER == __BIG_ENDIAN) \ || (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) #define __BIG_ENDIAN__ #elif defined(_WIN32) #define __LITTLE_ENDIAN__ #endif #endif static void * libsais16_align_up(const void * address, size_t alignment) { return (void *)((((ptrdiff_t)address) + ((ptrdiff_t)alignment) - 1) & (-((ptrdiff_t)alignment))); } static void * libsais16_alloc_aligned(size_t size, size_t alignment) { void * address = malloc(size + sizeof(short) + alignment - 1); if (address != NULL) { void * aligned_address = libsais16_align_up((void *)((ptrdiff_t)address + (ptrdiff_t)(sizeof(short))), alignment); ((short *)aligned_address)[-1] = (short)((ptrdiff_t)aligned_address - (ptrdiff_t)address); return aligned_address; } return NULL; } static void libsais16_free_aligned(void * aligned_address) { if (aligned_address != NULL) { free((void *)((ptrdiff_t)aligned_address - ((short *)aligned_address)[-1])); } } static LIBSAIS_THREAD_STATE * libsais16_alloc_thread_state(sa_sint_t threads) { LIBSAIS_THREAD_STATE * RESTRICT thread_state = (LIBSAIS_THREAD_STATE *)libsais16_alloc_aligned((size_t)threads * sizeof(LIBSAIS_THREAD_STATE), 4096); sa_sint_t * RESTRICT thread_buckets = (sa_sint_t *)libsais16_alloc_aligned((size_t)threads * 4 * ALPHABET_SIZE * sizeof(sa_sint_t), 4096); LIBSAIS_THREAD_CACHE * RESTRICT thread_cache = (LIBSAIS_THREAD_CACHE *)libsais16_alloc_aligned((size_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE * sizeof(LIBSAIS_THREAD_CACHE), 4096); if (thread_state != NULL && thread_buckets != NULL && thread_cache != NULL) { fast_sint_t t; for (t = 0; t < threads; ++t) { thread_state[t].state.buckets = thread_buckets; thread_buckets += 4 * ALPHABET_SIZE; thread_state[t].state.cache = thread_cache; thread_cache += LIBSAIS_PER_THREAD_CACHE_SIZE; } return thread_state; } libsais16_free_aligned(thread_cache); libsais16_free_aligned(thread_buckets); libsais16_free_aligned(thread_state); return NULL; } static void libsais16_free_thread_state(LIBSAIS_THREAD_STATE * thread_state) { if (thread_state != NULL) { libsais16_free_aligned(thread_state[0].state.cache); libsais16_free_aligned(thread_state[0].state.buckets); libsais16_free_aligned(thread_state); } } static LIBSAIS_CONTEXT * libsais16_create_ctx_main(sa_sint_t threads) { LIBSAIS_CONTEXT * RESTRICT ctx = (LIBSAIS_CONTEXT *)libsais16_alloc_aligned(sizeof(LIBSAIS_CONTEXT), 64); sa_sint_t * RESTRICT buckets = (sa_sint_t *)libsais16_alloc_aligned(8 * ALPHABET_SIZE * sizeof(sa_sint_t), 4096); LIBSAIS_THREAD_STATE * RESTRICT thread_state = threads > 1 ? 
libsais16_alloc_thread_state(threads) : NULL; if (ctx != NULL && buckets != NULL && (thread_state != NULL || threads == 1)) { ctx->buckets = buckets; ctx->threads = threads; ctx->thread_state = thread_state; return ctx; } libsais16_free_thread_state(thread_state); libsais16_free_aligned(buckets); libsais16_free_aligned(ctx); return NULL; } static void libsais16_free_ctx_main(LIBSAIS_CONTEXT * ctx) { if (ctx != NULL) { libsais16_free_thread_state(ctx->thread_state); libsais16_free_aligned(ctx->buckets); libsais16_free_aligned(ctx); } } #if defined(_OPENMP) static sa_sint_t libsais16_count_negative_marked_suffixes(sa_sint_t * RESTRICT SA, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { sa_sint_t count = 0; fast_sint_t i; for (i = omp_block_start; i < omp_block_start + omp_block_size; ++i) { count += (SA[i] < 0); } return count; } static sa_sint_t libsais16_count_zero_marked_suffixes(sa_sint_t * RESTRICT SA, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { sa_sint_t count = 0; fast_sint_t i; for (i = omp_block_start; i < omp_block_start + omp_block_size; ++i) { count += (SA[i] == 0); } return count; } static void libsais16_place_cached_suffixes(sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4) { libsais16_prefetch(&cache[i + 2 * prefetch_distance]); libsais16_prefetchw(&SA[cache[i + prefetch_distance + 0].symbol]); libsais16_prefetchw(&SA[cache[i + prefetch_distance + 1].symbol]); libsais16_prefetchw(&SA[cache[i + prefetch_distance + 2].symbol]); libsais16_prefetchw(&SA[cache[i + prefetch_distance + 3].symbol]); SA[cache[i + 0].symbol] = cache[i + 0].index; SA[cache[i + 1].symbol] = cache[i + 1].index; SA[cache[i + 2].symbol] = cache[i + 2].index; SA[cache[i + 3].symbol] = cache[i + 3].index; } for (j += prefetch_distance + 3; i < j; i += 1) { SA[cache[i].symbol] = cache[i].index; } } static void libsais16_compact_and_place_cached_suffixes(sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j, l; for (i = omp_block_start, j = omp_block_start + omp_block_size - 3, l = omp_block_start; i < j; i += 4) { libsais16_prefetchw(&cache[i + prefetch_distance]); cache[l] = cache[i + 0]; l += cache[l].symbol >= 0; cache[l] = cache[i + 1]; l += cache[l].symbol >= 0; cache[l] = cache[i + 2]; l += cache[l].symbol >= 0; cache[l] = cache[i + 3]; l += cache[l].symbol >= 0; } for (j += 3; i < j; i += 1) { cache[l] = cache[i]; l += cache[l].symbol >= 0; } libsais16_place_cached_suffixes(SA, cache, omp_block_start, l - omp_block_start); } static void libsais16_accumulate_counts_s32_2(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride) { sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride; fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s]; } } static void libsais16_accumulate_counts_s32_3(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride) { sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride; sa_sint_t * RESTRICT bucket02 = bucket01 - bucket_stride; fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s] + bucket02[s]; } } static void libsais16_accumulate_counts_s32_4(sa_sint_t * 
RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride) { sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride; sa_sint_t * RESTRICT bucket02 = bucket01 - bucket_stride; sa_sint_t * RESTRICT bucket03 = bucket02 - bucket_stride; fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s] + bucket02[s] + bucket03[s]; } } static void libsais16_accumulate_counts_s32_5(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride) { sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride; sa_sint_t * RESTRICT bucket02 = bucket01 - bucket_stride; sa_sint_t * RESTRICT bucket03 = bucket02 - bucket_stride; sa_sint_t * RESTRICT bucket04 = bucket03 - bucket_stride; fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s] + bucket02[s] + bucket03[s] + bucket04[s]; } } static void libsais16_accumulate_counts_s32_6(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride) { sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride; sa_sint_t * RESTRICT bucket02 = bucket01 - bucket_stride; sa_sint_t * RESTRICT bucket03 = bucket02 - bucket_stride; sa_sint_t * RESTRICT bucket04 = bucket03 - bucket_stride; sa_sint_t * RESTRICT bucket05 = bucket04 - bucket_stride; fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s] + bucket02[s] + bucket03[s] + bucket04[s] + bucket05[s]; } } static void libsais16_accumulate_counts_s32_7(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride) { sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride; sa_sint_t * RESTRICT bucket02 = bucket01 - bucket_stride; sa_sint_t * RESTRICT bucket03 = bucket02 - bucket_stride; sa_sint_t * RESTRICT bucket04 = bucket03 - bucket_stride; sa_sint_t * RESTRICT bucket05 = bucket04 - bucket_stride; sa_sint_t * RESTRICT bucket06 = bucket05 - bucket_stride; fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s] + bucket02[s] + bucket03[s] + bucket04[s] + bucket05[s] + bucket06[s]; } } static void libsais16_accumulate_counts_s32_8(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride) { sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride; sa_sint_t * RESTRICT bucket02 = bucket01 - bucket_stride; sa_sint_t * RESTRICT bucket03 = bucket02 - bucket_stride; sa_sint_t * RESTRICT bucket04 = bucket03 - bucket_stride; sa_sint_t * RESTRICT bucket05 = bucket04 - bucket_stride; sa_sint_t * RESTRICT bucket06 = bucket05 - bucket_stride; sa_sint_t * RESTRICT bucket07 = bucket06 - bucket_stride; fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s] + bucket02[s] + bucket03[s] + bucket04[s] + bucket05[s] + bucket06[s] + bucket07[s]; } } static void libsais16_accumulate_counts_s32_9(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride) { sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride; sa_sint_t * RESTRICT bucket02 = bucket01 - bucket_stride; sa_sint_t * RESTRICT bucket03 = bucket02 - bucket_stride; sa_sint_t * RESTRICT bucket04 = bucket03 - bucket_stride; sa_sint_t * RESTRICT bucket05 = bucket04 - bucket_stride; sa_sint_t * RESTRICT bucket06 = bucket05 - bucket_stride; sa_sint_t * RESTRICT bucket07 = bucket06 - bucket_stride; sa_sint_t * RESTRICT bucket08 = bucket07 - bucket_stride; fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s] + bucket02[s] + bucket03[s] + bucket04[s] + 
bucket05[s] + bucket06[s] + bucket07[s] + bucket08[s]; } } static void libsais16_accumulate_counts_s32(sa_sint_t * RESTRICT buckets, fast_sint_t bucket_size, fast_sint_t bucket_stride, fast_sint_t num_buckets) { while (num_buckets >= 9) { libsais16_accumulate_counts_s32_9(buckets - (num_buckets - 9) * bucket_stride, bucket_size, bucket_stride); num_buckets -= 8; } switch (num_buckets) { case 1: break; case 2: libsais16_accumulate_counts_s32_2(buckets, bucket_size, bucket_stride); break; case 3: libsais16_accumulate_counts_s32_3(buckets, bucket_size, bucket_stride); break; case 4: libsais16_accumulate_counts_s32_4(buckets, bucket_size, bucket_stride); break; case 5: libsais16_accumulate_counts_s32_5(buckets, bucket_size, bucket_stride); break; case 6: libsais16_accumulate_counts_s32_6(buckets, bucket_size, bucket_stride); break; case 7: libsais16_accumulate_counts_s32_7(buckets, bucket_size, bucket_stride); break; case 8: libsais16_accumulate_counts_s32_8(buckets, bucket_size, bucket_stride); break; } } #endif static void libsais16_gather_lms_suffixes_16u(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, fast_sint_t m, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { if (omp_block_size > 0) { const fast_sint_t prefetch_distance = 128; fast_sint_t i, j = omp_block_start + omp_block_size, c0 = T[omp_block_start + omp_block_size - 1], c1 = -1; while (j < n && (c1 = T[j]) == c0) { ++j; } fast_uint_t s = c0 >= c1; for (i = omp_block_start + omp_block_size - 2, j = omp_block_start + 3; i >= j; i -= 4) { libsais16_prefetch(&T[i - prefetch_distance]); c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 0); m -= ((s & 3) == 1); c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 1); m -= ((s & 3) == 1); c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 2); m -= ((s & 3) == 1); } for (j -= 3; i >= j; i -= 1) { c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); } SA[m] = (sa_sint_t)(i + 1); } } static void libsais16_gather_lms_suffixes_16u_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536 && omp_get_dynamic() == 0) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : n - omp_block_start; if (omp_num_threads == 1) { libsais16_gather_lms_suffixes_16u(T, SA, n, (fast_sint_t)n - 1, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { fast_sint_t t, m = 0; for (t = omp_num_threads - 1; t > omp_thread_num; --t) { m += thread_state[t].state.m; } libsais16_gather_lms_suffixes_16u(T, SA, n, (fast_sint_t)n - 1 - m, omp_block_start, omp_block_size); #pragma omp barrier if (thread_state[omp_thread_num].state.m > 0) { SA[(fast_sint_t)n - 1 - m] = (sa_sint_t)thread_state[omp_thread_num].state.last_lms_suffix; } } #endif } } static sa_sint_t libsais16_gather_lms_suffixes_32s(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n) { const fast_sint_t prefetch_distance = 32; sa_sint_t i = n - 2; sa_sint_t m = n - 1; fast_uint_t s = 1; fast_sint_t c0 = T[n - 1]; fast_sint_t c1 = 0; for (; i >= 3; i -= 4) { libsais16_prefetch(&T[i - prefetch_distance]); c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = i + 1; m -= ((s & 3) == 1); c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i - 0; m -= ((s & 3) == 1); c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = i - 1; m -= ((s & 3) == 1); c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i - 2; m -= ((s & 3) == 1); } for (; i >= 0; i -= 1) { c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i + 1; m -= ((s & 3) == 1); } return n - 1 - m; } static sa_sint_t libsais16_gather_compacted_lms_suffixes_32s(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n) { const fast_sint_t prefetch_distance = 32; sa_sint_t i = n - 2; sa_sint_t m = n - 1; fast_uint_t s = 1; fast_sint_t c0 = T[n - 1]; fast_sint_t c1 = 0; for (; i >= 3; i -= 4) { libsais16_prefetch(&T[i - prefetch_distance]); c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = i + 1; m -= ((fast_sint_t)(s & 3) == (c0 >= 0)); c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i - 0; m -= ((fast_sint_t)(s & 3) == (c1 >= 0)); c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = i - 1; m -= ((fast_sint_t)(s & 3) == (c0 >= 0)); c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i - 2; m -= ((fast_sint_t)(s & 3) == (c1 >= 0)); } for (; i >= 0; i -= 1) { c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i + 1; m -= ((fast_sint_t)(s & 3) == (c1 >= 0)); } return n - 1 - m; } #if defined(_OPENMP) static void libsais16_count_lms_suffixes_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets) { const fast_sint_t prefetch_distance = 32; memset(buckets, 0, 4 * (size_t)k * sizeof(sa_sint_t)); sa_sint_t i = n - 2; fast_uint_t s = 1; fast_sint_t c0 = T[n - 1]; fast_sint_t c1 = 0; for (; i >= prefetch_distance + 3; i -= 4) { libsais16_prefetch(&T[i - 2 * prefetch_distance]); libsais16_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 0], 0)]); libsais16_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 1], 0)]); libsais16_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 2], 0)]); libsais16_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 3], 0)]); c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++; c0 = T[i - 1]; s = (s << 
1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++; c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++; c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++; } for (; i >= 0; i -= 1) { c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++; } buckets[BUCKETS_INDEX4((fast_uint_t)c0, (s << 1) & 3)]++; } #endif static void libsais16_count_lms_suffixes_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets) { const fast_sint_t prefetch_distance = 32; memset(buckets, 0, 2 * (size_t)k * sizeof(sa_sint_t)); sa_sint_t i = n - 2; fast_uint_t s = 1; fast_sint_t c0 = T[n - 1]; fast_sint_t c1 = 0; for (; i >= prefetch_distance + 3; i -= 4) { libsais16_prefetch(&T[i - 2 * prefetch_distance]); libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 0], 0)]); libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 1], 0)]); libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 2], 0)]); libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 3], 0)]); c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++; c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++; c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++; c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++; } for (; i >= 0; i -= 1) { c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++; } buckets[BUCKETS_INDEX2((fast_uint_t)c0, 0)]++; } #if defined(_OPENMP) static void libsais16_count_compacted_lms_suffixes_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets) { const fast_sint_t prefetch_distance = 32; memset(buckets, 0, 2 * (size_t)k * sizeof(sa_sint_t)); sa_sint_t i = n - 2; fast_uint_t s = 1; fast_sint_t c0 = T[n - 1]; fast_sint_t c1 = 0; for (; i >= prefetch_distance + 3; i -= 4) { libsais16_prefetch(&T[i - 2 * prefetch_distance]); libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 0] & SAINT_MAX, 0)]); libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 1] & SAINT_MAX, 0)]); libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 2] & SAINT_MAX, 0)]); libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 3] & SAINT_MAX, 0)]); c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++; c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++; c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++; c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++; } for (; i >= 0; i -= 
1) { c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++; } c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, 0)]++; } #endif static sa_sint_t libsais16_count_and_gather_lms_suffixes_16u(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { memset(buckets, 0, 4 * ALPHABET_SIZE * sizeof(sa_sint_t)); fast_sint_t m = omp_block_start + omp_block_size - 1; if (omp_block_size > 0) { const fast_sint_t prefetch_distance = 128; fast_sint_t i, j = m + 1, c0 = T[m], c1 = -1; while (j < n && (c1 = T[j]) == c0) { ++j; } fast_uint_t s = c0 >= c1; for (i = m - 1, j = omp_block_start + 3; i >= j; i -= 4) { libsais16_prefetch(&T[i - prefetch_distance]); c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++; c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 0); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++; c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++; c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 2); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++; } for (j -= 3; i >= j; i -= 1) { c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++; } c1 = (i >= 0) ? T[i] : -1; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++; } return (sa_sint_t)(omp_block_start + omp_block_size - 1 - m); } static sa_sint_t libsais16_count_and_gather_lms_suffixes_16u_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t m = 0; #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536 && omp_get_dynamic() == 0) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : n - omp_block_start; if (omp_num_threads == 1) { m = libsais16_count_and_gather_lms_suffixes_16u(T, SA, n, buckets, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.position = omp_block_start + omp_block_size; thread_state[omp_thread_num].state.m = libsais16_count_and_gather_lms_suffixes_16u(T, SA, n, thread_state[omp_thread_num].state.buckets, omp_block_start, omp_block_size); if (thread_state[omp_thread_num].state.m > 0) { thread_state[omp_thread_num].state.last_lms_suffix = SA[thread_state[omp_thread_num].state.position - 1]; } } #pragma omp barrier #pragma omp master { memset(buckets, 0, 4 * ALPHABET_SIZE * sizeof(sa_sint_t)); fast_sint_t t; for (t = omp_num_threads - 1; t >= 0; --t) { m += (sa_sint_t)thread_state[t].state.m; if (t != omp_num_threads - 1 && thread_state[t].state.m > 0) { memcpy(&SA[n - m], &SA[thread_state[t].state.position - thread_state[t].state.m], (size_t)thread_state[t].state.m * sizeof(sa_sint_t)); } { sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets; fast_sint_t s; for (s = 0; s < 4 * ALPHABET_SIZE; s += 1) { sa_sint_t A = buckets[s], B = temp_bucket[s]; buckets[s] = A + B; temp_bucket[s] = A; } } } } } #endif } return m; } static sa_sint_t libsais16_count_and_gather_lms_suffixes_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { memset(buckets, 0, 4 * (size_t)k * sizeof(sa_sint_t)); fast_sint_t m = omp_block_start + omp_block_size - 1; if (omp_block_size > 0) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j = m + 1, c0 = T[m], c1 = -1; while (j < n && (c1 = T[j]) == c0) { ++j; } fast_uint_t s = c0 >= c1; for (i = m - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4) { libsais16_prefetch(&T[i - 2 * prefetch_distance]); libsais16_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 0], 0)]); libsais16_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 1], 0)]); libsais16_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 2], 0)]); libsais16_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 3], 0)]); c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++; c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 0); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++; c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++; c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 2); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++; } for (j -= prefetch_distance + 3; i >= j; i -= 1) { c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++; } c1 = (i >= 0) ? 
T[i] : -1; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++; } return (sa_sint_t)(omp_block_start + omp_block_size - 1 - m); } static sa_sint_t libsais16_count_and_gather_lms_suffixes_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { memset(buckets, 0, 2 * (size_t)k * sizeof(sa_sint_t)); fast_sint_t m = omp_block_start + omp_block_size - 1; if (omp_block_size > 0) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j = m + 1, c0 = T[m], c1 = -1; while (j < n && (c1 = T[j]) == c0) { ++j; } fast_uint_t s = c0 >= c1; for (i = m - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4) { libsais16_prefetch(&T[i - 2 * prefetch_distance]); libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 0], 0)]); libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 1], 0)]); libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 2], 0)]); libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 3], 0)]); c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++; c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 0); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++; c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++; c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 2); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++; } for (j -= prefetch_distance + 3; i >= j; i -= 1) { c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++; } c1 = (i >= 0) ? 
T[i] : -1; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++; } return (sa_sint_t)(omp_block_start + omp_block_size - 1 - m); } static sa_sint_t libsais16_count_and_gather_compacted_lms_suffixes_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { memset(buckets, 0, 2 * (size_t)k * sizeof(sa_sint_t)); fast_sint_t m = omp_block_start + omp_block_size - 1; if (omp_block_size > 0) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j = m + 1, c0 = T[m], c1 = -1; while (j < n && (c1 = T[j]) == c0) { ++j; } fast_uint_t s = c0 >= c1; for (i = m - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4) { libsais16_prefetch(&T[i - 2 * prefetch_distance]); libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 0] & SAINT_MAX, 0)]); libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 1] & SAINT_MAX, 0)]); libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 2] & SAINT_MAX, 0)]); libsais16_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 3] & SAINT_MAX, 0)]); c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((fast_sint_t)(s & 3) == (c0 >= 0)); c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++; c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 0); m -= ((fast_sint_t)(s & 3) == (c1 >= 0)); c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++; c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 1); m -= ((fast_sint_t)(s & 3) == (c0 >= 0)); c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++; c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 2); m -= ((fast_sint_t)(s & 3) == (c1 >= 0)); c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++; } for (j -= prefetch_distance + 3; i >= j; i -= 1) { c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((fast_sint_t)(s & 3) == (c1 >= 0)); c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++; } c1 = (i >= 0) ? 
T[i] : -1; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((fast_sint_t)(s & 3) == (c0 >= 0)); c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++; } return (sa_sint_t)(omp_block_start + omp_block_size - 1 - m); } #if defined(_OPENMP) static fast_sint_t libsais16_get_bucket_stride(fast_sint_t free_space, fast_sint_t bucket_size, fast_sint_t num_buckets) { fast_sint_t bucket_size_1024 = (bucket_size + 1023) & (-1024); if (free_space / (num_buckets - 1) >= bucket_size_1024) { return bucket_size_1024; } fast_sint_t bucket_size_16 = (bucket_size + 15) & (-16); if (free_space / (num_buckets - 1) >= bucket_size_16) { return bucket_size_16; } return bucket_size; } static sa_sint_t libsais16_count_and_gather_lms_suffixes_32s_4k_fs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t m = 0; #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start; if (omp_num_threads == 1) { m = libsais16_count_and_gather_lms_suffixes_32s_4k(T, SA, n, k, buckets, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { fast_sint_t bucket_size = 4 * (fast_sint_t)k; fast_sint_t bucket_stride = libsais16_get_bucket_stride(buckets - &SA[n], bucket_size, omp_num_threads); { thread_state[omp_thread_num].state.position = omp_block_start + omp_block_size; thread_state[omp_thread_num].state.count = libsais16_count_and_gather_lms_suffixes_32s_4k(T, SA, n, k, buckets - (omp_thread_num * bucket_stride), omp_block_start, omp_block_size); } #pragma omp barrier if (omp_thread_num == omp_num_threads - 1) { fast_sint_t t; for (t = omp_num_threads - 1; t >= 0; --t) { m += (sa_sint_t)thread_state[t].state.count; if (t != omp_num_threads - 1 && thread_state[t].state.count > 0) { memcpy(&SA[n - m], &SA[thread_state[t].state.position - thread_state[t].state.count], (size_t)thread_state[t].state.count * sizeof(sa_sint_t)); } } } else { omp_num_threads = omp_num_threads - 1; omp_block_stride = (bucket_size / omp_num_threads) & (-16); omp_block_start = omp_thread_num * omp_block_stride; omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : bucket_size - omp_block_start; libsais16_accumulate_counts_s32(buckets + omp_block_start, omp_block_size, bucket_stride, omp_num_threads + 1); } } #endif } return m; } static sa_sint_t libsais16_count_and_gather_lms_suffixes_32s_2k_fs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t m = 0; #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start; if (omp_num_threads == 1) { m = libsais16_count_and_gather_lms_suffixes_32s_2k(T, SA, n, k, buckets, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { fast_sint_t bucket_size = 2 * (fast_sint_t)k; fast_sint_t bucket_stride = libsais16_get_bucket_stride(buckets - &SA[n], bucket_size, omp_num_threads); { thread_state[omp_thread_num].state.position = omp_block_start + omp_block_size; thread_state[omp_thread_num].state.count = libsais16_count_and_gather_lms_suffixes_32s_2k(T, SA, n, k, buckets - (omp_thread_num * bucket_stride), omp_block_start, omp_block_size); } #pragma omp barrier if (omp_thread_num == omp_num_threads - 1) { fast_sint_t t; for (t = omp_num_threads - 1; t >= 0; --t) { m += (sa_sint_t)thread_state[t].state.count; if (t != omp_num_threads - 1 && thread_state[t].state.count > 0) { memcpy(&SA[n - m], &SA[thread_state[t].state.position - thread_state[t].state.count], (size_t)thread_state[t].state.count * sizeof(sa_sint_t)); } } } else { omp_num_threads = omp_num_threads - 1; omp_block_stride = (bucket_size / omp_num_threads) & (-16); omp_block_start = omp_thread_num * omp_block_stride; omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : bucket_size - omp_block_start; libsais16_accumulate_counts_s32(buckets + omp_block_start, omp_block_size, bucket_stride, omp_num_threads + 1); } } #endif } return m; } static void libsais16_count_and_gather_compacted_lms_suffixes_32s_2k_fs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : n - omp_block_start; if (omp_num_threads == 1) { libsais16_count_and_gather_compacted_lms_suffixes_32s_2k(T, SA, n, k, buckets, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { fast_sint_t bucket_size = 2 * (fast_sint_t)k; fast_sint_t bucket_stride = libsais16_get_bucket_stride(buckets - &SA[n + n], bucket_size, omp_num_threads); { thread_state[omp_thread_num].state.position = omp_block_start + omp_block_size; thread_state[omp_thread_num].state.count = libsais16_count_and_gather_compacted_lms_suffixes_32s_2k(T, SA + n, n, k, buckets - (omp_thread_num * bucket_stride), omp_block_start, omp_block_size); } #pragma omp barrier { fast_sint_t t, m = 0; for (t = omp_num_threads - 1; t >= omp_thread_num; --t) { m += (sa_sint_t)thread_state[t].state.count; } if (thread_state[omp_thread_num].state.count > 0) { memcpy(&SA[n - m], &SA[n + thread_state[omp_thread_num].state.position - thread_state[omp_thread_num].state.count], (size_t)thread_state[omp_thread_num].state.count * sizeof(sa_sint_t)); } } { omp_block_stride = (bucket_size / omp_num_threads) & (-16); omp_block_start = omp_thread_num * omp_block_stride; omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : bucket_size - omp_block_start; libsais16_accumulate_counts_s32(buckets + omp_block_start, omp_block_size, bucket_stride, omp_num_threads); } } #endif } } #endif static sa_sint_t libsais16_count_and_gather_lms_suffixes_32s_4k_nofs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads) { sa_sint_t m = 0; #if defined(_OPENMP) #pragma omp parallel num_threads(2) if(threads > 1 && n >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); fast_sint_t omp_num_threads = 1; #endif if (omp_num_threads == 1) { m = libsais16_count_and_gather_lms_suffixes_32s_4k(T, SA, n, k, buckets, 0, n); } #if defined(_OPENMP) else if (omp_thread_num == 0) { libsais16_count_lms_suffixes_32s_4k(T, n, k, buckets); } else { m = libsais16_gather_lms_suffixes_32s(T, SA, n); } #endif } return m; } static sa_sint_t libsais16_count_and_gather_lms_suffixes_32s_2k_nofs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads) { sa_sint_t m = 0; #if defined(_OPENMP) #pragma omp parallel num_threads(2) if(threads > 1 && n >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); fast_sint_t omp_num_threads = 1; #endif if (omp_num_threads == 1) { m = libsais16_count_and_gather_lms_suffixes_32s_2k(T, SA, n, k, buckets, 0, n); } #if defined(_OPENMP) else if (omp_thread_num == 0) { libsais16_count_lms_suffixes_32s_2k(T, n, k, buckets); } else { m = libsais16_gather_lms_suffixes_32s(T, SA, n); } #endif } return m; } static sa_sint_t libsais16_count_and_gather_compacted_lms_suffixes_32s_2k_nofs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads) { sa_sint_t m = 0; #if defined(_OPENMP) #pragma omp parallel num_threads(2) if(threads > 1 && n >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); fast_sint_t omp_num_threads = 1; #endif if (omp_num_threads == 1) { m = 
libsais16_count_and_gather_compacted_lms_suffixes_32s_2k(T, SA, n, k, buckets, 0, n); } #if defined(_OPENMP) else if (omp_thread_num == 0) { libsais16_count_compacted_lms_suffixes_32s_2k(T, n, k, buckets); } else { m = libsais16_gather_compacted_lms_suffixes_32s(T, SA, n); } #endif } return m; } static sa_sint_t libsais16_count_and_gather_lms_suffixes_32s_4k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t m; #if defined(_OPENMP) sa_sint_t max_threads = (sa_sint_t)((buckets - &SA[n]) / ((4 * (fast_sint_t)k + 15) & (-16))); if (max_threads > threads) { max_threads = threads; } if (max_threads > 1 && n >= 65536 && n / k >= 2) { if (max_threads > n / 16 / k) { max_threads = n / 16 / k; } m = libsais16_count_and_gather_lms_suffixes_32s_4k_fs_omp(T, SA, n, k, buckets, max_threads > 2 ? max_threads : 2, thread_state); } else #else UNUSED(thread_state); #endif { m = libsais16_count_and_gather_lms_suffixes_32s_4k_nofs_omp(T, SA, n, k, buckets, threads); } return m; } static sa_sint_t libsais16_count_and_gather_lms_suffixes_32s_2k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t m; #if defined(_OPENMP) sa_sint_t max_threads = (sa_sint_t)((buckets - &SA[n]) / ((2 * (fast_sint_t)k + 15) & (-16))); if (max_threads > threads) { max_threads = threads; } if (max_threads > 1 && n >= 65536 && n / k >= 2) { if (max_threads > n / 8 / k) { max_threads = n / 8 / k; } m = libsais16_count_and_gather_lms_suffixes_32s_2k_fs_omp(T, SA, n, k, buckets, max_threads > 2 ? max_threads : 2, thread_state); } else #else UNUSED(thread_state); #endif { m = libsais16_count_and_gather_lms_suffixes_32s_2k_nofs_omp(T, SA, n, k, buckets, threads); } return m; } static void libsais16_count_and_gather_compacted_lms_suffixes_32s_2k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) sa_sint_t max_threads = (sa_sint_t)((buckets - &SA[n + n]) / ((2 * (fast_sint_t)k + 15) & (-16))); if (max_threads > threads) { max_threads = threads; } if (max_threads > 1 && n >= 65536 && n / k >= 2) { if (max_threads > n / 8 / k) { max_threads = n / 8 / k; } libsais16_count_and_gather_compacted_lms_suffixes_32s_2k_fs_omp(T, SA, n, k, buckets, max_threads > 2 ? 
max_threads : 2, thread_state); } else #else UNUSED(thread_state); #endif { libsais16_count_and_gather_compacted_lms_suffixes_32s_2k_nofs_omp(T, SA, n, k, buckets, threads); } } static void libsais16_count_suffixes_32s(const sa_sint_t * RESTRICT T, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets) { const fast_sint_t prefetch_distance = 32; memset(buckets, 0, (size_t)k * sizeof(sa_sint_t)); fast_sint_t i, j; for (i = 0, j = (fast_sint_t)n - 7; i < j; i += 8) { libsais16_prefetch(&T[i + prefetch_distance]); buckets[T[i + 0]]++; buckets[T[i + 1]]++; buckets[T[i + 2]]++; buckets[T[i + 3]]++; buckets[T[i + 4]]++; buckets[T[i + 5]]++; buckets[T[i + 6]]++; buckets[T[i + 7]]++; } for (j += 7; i < j; i += 1) { buckets[T[i]]++; } } static void libsais16_initialize_buckets_start_and_end_16u(sa_sint_t * RESTRICT buckets, sa_sint_t * RESTRICT freq) { sa_sint_t * RESTRICT bucket_start = &buckets[6 * ALPHABET_SIZE]; sa_sint_t * RESTRICT bucket_end = &buckets[7 * ALPHABET_SIZE]; if (freq != NULL) { fast_sint_t i, j; sa_sint_t sum = 0; for (i = BUCKETS_INDEX4(0, 0), j = 0; i <= BUCKETS_INDEX4(ALPHABET_SIZE - 1, 0); i += BUCKETS_INDEX4(1, 0), j += 1) { bucket_start[j] = sum; sum += (freq[j] = buckets[i + BUCKETS_INDEX4(0, 0)] + buckets[i + BUCKETS_INDEX4(0, 1)] + buckets[i + BUCKETS_INDEX4(0, 2)] + buckets[i + BUCKETS_INDEX4(0, 3)]); bucket_end[j] = sum; } } else { fast_sint_t i, j; sa_sint_t sum = 0; for (i = BUCKETS_INDEX4(0, 0), j = 0; i <= BUCKETS_INDEX4(ALPHABET_SIZE - 1, 0); i += BUCKETS_INDEX4(1, 0), j += 1) { bucket_start[j] = sum; sum += buckets[i + BUCKETS_INDEX4(0, 0)] + buckets[i + BUCKETS_INDEX4(0, 1)] + buckets[i + BUCKETS_INDEX4(0, 2)] + buckets[i + BUCKETS_INDEX4(0, 3)]; bucket_end[j] = sum; } } } static void libsais16_initialize_buckets_start_and_end_32s_6k(sa_sint_t k, sa_sint_t * RESTRICT buckets) { sa_sint_t * RESTRICT bucket_start = &buckets[4 * k]; sa_sint_t * RESTRICT bucket_end = &buckets[5 * k]; fast_sint_t i, j; sa_sint_t sum = 0; for (i = BUCKETS_INDEX4(0, 0), j = 0; i <= BUCKETS_INDEX4((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX4(1, 0), j += 1) { bucket_start[j] = sum; sum += buckets[i + BUCKETS_INDEX4(0, 0)] + buckets[i + BUCKETS_INDEX4(0, 1)] + buckets[i + BUCKETS_INDEX4(0, 2)] + buckets[i + BUCKETS_INDEX4(0, 3)]; bucket_end[j] = sum; } } static void libsais16_initialize_buckets_start_and_end_32s_4k(sa_sint_t k, sa_sint_t * RESTRICT buckets) { sa_sint_t * RESTRICT bucket_start = &buckets[2 * k]; sa_sint_t * RESTRICT bucket_end = &buckets[3 * k]; fast_sint_t i, j; sa_sint_t sum = 0; for (i = BUCKETS_INDEX2(0, 0), j = 0; i <= BUCKETS_INDEX2((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX2(1, 0), j += 1) { bucket_start[j] = sum; sum += buckets[i + BUCKETS_INDEX2(0, 0)] + buckets[i + BUCKETS_INDEX2(0, 1)]; bucket_end[j] = sum; } } static void libsais16_initialize_buckets_end_32s_2k(sa_sint_t k, sa_sint_t * RESTRICT buckets) { fast_sint_t i; sa_sint_t sum0 = 0; for (i = BUCKETS_INDEX2(0, 0); i <= BUCKETS_INDEX2((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX2(1, 0)) { sum0 += buckets[i + BUCKETS_INDEX2(0, 0)] + buckets[i + BUCKETS_INDEX2(0, 1)]; buckets[i + BUCKETS_INDEX2(0, 0)] = sum0; } } static void libsais16_initialize_buckets_start_and_end_32s_2k(sa_sint_t k, sa_sint_t * RESTRICT buckets) { fast_sint_t i, j; for (i = BUCKETS_INDEX2(0, 0), j = 0; i <= BUCKETS_INDEX2((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX2(1, 0), j += 1) { buckets[j] = buckets[i]; } buckets[k] = 0; memcpy(&buckets[k + 1], buckets, ((size_t)k - 1) * sizeof(sa_sint_t)); } static void 
libsais16_initialize_buckets_start_32s_1k(sa_sint_t k, sa_sint_t * RESTRICT buckets) { fast_sint_t i; sa_sint_t sum = 0; for (i = 0; i <= (fast_sint_t)k - 1; i += 1) { sa_sint_t tmp = buckets[i]; buckets[i] = sum; sum += tmp; } } static void libsais16_initialize_buckets_end_32s_1k(sa_sint_t k, sa_sint_t * RESTRICT buckets) { fast_sint_t i; sa_sint_t sum = 0; for (i = 0; i <= (fast_sint_t)k - 1; i += 1) { sum += buckets[i]; buckets[i] = sum; } } static sa_sint_t libsais16_initialize_buckets_for_lms_suffixes_radix_sort_16u(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix) { { fast_uint_t s = 0; fast_sint_t c0 = T[first_lms_suffix]; fast_sint_t c1 = 0; for (; --first_lms_suffix >= 0; ) { c1 = c0; c0 = T[first_lms_suffix]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]--; } buckets[BUCKETS_INDEX4((fast_uint_t)c0, (s << 1) & 3)]--; } { sa_sint_t * RESTRICT temp_bucket = &buckets[4 * ALPHABET_SIZE]; fast_sint_t i, j; sa_sint_t sum = 0; for (i = BUCKETS_INDEX4(0, 0), j = BUCKETS_INDEX2(0, 0); i <= BUCKETS_INDEX4(ALPHABET_SIZE - 1, 0); i += BUCKETS_INDEX4(1, 0), j += BUCKETS_INDEX2(1, 0)) { temp_bucket[j + BUCKETS_INDEX2(0, 1)] = sum; sum += buckets[i + BUCKETS_INDEX4(0, 1)] + buckets[i + BUCKETS_INDEX4(0, 3)]; temp_bucket[j] = sum; } return sum; } } static void libsais16_initialize_buckets_for_lms_suffixes_radix_sort_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix) { buckets[BUCKETS_INDEX2(T[first_lms_suffix], 0)]++; buckets[BUCKETS_INDEX2(T[first_lms_suffix], 1)]--; fast_sint_t i; sa_sint_t sum0 = 0, sum1 = 0; for (i = BUCKETS_INDEX2(0, 0); i <= BUCKETS_INDEX2((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX2(1, 0)) { sum0 += buckets[i + BUCKETS_INDEX2(0, 0)] + buckets[i + BUCKETS_INDEX2(0, 1)]; sum1 += buckets[i + BUCKETS_INDEX2(0, 1)]; buckets[i + BUCKETS_INDEX2(0, 0)] = sum0; buckets[i + BUCKETS_INDEX2(0, 1)] = sum1; } } static sa_sint_t libsais16_initialize_buckets_for_lms_suffixes_radix_sort_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix) { { fast_uint_t s = 0; fast_sint_t c0 = T[first_lms_suffix]; fast_sint_t c1 = 0; for (; --first_lms_suffix >= 0; ) { c1 = c0; c0 = T[first_lms_suffix]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]--; } buckets[BUCKETS_INDEX4((fast_uint_t)c0, (s << 1) & 3)]--; } { sa_sint_t * RESTRICT temp_bucket = &buckets[4 * k]; fast_sint_t i, j; sa_sint_t sum = 0; for (i = BUCKETS_INDEX4(0, 0), j = 0; i <= BUCKETS_INDEX4((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX4(1, 0), j += 1) { sum += buckets[i + BUCKETS_INDEX4(0, 1)] + buckets[i + BUCKETS_INDEX4(0, 3)]; temp_bucket[j] = sum; } return sum; } } static void libsais16_initialize_buckets_for_radix_and_partial_sorting_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix) { sa_sint_t * RESTRICT bucket_start = &buckets[2 * k]; sa_sint_t * RESTRICT bucket_end = &buckets[3 * k]; buckets[BUCKETS_INDEX2(T[first_lms_suffix], 0)]++; buckets[BUCKETS_INDEX2(T[first_lms_suffix], 1)]--; fast_sint_t i, j; sa_sint_t sum0 = 0, sum1 = 0; for (i = BUCKETS_INDEX2(0, 0), j = 0; i <= BUCKETS_INDEX2((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX2(1, 0), j += 1) { bucket_start[j] = sum1; sum0 += buckets[i + BUCKETS_INDEX2(0, 1)]; sum1 += buckets[i + BUCKETS_INDEX2(0, 0)] + buckets[i + BUCKETS_INDEX2(0, 1)]; buckets[i 
+ BUCKETS_INDEX2(0, 1)] = sum0; bucket_end[j] = sum1; } } static void libsais16_radix_sort_lms_suffixes_16u(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4) { libsais16_prefetch(&SA[i - 2 * prefetch_distance]); libsais16_prefetch(&T[SA[i - prefetch_distance - 0]]); libsais16_prefetch(&T[SA[i - prefetch_distance - 1]]); libsais16_prefetch(&T[SA[i - prefetch_distance - 2]]); libsais16_prefetch(&T[SA[i - prefetch_distance - 3]]); sa_sint_t p0 = SA[i - 0]; SA[--induction_bucket[BUCKETS_INDEX2(T[p0], 0)]] = p0; sa_sint_t p1 = SA[i - 1]; SA[--induction_bucket[BUCKETS_INDEX2(T[p1], 0)]] = p1; sa_sint_t p2 = SA[i - 2]; SA[--induction_bucket[BUCKETS_INDEX2(T[p2], 0)]] = p2; sa_sint_t p3 = SA[i - 3]; SA[--induction_bucket[BUCKETS_INDEX2(T[p3], 0)]] = p3; } for (j -= prefetch_distance + 3; i >= j; i -= 1) { sa_sint_t p = SA[i]; SA[--induction_bucket[BUCKETS_INDEX2(T[p], 0)]] = p; } } static void libsais16_radix_sort_lms_suffixes_16u_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536 && m >= 65536 && omp_get_dynamic() == 0) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_num_threads = 1; #endif if (omp_num_threads == 1) { libsais16_radix_sort_lms_suffixes_16u(T, SA, &buckets[4 * ALPHABET_SIZE], (fast_sint_t)n - (fast_sint_t)m + 1, (fast_sint_t)m - 1); } #if defined(_OPENMP) else { { sa_sint_t * RESTRICT src_bucket = &buckets[4 * ALPHABET_SIZE]; sa_sint_t * RESTRICT dst_bucket = thread_state[omp_thread_num].state.buckets; fast_sint_t i, j; for (i = BUCKETS_INDEX2(0, 0), j = BUCKETS_INDEX4(0, 1); i <= BUCKETS_INDEX2(ALPHABET_SIZE - 1, 0); i += BUCKETS_INDEX2(1, 0), j += BUCKETS_INDEX4(1, 0)) { dst_bucket[i] = src_bucket[i] - dst_bucket[j]; } } { fast_sint_t t, omp_block_start = 0, omp_block_size = thread_state[omp_thread_num].state.m; for (t = omp_num_threads - 1; t >= omp_thread_num; --t) omp_block_start += thread_state[t].state.m; if (omp_block_start == (fast_sint_t)m && omp_block_size > 0) { omp_block_start -= 1; omp_block_size -= 1; } libsais16_radix_sort_lms_suffixes_16u(T, SA, thread_state[omp_thread_num].state.buckets, (fast_sint_t)n - omp_block_start, omp_block_size); } } #endif } } static void libsais16_radix_sort_lms_suffixes_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 3; i >= j; i -= 4) { libsais16_prefetch(&SA[i - 3 * prefetch_distance]); libsais16_prefetch(&T[SA[i - 2 * prefetch_distance - 0]]); libsais16_prefetch(&T[SA[i - 2 * prefetch_distance - 1]]); libsais16_prefetch(&T[SA[i - 2 * prefetch_distance - 2]]); libsais16_prefetch(&T[SA[i - 2 * prefetch_distance - 3]]); libsais16_prefetchw(&induction_bucket[T[SA[i - prefetch_distance - 0]]]); libsais16_prefetchw(&induction_bucket[T[SA[i - 
prefetch_distance - 1]]]); libsais16_prefetchw(&induction_bucket[T[SA[i - prefetch_distance - 2]]]); libsais16_prefetchw(&induction_bucket[T[SA[i - prefetch_distance - 3]]]); sa_sint_t p0 = SA[i - 0]; SA[--induction_bucket[T[p0]]] = p0; sa_sint_t p1 = SA[i - 1]; SA[--induction_bucket[T[p1]]] = p1; sa_sint_t p2 = SA[i - 2]; SA[--induction_bucket[T[p2]]] = p2; sa_sint_t p3 = SA[i - 3]; SA[--induction_bucket[T[p3]]] = p3; } for (j -= 2 * prefetch_distance + 3; i >= j; i -= 1) { sa_sint_t p = SA[i]; SA[--induction_bucket[T[p]]] = p; } } static void libsais16_radix_sort_lms_suffixes_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 3; i >= j; i -= 4) { libsais16_prefetch(&SA[i - 3 * prefetch_distance]); libsais16_prefetch(&T[SA[i - 2 * prefetch_distance - 0]]); libsais16_prefetch(&T[SA[i - 2 * prefetch_distance - 1]]); libsais16_prefetch(&T[SA[i - 2 * prefetch_distance - 2]]); libsais16_prefetch(&T[SA[i - 2 * prefetch_distance - 3]]); libsais16_prefetchw(&induction_bucket[BUCKETS_INDEX2(T[SA[i - prefetch_distance - 0]], 0)]); libsais16_prefetchw(&induction_bucket[BUCKETS_INDEX2(T[SA[i - prefetch_distance - 1]], 0)]); libsais16_prefetchw(&induction_bucket[BUCKETS_INDEX2(T[SA[i - prefetch_distance - 2]], 0)]); libsais16_prefetchw(&induction_bucket[BUCKETS_INDEX2(T[SA[i - prefetch_distance - 3]], 0)]); sa_sint_t p0 = SA[i - 0]; SA[--induction_bucket[BUCKETS_INDEX2(T[p0], 0)]] = p0; sa_sint_t p1 = SA[i - 1]; SA[--induction_bucket[BUCKETS_INDEX2(T[p1], 0)]] = p1; sa_sint_t p2 = SA[i - 2]; SA[--induction_bucket[BUCKETS_INDEX2(T[p2], 0)]] = p2; sa_sint_t p3 = SA[i - 3]; SA[--induction_bucket[BUCKETS_INDEX2(T[p3], 0)]] = p3; } for (j -= 2 * prefetch_distance + 3; i >= j; i -= 1) { sa_sint_t p = SA[i]; SA[--induction_bucket[BUCKETS_INDEX2(T[p], 0)]] = p; } } #if defined(_OPENMP) static void libsais16_radix_sort_lms_suffixes_32s_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4) { libsais16_prefetch(&SA[i + 2 * prefetch_distance]); libsais16_prefetch(&T[SA[i + prefetch_distance + 0]]); libsais16_prefetch(&T[SA[i + prefetch_distance + 1]]); libsais16_prefetch(&T[SA[i + prefetch_distance + 2]]); libsais16_prefetch(&T[SA[i + prefetch_distance + 3]]); libsais16_prefetchw(&cache[i + prefetch_distance]); cache[i + 0].symbol = T[cache[i + 0].index = SA[i + 0]]; cache[i + 1].symbol = T[cache[i + 1].index = SA[i + 1]]; cache[i + 2].symbol = T[cache[i + 2].index = SA[i + 2]]; cache[i + 3].symbol = T[cache[i + 3].index = SA[i + 3]]; } for (j += prefetch_distance + 3; i < j; i += 1) { cache[i].symbol = T[cache[i].index = SA[i]]; } } static void libsais16_radix_sort_lms_suffixes_32s_6k_block_sort(sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4) { libsais16_prefetchw(&cache[i - 2 * prefetch_distance]); 
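        /* Descriptive note (added): this sort step walks the cached (index, symbol) pairs right to left,
         * prefetching the bucket counters a few iterations ahead for writing, then overwrites each cached
         * symbol with its final SA slot obtained by decrementing that symbol's induction bucket. The pairs
         * are scattered back into SA afterwards by libsais16_place_cached_suffixes, called from the
         * _block_omp wrapper below. */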
libsais16_prefetchw(&induction_bucket[cache[i - prefetch_distance - 0].symbol]); libsais16_prefetchw(&induction_bucket[cache[i - prefetch_distance - 1].symbol]); libsais16_prefetchw(&induction_bucket[cache[i - prefetch_distance - 2].symbol]); libsais16_prefetchw(&induction_bucket[cache[i - prefetch_distance - 3].symbol]); cache[i - 0].symbol = --induction_bucket[cache[i - 0].symbol]; cache[i - 1].symbol = --induction_bucket[cache[i - 1].symbol]; cache[i - 2].symbol = --induction_bucket[cache[i - 2].symbol]; cache[i - 3].symbol = --induction_bucket[cache[i - 3].symbol]; } for (j -= prefetch_distance + 3; i >= j; i -= 1) { cache[i].symbol = --induction_bucket[cache[i].symbol]; } } static void libsais16_radix_sort_lms_suffixes_32s_2k_block_sort(sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4) { libsais16_prefetchw(&cache[i - 2 * prefetch_distance]); libsais16_prefetchw(&induction_bucket[BUCKETS_INDEX2(cache[i - prefetch_distance - 0].symbol, 0)]); libsais16_prefetchw(&induction_bucket[BUCKETS_INDEX2(cache[i - prefetch_distance - 1].symbol, 0)]); libsais16_prefetchw(&induction_bucket[BUCKETS_INDEX2(cache[i - prefetch_distance - 2].symbol, 0)]); libsais16_prefetchw(&induction_bucket[BUCKETS_INDEX2(cache[i - prefetch_distance - 3].symbol, 0)]); cache[i - 0].symbol = --induction_bucket[BUCKETS_INDEX2(cache[i - 0].symbol, 0)]; cache[i - 1].symbol = --induction_bucket[BUCKETS_INDEX2(cache[i - 1].symbol, 0)]; cache[i - 2].symbol = --induction_bucket[BUCKETS_INDEX2(cache[i - 2].symbol, 0)]; cache[i - 3].symbol = --induction_bucket[BUCKETS_INDEX2(cache[i - 3].symbol, 0)]; } for (j -= prefetch_distance + 3; i >= j; i -= 1) { cache[i].symbol = --induction_bucket[BUCKETS_INDEX2(cache[i].symbol, 0)]; } } static void libsais16_radix_sort_lms_suffixes_32s_6k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(cache); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais16_radix_sort_lms_suffixes_32s_6k(T, SA, induction_bucket, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { libsais16_radix_sort_lms_suffixes_32s_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { libsais16_radix_sort_lms_suffixes_32s_6k_block_sort(induction_bucket, cache - block_start, block_start, block_size); } #pragma omp barrier { libsais16_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size); } } #endif } } static void libsais16_radix_sort_lms_suffixes_32s_2k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(cache); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais16_radix_sort_lms_suffixes_32s_2k(T, SA, induction_bucket, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { libsais16_radix_sort_lms_suffixes_32s_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { libsais16_radix_sort_lms_suffixes_32s_2k_block_sort(induction_bucket, cache - block_start, block_start, block_size); } #pragma omp barrier { libsais16_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size); } } #endif } } #endif static void libsais16_radix_sort_lms_suffixes_32s_6k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { if (threads == 1 || m < 65536) { libsais16_radix_sort_lms_suffixes_32s_6k(T, SA, induction_bucket, (fast_sint_t)n - (fast_sint_t)m + 1, (fast_sint_t)m - 1); } #if defined(_OPENMP) else { fast_sint_t block_start, block_end; for (block_start = 0; block_start < (fast_sint_t)m - 1; block_start = block_end) { block_end = block_start + (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end >= m) { block_end = (fast_sint_t)m - 1; } libsais16_radix_sort_lms_suffixes_32s_6k_block_omp(T, SA, induction_bucket, thread_state[0].state.cache, (fast_sint_t)n - block_end, block_end - block_start, threads); } } #else UNUSED(thread_state); #endif } static void libsais16_radix_sort_lms_suffixes_32s_2k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { if (threads == 1 || m < 65536) { libsais16_radix_sort_lms_suffixes_32s_2k(T, SA, induction_bucket, (fast_sint_t)n - (fast_sint_t)m + 1, (fast_sint_t)m - 1); } #if defined(_OPENMP) else { fast_sint_t block_start, block_end; for (block_start = 0; block_start < (fast_sint_t)m - 1; block_start = block_end) 
{ block_end = block_start + (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end >= m) { block_end = (fast_sint_t)m - 1; } libsais16_radix_sort_lms_suffixes_32s_2k_block_omp(T, SA, induction_bucket, thread_state[0].state.cache, (fast_sint_t)n - block_end, block_end - block_start, threads); } } #else UNUSED(thread_state); #endif } static sa_sint_t libsais16_radix_sort_lms_suffixes_32s_1k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets) { const fast_sint_t prefetch_distance = 32; sa_sint_t i = n - 2; sa_sint_t m = 0; fast_uint_t s = 1; fast_sint_t c0 = T[n - 1]; fast_sint_t c1 = 0; fast_sint_t c2 = 0; for (; i >= prefetch_distance + 3; i -= 4) { libsais16_prefetch(&T[i - 2 * prefetch_distance]); libsais16_prefetchw(&buckets[T[i - prefetch_distance - 0]]); libsais16_prefetchw(&buckets[T[i - prefetch_distance - 1]]); libsais16_prefetchw(&buckets[T[i - prefetch_distance - 2]]); libsais16_prefetchw(&buckets[T[i - prefetch_distance - 3]]); c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); if ((s & 3) == 1) { SA[--buckets[c2 = c0]] = i + 1; m++; } c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); if ((s & 3) == 1) { SA[--buckets[c2 = c1]] = i - 0; m++; } c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); if ((s & 3) == 1) { SA[--buckets[c2 = c0]] = i - 1; m++; } c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); if ((s & 3) == 1) { SA[--buckets[c2 = c1]] = i - 2; m++; } } for (; i >= 0; i -= 1) { c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); if ((s & 3) == 1) { SA[--buckets[c2 = c1]] = i + 1; m++; } } if (m > 1) { SA[buckets[c2]] = 0; } return m; } static void libsais16_radix_sort_set_markers_32s_6k(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4) { libsais16_prefetch(&induction_bucket[i + 2 * prefetch_distance]); libsais16_prefetchw(&SA[induction_bucket[i + prefetch_distance + 0]]); libsais16_prefetchw(&SA[induction_bucket[i + prefetch_distance + 1]]); libsais16_prefetchw(&SA[induction_bucket[i + prefetch_distance + 2]]); libsais16_prefetchw(&SA[induction_bucket[i + prefetch_distance + 3]]); SA[induction_bucket[i + 0]] |= SAINT_MIN; SA[induction_bucket[i + 1]] |= SAINT_MIN; SA[induction_bucket[i + 2]] |= SAINT_MIN; SA[induction_bucket[i + 3]] |= SAINT_MIN; } for (j += prefetch_distance + 3; i < j; i += 1) { SA[induction_bucket[i]] |= SAINT_MIN; } } static void libsais16_radix_sort_set_markers_32s_4k(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4) { libsais16_prefetch(&induction_bucket[BUCKETS_INDEX2(i + 2 * prefetch_distance, 0)]); libsais16_prefetchw(&SA[induction_bucket[BUCKETS_INDEX2(i + prefetch_distance + 0, 0)]]); libsais16_prefetchw(&SA[induction_bucket[BUCKETS_INDEX2(i + prefetch_distance + 1, 0)]]); libsais16_prefetchw(&SA[induction_bucket[BUCKETS_INDEX2(i + prefetch_distance + 2, 0)]]); libsais16_prefetchw(&SA[induction_bucket[BUCKETS_INDEX2(i + prefetch_distance + 3, 0)]]); 
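        /* Descriptive note (added): tag one SA entry per symbol, at the position recorded in
         * induction_bucket, with SUFFIX_GROUP_MARKER so later induction passes can recognise
         * suffix-group boundaries; this is the 4k counterpart of the _6k routine above, which
         * ORs in SAINT_MIN instead. The loop is unrolled four symbols at a time. */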
SA[induction_bucket[BUCKETS_INDEX2(i + 0, 0)]] |= SUFFIX_GROUP_MARKER; SA[induction_bucket[BUCKETS_INDEX2(i + 1, 0)]] |= SUFFIX_GROUP_MARKER; SA[induction_bucket[BUCKETS_INDEX2(i + 2, 0)]] |= SUFFIX_GROUP_MARKER; SA[induction_bucket[BUCKETS_INDEX2(i + 3, 0)]] |= SUFFIX_GROUP_MARKER; } for (j += prefetch_distance + 3; i < j; i += 1) { SA[induction_bucket[BUCKETS_INDEX2(i, 0)]] |= SUFFIX_GROUP_MARKER; } } static void libsais16_radix_sort_set_markers_32s_6k_omp(sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && k >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); fast_sint_t omp_block_stride = (((fast_sint_t)k - 1) / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : (fast_sint_t)k - 1 - omp_block_start; #else UNUSED(threads); fast_sint_t omp_block_start = 0; fast_sint_t omp_block_size = (fast_sint_t)k - 1; #endif libsais16_radix_sort_set_markers_32s_6k(SA, induction_bucket, omp_block_start, omp_block_size); } } static void libsais16_radix_sort_set_markers_32s_4k_omp(sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && k >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); fast_sint_t omp_block_stride = (((fast_sint_t)k - 1) / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : (fast_sint_t)k - 1 - omp_block_start; #else UNUSED(threads); fast_sint_t omp_block_start = 0; fast_sint_t omp_block_size = (fast_sint_t)k - 1; #endif libsais16_radix_sort_set_markers_32s_4k(SA, induction_bucket, omp_block_start, omp_block_size); } } static void libsais16_initialize_buckets_for_partial_sorting_16u(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count) { sa_sint_t * RESTRICT temp_bucket = &buckets[4 * ALPHABET_SIZE]; buckets[BUCKETS_INDEX4((fast_uint_t)T[first_lms_suffix], 1)]++; fast_sint_t i, j; sa_sint_t sum0 = left_suffixes_count + 1, sum1 = 0; for (i = BUCKETS_INDEX4(0, 0), j = BUCKETS_INDEX2(0, 0); i <= BUCKETS_INDEX4(ALPHABET_SIZE - 1, 0); i += BUCKETS_INDEX4(1, 0), j += BUCKETS_INDEX2(1, 0)) { temp_bucket[j + BUCKETS_INDEX2(0, 0)] = sum0; sum0 += buckets[i + BUCKETS_INDEX4(0, 0)] + buckets[i + BUCKETS_INDEX4(0, 2)]; sum1 += buckets[i + BUCKETS_INDEX4(0, 1)]; buckets[j + BUCKETS_INDEX2(0, 0)] = sum0; buckets[j + BUCKETS_INDEX2(0, 1)] = sum1; } } static void libsais16_initialize_buckets_for_partial_sorting_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count) { sa_sint_t * RESTRICT temp_bucket = &buckets[4 * k]; fast_sint_t i, j; sa_sint_t sum0 = left_suffixes_count + 1, sum1 = 0, sum2 = 0; for (first_lms_suffix = T[first_lms_suffix], i = BUCKETS_INDEX4(0, 0), j = BUCKETS_INDEX2(0, 0); i <= BUCKETS_INDEX4((fast_sint_t)first_lms_suffix - 1, 0); i += BUCKETS_INDEX4(1, 0), j += BUCKETS_INDEX2(1, 0)) { sa_sint_t SS = buckets[i + BUCKETS_INDEX4(0, 0)]; sa_sint_t LS = buckets[i + BUCKETS_INDEX4(0, 1)]; sa_sint_t SL = buckets[i + BUCKETS_INDEX4(0, 2)]; sa_sint_t LL = buckets[i + BUCKETS_INDEX4(0, 3)]; buckets[i + BUCKETS_INDEX4(0, 0)] = sum0; buckets[i + BUCKETS_INDEX4(0, 1)] = sum2; buckets[i + BUCKETS_INDEX4(0, 2)] = 0; buckets[i + BUCKETS_INDEX4(0, 3)] = 0; sum0 += SS + SL; sum1 += LS; sum2 += LS + LL; temp_bucket[j + BUCKETS_INDEX2(0, 0)] = sum0; temp_bucket[j + BUCKETS_INDEX2(0, 1)] = sum1; } for (sum1 += 1; i <= BUCKETS_INDEX4((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX4(1, 0), j += BUCKETS_INDEX2(1, 0)) { sa_sint_t SS = buckets[i + BUCKETS_INDEX4(0, 0)]; sa_sint_t LS = buckets[i + BUCKETS_INDEX4(0, 1)]; sa_sint_t SL = buckets[i + BUCKETS_INDEX4(0, 2)]; sa_sint_t LL = buckets[i + BUCKETS_INDEX4(0, 3)]; buckets[i + BUCKETS_INDEX4(0, 0)] = sum0; buckets[i + BUCKETS_INDEX4(0, 1)] = sum2; buckets[i + BUCKETS_INDEX4(0, 2)] = 0; buckets[i + BUCKETS_INDEX4(0, 3)] = 0; sum0 += SS + SL; sum1 += LS; sum2 += LS + LL; temp_bucket[j + BUCKETS_INDEX2(0, 0)] = sum0; temp_bucket[j + BUCKETS_INDEX2(0, 1)] = sum1; } } static sa_sint_t libsais16_partial_sorting_scan_left_to_right_16u(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT induction_bucket = &buckets[4 * ALPHABET_SIZE]; sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE]; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais16_prefetch(&SA[i + 2 * prefetch_distance]); libsais16_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 1); libsais16_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 2); libsais16_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 1); libsais16_prefetch(&T[SA[i + 
prefetch_distance + 1] & SAINT_MAX] - 2); sa_sint_t p0 = SA[i + 0]; d += (p0 < 0); p0 &= SAINT_MAX; sa_sint_t v0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] >= T[p0 - 1]); SA[induction_bucket[v0]++] = (p0 - 1) | ((sa_sint_t)(distinct_names[v0] != d) << (SAINT_BIT - 1)); distinct_names[v0] = d; sa_sint_t p1 = SA[i + 1]; d += (p1 < 0); p1 &= SAINT_MAX; sa_sint_t v1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] >= T[p1 - 1]); SA[induction_bucket[v1]++] = (p1 - 1) | ((sa_sint_t)(distinct_names[v1] != d) << (SAINT_BIT - 1)); distinct_names[v1] = d; } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] >= T[p - 1]); SA[induction_bucket[v]++] = (p - 1) | ((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d; } return d; } #if defined(_OPENMP) static void libsais16_partial_sorting_scan_left_to_right_16u_block_prepare(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size, LIBSAIS_THREAD_STATE * RESTRICT state) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE]; sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE]; memset(buckets, 0, 4 * ALPHABET_SIZE * sizeof(sa_sint_t)); fast_sint_t i, j, count = 0; sa_sint_t d = 1; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais16_prefetch(&SA[i + 2 * prefetch_distance]); libsais16_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 1); libsais16_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 2); libsais16_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 1); libsais16_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 2); sa_sint_t p0 = cache[count].index = SA[i + 0]; d += (p0 < 0); p0 &= SAINT_MAX; sa_sint_t v0 = cache[count++].symbol = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] >= T[p0 - 1]); induction_bucket[v0]++; distinct_names[v0] = d; sa_sint_t p1 = cache[count].index = SA[i + 1]; d += (p1 < 0); p1 &= SAINT_MAX; sa_sint_t v1 = cache[count++].symbol = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] >= T[p1 - 1]); induction_bucket[v1]++; distinct_names[v1] = d; } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = cache[count].index = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = cache[count++].symbol = BUCKETS_INDEX2(T[p - 1], T[p - 2] >= T[p - 1]); induction_bucket[v]++; distinct_names[v] = d; } state[0].state.position = (fast_sint_t)d - 1; state[0].state.count = count; } static void libsais16_partial_sorting_scan_left_to_right_16u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count, sa_sint_t d) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE]; sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE]; fast_sint_t i, j; for (i = 0, j = count - 1; i < j; i += 2) { libsais16_prefetch(&cache[i + prefetch_distance]); sa_sint_t p0 = cache[i + 0].index; d += (p0 < 0); sa_sint_t v0 = cache[i + 0].symbol; SA[induction_bucket[v0]++] = (p0 - 1) | ((sa_sint_t)(distinct_names[v0] != d) << (SAINT_BIT - 1)); distinct_names[v0] = d; sa_sint_t p1 = cache[i + 1].index; d += (p1 < 0); sa_sint_t v1 = cache[i + 1].symbol; SA[induction_bucket[v1]++] = (p1 - 1) | ((sa_sint_t)(distinct_names[v1] != d) << (SAINT_BIT - 1)); distinct_names[v1] = d; 
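        /* Descriptive note (added): the place pass mirrors the sequential scan's bookkeeping. d advances
         * whenever a cached index carries the group-boundary sign bit, and the stored entry gets its own
         * sign bit only if bucket v has not yet received a suffix from the current group
         * (distinct_names[v] != d). The short loop below handles the final cached entry when count is odd. */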
} for (j += 1; i < j; i += 1) { sa_sint_t p = cache[i].index; d += (p < 0); sa_sint_t v = cache[i].symbol; SA[induction_bucket[v]++] = (p - 1) | ((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d; } } static sa_sint_t libsais16_partial_sorting_scan_left_to_right_16u_block_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { d = libsais16_partial_sorting_scan_left_to_right_16u(T, SA, buckets, d, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { libsais16_partial_sorting_scan_left_to_right_16u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size, &thread_state[omp_thread_num]); } #pragma omp barrier #pragma omp master { sa_sint_t * RESTRICT induction_bucket = &buckets[4 * ALPHABET_SIZE]; sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE]; fast_sint_t t; for (t = 0; t < omp_num_threads; ++t) { sa_sint_t * RESTRICT temp_induction_bucket = &thread_state[t].state.buckets[0 * ALPHABET_SIZE]; sa_sint_t * RESTRICT temp_distinct_names = &thread_state[t].state.buckets[2 * ALPHABET_SIZE]; fast_sint_t c; for (c = 0; c < 2 * ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_induction_bucket[c]; induction_bucket[c] = A + B; temp_induction_bucket[c] = A; } for (d -= 1, c = 0; c < 2 * ALPHABET_SIZE; c += 1) { sa_sint_t A = distinct_names[c], B = temp_distinct_names[c], D = B + d; distinct_names[c] = B > 0 ? 
D : A; temp_distinct_names[c] = A; } d += 1 + (sa_sint_t)thread_state[t].state.position; thread_state[t].state.position = (fast_sint_t)d - thread_state[t].state.position; } } #pragma omp barrier { libsais16_partial_sorting_scan_left_to_right_16u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count, (sa_sint_t)thread_state[omp_thread_num].state.position); } } #endif } return d; } #endif static sa_sint_t libsais16_partial_sorting_scan_left_to_right_16u_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t left_suffixes_count, sa_sint_t d, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t * RESTRICT induction_bucket = &buckets[4 * ALPHABET_SIZE]; sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE]; SA[induction_bucket[BUCKETS_INDEX2(T[n - 1], T[n - 2] >= T[n - 1])]++] = (n - 1) | SAINT_MIN; distinct_names[BUCKETS_INDEX2(T[n - 1], T[n - 2] >= T[n - 1])] = ++d; if (threads == 1 || left_suffixes_count < 65536) { d = libsais16_partial_sorting_scan_left_to_right_16u(T, SA, buckets, d, 0, left_suffixes_count); } #if defined(_OPENMP) else { fast_sint_t block_start; for (block_start = 0; block_start < left_suffixes_count; ) { if (SA[block_start] == 0) { block_start++; } else { fast_sint_t block_max_end = block_start + ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end > left_suffixes_count) { block_max_end = left_suffixes_count;} fast_sint_t block_end = block_start + 1; while (block_end < block_max_end && SA[block_end] != 0) { block_end++; } fast_sint_t block_size = block_end - block_start; if (block_size < 32) { for (; block_start < block_end; block_start += 1) { sa_sint_t p = SA[block_start]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] >= T[p - 1]); SA[induction_bucket[v]++] = (p - 1) | ((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d; } } else { d = libsais16_partial_sorting_scan_left_to_right_16u_block_omp(T, SA, buckets, d, block_start, block_size, threads, thread_state); block_start = block_end; } } } } #else UNUSED(thread_state); #endif return d; } static sa_sint_t libsais16_partial_sorting_scan_left_to_right_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - 2 * prefetch_distance - 1; i < j; i += 2) { libsais16_prefetch(&SA[i + 3 * prefetch_distance]); libsais16_prefetch(&T[SA[i + 2 * prefetch_distance + 0] & SAINT_MAX] - 1); libsais16_prefetch(&T[SA[i + 2 * prefetch_distance + 0] & SAINT_MAX] - 2); libsais16_prefetch(&T[SA[i + 2 * prefetch_distance + 1] & SAINT_MAX] - 1); libsais16_prefetch(&T[SA[i + 2 * prefetch_distance + 1] & SAINT_MAX] - 2); sa_sint_t p0 = SA[i + prefetch_distance + 0] & SAINT_MAX; sa_sint_t v0 = BUCKETS_INDEX4(T[p0 - (p0 > 0)], 0); libsais16_prefetchw(&buckets[v0]); sa_sint_t p1 = SA[i + prefetch_distance + 1] & SAINT_MAX; sa_sint_t v1 = BUCKETS_INDEX4(T[p1 - (p1 > 0)], 0); libsais16_prefetchw(&buckets[v1]); sa_sint_t p2 = SA[i + 0]; d += (p2 < 0); p2 &= SAINT_MAX; sa_sint_t v2 = BUCKETS_INDEX4(T[p2 - 1], T[p2 - 2] >= T[p2 - 1]); SA[buckets[v2]++] = (p2 - 1) | ((sa_sint_t)(buckets[2 + v2] != d) << (SAINT_BIT - 1)); buckets[2 + v2] = d; 
sa_sint_t p3 = SA[i + 1]; d += (p3 < 0); p3 &= SAINT_MAX; sa_sint_t v3 = BUCKETS_INDEX4(T[p3 - 1], T[p3 - 2] >= T[p3 - 1]); SA[buckets[v3]++] = (p3 - 1) | ((sa_sint_t)(buckets[2 + v3] != d) << (SAINT_BIT - 1)); buckets[2 + v3] = d; } for (j += 2 * prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX4(T[p - 1], T[p - 2] >= T[p - 1]); SA[buckets[v]++] = (p - 1) | ((sa_sint_t)(buckets[2 + v] != d) << (SAINT_BIT - 1)); buckets[2 + v] = d; } return d; } static sa_sint_t libsais16_partial_sorting_scan_left_to_right_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT induction_bucket = &buckets[2 * k]; sa_sint_t * RESTRICT distinct_names = &buckets[0 * k]; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - 2 * prefetch_distance - 1; i < j; i += 2) { libsais16_prefetchw(&SA[i + 3 * prefetch_distance]); sa_sint_t s0 = SA[i + 2 * prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0 & ~SUFFIX_GROUP_MARKER] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + 2 * prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1 & ~SUFFIX_GROUP_MARKER] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); sa_sint_t s2 = SA[i + 1 * prefetch_distance + 0]; if (s2 > 0) { const fast_sint_t Ts2 = T[(s2 & ~SUFFIX_GROUP_MARKER) - 1]; libsais16_prefetchw(&induction_bucket[Ts2]); libsais16_prefetchw(&distinct_names[BUCKETS_INDEX2(Ts2, 0)]); } sa_sint_t s3 = SA[i + 1 * prefetch_distance + 1]; if (s3 > 0) { const fast_sint_t Ts3 = T[(s3 & ~SUFFIX_GROUP_MARKER) - 1]; libsais16_prefetchw(&induction_bucket[Ts3]); libsais16_prefetchw(&distinct_names[BUCKETS_INDEX2(Ts3, 0)]); } sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX; if (p0 > 0) { SA[i + 0] = 0; d += (p0 >> (SUFFIX_GROUP_BIT - 1)); p0 &= ~SUFFIX_GROUP_MARKER; sa_sint_t v0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] < T[p0 - 1]); SA[induction_bucket[T[p0 - 1]]++] = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] < T[p0 - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v0] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v0] = d; } sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX; if (p1 > 0) { SA[i + 1] = 0; d += (p1 >> (SUFFIX_GROUP_BIT - 1)); p1 &= ~SUFFIX_GROUP_MARKER; sa_sint_t v1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] < T[p1 - 1]); SA[induction_bucket[T[p1 - 1]]++] = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] < T[p1 - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v1] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v1] = d; } } for (j += 2 * prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { SA[i] = 0; d += (p >> (SUFFIX_GROUP_BIT - 1)); p &= ~SUFFIX_GROUP_MARKER; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] < T[p - 1]); SA[induction_bucket[T[p - 1]]++] = (p - 1) | ((sa_sint_t)(T[p - 2] < T[p - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v] = d; } } return d; } static void libsais16_partial_sorting_scan_left_to_right_32s_1k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + 
omp_block_size - 2 * prefetch_distance - 1; i < j; i += 2) { libsais16_prefetchw(&SA[i + 3 * prefetch_distance]); sa_sint_t s0 = SA[i + 2 * prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + 2 * prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); sa_sint_t s2 = SA[i + 1 * prefetch_distance + 0]; if (s2 > 0) { libsais16_prefetchw(&induction_bucket[T[s2 - 1]]); libsais16_prefetch(&T[s2] - 2); } sa_sint_t s3 = SA[i + 1 * prefetch_distance + 1]; if (s3 > 0) { libsais16_prefetchw(&induction_bucket[T[s3 - 1]]); libsais16_prefetch(&T[s3] - 2); } sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX; if (p0 > 0) { SA[i + 0] = 0; SA[induction_bucket[T[p0 - 1]]++] = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] < T[p0 - 1]) << (SAINT_BIT - 1)); } sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX; if (p1 > 0) { SA[i + 1] = 0; SA[induction_bucket[T[p1 - 1]]++] = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] < T[p1 - 1]) << (SAINT_BIT - 1)); } } for (j += 2 * prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { SA[i] = 0; SA[induction_bucket[T[p - 1]]++] = (p - 1) | ((sa_sint_t)(T[p - 2] < T[p - 1]) << (SAINT_BIT - 1)); } } } #if defined(_OPENMP) static void libsais16_partial_sorting_scan_left_to_right_32s_6k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais16_prefetch(&SA[i + 2 * prefetch_distance]); libsais16_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 1); libsais16_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 2); libsais16_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 1); libsais16_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 2); libsais16_prefetchw(&cache[i + prefetch_distance]); sa_sint_t p0 = cache[i + 0].index = SA[i + 0]; sa_sint_t symbol0 = 0; p0 &= SAINT_MAX; if (p0 != 0) { symbol0 = BUCKETS_INDEX4(T[p0 - 1], T[p0 - 2] >= T[p0 - 1]); } cache[i + 0].symbol = symbol0; sa_sint_t p1 = cache[i + 1].index = SA[i + 1]; sa_sint_t symbol1 = 0; p1 &= SAINT_MAX; if (p1 != 0) { symbol1 = BUCKETS_INDEX4(T[p1 - 1], T[p1 - 2] >= T[p1 - 1]); } cache[i + 1].symbol = symbol1; } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = cache[i].index = SA[i]; sa_sint_t symbol = 0; p &= SAINT_MAX; if (p != 0) { symbol = BUCKETS_INDEX4(T[p - 1], T[p - 2] >= T[p - 1]); } cache[i].symbol = symbol; } } static void libsais16_partial_sorting_scan_left_to_right_32s_4k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais16_prefetchw(&SA[i + 2 * prefetch_distance]); sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0 & ~SUFFIX_GROUP_MARKER] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1 & ~SUFFIX_GROUP_MARKER] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? 
Ts1 : NULL); libsais16_prefetchw(&cache[i + prefetch_distance]); sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; if (p0 > 0) { cache[i + 0].index = p0; p0 &= ~SUFFIX_GROUP_MARKER; symbol0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] < T[p0 - 1]); p0 = 0; } cache[i + 0].symbol = symbol0; SA[i + 0] = p0 & SAINT_MAX; sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; if (p1 > 0) { cache[i + 1].index = p1; p1 &= ~SUFFIX_GROUP_MARKER; symbol1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] < T[p1 - 1]); p1 = 0; } cache[i + 1].symbol = symbol1; SA[i + 1] = p1 & SAINT_MAX; } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t symbol = SAINT_MIN, p = SA[i]; if (p > 0) { cache[i].index = p; p &= ~SUFFIX_GROUP_MARKER; symbol = BUCKETS_INDEX2(T[p - 1], T[p - 2] < T[p - 1]); p = 0; } cache[i].symbol = symbol; SA[i] = p & SAINT_MAX; } } static void libsais16_partial_sorting_scan_left_to_right_32s_1k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais16_prefetchw(&SA[i + 2 * prefetch_distance]); sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); libsais16_prefetchw(&cache[i + prefetch_distance]); sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; if (p0 > 0) { cache[i + 0].index = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] < T[p0 - 1]) << (SAINT_BIT - 1)); symbol0 = T[p0 - 1]; p0 = 0; } cache[i + 0].symbol = symbol0; SA[i + 0] = p0 & SAINT_MAX; sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; if (p1 > 0) { cache[i + 1].index = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] < T[p1 - 1]) << (SAINT_BIT - 1)); symbol1 = T[p1 - 1]; p1 = 0; } cache[i + 1].symbol = symbol1; SA[i + 1] = p1 & SAINT_MAX; } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t symbol = SAINT_MIN, p = SA[i]; if (p > 0) { cache[i].index = (p - 1) | ((sa_sint_t)(T[p - 2] < T[p - 1]) << (SAINT_BIT - 1)); symbol = T[p - 1]; p = 0; } cache[i].symbol = symbol; SA[i] = p & SAINT_MAX; } } static sa_sint_t libsais16_partial_sorting_scan_left_to_right_32s_6k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j, omp_block_end = omp_block_start + omp_block_size; for (i = omp_block_start, j = omp_block_end - prefetch_distance - 1; i < j; i += 2) { libsais16_prefetchw(&cache[i + 2 * prefetch_distance]); libsais16_prefetchw(&buckets[cache[i + prefetch_distance + 0].symbol]); libsais16_prefetchw(&buckets[cache[i + prefetch_distance + 1].symbol]); sa_sint_t v0 = cache[i + 0].symbol, p0 = cache[i + 0].index; d += (p0 < 0); cache[i + 0].symbol = buckets[v0]++; cache[i + 0].index = (p0 - 1) | ((sa_sint_t)(buckets[2 + v0] != d) << (SAINT_BIT - 1)); buckets[2 + v0] = d; if (cache[i + 0].symbol < omp_block_end) { sa_sint_t s = cache[i + 0].symbol, q = (cache[s].index = cache[i + 0].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] >= T[q - 1]); } sa_sint_t v1 = cache[i + 1].symbol, p1 = cache[i + 1].index; d += (p1 < 
0); cache[i + 1].symbol = buckets[v1]++; cache[i + 1].index = (p1 - 1) | ((sa_sint_t)(buckets[2 + v1] != d) << (SAINT_BIT - 1)); buckets[2 + v1] = d; if (cache[i + 1].symbol < omp_block_end) { sa_sint_t s = cache[i + 1].symbol, q = (cache[s].index = cache[i + 1].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] >= T[q - 1]); } } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t v = cache[i].symbol, p = cache[i].index; d += (p < 0); cache[i].symbol = buckets[v]++; cache[i].index = (p - 1) | ((sa_sint_t)(buckets[2 + v] != d) << (SAINT_BIT - 1)); buckets[2 + v] = d; if (cache[i].symbol < omp_block_end) { sa_sint_t s = cache[i].symbol, q = (cache[s].index = cache[i].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] >= T[q - 1]); } } return d; } static sa_sint_t libsais16_partial_sorting_scan_left_to_right_32s_4k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT induction_bucket = &buckets[2 * k]; sa_sint_t * RESTRICT distinct_names = &buckets[0 * k]; fast_sint_t i, j, omp_block_end = omp_block_start + omp_block_size; for (i = omp_block_start, j = omp_block_end - prefetch_distance - 1; i < j; i += 2) { libsais16_prefetchw(&cache[i + 2 * prefetch_distance]); sa_sint_t s0 = cache[i + prefetch_distance + 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0 >> 1]; libsais16_prefetchw(s0 >= 0 ? Is0 : NULL); const sa_sint_t * Ds0 = &distinct_names[s0]; libsais16_prefetchw(s0 >= 0 ? Ds0 : NULL); sa_sint_t s1 = cache[i + prefetch_distance + 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1 >> 1]; libsais16_prefetchw(s1 >= 0 ? Is1 : NULL); const sa_sint_t * Ds1 = &distinct_names[s1]; libsais16_prefetchw(s1 >= 0 ? 
Ds1 : NULL); sa_sint_t v0 = cache[i + 0].symbol; if (v0 >= 0) { sa_sint_t p0 = cache[i + 0].index; d += (p0 >> (SUFFIX_GROUP_BIT - 1)); cache[i + 0].symbol = induction_bucket[v0 >> 1]++; cache[i + 0].index = (p0 - 1) | (v0 << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v0] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v0] = d; if (cache[i + 0].symbol < omp_block_end) { sa_sint_t ni = cache[i + 0].symbol, np = cache[i + 0].index; if (np > 0) { cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] < T[np - 1]); np = 0; } cache[i + 0].index = np & SAINT_MAX; } } sa_sint_t v1 = cache[i + 1].symbol; if (v1 >= 0) { sa_sint_t p1 = cache[i + 1].index; d += (p1 >> (SUFFIX_GROUP_BIT - 1)); cache[i + 1].symbol = induction_bucket[v1 >> 1]++; cache[i + 1].index = (p1 - 1) | (v1 << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v1] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v1] = d; if (cache[i + 1].symbol < omp_block_end) { sa_sint_t ni = cache[i + 1].symbol, np = cache[i + 1].index; if (np > 0) { cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] < T[np - 1]); np = 0; } cache[i + 1].index = np & SAINT_MAX; } } } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t v = cache[i].symbol; if (v >= 0) { sa_sint_t p = cache[i].index; d += (p >> (SUFFIX_GROUP_BIT - 1)); cache[i].symbol = induction_bucket[v >> 1]++; cache[i].index = (p - 1) | (v << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v] = d; if (cache[i].symbol < omp_block_end) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; if (np > 0) { cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] < T[np - 1]); np = 0; } cache[i].index = np & SAINT_MAX; } } } return d; } static void libsais16_partial_sorting_scan_left_to_right_32s_1k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j, omp_block_end = omp_block_start + omp_block_size; for (i = omp_block_start, j = omp_block_end - prefetch_distance - 1; i < j; i += 2) { libsais16_prefetchw(&cache[i + 2 * prefetch_distance]); sa_sint_t s0 = cache[i + prefetch_distance + 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0]; libsais16_prefetchw(s0 >= 0 ? Is0 : NULL); sa_sint_t s1 = cache[i + prefetch_distance + 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1]; libsais16_prefetchw(s1 >= 0 ? 
Is1 : NULL); sa_sint_t v0 = cache[i + 0].symbol; if (v0 >= 0) { cache[i + 0].symbol = induction_bucket[v0]++; if (cache[i + 0].symbol < omp_block_end) { sa_sint_t ni = cache[i + 0].symbol, np = cache[i + 0].index; if (np > 0) { cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] < T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; np = 0; } cache[i + 0].index = np & SAINT_MAX; } } sa_sint_t v1 = cache[i + 1].symbol; if (v1 >= 0) { cache[i + 1].symbol = induction_bucket[v1]++; if (cache[i + 1].symbol < omp_block_end) { sa_sint_t ni = cache[i + 1].symbol, np = cache[i + 1].index; if (np > 0) { cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] < T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; np = 0; } cache[i + 1].index = np & SAINT_MAX; } } } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t v = cache[i].symbol; if (v >= 0) { cache[i].symbol = induction_bucket[v]++; if (cache[i].symbol < omp_block_end) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; if (np > 0) { cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] < T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; np = 0; } cache[i].index = np & SAINT_MAX; } } } } static sa_sint_t libsais16_partial_sorting_scan_left_to_right_32s_6k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(cache); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { d = libsais16_partial_sorting_scan_left_to_right_32s_6k(T, SA, buckets, d, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { libsais16_partial_sorting_scan_left_to_right_32s_6k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { d = libsais16_partial_sorting_scan_left_to_right_32s_6k_block_sort(T, buckets, d, cache - block_start, block_start, block_size); } #pragma omp barrier { libsais16_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size); } } #endif } return d; } static sa_sint_t libsais16_partial_sorting_scan_left_to_right_32s_4k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(cache); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { d = libsais16_partial_sorting_scan_left_to_right_32s_4k(T, SA, k, buckets, d, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { libsais16_partial_sorting_scan_left_to_right_32s_4k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { d = libsais16_partial_sorting_scan_left_to_right_32s_4k_block_sort(T, k, buckets, d, cache - block_start, block_start, block_size); } #pragma omp barrier { libsais16_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size); } } #endif } return d; } static void libsais16_partial_sorting_scan_left_to_right_32s_1k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(cache); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais16_partial_sorting_scan_left_to_right_32s_1k(T, SA, buckets, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { libsais16_partial_sorting_scan_left_to_right_32s_1k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { libsais16_partial_sorting_scan_left_to_right_32s_1k_block_sort(T, buckets, cache - block_start, block_start, block_size); } #pragma omp barrier { libsais16_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size); } } #endif } } #endif static sa_sint_t libsais16_partial_sorting_scan_left_to_right_32s_6k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t left_suffixes_count, sa_sint_t d, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { SA[buckets[BUCKETS_INDEX4(T[n - 1], T[n - 2] >= T[n - 1])]++] = (n - 1) | SAINT_MIN; buckets[2 + BUCKETS_INDEX4(T[n - 1], T[n - 2] >= T[n - 1])] = ++d; if (threads == 1 || left_suffixes_count < 65536) { d = libsais16_partial_sorting_scan_left_to_right_32s_6k(T, SA, buckets, d, 0, left_suffixes_count); } #if defined(_OPENMP) else { fast_sint_t block_start, block_end; for (block_start = 0; block_start < left_suffixes_count; block_start = block_end) { block_end = block_start + (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end > left_suffixes_count) { block_end = left_suffixes_count; } d = libsais16_partial_sorting_scan_left_to_right_32s_6k_block_omp(T, SA, buckets, d, thread_state[0].state.cache, block_start, block_end - block_start, threads); } } #else UNUSED(thread_state); #endif return d; } static sa_sint_t libsais16_partial_sorting_scan_left_to_right_32s_4k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t * RESTRICT induction_bucket = &buckets[2 * k]; sa_sint_t * RESTRICT distinct_names = &buckets[0 * k]; SA[induction_bucket[T[n - 1]]++] = (n - 1) | ((sa_sint_t)(T[n - 2] < T[n - 1]) << (SAINT_BIT - 1)) | SUFFIX_GROUP_MARKER; distinct_names[BUCKETS_INDEX2(T[n - 1], T[n - 2] < T[n - 1])] = ++d; if (threads == 1 || n < 65536) { d = libsais16_partial_sorting_scan_left_to_right_32s_4k(T, SA, k, buckets, d, 0, n); } #if defined(_OPENMP) else { fast_sint_t block_start, block_end; for (block_start = 0; block_start < n; block_start = block_end) { block_end = block_start + (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end > n) { block_end = n; } d = libsais16_partial_sorting_scan_left_to_right_32s_4k_block_omp(T, SA, k, buckets, d, thread_state[0].state.cache, block_start, block_end - block_start, threads); } } #else UNUSED(thread_state); #endif return d; } static void libsais16_partial_sorting_scan_left_to_right_32s_1k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { SA[buckets[T[n - 1]]++] = (n - 1) | ((sa_sint_t)(T[n - 2] < T[n - 1]) << (SAINT_BIT - 1)); if (threads == 1 || n < 65536) { libsais16_partial_sorting_scan_left_to_right_32s_1k(T, SA, buckets, 0, n); } #if defined(_OPENMP) else { fast_sint_t block_start, block_end; for (block_start = 0; block_start < n; block_start = block_end) { block_end = block_start + (fast_sint_t)threads * 
LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end > n) { block_end = n; } libsais16_partial_sorting_scan_left_to_right_32s_1k_block_omp(T, SA, buckets, thread_state[0].state.cache, block_start, block_end - block_start, threads); } } #else UNUSED(thread_state); #endif } static void libsais16_partial_sorting_shift_markers_16u_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, const sa_sint_t * RESTRICT buckets, sa_sint_t threads) { const fast_sint_t prefetch_distance = 32; const sa_sint_t * RESTRICT temp_bucket = &buckets[4 * ALPHABET_SIZE]; fast_sint_t c; #if defined(_OPENMP) #pragma omp parallel for schedule(static, 1) num_threads(threads) if(threads > 1 && n >= 65536) #else UNUSED(threads); UNUSED(n); #endif for (c = BUCKETS_INDEX2(ALPHABET_SIZE - 1, 0); c >= BUCKETS_INDEX2(1, 0); c -= BUCKETS_INDEX2(1, 0)) { fast_sint_t i, j; sa_sint_t s = SAINT_MIN; for (i = (fast_sint_t)temp_bucket[c] - 1, j = (fast_sint_t)buckets[c - BUCKETS_INDEX2(1, 0)] + 3; i >= j; i -= 4) { libsais16_prefetchw(&SA[i - prefetch_distance]); sa_sint_t p0 = SA[i - 0], q0 = (p0 & SAINT_MIN) ^ s; s = s ^ q0; SA[i - 0] = p0 ^ q0; sa_sint_t p1 = SA[i - 1], q1 = (p1 & SAINT_MIN) ^ s; s = s ^ q1; SA[i - 1] = p1 ^ q1; sa_sint_t p2 = SA[i - 2], q2 = (p2 & SAINT_MIN) ^ s; s = s ^ q2; SA[i - 2] = p2 ^ q2; sa_sint_t p3 = SA[i - 3], q3 = (p3 & SAINT_MIN) ^ s; s = s ^ q3; SA[i - 3] = p3 ^ q3; } for (j -= 3; i >= j; i -= 1) { sa_sint_t p = SA[i], q = (p & SAINT_MIN) ^ s; s = s ^ q; SA[i] = p ^ q; } } } static void libsais16_partial_sorting_shift_markers_32s_6k_omp(sa_sint_t * RESTRICT SA, sa_sint_t k, const sa_sint_t * RESTRICT buckets, sa_sint_t threads) { const fast_sint_t prefetch_distance = 32; const sa_sint_t * RESTRICT temp_bucket = &buckets[4 * k]; fast_sint_t c; #if defined(_OPENMP) #pragma omp parallel for schedule(static, 1) num_threads(threads) if(threads > 1 && k >= 65536) #else UNUSED(threads); #endif for (c = (fast_sint_t)k - 1; c >= 1; c -= 1) { fast_sint_t i, j; sa_sint_t s = SAINT_MIN; for (i = (fast_sint_t)buckets[BUCKETS_INDEX4(c, 0)] - 1, j = (fast_sint_t)temp_bucket[BUCKETS_INDEX2(c - 1, 0)] + 3; i >= j; i -= 4) { libsais16_prefetchw(&SA[i - prefetch_distance]); sa_sint_t p0 = SA[i - 0], q0 = (p0 & SAINT_MIN) ^ s; s = s ^ q0; SA[i - 0] = p0 ^ q0; sa_sint_t p1 = SA[i - 1], q1 = (p1 & SAINT_MIN) ^ s; s = s ^ q1; SA[i - 1] = p1 ^ q1; sa_sint_t p2 = SA[i - 2], q2 = (p2 & SAINT_MIN) ^ s; s = s ^ q2; SA[i - 2] = p2 ^ q2; sa_sint_t p3 = SA[i - 3], q3 = (p3 & SAINT_MIN) ^ s; s = s ^ q3; SA[i - 3] = p3 ^ q3; } for (j -= 3; i >= j; i -= 1) { sa_sint_t p = SA[i], q = (p & SAINT_MIN) ^ s; s = s ^ q; SA[i] = p ^ q; } } } static void libsais16_partial_sorting_shift_markers_32s_4k(sa_sint_t * RESTRICT SA, sa_sint_t n) { const fast_sint_t prefetch_distance = 32; fast_sint_t i; sa_sint_t s = SUFFIX_GROUP_MARKER; for (i = (fast_sint_t)n - 1; i >= 3; i -= 4) { libsais16_prefetchw(&SA[i - prefetch_distance]); sa_sint_t p0 = SA[i - 0], q0 = ((p0 & SUFFIX_GROUP_MARKER) ^ s) & ((sa_sint_t)(p0 > 0) << ((SUFFIX_GROUP_BIT - 1))); s = s ^ q0; SA[i - 0] = p0 ^ q0; sa_sint_t p1 = SA[i - 1], q1 = ((p1 & SUFFIX_GROUP_MARKER) ^ s) & ((sa_sint_t)(p1 > 0) << ((SUFFIX_GROUP_BIT - 1))); s = s ^ q1; SA[i - 1] = p1 ^ q1; sa_sint_t p2 = SA[i - 2], q2 = ((p2 & SUFFIX_GROUP_MARKER) ^ s) & ((sa_sint_t)(p2 > 0) << ((SUFFIX_GROUP_BIT - 1))); s = s ^ q2; SA[i - 2] = p2 ^ q2; sa_sint_t p3 = SA[i - 3], q3 = ((p3 & SUFFIX_GROUP_MARKER) ^ s) & ((sa_sint_t)(p3 > 0) << ((SUFFIX_GROUP_BIT - 1))); s = s ^ q3; SA[i - 3] = p3 ^ q3; } for (; i >= 0; i -= 1) { sa_sint_t p = SA[i], 
q = ((p & SUFFIX_GROUP_MARKER) ^ s) & ((sa_sint_t)(p > 0) << ((SUFFIX_GROUP_BIT - 1))); s = s ^ q; SA[i] = p ^ q; } } static void libsais16_partial_sorting_shift_buckets_32s_6k(sa_sint_t k, sa_sint_t * RESTRICT buckets) { sa_sint_t * RESTRICT temp_bucket = &buckets[4 * k]; fast_sint_t i; for (i = BUCKETS_INDEX2(0, 0); i <= BUCKETS_INDEX2((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX2(1, 0)) { buckets[2 * i + BUCKETS_INDEX4(0, 0)] = temp_bucket[i + BUCKETS_INDEX2(0, 0)]; buckets[2 * i + BUCKETS_INDEX4(0, 1)] = temp_bucket[i + BUCKETS_INDEX2(0, 1)]; } } static sa_sint_t libsais16_partial_sorting_scan_right_to_left_16u(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE]; sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE]; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais16_prefetch(&SA[i - 2 * prefetch_distance]); libsais16_prefetch(&T[SA[i - prefetch_distance - 0] & SAINT_MAX] - 1); libsais16_prefetch(&T[SA[i - prefetch_distance - 0] & SAINT_MAX] - 2); libsais16_prefetch(&T[SA[i - prefetch_distance - 1] & SAINT_MAX] - 1); libsais16_prefetch(&T[SA[i - prefetch_distance - 1] & SAINT_MAX] - 2); sa_sint_t p0 = SA[i - 0]; d += (p0 < 0); p0 &= SAINT_MAX; sa_sint_t v0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] > T[p0 - 1]); SA[--induction_bucket[v0]] = (p0 - 1) | ((sa_sint_t)(distinct_names[v0] != d) << (SAINT_BIT - 1)); distinct_names[v0] = d; sa_sint_t p1 = SA[i - 1]; d += (p1 < 0); p1 &= SAINT_MAX; sa_sint_t v1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] > T[p1 - 1]); SA[--induction_bucket[v1]] = (p1 - 1) | ((sa_sint_t)(distinct_names[v1] != d) << (SAINT_BIT - 1)); distinct_names[v1] = d; } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] > T[p - 1]); SA[--induction_bucket[v]] = (p - 1) | ((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d; } return d; } #if defined(_OPENMP) static void libsais16_partial_sorting_scan_right_to_left_16u_block_prepare(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size, LIBSAIS_THREAD_STATE * RESTRICT state) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE]; sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE]; memset(buckets, 0, 4 * ALPHABET_SIZE * sizeof(sa_sint_t)); fast_sint_t i, j, count = 0; sa_sint_t d = 1; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais16_prefetch(&SA[i - 2 * prefetch_distance]); libsais16_prefetch(&T[SA[i - prefetch_distance - 0] & SAINT_MAX] - 1); libsais16_prefetch(&T[SA[i - prefetch_distance - 0] & SAINT_MAX] - 2); libsais16_prefetch(&T[SA[i - prefetch_distance - 1] & SAINT_MAX] - 1); libsais16_prefetch(&T[SA[i - prefetch_distance - 1] & SAINT_MAX] - 2); sa_sint_t p0 = cache[count].index = SA[i - 0]; d += (p0 < 0); p0 &= SAINT_MAX; sa_sint_t v0 = cache[count++].symbol = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] > T[p0 - 1]); induction_bucket[v0]++; distinct_names[v0] = d; sa_sint_t p1 = cache[count].index = SA[i - 1]; d += (p1 < 0); p1 &= SAINT_MAX; 
sa_sint_t v1 = cache[count++].symbol = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] > T[p1 - 1]); induction_bucket[v1]++; distinct_names[v1] = d; } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = cache[count].index = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = cache[count++].symbol = BUCKETS_INDEX2(T[p - 1], T[p - 2] > T[p - 1]); induction_bucket[v]++; distinct_names[v] = d; } state[0].state.position = (fast_sint_t)d - 1; state[0].state.count = count; } static void libsais16_partial_sorting_scan_right_to_left_16u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count, sa_sint_t d) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE]; sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE]; fast_sint_t i, j; for (i = 0, j = count - 1; i < j; i += 2) { libsais16_prefetch(&cache[i + prefetch_distance]); sa_sint_t p0 = cache[i + 0].index; d += (p0 < 0); sa_sint_t v0 = cache[i + 0].symbol; SA[--induction_bucket[v0]] = (p0 - 1) | ((sa_sint_t)(distinct_names[v0] != d) << (SAINT_BIT - 1)); distinct_names[v0] = d; sa_sint_t p1 = cache[i + 1].index; d += (p1 < 0); sa_sint_t v1 = cache[i + 1].symbol; SA[--induction_bucket[v1]] = (p1 - 1) | ((sa_sint_t)(distinct_names[v1] != d) << (SAINT_BIT - 1)); distinct_names[v1] = d; } for (j += 1; i < j; i += 1) { sa_sint_t p = cache[i].index; d += (p < 0); sa_sint_t v = cache[i].symbol; SA[--induction_bucket[v]] = (p - 1) | ((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d; } } static sa_sint_t libsais16_partial_sorting_scan_right_to_left_16u_block_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { d = libsais16_partial_sorting_scan_right_to_left_16u(T, SA, buckets, d, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { libsais16_partial_sorting_scan_right_to_left_16u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size, &thread_state[omp_thread_num]); } #pragma omp barrier #pragma omp master { sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE]; sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE]; fast_sint_t t; for (t = omp_num_threads - 1; t >= 0; --t) { sa_sint_t * RESTRICT temp_induction_bucket = &thread_state[t].state.buckets[0 * ALPHABET_SIZE]; sa_sint_t * RESTRICT temp_distinct_names = &thread_state[t].state.buckets[2 * ALPHABET_SIZE]; fast_sint_t c; for (c = 0; c < 2 * ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_induction_bucket[c]; induction_bucket[c] = A - B; temp_induction_bucket[c] = A; } for (d -= 1, c = 0; c < 2 * ALPHABET_SIZE; c += 1) { sa_sint_t A = distinct_names[c], B = temp_distinct_names[c], D = B + d; distinct_names[c] = B > 0 ? D : A; temp_distinct_names[c] = A; } d += 1 + (sa_sint_t)thread_state[t].state.position; thread_state[t].state.position = (fast_sint_t)d - thread_state[t].state.position; } } #pragma omp barrier { libsais16_partial_sorting_scan_right_to_left_16u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count, (sa_sint_t)thread_state[omp_thread_num].state.position); } } #endif } return d; } #endif static void libsais16_partial_sorting_scan_right_to_left_16u_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count, sa_sint_t d, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { fast_sint_t scan_start = (fast_sint_t)left_suffixes_count + 1; fast_sint_t scan_end = (fast_sint_t)n - (fast_sint_t)first_lms_suffix; if (threads == 1 || (scan_end - scan_start) < 65536) { libsais16_partial_sorting_scan_right_to_left_16u(T, SA, buckets, d, scan_start, scan_end - scan_start); } #if defined(_OPENMP) else { sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE]; sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE]; fast_sint_t block_start; for (block_start = scan_end - 1; block_start >= scan_start; ) { if (SA[block_start] == 0) { block_start--; } else { fast_sint_t block_max_end = block_start - ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end < scan_start) { block_max_end = scan_start - 1; } fast_sint_t block_end = block_start - 1; while (block_end > block_max_end && SA[block_end] != 0) { block_end--; } fast_sint_t block_size = block_start - block_end; if (block_size < 32) { for (; block_start > block_end; block_start -= 1) { sa_sint_t p = SA[block_start]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] > T[p - 1]); SA[--induction_bucket[v]] = (p - 1) | ((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d; } } else { d = libsais16_partial_sorting_scan_right_to_left_16u_block_omp(T, SA, buckets, d, block_end + 1, block_size, threads, thread_state); block_start = block_end; } } } } #else UNUSED(thread_state); #endif } static sa_sint_t 
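/*
 * Right-to-left induction scan over one block (32-bit, 6k-bucket variant).
 * For each entry p taken from SA, the preceding suffix p - 1 is written to
 * the next free slot (filled from the high end) of its
 * BUCKETS_INDEX4(T[p - 1], T[p - 2] > T[p - 1]) bucket. The running counter
 * d tracks partial-sort groups, and the sign bit of the stored value flags
 * entries whose group differs from the one last written into the same
 * bucket. Returns the updated value of d.
 */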
libsais16_partial_sorting_scan_right_to_left_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 1; i >= j; i -= 2) { libsais16_prefetch(&SA[i - 3 * prefetch_distance]); libsais16_prefetch(&T[SA[i - 2 * prefetch_distance - 0] & SAINT_MAX] - 1); libsais16_prefetch(&T[SA[i - 2 * prefetch_distance - 0] & SAINT_MAX] - 2); libsais16_prefetch(&T[SA[i - 2 * prefetch_distance - 1] & SAINT_MAX] - 1); libsais16_prefetch(&T[SA[i - 2 * prefetch_distance - 1] & SAINT_MAX] - 2); sa_sint_t p0 = SA[i - prefetch_distance - 0] & SAINT_MAX; sa_sint_t v0 = BUCKETS_INDEX4(T[p0 - (p0 > 0)], 0); libsais16_prefetchw(&buckets[v0]); sa_sint_t p1 = SA[i - prefetch_distance - 1] & SAINT_MAX; sa_sint_t v1 = BUCKETS_INDEX4(T[p1 - (p1 > 0)], 0); libsais16_prefetchw(&buckets[v1]); sa_sint_t p2 = SA[i - 0]; d += (p2 < 0); p2 &= SAINT_MAX; sa_sint_t v2 = BUCKETS_INDEX4(T[p2 - 1], T[p2 - 2] > T[p2 - 1]); SA[--buckets[v2]] = (p2 - 1) | ((sa_sint_t)(buckets[2 + v2] != d) << (SAINT_BIT - 1)); buckets[2 + v2] = d; sa_sint_t p3 = SA[i - 1]; d += (p3 < 0); p3 &= SAINT_MAX; sa_sint_t v3 = BUCKETS_INDEX4(T[p3 - 1], T[p3 - 2] > T[p3 - 1]); SA[--buckets[v3]] = (p3 - 1) | ((sa_sint_t)(buckets[2 + v3] != d) << (SAINT_BIT - 1)); buckets[2 + v3] = d; } for (j -= 2 * prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX4(T[p - 1], T[p - 2] > T[p - 1]); SA[--buckets[v]] = (p - 1) | ((sa_sint_t)(buckets[2 + v] != d) << (SAINT_BIT - 1)); buckets[2 + v] = d; } return d; } static sa_sint_t libsais16_partial_sorting_scan_right_to_left_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT induction_bucket = &buckets[3 * k]; sa_sint_t * RESTRICT distinct_names = &buckets[0 * k]; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 1; i >= j; i -= 2) { libsais16_prefetchw(&SA[i - 3 * prefetch_distance]); sa_sint_t s0 = SA[i - 2 * prefetch_distance - 0]; const sa_sint_t * Ts0 = &T[s0 & ~SUFFIX_GROUP_MARKER] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i - 2 * prefetch_distance - 1]; const sa_sint_t * Ts1 = &T[s1 & ~SUFFIX_GROUP_MARKER] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? 
Ts1 : NULL); sa_sint_t s2 = SA[i - 1 * prefetch_distance - 0]; if (s2 > 0) { const fast_sint_t Ts2 = T[(s2 & ~SUFFIX_GROUP_MARKER) - 1]; libsais16_prefetchw(&induction_bucket[Ts2]); libsais16_prefetchw(&distinct_names[BUCKETS_INDEX2(Ts2, 0)]); } sa_sint_t s3 = SA[i - 1 * prefetch_distance - 1]; if (s3 > 0) { const fast_sint_t Ts3 = T[(s3 & ~SUFFIX_GROUP_MARKER) - 1]; libsais16_prefetchw(&induction_bucket[Ts3]); libsais16_prefetchw(&distinct_names[BUCKETS_INDEX2(Ts3, 0)]); } sa_sint_t p0 = SA[i - 0]; if (p0 > 0) { SA[i - 0] = 0; d += (p0 >> (SUFFIX_GROUP_BIT - 1)); p0 &= ~SUFFIX_GROUP_MARKER; sa_sint_t v0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] > T[p0 - 1]); SA[--induction_bucket[T[p0 - 1]]] = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] > T[p0 - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v0] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v0] = d; } sa_sint_t p1 = SA[i - 1]; if (p1 > 0) { SA[i - 1] = 0; d += (p1 >> (SUFFIX_GROUP_BIT - 1)); p1 &= ~SUFFIX_GROUP_MARKER; sa_sint_t v1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] > T[p1 - 1]); SA[--induction_bucket[T[p1 - 1]]] = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] > T[p1 - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v1] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v1] = d; } } for (j -= 2 * prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; if (p > 0) { SA[i] = 0; d += (p >> (SUFFIX_GROUP_BIT - 1)); p &= ~SUFFIX_GROUP_MARKER; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] > T[p - 1]); SA[--induction_bucket[T[p - 1]]] = (p - 1) | ((sa_sint_t)(T[p - 2] > T[p - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v] = d; } } return d; } static void libsais16_partial_sorting_scan_right_to_left_32s_1k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 1; i >= j; i -= 2) { libsais16_prefetchw(&SA[i - 3 * prefetch_distance]); sa_sint_t s0 = SA[i - 2 * prefetch_distance - 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i - 2 * prefetch_distance - 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? 
Ts1 : NULL); sa_sint_t s2 = SA[i - 1 * prefetch_distance - 0]; if (s2 > 0) { libsais16_prefetchw(&induction_bucket[T[s2 - 1]]); libsais16_prefetch(&T[s2] - 2); } sa_sint_t s3 = SA[i - 1 * prefetch_distance - 1]; if (s3 > 0) { libsais16_prefetchw(&induction_bucket[T[s3 - 1]]); libsais16_prefetch(&T[s3] - 2); } sa_sint_t p0 = SA[i - 0]; if (p0 > 0) { SA[i - 0] = 0; SA[--induction_bucket[T[p0 - 1]]] = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] > T[p0 - 1]) << (SAINT_BIT - 1)); } sa_sint_t p1 = SA[i - 1]; if (p1 > 0) { SA[i - 1] = 0; SA[--induction_bucket[T[p1 - 1]]] = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] > T[p1 - 1]) << (SAINT_BIT - 1)); } } for (j -= 2 * prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; if (p > 0) { SA[i] = 0; SA[--induction_bucket[T[p - 1]]] = (p - 1) | ((sa_sint_t)(T[p - 2] > T[p - 1]) << (SAINT_BIT - 1)); } } } #if defined(_OPENMP) static void libsais16_partial_sorting_scan_right_to_left_32s_6k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais16_prefetch(&SA[i + 2 * prefetch_distance]); libsais16_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 1); libsais16_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 2); libsais16_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 1); libsais16_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 2); libsais16_prefetchw(&cache[i + prefetch_distance]); sa_sint_t p0 = cache[i + 0].index = SA[i + 0]; sa_sint_t symbol0 = 0; p0 &= SAINT_MAX; if (p0 != 0) { symbol0 = BUCKETS_INDEX4(T[p0 - 1], T[p0 - 2] > T[p0 - 1]); } cache[i + 0].symbol = symbol0; sa_sint_t p1 = cache[i + 1].index = SA[i + 1]; sa_sint_t symbol1 = 0; p1 &= SAINT_MAX; if (p1 != 0) { symbol1 = BUCKETS_INDEX4(T[p1 - 1], T[p1 - 2] > T[p1 - 1]); } cache[i + 1].symbol = symbol1; } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = cache[i].index = SA[i]; sa_sint_t symbol = 0; p &= SAINT_MAX; if (p != 0) { symbol = BUCKETS_INDEX4(T[p - 1], T[p - 2] > T[p - 1]); } cache[i].symbol = symbol; } } static void libsais16_partial_sorting_scan_right_to_left_32s_4k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais16_prefetchw(&SA[i + 2 * prefetch_distance]); sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0 & ~SUFFIX_GROUP_MARKER] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1 & ~SUFFIX_GROUP_MARKER] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? 
Ts1 : NULL); libsais16_prefetchw(&cache[i + prefetch_distance]); sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; if (p0 > 0) { SA[i + 0] = 0; cache[i + 0].index = p0; p0 &= ~SUFFIX_GROUP_MARKER; symbol0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] > T[p0 - 1]); } cache[i + 0].symbol = symbol0; sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; if (p1 > 0) { SA[i + 1] = 0; cache[i + 1].index = p1; p1 &= ~SUFFIX_GROUP_MARKER; symbol1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] > T[p1 - 1]); } cache[i + 1].symbol = symbol1; } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t symbol = SAINT_MIN, p = SA[i]; if (p > 0) { SA[i] = 0; cache[i].index = p; p &= ~SUFFIX_GROUP_MARKER; symbol = BUCKETS_INDEX2(T[p - 1], T[p - 2] > T[p - 1]); } cache[i].symbol = symbol; } } static void libsais16_partial_sorting_scan_right_to_left_32s_1k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais16_prefetchw(&SA[i + 2 * prefetch_distance]); sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); libsais16_prefetchw(&cache[i + prefetch_distance]); sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; if (p0 > 0) { SA[i + 0] = 0; cache[i + 0].index = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] > T[p0 - 1]) << (SAINT_BIT - 1)); symbol0 = T[p0 - 1]; } cache[i + 0].symbol = symbol0; sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; if (p1 > 0) { SA[i + 1] = 0; cache[i + 1].index = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] > T[p1 - 1]) << (SAINT_BIT - 1)); symbol1 = T[p1 - 1]; } cache[i + 1].symbol = symbol1; } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t symbol = SAINT_MIN, p = SA[i]; if (p > 0) { SA[i] = 0; cache[i].index = (p - 1) | ((sa_sint_t)(T[p - 2] > T[p - 1]) << (SAINT_BIT - 1)); symbol = T[p - 1]; } cache[i].symbol = symbol; } } static sa_sint_t libsais16_partial_sorting_scan_right_to_left_32s_6k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais16_prefetchw(&cache[i - 2 * prefetch_distance]); libsais16_prefetchw(&buckets[cache[i - prefetch_distance - 0].symbol]); libsais16_prefetchw(&buckets[cache[i - prefetch_distance - 1].symbol]); sa_sint_t v0 = cache[i - 0].symbol, p0 = cache[i - 0].index; d += (p0 < 0); cache[i - 0].symbol = --buckets[v0]; cache[i - 0].index = (p0 - 1) | ((sa_sint_t)(buckets[2 + v0] != d) << (SAINT_BIT - 1)); buckets[2 + v0] = d; if (cache[i - 0].symbol >= omp_block_start) { sa_sint_t s = cache[i - 0].symbol, q = (cache[s].index = cache[i - 0].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] > T[q - 1]); } sa_sint_t v1 = cache[i - 1].symbol, p1 = cache[i - 1].index; d += (p1 < 0); cache[i - 1].symbol = --buckets[v1]; cache[i - 1].index = (p1 - 1) | ((sa_sint_t)(buckets[2 + v1] != d) << (SAINT_BIT - 1)); buckets[2 + v1] 
= d; if (cache[i - 1].symbol >= omp_block_start) { sa_sint_t s = cache[i - 1].symbol, q = (cache[s].index = cache[i - 1].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] > T[q - 1]); } } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t v = cache[i].symbol, p = cache[i].index; d += (p < 0); cache[i].symbol = --buckets[v]; cache[i].index = (p - 1) | ((sa_sint_t)(buckets[2 + v] != d) << (SAINT_BIT - 1)); buckets[2 + v] = d; if (cache[i].symbol >= omp_block_start) { sa_sint_t s = cache[i].symbol, q = (cache[s].index = cache[i].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] > T[q - 1]); } } return d; } static sa_sint_t libsais16_partial_sorting_scan_right_to_left_32s_4k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT induction_bucket = &buckets[3 * k]; sa_sint_t * RESTRICT distinct_names = &buckets[0 * k]; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais16_prefetchw(&cache[i - 2 * prefetch_distance]); sa_sint_t s0 = cache[i - prefetch_distance - 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0 >> 1]; libsais16_prefetchw(s0 >= 0 ? Is0 : NULL); const sa_sint_t * Ds0 = &distinct_names[s0]; libsais16_prefetchw(s0 >= 0 ? Ds0 : NULL); sa_sint_t s1 = cache[i - prefetch_distance - 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1 >> 1]; libsais16_prefetchw(s1 >= 0 ? Is1 : NULL); const sa_sint_t * Ds1 = &distinct_names[s1]; libsais16_prefetchw(s1 >= 0 ? Ds1 : NULL); sa_sint_t v0 = cache[i - 0].symbol; if (v0 >= 0) { sa_sint_t p0 = cache[i - 0].index; d += (p0 >> (SUFFIX_GROUP_BIT - 1)); cache[i - 0].symbol = --induction_bucket[v0 >> 1]; cache[i - 0].index = (p0 - 1) | (v0 << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v0] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v0] = d; if (cache[i - 0].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 0].symbol, np = cache[i - 0].index; if (np > 0) { cache[i - 0].index = 0; cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] > T[np - 1]); } } } sa_sint_t v1 = cache[i - 1].symbol; if (v1 >= 0) { sa_sint_t p1 = cache[i - 1].index; d += (p1 >> (SUFFIX_GROUP_BIT - 1)); cache[i - 1].symbol = --induction_bucket[v1 >> 1]; cache[i - 1].index = (p1 - 1) | (v1 << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v1] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v1] = d; if (cache[i - 1].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 1].symbol, np = cache[i - 1].index; if (np > 0) { cache[i - 1].index = 0; cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] > T[np - 1]); } } } } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t v = cache[i].symbol; if (v >= 0) { sa_sint_t p = cache[i].index; d += (p >> (SUFFIX_GROUP_BIT - 1)); cache[i].symbol = --induction_bucket[v >> 1]; cache[i].index = (p - 1) | (v << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v] = d; if (cache[i].symbol >= omp_block_start) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; if (np > 0) { cache[i].index = 0; cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] > T[np - 1]); } } } } return d; } static 
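/*
 * Sorting pass over the per-block cache for the 1k-bucket right-to-left
 * scan: entries are processed in reverse, each valid symbol v claims the
 * next free slot via --induction_bucket[v], and when that slot falls back
 * inside the current block the induced suffix is re-inserted into the
 * cache immediately so it is handled within the same pass.
 */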
void libsais16_partial_sorting_scan_right_to_left_32s_1k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais16_prefetchw(&cache[i - 2 * prefetch_distance]); sa_sint_t s0 = cache[i - prefetch_distance - 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0]; libsais16_prefetchw(s0 >= 0 ? Is0 : NULL); sa_sint_t s1 = cache[i - prefetch_distance - 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1]; libsais16_prefetchw(s1 >= 0 ? Is1 : NULL); sa_sint_t v0 = cache[i - 0].symbol; if (v0 >= 0) { cache[i - 0].symbol = --induction_bucket[v0]; if (cache[i - 0].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 0].symbol, np = cache[i - 0].index; if (np > 0) { cache[i - 0].index = 0; cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] > T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; } } } sa_sint_t v1 = cache[i - 1].symbol; if (v1 >= 0) { cache[i - 1].symbol = --induction_bucket[v1]; if (cache[i - 1].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 1].symbol, np = cache[i - 1].index; if (np > 0) { cache[i - 1].index = 0; cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] > T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; }} } } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t v = cache[i].symbol; if (v >= 0) { cache[i].symbol = --induction_bucket[v]; if (cache[i].symbol >= omp_block_start) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; if (np > 0) { cache[i].index = 0; cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] > T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; } } } } } static sa_sint_t libsais16_partial_sorting_scan_right_to_left_32s_6k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(cache); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { d = libsais16_partial_sorting_scan_right_to_left_32s_6k(T, SA, buckets, d, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { libsais16_partial_sorting_scan_right_to_left_32s_6k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { d = libsais16_partial_sorting_scan_right_to_left_32s_6k_block_sort(T, buckets, d, cache - block_start, block_start, block_size); } #pragma omp barrier { libsais16_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size); } } #endif } return d; } static sa_sint_t libsais16_partial_sorting_scan_right_to_left_32s_4k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(cache); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { d = libsais16_partial_sorting_scan_right_to_left_32s_4k(T, SA, k, buckets, d, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { libsais16_partial_sorting_scan_right_to_left_32s_4k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { d = libsais16_partial_sorting_scan_right_to_left_32s_4k_block_sort(T, k, buckets, d, cache - block_start, block_start, block_size); } #pragma omp barrier { libsais16_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size); } } #endif } return d; } static void libsais16_partial_sorting_scan_right_to_left_32s_1k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(cache); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais16_partial_sorting_scan_right_to_left_32s_1k(T, SA, buckets, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { libsais16_partial_sorting_scan_right_to_left_32s_1k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { libsais16_partial_sorting_scan_right_to_left_32s_1k_block_sort(T, buckets, cache - block_start, block_start, block_size); } #pragma omp barrier { libsais16_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size); } } #endif } } #endif static sa_sint_t libsais16_partial_sorting_scan_right_to_left_32s_6k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count, sa_sint_t d, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { fast_sint_t scan_start = (fast_sint_t)left_suffixes_count + 1; fast_sint_t scan_end = (fast_sint_t)n - (fast_sint_t)first_lms_suffix; if (threads == 1 || (scan_end - scan_start) < 65536) { d = libsais16_partial_sorting_scan_right_to_left_32s_6k(T, SA, buckets, d, scan_start, scan_end - scan_start); } #if defined(_OPENMP) else { fast_sint_t block_start, block_end; for (block_start = scan_end - 1; block_start >= scan_start; block_start = block_end) { block_end = block_start - (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end < scan_start) { block_end = scan_start - 1; } d = libsais16_partial_sorting_scan_right_to_left_32s_6k_block_omp(T, SA, buckets, d, thread_state[0].state.cache, block_end + 1, block_start - block_end, threads); } } #else UNUSED(thread_state); #endif return d; } static sa_sint_t libsais16_partial_sorting_scan_right_to_left_32s_4k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { if (threads == 1 || n < 65536) { d = libsais16_partial_sorting_scan_right_to_left_32s_4k(T, SA, k, buckets, d, 0, n); } #if defined(_OPENMP) else { fast_sint_t block_start, block_end; for (block_start = (fast_sint_t)n - 1; block_start >= 0; block_start = block_end) { block_end = block_start - (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end < 0) { block_end = -1; } d = libsais16_partial_sorting_scan_right_to_left_32s_4k_block_omp(T, SA, k, buckets, d, thread_state[0].state.cache, block_end + 1, block_start - block_end, threads); } } #else UNUSED(thread_state); #endif return d; } static void libsais16_partial_sorting_scan_right_to_left_32s_1k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { if (threads == 1 || n < 65536) { libsais16_partial_sorting_scan_right_to_left_32s_1k(T, SA, buckets, 0, n); } #if defined(_OPENMP) else { fast_sint_t block_start, block_end; for (block_start = (fast_sint_t)n - 1; block_start >= 0; block_start = block_end) { block_end = block_start - (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end < 0) { block_end = -1; } libsais16_partial_sorting_scan_right_to_left_32s_1k_block_omp(T, SA, buckets, thread_state[0].state.cache, block_end + 1, block_start - block_end, threads); } } #else UNUSED(thread_state); #endif } static fast_sint_t 
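/*
 * Branch-free compaction of the LMS suffixes left in SA after the 4k
 * right-to-left scan: every candidate is unconditionally written to SA[l]
 * (its marked encoding undone arithmetically), but l only advances for
 * entries carrying the sign-bit mark (s < 0), so values produced from
 * unmarked entries are overwritten by the next write. Returns the index
 * one past the last gathered suffix.
 */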
libsais16_partial_sorting_gather_lms_suffixes_32s_4k(sa_sint_t * RESTRICT SA, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j, l; for (i = omp_block_start, j = omp_block_start + omp_block_size - 3, l = omp_block_start; i < j; i += 4) { libsais16_prefetch(&SA[i + prefetch_distance]); sa_sint_t s0 = SA[i + 0]; SA[l] = (s0 - SUFFIX_GROUP_MARKER) & (~SUFFIX_GROUP_MARKER); l += (s0 < 0); sa_sint_t s1 = SA[i + 1]; SA[l] = (s1 - SUFFIX_GROUP_MARKER) & (~SUFFIX_GROUP_MARKER); l += (s1 < 0); sa_sint_t s2 = SA[i + 2]; SA[l] = (s2 - SUFFIX_GROUP_MARKER) & (~SUFFIX_GROUP_MARKER); l += (s2 < 0); sa_sint_t s3 = SA[i + 3]; SA[l] = (s3 - SUFFIX_GROUP_MARKER) & (~SUFFIX_GROUP_MARKER); l += (s3 < 0); } for (j += 3; i < j; i += 1) { sa_sint_t s = SA[i]; SA[l] = (s - SUFFIX_GROUP_MARKER) & (~SUFFIX_GROUP_MARKER); l += (s < 0); } return l; } static fast_sint_t libsais16_partial_sorting_gather_lms_suffixes_32s_1k(sa_sint_t * RESTRICT SA, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j, l; for (i = omp_block_start, j = omp_block_start + omp_block_size - 3, l = omp_block_start; i < j; i += 4) { libsais16_prefetch(&SA[i + prefetch_distance]); sa_sint_t s0 = SA[i + 0]; SA[l] = s0 & SAINT_MAX; l += (s0 < 0); sa_sint_t s1 = SA[i + 1]; SA[l] = s1 & SAINT_MAX; l += (s1 < 0); sa_sint_t s2 = SA[i + 2]; SA[l] = s2 & SAINT_MAX; l += (s2 < 0); sa_sint_t s3 = SA[i + 3]; SA[l] = s3 & SAINT_MAX; l += (s3 < 0); } for (j += 3; i < j; i += 1) { sa_sint_t s = SA[i]; SA[l] = s & SAINT_MAX; l += (s < 0); } return l; } static void libsais16_partial_sorting_gather_lms_suffixes_32s_4k_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : n - omp_block_start; if (omp_num_threads == 1) { libsais16_partial_sorting_gather_lms_suffixes_32s_4k(SA, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.position = omp_block_start; thread_state[omp_thread_num].state.count = libsais16_partial_sorting_gather_lms_suffixes_32s_4k(SA, omp_block_start, omp_block_size) - omp_block_start; } #pragma omp barrier #pragma omp master { fast_sint_t t, position = 0; for (t = 0; t < omp_num_threads; ++t) { if (t > 0 && thread_state[t].state.count > 0) { memmove(&SA[position], &SA[thread_state[t].state.position], (size_t)thread_state[t].state.count * sizeof(sa_sint_t)); } position += thread_state[t].state.count; } } } #endif } } static void libsais16_partial_sorting_gather_lms_suffixes_32s_1k_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start; if (omp_num_threads == 1) { libsais16_partial_sorting_gather_lms_suffixes_32s_1k(SA, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.position = omp_block_start; thread_state[omp_thread_num].state.count = libsais16_partial_sorting_gather_lms_suffixes_32s_1k(SA, omp_block_start, omp_block_size) - omp_block_start; } #pragma omp barrier #pragma omp master { fast_sint_t t, position = 0; for (t = 0; t < omp_num_threads; ++t) { if (t > 0 && thread_state[t].state.count > 0) { memmove(&SA[position], &SA[thread_state[t].state.position], (size_t)thread_state[t].state.count * sizeof(sa_sint_t)); } position += thread_state[t].state.count; } } } #endif } } static void libsais16_induce_partial_order_16u_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { memset(&buckets[2 * ALPHABET_SIZE], 0, 2 * ALPHABET_SIZE * sizeof(sa_sint_t)); sa_sint_t d = libsais16_partial_sorting_scan_left_to_right_16u_omp(T, SA, n, buckets, left_suffixes_count, 0, threads, thread_state); libsais16_partial_sorting_shift_markers_16u_omp(SA, n, buckets, threads); libsais16_partial_sorting_scan_right_to_left_16u_omp(T, SA, n, buckets, first_lms_suffix, left_suffixes_count, d, threads, thread_state); } static void libsais16_induce_partial_order_32s_6k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t d = libsais16_partial_sorting_scan_left_to_right_32s_6k_omp(T, SA, n, buckets, left_suffixes_count, 0, threads, thread_state); libsais16_partial_sorting_shift_markers_32s_6k_omp(SA, k, buckets, threads); libsais16_partial_sorting_shift_buckets_32s_6k(k, buckets); libsais16_partial_sorting_scan_right_to_left_32s_6k_omp(T, SA, n, buckets, 
first_lms_suffix, left_suffixes_count, d, threads, thread_state); } static void libsais16_induce_partial_order_32s_4k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { memset(buckets, 0, 2 * (size_t)k * sizeof(sa_sint_t)); sa_sint_t d = libsais16_partial_sorting_scan_left_to_right_32s_4k_omp(T, SA, n, k, buckets, 0, threads, thread_state); libsais16_partial_sorting_shift_markers_32s_4k(SA, n); libsais16_partial_sorting_scan_right_to_left_32s_4k_omp(T, SA, n, k, buckets, d, threads, thread_state); libsais16_partial_sorting_gather_lms_suffixes_32s_4k_omp(SA, n, threads, thread_state); } static void libsais16_induce_partial_order_32s_2k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { libsais16_partial_sorting_scan_left_to_right_32s_1k_omp(T, SA, n, &buckets[1 * k], threads, thread_state); libsais16_partial_sorting_scan_right_to_left_32s_1k_omp(T, SA, n, &buckets[0 * k], threads, thread_state); libsais16_partial_sorting_gather_lms_suffixes_32s_1k_omp(SA, n, threads, thread_state); } static void libsais16_induce_partial_order_32s_1k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { libsais16_count_suffixes_32s(T, n, k, buckets); libsais16_initialize_buckets_start_32s_1k(k, buckets); libsais16_partial_sorting_scan_left_to_right_32s_1k_omp(T, SA, n, buckets, threads, thread_state); libsais16_count_suffixes_32s(T, n, k, buckets); libsais16_initialize_buckets_end_32s_1k(k, buckets); libsais16_partial_sorting_scan_right_to_left_32s_1k_omp(T, SA, n, buckets, threads, thread_state); libsais16_partial_sorting_gather_lms_suffixes_32s_1k_omp(SA, n, threads, thread_state); } static sa_sint_t libsais16_renumber_lms_suffixes_16u(sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t name, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT SAm = &SA[m]; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4) { libsais16_prefetch(&SA[i + 2 * prefetch_distance]); libsais16_prefetchw(&SAm[(SA[i + prefetch_distance + 0] & SAINT_MAX) >> 1]); libsais16_prefetchw(&SAm[(SA[i + prefetch_distance + 1] & SAINT_MAX) >> 1]); libsais16_prefetchw(&SAm[(SA[i + prefetch_distance + 2] & SAINT_MAX) >> 1]); libsais16_prefetchw(&SAm[(SA[i + prefetch_distance + 3] & SAINT_MAX) >> 1]); sa_sint_t p0 = SA[i + 0]; SAm[(p0 & SAINT_MAX) >> 1] = name | SAINT_MIN; name += p0 < 0; sa_sint_t p1 = SA[i + 1]; SAm[(p1 & SAINT_MAX) >> 1] = name | SAINT_MIN; name += p1 < 0; sa_sint_t p2 = SA[i + 2]; SAm[(p2 & SAINT_MAX) >> 1] = name | SAINT_MIN; name += p2 < 0; sa_sint_t p3 = SA[i + 3]; SAm[(p3 & SAINT_MAX) >> 1] = name | SAINT_MIN; name += p3 < 0; } for (j += prefetch_distance + 3; i < j; i += 1) { sa_sint_t p = SA[i]; SAm[(p & SAINT_MAX) >> 1] = name | SAINT_MIN; name += p < 0; } return name; } static fast_sint_t libsais16_gather_marked_suffixes_16u(sa_sint_t * RESTRICT SA, sa_sint_t m, fast_sint_t l, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; l -= 1; fast_sint_t i, j; for (i = (fast_sint_t)m + omp_block_start + omp_block_size - 1, j = (fast_sint_t)m + omp_block_start 
+ 3; i >= j; i -= 4) { libsais16_prefetch(&SA[i - prefetch_distance]); sa_sint_t s0 = SA[i - 0]; SA[l] = s0 & SAINT_MAX; l -= s0 < 0; sa_sint_t s1 = SA[i - 1]; SA[l] = s1 & SAINT_MAX; l -= s1 < 0; sa_sint_t s2 = SA[i - 2]; SA[l] = s2 & SAINT_MAX; l -= s2 < 0; sa_sint_t s3 = SA[i - 3]; SA[l] = s3 & SAINT_MAX; l -= s3 < 0; } for (j -= 3; i >= j; i -= 1) { sa_sint_t s = SA[i]; SA[l] = s & SAINT_MAX; l -= s < 0; } l += 1; return l; } static sa_sint_t libsais16_renumber_lms_suffixes_16u_omp(sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t name = 0; #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && m >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (m / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : m - omp_block_start; if (omp_num_threads == 1) { name = libsais16_renumber_lms_suffixes_16u(SA, m, 0, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.count = libsais16_count_negative_marked_suffixes(SA, omp_block_start, omp_block_size); } #pragma omp barrier { fast_sint_t t, count = 0; for (t = 0; t < omp_thread_num; ++t) { count += thread_state[t].state.count; } if (omp_thread_num == omp_num_threads - 1) { name = (sa_sint_t)(count + thread_state[omp_thread_num].state.count); } libsais16_renumber_lms_suffixes_16u(SA, m, (sa_sint_t)count, omp_block_start, omp_block_size); } } #endif } return name; } static void libsais16_gather_marked_lms_suffixes_16u_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t fs, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 131072) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (((fast_sint_t)n >> 1) / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : ((fast_sint_t)n >> 1) - omp_block_start; if (omp_num_threads == 1) { libsais16_gather_marked_suffixes_16u(SA, m, (fast_sint_t)n + (fast_sint_t)fs, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { if (omp_thread_num < omp_num_threads - 1) { thread_state[omp_thread_num].state.position = libsais16_gather_marked_suffixes_16u(SA, m, (fast_sint_t)m + omp_block_start + omp_block_size, omp_block_start, omp_block_size); thread_state[omp_thread_num].state.count = (fast_sint_t)m + omp_block_start + omp_block_size - thread_state[omp_thread_num].state.position; } else { thread_state[omp_thread_num].state.position = libsais16_gather_marked_suffixes_16u(SA, m, (fast_sint_t)n + (fast_sint_t)fs, omp_block_start, omp_block_size); thread_state[omp_thread_num].state.count = (fast_sint_t)n + (fast_sint_t)fs - thread_state[omp_thread_num].state.position; } } #pragma omp barrier #pragma omp master { fast_sint_t t, position = (fast_sint_t)n + (fast_sint_t)fs; for (t = omp_num_threads - 1; t >= 0; --t) { position -= thread_state[t].state.count; if (t != omp_num_threads - 1 && thread_state[t].state.count > 0) { memmove(&SA[position], &SA[thread_state[t].state.position], (size_t)thread_state[t].state.count * sizeof(sa_sint_t)); } } } } #endif } } static sa_sint_t libsais16_renumber_and_gather_lms_suffixes_16u_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t fs, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { memset(&SA[m], 0, ((size_t)n >> 1) * sizeof(sa_sint_t)); sa_sint_t name = libsais16_renumber_lms_suffixes_16u_omp(SA, m, threads, thread_state); if (name < m) { libsais16_gather_marked_lms_suffixes_16u_omp(SA, n, m, fs, threads, thread_state); } else { fast_sint_t i; for (i = 0; i < m; i += 1) { SA[i] &= SAINT_MAX; } } return name; } static sa_sint_t libsais16_renumber_distinct_lms_suffixes_32s_4k(sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t name, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT SAm = &SA[m]; fast_sint_t i, j; sa_sint_t p0, p1, p2, p3 = 0; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4) { libsais16_prefetchw(&SA[i + 2 * prefetch_distance]); libsais16_prefetchw(&SAm[(SA[i + prefetch_distance + 0] & SAINT_MAX) >> 1]); libsais16_prefetchw(&SAm[(SA[i + prefetch_distance + 1] & SAINT_MAX) >> 1]); libsais16_prefetchw(&SAm[(SA[i + prefetch_distance + 2] & SAINT_MAX) >> 1]); libsais16_prefetchw(&SAm[(SA[i + prefetch_distance + 3] & SAINT_MAX) >> 1]); p0 = SA[i + 0]; SAm[(SA[i + 0] = p0 & SAINT_MAX) >> 1] = name | (p0 & p3 & SAINT_MIN); name += p0 < 0; p1 = SA[i + 1]; SAm[(SA[i + 1] = p1 & SAINT_MAX) >> 1] = name | (p1 & p0 & SAINT_MIN); name += p1 < 0; p2 = SA[i + 2]; SAm[(SA[i + 2] = p2 & SAINT_MAX) >> 1] = name | (p2 & p1 & SAINT_MIN); name += p2 < 0; p3 = SA[i + 3]; SAm[(SA[i + 3] = p3 & SAINT_MAX) >> 1] = name | (p3 & p2 & SAINT_MIN); name += p3 < 0; } for (j += prefetch_distance + 3; i < j; i += 1) { p2 = p3; p3 = SA[i]; SAm[(SA[i] = p3 & SAINT_MAX) >> 1] = name | (p3 & p2 & SAINT_MIN); name += p3 < 0; } return name; } static void libsais16_mark_distinct_lms_suffixes_32s(sa_sint_t * RESTRICT SA, sa_sint_t m, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; sa_sint_t p0, p1, p2, p3 = 0; for (i = (fast_sint_t)m + omp_block_start, j = (fast_sint_t)m + omp_block_start + omp_block_size - 3; i < j; i += 4) { libsais16_prefetchw(&SA[i + 
prefetch_distance]); p0 = SA[i + 0]; SA[i + 0] = p0 & (p3 | SAINT_MAX); p0 = (p0 == 0) ? p3 : p0; p1 = SA[i + 1]; SA[i + 1] = p1 & (p0 | SAINT_MAX); p1 = (p1 == 0) ? p0 : p1; p2 = SA[i + 2]; SA[i + 2] = p2 & (p1 | SAINT_MAX); p2 = (p2 == 0) ? p1 : p2; p3 = SA[i + 3]; SA[i + 3] = p3 & (p2 | SAINT_MAX); p3 = (p3 == 0) ? p2 : p3; } for (j += 3; i < j; i += 1) { p2 = p3; p3 = SA[i]; SA[i] = p3 & (p2 | SAINT_MAX); p3 = (p3 == 0) ? p2 : p3; } } static void libsais16_clamp_lms_suffixes_length_32s(sa_sint_t * RESTRICT SA, sa_sint_t m, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT SAm = &SA[m]; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - 3; i < j; i += 4) { libsais16_prefetchw(&SAm[i + prefetch_distance]); SAm[i + 0] = (SAm[i + 0] < 0 ? SAm[i + 0] : 0) & SAINT_MAX; SAm[i + 1] = (SAm[i + 1] < 0 ? SAm[i + 1] : 0) & SAINT_MAX; SAm[i + 2] = (SAm[i + 2] < 0 ? SAm[i + 2] : 0) & SAINT_MAX; SAm[i + 3] = (SAm[i + 3] < 0 ? SAm[i + 3] : 0) & SAINT_MAX; } for (j += 3; i < j; i += 1) { SAm[i] = (SAm[i] < 0 ? SAm[i] : 0) & SAINT_MAX; } } static sa_sint_t libsais16_renumber_distinct_lms_suffixes_32s_4k_omp(sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t name = 0; #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && m >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (m / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : m - omp_block_start; if (omp_num_threads == 1) { name = libsais16_renumber_distinct_lms_suffixes_32s_4k(SA, m, 1, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.count = libsais16_count_negative_marked_suffixes(SA, omp_block_start, omp_block_size); } #pragma omp barrier { fast_sint_t t, count = 1; for (t = 0; t < omp_thread_num; ++t) { count += thread_state[t].state.count; } if (omp_thread_num == omp_num_threads - 1) { name = (sa_sint_t)(count + thread_state[omp_thread_num].state.count); } libsais16_renumber_distinct_lms_suffixes_32s_4k(SA, m, (sa_sint_t)count, omp_block_start, omp_block_size); } } #endif } return name - 1; } static void libsais16_mark_distinct_lms_suffixes_32s_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 131072) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); fast_sint_t omp_block_stride = (((fast_sint_t)n >> 1) / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : ((fast_sint_t)n >> 1) - omp_block_start; #else UNUSED(threads); fast_sint_t omp_block_start = 0; fast_sint_t omp_block_size = (fast_sint_t)n >> 1; #endif libsais16_mark_distinct_lms_suffixes_32s(SA, m, omp_block_start, omp_block_size); } } static void libsais16_clamp_lms_suffixes_length_32s_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 131072) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); fast_sint_t omp_block_stride = (((fast_sint_t)n >> 1) / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : ((fast_sint_t)n >> 1) - omp_block_start; #else UNUSED(threads); fast_sint_t omp_block_start = 0; fast_sint_t omp_block_size = (fast_sint_t)n >> 1; #endif libsais16_clamp_lms_suffixes_length_32s(SA, m, omp_block_start, omp_block_size); } } static sa_sint_t libsais16_renumber_and_mark_distinct_lms_suffixes_32s_4k_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { memset(&SA[m], 0, ((size_t)n >> 1) * sizeof(sa_sint_t)); sa_sint_t name = libsais16_renumber_distinct_lms_suffixes_32s_4k_omp(SA, m, threads, thread_state); if (name < m) { libsais16_mark_distinct_lms_suffixes_32s_omp(SA, n, m, threads); } return name; } static sa_sint_t libsais16_renumber_and_mark_distinct_lms_suffixes_32s_1k_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT SAm = &SA[m]; { libsais16_gather_lms_suffixes_32s(T, SA, n); memset(&SA[m], 0, ((size_t)n - (size_t)m - (size_t)m) * sizeof(sa_sint_t)); fast_sint_t i, j; for (i = (fast_sint_t)n - (fast_sint_t)m, j = (fast_sint_t)n - 1 - prefetch_distance - 3; i < j; i += 4) { libsais16_prefetch(&SA[i + 2 * prefetch_distance]); libsais16_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 0]) >> 1]); libsais16_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 1]) >> 1]); libsais16_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 2]) >> 1]); libsais16_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 3]) >> 1]); SAm[((sa_uint_t)SA[i + 0]) >> 1] = SA[i + 1] - SA[i + 0] + 1 + SAINT_MIN; SAm[((sa_uint_t)SA[i + 1]) >> 1] = SA[i + 2] - SA[i + 1] + 1 + SAINT_MIN; SAm[((sa_uint_t)SA[i + 2]) >> 1] = SA[i + 3] - SA[i + 2] + 1 + SAINT_MIN; SAm[((sa_uint_t)SA[i + 3]) >> 1] = SA[i + 4] - SA[i + 3] + 1 + SAINT_MIN; } for (j += prefetch_distance + 3; i < j; i += 1) { SAm[((sa_uint_t)SA[i]) >> 1] = SA[i + 1] - SA[i] + 1 + SAINT_MIN; } SAm[((sa_uint_t)SA[n - 1]) >> 1] = 1 + SAINT_MIN; } { libsais16_clamp_lms_suffixes_length_32s_omp(SA, n, m, threads); } sa_sint_t name = 1; { fast_sint_t i, j, p = SA[0], plen = SAm[p >> 1]; sa_sint_t pdiff = SAINT_MIN; for (i = 1, j = m - prefetch_distance - 1; i < j; i += 2) { libsais16_prefetch(&SA[i + 2 * prefetch_distance]); libsais16_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 0]) >> 1]); libsais16_prefetch(&T[((sa_uint_t)SA[i + prefetch_distance + 0])]); libsais16_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 1]) >> 1]); libsais16_prefetch(&T[((sa_uint_t)SA[i + prefetch_distance + 1])]); fast_sint_t q = SA[i + 0], qlen = SAm[q >> 1]; sa_sint_t qdiff = SAINT_MIN; if (plen == qlen) { fast_sint_t l = 0; do { if 
(T[p + l] != T[q + l]) { break; } } while (++l < qlen); qdiff = (sa_sint_t)(l - qlen) & SAINT_MIN; } SAm[p >> 1] = name | (pdiff & qdiff); name += (qdiff < 0); p = SA[i + 1]; plen = SAm[p >> 1]; pdiff = SAINT_MIN; if (qlen == plen) { fast_sint_t l = 0; do { if (T[q + l] != T[p + l]) { break; } } while (++l < plen); pdiff = (sa_sint_t)(l - plen) & SAINT_MIN; } SAm[q >> 1] = name | (qdiff & pdiff); name += (pdiff < 0); } for (j += prefetch_distance + 1; i < j; i += 1) { fast_sint_t q = SA[i], qlen = SAm[q >> 1]; sa_sint_t qdiff = SAINT_MIN; if (plen == qlen) { fast_sint_t l = 0; do { if (T[p + l] != T[q + l]) { break; } } while (++l < plen); qdiff = (sa_sint_t)(l - plen) & SAINT_MIN; } SAm[p >> 1] = name | (pdiff & qdiff); name += (qdiff < 0); p = q; plen = qlen; pdiff = qdiff; } SAm[p >> 1] = name | pdiff; name++; } if (name <= m) { libsais16_mark_distinct_lms_suffixes_32s_omp(SA, n, m, threads); } return name - 1; } static void libsais16_reconstruct_lms_suffixes(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; const sa_sint_t * RESTRICT SAnm = &SA[n - m]; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4) { libsais16_prefetchw(&SA[i + 2 * prefetch_distance]); libsais16_prefetch(&SAnm[SA[i + prefetch_distance + 0]]); libsais16_prefetch(&SAnm[SA[i + prefetch_distance + 1]]); libsais16_prefetch(&SAnm[SA[i + prefetch_distance + 2]]); libsais16_prefetch(&SAnm[SA[i + prefetch_distance + 3]]); SA[i + 0] = SAnm[SA[i + 0]]; SA[i + 1] = SAnm[SA[i + 1]]; SA[i + 2] = SAnm[SA[i + 2]]; SA[i + 3] = SAnm[SA[i + 3]]; } for (j += prefetch_distance + 3; i < j; i += 1) { SA[i] = SAnm[SA[i]]; } } static void libsais16_reconstruct_lms_suffixes_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && m >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); fast_sint_t omp_block_stride = (m / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : m - omp_block_start; #else UNUSED(threads); fast_sint_t omp_block_start = 0; fast_sint_t omp_block_size = m; #endif libsais16_reconstruct_lms_suffixes(SA, n, m, omp_block_start, omp_block_size); } } static void libsais16_place_lms_suffixes_interval_16u(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, const sa_sint_t * RESTRICT buckets) { const sa_sint_t * RESTRICT bucket_end = &buckets[7 * ALPHABET_SIZE]; fast_sint_t c, j = n; for (c = ALPHABET_SIZE - 2; c >= 0; --c) { fast_sint_t l = (fast_sint_t)buckets[BUCKETS_INDEX2(c, 1) + BUCKETS_INDEX2(1, 0)] - (fast_sint_t)buckets[BUCKETS_INDEX2(c, 1)]; if (l > 0) { fast_sint_t i = bucket_end[c]; if (j - i > 0) { memset(&SA[i], 0, (size_t)(j - i) * sizeof(sa_sint_t)); } memmove(&SA[j = (i - l)], &SA[m -= (sa_sint_t)l], (size_t)l * sizeof(sa_sint_t)); } } memset(&SA[0], 0, (size_t)j * sizeof(sa_sint_t)); } static void libsais16_place_lms_suffixes_interval_32s_4k(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, const sa_sint_t * RESTRICT buckets) { const sa_sint_t * RESTRICT bucket_end = &buckets[3 * k]; fast_sint_t c, j = n; for (c = (fast_sint_t)k - 2; c >= 0; --c) { fast_sint_t l = (fast_sint_t)buckets[BUCKETS_INDEX2(c, 1) + BUCKETS_INDEX2(1, 0)] - (fast_sint_t)buckets[BUCKETS_INDEX2(c, 1)]; if (l > 0) { fast_sint_t i = bucket_end[c]; if (j - i > 0) { memset(&SA[i], 0, (size_t)(j - i) * sizeof(sa_sint_t)); } memmove(&SA[j = (i - l)], &SA[m -= (sa_sint_t)l], (size_t)l * sizeof(sa_sint_t)); } } memset(&SA[0], 0, (size_t)j * sizeof(sa_sint_t)); } static void libsais16_place_lms_suffixes_interval_32s_2k(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, const sa_sint_t * RESTRICT buckets) { fast_sint_t j = n; if (k > 1) { fast_sint_t c; for (c = BUCKETS_INDEX2((fast_sint_t)k - 2, 0); c >= BUCKETS_INDEX2(0, 0); c -= BUCKETS_INDEX2(1, 0)) { fast_sint_t l = (fast_sint_t)buckets[c + BUCKETS_INDEX2(1, 1)] - (fast_sint_t)buckets[c + BUCKETS_INDEX2(0, 1)]; if (l > 0) { fast_sint_t i = buckets[c]; if (j - i > 0) { memset(&SA[i], 0, (size_t)(j - i) * sizeof(sa_sint_t)); } memmove(&SA[j = (i - l)], &SA[m -= (sa_sint_t)l], (size_t)l * sizeof(sa_sint_t)); } } } memset(&SA[0], 0, (size_t)j * sizeof(sa_sint_t)); } static void libsais16_place_lms_suffixes_interval_32s_1k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t m, sa_sint_t * RESTRICT buckets) { const fast_sint_t prefetch_distance = 32; sa_sint_t c = k - 1; fast_sint_t i, l = buckets[c]; for (i = (fast_sint_t)m - 1; i >= prefetch_distance + 3; i -= 4) { libsais16_prefetch(&SA[i - 2 * prefetch_distance]); libsais16_prefetch(&T[SA[i - prefetch_distance - 0]]); libsais16_prefetch(&T[SA[i - prefetch_distance - 1]]); libsais16_prefetch(&T[SA[i - prefetch_distance - 2]]); libsais16_prefetch(&T[SA[i - prefetch_distance - 3]]); sa_sint_t p0 = SA[i - 0]; if (T[p0] != c) { c = T[p0]; memset(&SA[buckets[c]], 0, (size_t)(l - buckets[c]) * sizeof(sa_sint_t)); l = buckets[c]; } SA[--l] = p0; sa_sint_t p1 = SA[i - 1]; if (T[p1] != c) { c = T[p1]; memset(&SA[buckets[c]], 0, (size_t)(l - buckets[c]) * sizeof(sa_sint_t)); l = buckets[c]; } SA[--l] = p1; sa_sint_t p2 = SA[i - 2]; if (T[p2] != c) { c = T[p2]; memset(&SA[buckets[c]], 0, (size_t)(l - buckets[c]) * sizeof(sa_sint_t)); l = buckets[c]; } SA[--l] = p2; sa_sint_t p3 = SA[i - 3]; if (T[p3] != c) { c = T[p3]; memset(&SA[buckets[c]], 0, (size_t)(l - buckets[c]) * sizeof(sa_sint_t)); l = buckets[c]; } SA[--l] = p3; } for (; i >= 0; i -= 1) { sa_sint_t p = SA[i]; if (T[p] != c) { c = T[p]; 
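/*
 * Descriptive note: the place_lms_suffixes_*_interval_* routines derive each bucket's LMS
 * count from adjacent bucket boundaries, while the *_histogram_* routines below read the
 * count directly from the BUCKETS_INDEX2 / BUCKETS_INDEX4 histograms. Both families move
 * the sorted LMS suffixes from the front of SA to the tail of their buckets and zero-fill
 * everything in between, preparing SA for the induction passes.
 */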
memset(&SA[buckets[c]], 0, (size_t)(l - buckets[c]) * sizeof(sa_sint_t)); l = buckets[c]; } SA[--l] = p; } memset(&SA[0], 0, (size_t)l * sizeof(sa_sint_t)); } static void libsais16_place_lms_suffixes_histogram_32s_6k(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, const sa_sint_t * RESTRICT buckets) { const sa_sint_t * RESTRICT bucket_end = &buckets[5 * k]; fast_sint_t c, j = n; for (c = (fast_sint_t)k - 2; c >= 0; --c) { fast_sint_t l = (fast_sint_t)buckets[BUCKETS_INDEX4(c, 1)]; if (l > 0) { fast_sint_t i = bucket_end[c]; if (j - i > 0) { memset(&SA[i], 0, (size_t)(j - i) * sizeof(sa_sint_t)); } memmove(&SA[j = (i - l)], &SA[m -= (sa_sint_t)l], (size_t)l * sizeof(sa_sint_t)); } } memset(&SA[0], 0, (size_t)j * sizeof(sa_sint_t)); } static void libsais16_place_lms_suffixes_histogram_32s_4k(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, const sa_sint_t * RESTRICT buckets) { const sa_sint_t * RESTRICT bucket_end = &buckets[3 * k]; fast_sint_t c, j = n; for (c = (fast_sint_t)k - 2; c >= 0; --c) { fast_sint_t l = (fast_sint_t)buckets[BUCKETS_INDEX2(c, 1)]; if (l > 0) { fast_sint_t i = bucket_end[c]; if (j - i > 0) { memset(&SA[i], 0, (size_t)(j - i) * sizeof(sa_sint_t)); } memmove(&SA[j = (i - l)], &SA[m -= (sa_sint_t)l], (size_t)l * sizeof(sa_sint_t)); } } memset(&SA[0], 0, (size_t)j * sizeof(sa_sint_t)); } static void libsais16_place_lms_suffixes_histogram_32s_2k(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, const sa_sint_t * RESTRICT buckets) { fast_sint_t j = n; if (k > 1) { fast_sint_t c; for (c = BUCKETS_INDEX2((fast_sint_t)k - 2, 0); c >= BUCKETS_INDEX2(0, 0); c -= BUCKETS_INDEX2(1, 0)) { fast_sint_t l = (fast_sint_t)buckets[c + BUCKETS_INDEX2(0, 1)]; if (l > 0) { fast_sint_t i = buckets[c]; if (j - i > 0) { memset(&SA[i], 0, (size_t)(j - i) * sizeof(sa_sint_t)); } memmove(&SA[j = (i - l)], &SA[m -= (sa_sint_t)l], (size_t)l * sizeof(sa_sint_t)); } } } memset(&SA[0], 0, (size_t)j * sizeof(sa_sint_t)); } static void libsais16_final_bwt_scan_left_to_right_16u(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais16_prefetchw(&SA[i + 2 * prefetch_distance]); sa_sint_t s0 = SA[i + prefetch_distance + 0]; const uint16_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + prefetch_distance + 1]; const uint16_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? 
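/*
 * Descriptive note: final induction, left-to-right pass (L-type suffixes). Ignoring
 * prefetching, the textbook form of what the scan_left_to_right routines compute is
 * roughly:
 *
 *     for (i = 0; i < n; ++i) {
 *         sa_sint_t j = SA[i];
 *         if (j > 0 && T[j - 1] >= T[j]) { SA[induction_bucket[T[j - 1]]++] = j - 1; }
 *     }
 *
 * The actual code avoids re-reading T for the type test by caching it in the sign bit of
 * each stored entry. The _bwt_ variants additionally overwrite SA[i] with the BWT symbol,
 * and the _bwt_aux_ variants sample induction positions into I[] whenever the text
 * position is a multiple of (rm + 1).
 */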
Ts1 : NULL); sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; SA[i + 0] = T[p0] | SAINT_MIN; SA[induction_bucket[T[p0]]++] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); } sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; SA[i + 1] = T[p1] | SAINT_MIN; SA[induction_bucket[T[p1]]++] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); } } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; SA[i] = T[p] | SAINT_MIN; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); } } } static void libsais16_final_bwt_aux_scan_left_to_right_16u(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais16_prefetchw(&SA[i + 2 * prefetch_distance]); sa_sint_t s0 = SA[i + prefetch_distance + 0]; const uint16_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + prefetch_distance + 1]; const uint16_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; SA[i + 0] = T[p0] | SAINT_MIN; SA[induction_bucket[T[p0]]++] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); if ((p0 & rm) == 0) { I[p0 / (rm + 1)] = induction_bucket[T[p0]]; }} sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; SA[i + 1] = T[p1] | SAINT_MIN; SA[induction_bucket[T[p1]]++] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); if ((p1 & rm) == 0) { I[p1 / (rm + 1)] = induction_bucket[T[p1]]; }} } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; SA[i] = T[p] | SAINT_MIN; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); if ((p & rm) == 0) { I[p / (rm + 1)] = induction_bucket[T[p]]; } } } } static void libsais16_final_sorting_scan_left_to_right_16u(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais16_prefetchw(&SA[i + 2 * prefetch_distance]); sa_sint_t s0 = SA[i + prefetch_distance + 0]; const uint16_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + prefetch_distance + 1]; const uint16_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? 
Ts1 : NULL); sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 ^ SAINT_MIN; if (p0 > 0) { p0--; SA[induction_bucket[T[p0]]++] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); } sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 ^ SAINT_MIN; if (p1 > 0) { p1--; SA[induction_bucket[T[p1]]++] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); } } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = SA[i]; SA[i] = p ^ SAINT_MIN; if (p > 0) { p--; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); } } } static void libsais16_final_sorting_scan_left_to_right_32s(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - 2 * prefetch_distance - 1; i < j; i += 2) { libsais16_prefetchw(&SA[i + 3 * prefetch_distance]); sa_sint_t s0 = SA[i + 2 * prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + 2 * prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); sa_sint_t s2 = SA[i + 1 * prefetch_distance + 0]; if (s2 > 0) { libsais16_prefetchw(&induction_bucket[T[s2 - 1]]); libsais16_prefetch(&T[s2] - 2); } sa_sint_t s3 = SA[i + 1 * prefetch_distance + 1]; if (s3 > 0) { libsais16_prefetchw(&induction_bucket[T[s3 - 1]]); libsais16_prefetch(&T[s3] - 2); } sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 ^ SAINT_MIN; if (p0 > 0) { p0--; SA[induction_bucket[T[p0]]++] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); } sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 ^ SAINT_MIN; if (p1 > 0) { p1--; SA[induction_bucket[T[p1]]++] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); } } for (j += 2 * prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = SA[i]; SA[i] = p ^ SAINT_MIN; if (p > 0) { p--; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); } } } #if defined(_OPENMP) static fast_sint_t libsais16_final_bwt_scan_left_to_right_16u_block_prepare(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; memset(buckets, 0, ALPHABET_SIZE * sizeof(sa_sint_t)); fast_sint_t i, j, count = 0; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais16_prefetchw(&SA[i + 2 * prefetch_distance]); sa_sint_t s0 = SA[i + prefetch_distance + 0]; const uint16_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + prefetch_distance + 1]; const uint16_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? 
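/*
 * Descriptive note: OpenMP helpers for the left-to-right pass. Each thread first runs a
 * *_block_prepare routine over its slice, counting induced entries per symbol into a
 * private bucket histogram and staging them in a per-thread cache. The master thread then
 * converts the per-thread counts into starting offsets inside the shared induction
 * buckets, after which every thread copies its cached entries into place
 * (*_block_place). This keeps the writes to SA conflict-free without locking.
 */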
Ts1 : NULL); sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; SA[i + 0] = T[p0] | SAINT_MIN; buckets[cache[count].symbol = T[p0]]++; cache[count++].index = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); } sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; SA[i + 1] = T[p1] | SAINT_MIN; buckets[cache[count].symbol = T[p1]]++; cache[count++].index = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); } } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; SA[i] = T[p] | SAINT_MIN; buckets[cache[count].symbol = T[p]]++; cache[count++].index = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); } } return count; } static fast_sint_t libsais16_final_sorting_scan_left_to_right_16u_block_prepare(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; memset(buckets, 0, ALPHABET_SIZE * sizeof(sa_sint_t)); fast_sint_t i, j, count = 0; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais16_prefetchw(&SA[i + 2 * prefetch_distance]); sa_sint_t s0 = SA[i + prefetch_distance + 0]; const uint16_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + prefetch_distance + 1]; const uint16_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 ^ SAINT_MIN; if (p0 > 0) { p0--; buckets[cache[count].symbol = T[p0]]++; cache[count++].index = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); } sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 ^ SAINT_MIN; if (p1 > 0) { p1--; buckets[cache[count].symbol = T[p1]]++; cache[count++].index = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); } } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = SA[i]; SA[i] = p ^ SAINT_MIN; if (p > 0) { p--; buckets[cache[count].symbol = T[p]]++; cache[count++].index = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); } } return count; } static void libsais16_final_order_scan_left_to_right_16u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = 0, j = count - 3; i < j; i += 4) { libsais16_prefetch(&cache[i + prefetch_distance]); SA[buckets[cache[i + 0].symbol]++] = cache[i + 0].index; SA[buckets[cache[i + 1].symbol]++] = cache[i + 1].index; SA[buckets[cache[i + 2].symbol]++] = cache[i + 2].index; SA[buckets[cache[i + 3].symbol]++] = cache[i + 3].index; } for (j += 3; i < j; i += 1) { SA[buckets[cache[i].symbol]++] = cache[i].index; } } static void libsais16_final_bwt_aux_scan_left_to_right_16u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = 0, j = count - 3; i < j; i += 4) { libsais16_prefetch(&cache[i + prefetch_distance]); SA[buckets[cache[i + 0].symbol]++] = cache[i + 0].index; if ((cache[i + 0].index & rm) == 0) { I[(cache[i + 0].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i + 0].symbol]; } SA[buckets[cache[i + 
1].symbol]++] = cache[i + 1].index; if ((cache[i + 1].index & rm) == 0) { I[(cache[i + 1].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i + 1].symbol]; } SA[buckets[cache[i + 2].symbol]++] = cache[i + 2].index; if ((cache[i + 2].index & rm) == 0) { I[(cache[i + 2].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i + 2].symbol]; } SA[buckets[cache[i + 3].symbol]++] = cache[i + 3].index; if ((cache[i + 3].index & rm) == 0) { I[(cache[i + 3].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i + 3].symbol]; } } for (j += 3; i < j; i += 1) { SA[buckets[cache[i].symbol]++] = cache[i].index; if ((cache[i].index & rm) == 0) { I[(cache[i].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i].symbol]; } } } static void libsais16_final_sorting_scan_left_to_right_32s_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais16_prefetchw(&SA[i + 2 * prefetch_distance]); sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); libsais16_prefetchw(&cache[i + prefetch_distance]); sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; SA[i + 0] = p0 ^ SAINT_MIN; if (p0 > 0) { p0--; cache[i + 0].index = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); symbol0 = T[p0]; } cache[i + 0].symbol = symbol0; sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; SA[i + 1] = p1 ^ SAINT_MIN; if (p1 > 0) { p1--; cache[i + 1].index = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); symbol1 = T[p1]; } cache[i + 1].symbol = symbol1; } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t symbol = SAINT_MIN, p = SA[i]; SA[i] = p ^ SAINT_MIN; if (p > 0) { p--; cache[i].index = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); symbol = T[p]; } cache[i].symbol = symbol; } } static void libsais16_final_sorting_scan_left_to_right_32s_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j, omp_block_end = omp_block_start + omp_block_size; for (i = omp_block_start, j = omp_block_end - prefetch_distance - 1; i < j; i += 2) { libsais16_prefetchw(&cache[i + 2 * prefetch_distance]); sa_sint_t s0 = cache[i + prefetch_distance + 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0]; libsais16_prefetchw(s0 >= 0 ? Is0 : NULL); sa_sint_t s1 = cache[i + prefetch_distance + 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1]; libsais16_prefetchw(s1 >= 0 ? 
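/*
 * Descriptive note: *_block_sort resolves the cached entries against the shared induction
 * buckets. When an induced suffix lands inside the current block it is expanded
 * immediately in the cache (so later cache slots see it); entries that land outside the
 * block are written back afterwards by libsais16_compact_and_place_cached_suffixes.
 */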
Is1 : NULL); sa_sint_t v0 = cache[i + 0].symbol; if (v0 >= 0) { cache[i + 0].symbol = induction_bucket[v0]++; if (cache[i + 0].symbol < omp_block_end) { sa_sint_t ni = cache[i + 0].symbol, np = cache[i + 0].index; cache[i + 0].index = np ^ SAINT_MIN; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] < T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } } } sa_sint_t v1 = cache[i + 1].symbol; if (v1 >= 0) { cache[i + 1].symbol = induction_bucket[v1]++; if (cache[i + 1].symbol < omp_block_end) { sa_sint_t ni = cache[i + 1].symbol, np = cache[i + 1].index; cache[i + 1].index = np ^ SAINT_MIN; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] < T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } } } } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t v = cache[i].symbol; if (v >= 0) { cache[i].symbol = induction_bucket[v]++; if (cache[i].symbol < omp_block_end) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; cache[i].index = np ^ SAINT_MIN; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] < T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } } } } } static void libsais16_final_bwt_scan_left_to_right_16u_block_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
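/*
 * Descriptive note: the *_block_omp drivers split one block of SA among the available
 * threads in multiples of 16 entries. With a single thread they fall back to the plain
 * serial scan; otherwise they run the prepare / merge-offsets / place phases separated by
 * barriers, with the bucket-offset merge performed by the master thread only.
 */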
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais16_final_bwt_scan_left_to_right_16u(T, SA, induction_bucket, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.count = libsais16_final_bwt_scan_left_to_right_16u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { fast_sint_t t; for (t = 0; t < omp_num_threads; ++t) { sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets; fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A + B; temp_bucket[c] = A; } } } #pragma omp barrier { libsais16_final_order_scan_left_to_right_16u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count); } } #endif } } static void libsais16_final_bwt_aux_scan_left_to_right_16u_block_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais16_final_bwt_aux_scan_left_to_right_16u(T, SA, rm, I, induction_bucket, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.count = libsais16_final_bwt_scan_left_to_right_16u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { fast_sint_t t; for (t = 0; t < omp_num_threads; ++t) { sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets; fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A + B; temp_bucket[c] = A; } } } #pragma omp barrier { libsais16_final_bwt_aux_scan_left_to_right_16u_block_place(SA, rm, I, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count); } } #endif } } static void libsais16_final_sorting_scan_left_to_right_16u_block_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais16_final_sorting_scan_left_to_right_16u(T, SA, induction_bucket, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.count = libsais16_final_sorting_scan_left_to_right_16u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { fast_sint_t t; for (t = 0; t < omp_num_threads; ++t) { sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets; fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A + B; temp_bucket[c] = A; } } } #pragma omp barrier { libsais16_final_order_scan_left_to_right_16u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count); } } #endif } } static void libsais16_final_sorting_scan_left_to_right_32s_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(cache); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
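/*
 * Descriptive note: the top-level left-to-right drivers below first seed the scan with the
 * last suffix of T (its predecessor's type encoded in the sign bit) and then either run
 * the serial scan or, when several threads are available and the input is large enough,
 * carve SA into cache-sized blocks: positions that are still zero are skipped one by one,
 * short runs are handled inline, and longer runs go through the *_block_omp machinery
 * above.
 */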
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais16_final_sorting_scan_left_to_right_32s(T, SA, buckets, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { libsais16_final_sorting_scan_left_to_right_32s_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { libsais16_final_sorting_scan_left_to_right_32s_block_sort(T, buckets, cache - block_start, block_start, block_size); } #pragma omp barrier { libsais16_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size); } } #endif } } #endif static void libsais16_final_bwt_scan_left_to_right_16u_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, fast_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { SA[induction_bucket[T[(sa_sint_t)n - 1]]++] = ((sa_sint_t)n - 1) | ((sa_sint_t)(T[(sa_sint_t)n - 2] < T[(sa_sint_t)n - 1]) << (SAINT_BIT - 1)); if (threads == 1 || n < 65536) { libsais16_final_bwt_scan_left_to_right_16u(T, SA, induction_bucket, 0, n); } #if defined(_OPENMP) else { fast_sint_t block_start; for (block_start = 0; block_start < n; ) { if (SA[block_start] == 0) { block_start++; } else { fast_sint_t block_max_end = block_start + ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end > n) { block_max_end = n;} fast_sint_t block_end = block_start + 1; while (block_end < block_max_end && SA[block_end] != 0) { block_end++; } fast_sint_t block_size = block_end - block_start; if (block_size < 32) { for (; block_start < block_end; block_start += 1) { sa_sint_t p = SA[block_start]; SA[block_start] = p & SAINT_MAX; if (p > 0) { p--; SA[block_start] = T[p] | SAINT_MIN; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); } } } else { libsais16_final_bwt_scan_left_to_right_16u_block_omp(T, SA, induction_bucket, block_start, block_size, threads, thread_state); block_start = block_end; } } } } #else UNUSED(thread_state); #endif } static void libsais16_final_bwt_aux_scan_left_to_right_16u_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, fast_sint_t n, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { SA[induction_bucket[T[(sa_sint_t)n - 1]]++] = ((sa_sint_t)n - 1) | ((sa_sint_t)(T[(sa_sint_t)n - 2] < T[(sa_sint_t)n - 1]) << (SAINT_BIT - 1)); if ((((sa_sint_t)n - 1) & rm) == 0) { I[((sa_sint_t)n - 1) / (rm + 1)] = induction_bucket[T[(sa_sint_t)n - 1]]; } if (threads == 1 || n < 65536) { libsais16_final_bwt_aux_scan_left_to_right_16u(T, SA, rm, I, induction_bucket, 0, n); } #if defined(_OPENMP) else { fast_sint_t block_start; for (block_start = 0; block_start < n; ) { if (SA[block_start] == 0) { block_start++; } else { fast_sint_t block_max_end = block_start + ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end > n) { block_max_end = n;} fast_sint_t block_end = block_start + 1; while (block_end < block_max_end && SA[block_end] != 0) { block_end++; } fast_sint_t block_size = block_end - block_start; if (block_size < 32) { for (; block_start < block_end; block_start += 1) { sa_sint_t p = SA[block_start]; SA[block_start] = p & SAINT_MAX; if (p > 0) { p--; SA[block_start] = T[p] | SAINT_MIN; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT 
- 1)); if ((p & rm) == 0) { I[p / (rm + 1)] = induction_bucket[T[p]]; } } } } else { libsais16_final_bwt_aux_scan_left_to_right_16u_block_omp(T, SA, rm, I, induction_bucket, block_start, block_size, threads, thread_state); block_start = block_end; } } } } #else UNUSED(thread_state); #endif } static void libsais16_final_sorting_scan_left_to_right_16u_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, fast_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { SA[induction_bucket[T[(sa_sint_t)n - 1]]++] = ((sa_sint_t)n - 1) | ((sa_sint_t)(T[(sa_sint_t)n - 2] < T[(sa_sint_t)n - 1]) << (SAINT_BIT - 1)); if (threads == 1 || n < 65536) { libsais16_final_sorting_scan_left_to_right_16u(T, SA, induction_bucket, 0, n); } #if defined(_OPENMP) else { fast_sint_t block_start; for (block_start = 0; block_start < n; ) { if (SA[block_start] == 0) { block_start++; } else { fast_sint_t block_max_end = block_start + ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end > n) { block_max_end = n;} fast_sint_t block_end = block_start + 1; while (block_end < block_max_end && SA[block_end] != 0) { block_end++; } fast_sint_t block_size = block_end - block_start; if (block_size < 32) { for (; block_start < block_end; block_start += 1) { sa_sint_t p = SA[block_start]; SA[block_start] = p ^ SAINT_MIN; if (p > 0) { p--; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); } } } else { libsais16_final_sorting_scan_left_to_right_16u_block_omp(T, SA, induction_bucket, block_start, block_size, threads, thread_state); block_start = block_end; } } } } #else UNUSED(thread_state); #endif } static void libsais16_final_sorting_scan_left_to_right_32s_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { SA[induction_bucket[T[n - 1]]++] = (n - 1) | ((sa_sint_t)(T[n - 2] < T[n - 1]) << (SAINT_BIT - 1)); if (threads == 1 || n < 65536) { libsais16_final_sorting_scan_left_to_right_32s(T, SA, induction_bucket, 0, n); } #if defined(_OPENMP) else { fast_sint_t block_start, block_end; for (block_start = 0; block_start < n; block_start = block_end) { block_end = block_start + (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end > n) { block_end = n; } libsais16_final_sorting_scan_left_to_right_32s_block_omp(T, SA, induction_bucket, thread_state[0].state.cache, block_start, block_end - block_start, threads); } } #else UNUSED(thread_state); #endif } static sa_sint_t libsais16_final_bwt_scan_right_to_left_16u(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; sa_sint_t index = -1; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais16_prefetchw(&SA[i - 2 * prefetch_distance]); sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint16_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint16_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); sa_sint_t p0 = SA[i - 0]; index = (p0 == 0) ? 
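/*
 * Descriptive note: final induction, right-to-left pass (S-type suffixes), mirroring the
 * left-to-right scan but filling buckets from their ends (SA[--induction_bucket[...]]).
 * The _bwt_ variant additionally remembers the position at which the entry 0 is
 * encountered; that position is the primary index returned to the caller of the BWT
 * interface.
 */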
(sa_sint_t)(i - 0) : index; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; uint16_t c0 = T[p0 - (p0 > 0)], c1 = T[p0]; SA[i - 0] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p0 : t; } sa_sint_t p1 = SA[i - 1]; index = (p1 == 0) ? (sa_sint_t)(i - 1) : index; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; uint16_t c0 = T[p1 - (p1 > 0)], c1 = T[p1]; SA[i - 1] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p1 : t; } } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; index = (p == 0) ? (sa_sint_t)i : index; SA[i] = p & SAINT_MAX; if (p > 0) { p--; uint16_t c0 = T[p - (p > 0)], c1 = T[p]; SA[i] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p : t; } } return index; } static void libsais16_final_bwt_aux_scan_right_to_left_16u(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais16_prefetchw(&SA[i - 2 * prefetch_distance]); sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint16_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint16_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); sa_sint_t p0 = SA[i - 0]; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; uint16_t c0 = T[p0 - (p0 > 0)], c1 = T[p0]; SA[i - 0] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p0 : t; if ((p0 & rm) == 0) { I[p0 / (rm + 1)] = induction_bucket[T[p0]] + 1; } } sa_sint_t p1 = SA[i - 1]; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; uint16_t c0 = T[p1 - (p1 > 0)], c1 = T[p1]; SA[i - 1] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p1 : t; if ((p1 & rm) == 0) { I[p1 / (rm + 1)] = induction_bucket[T[p1]] + 1; } } } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; uint16_t c0 = T[p - (p > 0)], c1 = T[p]; SA[i] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p : t; if ((p & rm) == 0) { I[p / (rm + 1)] = induction_bucket[T[p]] + 1; } } } } static void libsais16_final_sorting_scan_right_to_left_16u(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais16_prefetchw(&SA[i - 2 * prefetch_distance]); sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint16_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint16_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? 
Ts1 : NULL); sa_sint_t p0 = SA[i - 0]; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; SA[--induction_bucket[T[p0]]] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] > T[p0]) << (SAINT_BIT - 1)); } sa_sint_t p1 = SA[i - 1]; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; SA[--induction_bucket[T[p1]]] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] > T[p1]) << (SAINT_BIT - 1)); } } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; SA[--induction_bucket[T[p]]] = p | ((sa_sint_t)(T[p - (p > 0)] > T[p]) << (SAINT_BIT - 1)); } } } static void libsais16_final_sorting_scan_right_to_left_32s(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 1; i >= j; i -= 2) { libsais16_prefetchw(&SA[i - 3 * prefetch_distance]); sa_sint_t s0 = SA[i - 2 * prefetch_distance - 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i - 2 * prefetch_distance - 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); sa_sint_t s2 = SA[i - 1 * prefetch_distance - 0]; if (s2 > 0) { libsais16_prefetchw(&induction_bucket[T[s2 - 1]]); libsais16_prefetch(&T[s2] - 2); } sa_sint_t s3 = SA[i - 1 * prefetch_distance - 1]; if (s3 > 0) { libsais16_prefetchw(&induction_bucket[T[s3 - 1]]); libsais16_prefetch(&T[s3] - 2); } sa_sint_t p0 = SA[i - 0]; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; SA[--induction_bucket[T[p0]]] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] > T[p0]) << (SAINT_BIT - 1)); } sa_sint_t p1 = SA[i - 1]; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; SA[--induction_bucket[T[p1]]] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] > T[p1]) << (SAINT_BIT - 1)); } } for (j -= 2 * prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; SA[--induction_bucket[T[p]]] = p | ((sa_sint_t)(T[p - (p > 0)] > T[p]) << (SAINT_BIT - 1)); } } } #if defined(_OPENMP) static fast_sint_t libsais16_final_bwt_scan_right_to_left_16u_block_prepare(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; memset(buckets, 0, ALPHABET_SIZE * sizeof(sa_sint_t)); fast_sint_t i, j, count = 0; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais16_prefetchw(&SA[i - 2 * prefetch_distance]); sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint16_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint16_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); sa_sint_t p0 = SA[i - 0]; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; uint16_t c0 = T[p0 - (p0 > 0)], c1 = T[p0]; SA[i - 0] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count++].index = (c0 <= c1) ? p0 : t; } sa_sint_t p1 = SA[i - 1]; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; uint16_t c0 = T[p1 - (p1 > 0)], c1 = T[p1]; SA[i - 1] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count++].index = (c0 <= c1) ? 
p1 : t; } } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; uint16_t c0 = T[p - (p > 0)], c1 = T[p]; SA[i] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count++].index = (c0 <= c1) ? p : t; } } return count; } static fast_sint_t libsais16_final_bwt_aux_scan_right_to_left_16u_block_prepare(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; memset(buckets, 0, ALPHABET_SIZE * sizeof(sa_sint_t)); fast_sint_t i, j, count = 0; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais16_prefetchw(&SA[i - 2 * prefetch_distance]); sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint16_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint16_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); sa_sint_t p0 = SA[i - 0]; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; uint16_t c0 = T[p0 - (p0 > 0)], c1 = T[p0]; SA[i - 0] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count].index = (c0 <= c1) ? p0 : t; cache[count + 1].index = p0; count += 2; } sa_sint_t p1 = SA[i - 1]; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; uint16_t c0 = T[p1 - (p1 > 0)], c1 = T[p1]; SA[i - 1] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count].index = (c0 <= c1) ? p1 : t; cache[count + 1].index = p1; count += 2; } } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; uint16_t c0 = T[p - (p > 0)], c1 = T[p]; SA[i] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count].index = (c0 <= c1) ? p : t; cache[count + 1].index = p; count += 2; } } return count; } static fast_sint_t libsais16_final_sorting_scan_right_to_left_16u_block_prepare(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; memset(buckets, 0, ALPHABET_SIZE * sizeof(sa_sint_t)); fast_sint_t i, j, count = 0; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais16_prefetchw(&SA[i - 2 * prefetch_distance]); sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint16_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint16_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? 
Ts1 : NULL); sa_sint_t p0 = SA[i - 0]; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; buckets[cache[count].symbol = T[p0]]++; cache[count++].index = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] > T[p0]) << (SAINT_BIT - 1)); } sa_sint_t p1 = SA[i - 1]; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; buckets[cache[count].symbol = T[p1]]++; cache[count++].index = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] > T[p1]) << (SAINT_BIT - 1)); } } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; buckets[cache[count].symbol = T[p]]++; cache[count++].index = p | ((sa_sint_t)(T[p - (p > 0)] > T[p]) << (SAINT_BIT - 1)); } } return count; } static void libsais16_final_order_scan_right_to_left_16u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = 0, j = count - 3; i < j; i += 4) { libsais16_prefetch(&cache[i + prefetch_distance]); SA[--buckets[cache[i + 0].symbol]] = cache[i + 0].index; SA[--buckets[cache[i + 1].symbol]] = cache[i + 1].index; SA[--buckets[cache[i + 2].symbol]] = cache[i + 2].index; SA[--buckets[cache[i + 3].symbol]] = cache[i + 3].index; } for (j += 3; i < j; i += 1) { SA[--buckets[cache[i].symbol]] = cache[i].index; } } static void libsais16_final_bwt_aux_scan_right_to_left_16u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = 0, j = count - 6; i < j; i += 8) { libsais16_prefetch(&cache[i + prefetch_distance]); SA[--buckets[cache[i + 0].symbol]] = cache[i + 0].index; if ((cache[i + 1].index & rm) == 0) { I[cache[i + 1].index / (rm + 1)] = buckets[cache[i + 0].symbol] + 1; } SA[--buckets[cache[i + 2].symbol]] = cache[i + 2].index; if ((cache[i + 3].index & rm) == 0) { I[cache[i + 3].index / (rm + 1)] = buckets[cache[i + 2].symbol] + 1; } SA[--buckets[cache[i + 4].symbol]] = cache[i + 4].index; if ((cache[i + 5].index & rm) == 0) { I[cache[i + 5].index / (rm + 1)] = buckets[cache[i + 4].symbol] + 1; } SA[--buckets[cache[i + 6].symbol]] = cache[i + 6].index; if ((cache[i + 7].index & rm) == 0) { I[cache[i + 7].index / (rm + 1)] = buckets[cache[i + 6].symbol] + 1; } } for (j += 6; i < j; i += 2) { SA[--buckets[cache[i].symbol]] = cache[i].index; if ((cache[i + 1].index & rm) == 0) { I[(cache[i + 1].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i].symbol] + 1; } } } static void libsais16_final_sorting_scan_right_to_left_32s_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais16_prefetchw(&SA[i + 2 * prefetch_distance]); sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais16_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais16_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais16_prefetch(s1 > 0 ? 
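/*
 * Descriptive note: for the 32-bit (recursive) alphabets the parallel right-to-left pass
 * uses a slightly different pipeline: *_block_gather snapshots each slice into the thread
 * cache, *_block_sort performs the induction on the cached entries (master thread only),
 * and libsais16_compact_and_place_cached_suffixes writes the results back. The same scheme
 * is used for the left-to-right 32s pass above.
 */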
Ts1 : NULL); libsais16_prefetchw(&cache[i + prefetch_distance]); sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; cache[i + 0].index = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] > T[p0]) << (SAINT_BIT - 1)); symbol0 = T[p0]; } cache[i + 0].symbol = symbol0; sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; cache[i + 1].index = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] > T[p1]) << (SAINT_BIT - 1)); symbol1 = T[p1]; } cache[i + 1].symbol = symbol1; } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t symbol = SAINT_MIN, p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; cache[i].index = p | ((sa_sint_t)(T[p - (p > 0)] > T[p]) << (SAINT_BIT - 1)); symbol = T[p]; } cache[i].symbol = symbol; } } static void libsais16_final_sorting_scan_right_to_left_32s_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais16_prefetchw(&cache[i - 2 * prefetch_distance]); sa_sint_t s0 = cache[i - prefetch_distance - 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0]; libsais16_prefetchw(s0 >= 0 ? Is0 : NULL); sa_sint_t s1 = cache[i - prefetch_distance - 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1]; libsais16_prefetchw(s1 >= 0 ? Is1 : NULL); sa_sint_t v0 = cache[i - 0].symbol; if (v0 >= 0) { cache[i - 0].symbol = --induction_bucket[v0]; if (cache[i - 0].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 0].symbol, np = cache[i - 0].index; cache[i - 0].index = np & SAINT_MAX; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] > T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } } } sa_sint_t v1 = cache[i - 1].symbol; if (v1 >= 0) { cache[i - 1].symbol = --induction_bucket[v1]; if (cache[i - 1].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 1].symbol, np = cache[i - 1].index; cache[i - 1].index = np & SAINT_MAX; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] > T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } } } } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t v = cache[i].symbol; if (v >= 0) { cache[i].symbol = --induction_bucket[v]; if (cache[i].symbol >= omp_block_start) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; cache[i].index = np & SAINT_MAX; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] > T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } } } } } static void libsais16_final_bwt_scan_right_to_left_16u_block_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < 
omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais16_final_bwt_scan_right_to_left_16u(T, SA, induction_bucket, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.count = libsais16_final_bwt_scan_right_to_left_16u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { fast_sint_t t; for (t = omp_num_threads - 1; t >= 0; --t) { sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets; fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A - B; temp_bucket[c] = A; } } } #pragma omp barrier { libsais16_final_order_scan_right_to_left_16u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count); } } #endif } } static void libsais16_final_bwt_aux_scan_right_to_left_16u_block_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais16_final_bwt_aux_scan_right_to_left_16u(T, SA, rm, I, induction_bucket, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.count = libsais16_final_bwt_aux_scan_right_to_left_16u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { fast_sint_t t; for (t = omp_num_threads - 1; t >= 0; --t) { sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets; fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A - B; temp_bucket[c] = A; } } } #pragma omp barrier { libsais16_final_bwt_aux_scan_right_to_left_16u_block_place(SA, rm, I, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count); } } #endif } } static void libsais16_final_sorting_scan_right_to_left_16u_block_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais16_final_sorting_scan_right_to_left_16u(T, SA, induction_bucket, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.count = libsais16_final_sorting_scan_right_to_left_16u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { fast_sint_t t; for (t = omp_num_threads - 1; t >= 0; --t) { sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets; fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A - B; temp_bucket[c] = A; } } } #pragma omp barrier { libsais16_final_order_scan_right_to_left_16u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count); } } #endif } } static void libsais16_final_sorting_scan_right_to_left_32s_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(cache); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
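/*
 * Descriptive note: top-level right-to-left drivers follow after the #endif. For small
 * inputs (or a single thread) they run the serial scan directly; otherwise the 16u drivers
 * walk SA from the end, using zero entries (slots not yet induced) as block separators,
 * handling short runs inline and dispatching longer ones to the *_block_omp routines
 * above, each block sized to fit the per-thread caches (LIBSAIS_PER_THREAD_CACHE_SIZE).
 * The 32s driver simply processes fixed-size chunks.
 */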
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais16_final_sorting_scan_right_to_left_32s(T, SA, buckets, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { libsais16_final_sorting_scan_right_to_left_32s_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { libsais16_final_sorting_scan_right_to_left_32s_block_sort(T, buckets, cache - block_start, block_start, block_size); } #pragma omp barrier { libsais16_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size); } } #endif } } #endif static sa_sint_t libsais16_final_bwt_scan_right_to_left_16u_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t index = -1; if (threads == 1 || n < 65536) { index = libsais16_final_bwt_scan_right_to_left_16u(T, SA, induction_bucket, 0, n); } #if defined(_OPENMP) else { fast_sint_t block_start; for (block_start = (fast_sint_t)n - 1; block_start >= 0; ) { if (SA[block_start] == 0) { index = (sa_sint_t)block_start--; } else { fast_sint_t block_max_end = block_start - ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end < 0) { block_max_end = -1; } fast_sint_t block_end = block_start - 1; while (block_end > block_max_end && SA[block_end] != 0) { block_end--; } fast_sint_t block_size = block_start - block_end; if (block_size < 32) { for (; block_start > block_end; block_start -= 1) { sa_sint_t p = SA[block_start]; SA[block_start] = p & SAINT_MAX; if (p > 0) { p--; uint16_t c0 = T[p - (p > 0)], c1 = T[p]; SA[block_start] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p : t; } } } else { libsais16_final_bwt_scan_right_to_left_16u_block_omp(T, SA, induction_bucket, block_end + 1, block_size, threads, thread_state); block_start = block_end; } } } } #else UNUSED(thread_state); #endif return index; } static void libsais16_final_bwt_aux_scan_right_to_left_16u_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { if (threads == 1 || n < 65536) { libsais16_final_bwt_aux_scan_right_to_left_16u(T, SA, rm, I, induction_bucket, 0, n); } #if defined(_OPENMP) else { fast_sint_t block_start; for (block_start = (fast_sint_t)n - 1; block_start >= 0; ) { if (SA[block_start] == 0) { block_start--; } else { fast_sint_t block_max_end = block_start - ((fast_sint_t)threads) * ((LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads) / 2); if (block_max_end < 0) { block_max_end = -1; } fast_sint_t block_end = block_start - 1; while (block_end > block_max_end && SA[block_end] != 0) { block_end--; } fast_sint_t block_size = block_start - block_end; if (block_size < 32) { for (; block_start > block_end; block_start -= 1) { sa_sint_t p = SA[block_start]; SA[block_start] = p & SAINT_MAX; if (p > 0) { p--; uint16_t c0 = T[p - (p > 0)], c1 = T[p]; SA[block_start] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? 
p : t; if ((p & rm) == 0) { I[p / (rm + 1)] = induction_bucket[T[p]] + 1; } } } } else { libsais16_final_bwt_aux_scan_right_to_left_16u_block_omp(T, SA, rm, I, induction_bucket, block_end + 1, block_size, threads, thread_state); block_start = block_end; } } } } #else UNUSED(thread_state); #endif } static void libsais16_final_sorting_scan_right_to_left_16u_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { if (threads == 1 || n < 65536) { libsais16_final_sorting_scan_right_to_left_16u(T, SA, induction_bucket, 0, n); } #if defined(_OPENMP) else { fast_sint_t block_start; for (block_start = (fast_sint_t)n - 1; block_start >= 0; ) { if (SA[block_start] == 0) { block_start--; } else { fast_sint_t block_max_end = block_start - ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end < -1) { block_max_end = -1; } fast_sint_t block_end = block_start - 1; while (block_end > block_max_end && SA[block_end] != 0) { block_end--; } fast_sint_t block_size = block_start - block_end; if (block_size < 32) { for (; block_start > block_end; block_start -= 1) { sa_sint_t p = SA[block_start]; SA[block_start] = p & SAINT_MAX; if (p > 0) { p--; SA[--induction_bucket[T[p]]] = p | ((sa_sint_t)(T[p - (p > 0)] > T[p]) << (SAINT_BIT - 1)); } } } else { libsais16_final_sorting_scan_right_to_left_16u_block_omp(T, SA, induction_bucket, block_end + 1, block_size, threads, thread_state); block_start = block_end; } } } } #else UNUSED(thread_state); #endif } static void libsais16_final_sorting_scan_right_to_left_32s_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { if (threads == 1 || n < 65536) { libsais16_final_sorting_scan_right_to_left_32s(T, SA, induction_bucket, 0, n); } #if defined(_OPENMP) else { fast_sint_t block_start, block_end; for (block_start = (fast_sint_t)n - 1; block_start >= 0; block_start = block_end) { block_end = block_start - (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end < 0) { block_end = -1; } libsais16_final_sorting_scan_right_to_left_32s_block_omp(T, SA, induction_bucket, thread_state[0].state.cache, block_end + 1, block_start - block_end, threads); } } #else UNUSED(thread_state); #endif } static void libsais16_clear_lms_suffixes_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT bucket_start, sa_sint_t * RESTRICT bucket_end, sa_sint_t threads) { fast_sint_t c; #if defined(_OPENMP) #pragma omp parallel for schedule(static, 1) num_threads(threads) if(threads > 1 && n >= 65536) #else UNUSED(threads); UNUSED(n); #endif for (c = 0; c < k; ++c) { if (bucket_end[c] > bucket_start[c]) { memset(&SA[bucket_start[c]], 0, ((size_t)bucket_end[c] - (size_t)bucket_start[c]) * sizeof(sa_sint_t)); } } } static sa_sint_t libsais16_induce_final_order_16u_omp(const uint16_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t bwt, sa_sint_t r, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { if (!bwt) { libsais16_final_sorting_scan_left_to_right_16u_omp(T, SA, n, &buckets[6 * ALPHABET_SIZE], threads, thread_state); if (threads > 1 && n >= 65536) { libsais16_clear_lms_suffixes_omp(SA, n, ALPHABET_SIZE, &buckets[6 * ALPHABET_SIZE], &buckets[7 * ALPHABET_SIZE], threads); } 
libsais16_final_sorting_scan_right_to_left_16u_omp(T, SA, n, &buckets[7 * ALPHABET_SIZE], threads, thread_state); return 0; } else if (I != NULL) { libsais16_final_bwt_aux_scan_left_to_right_16u_omp(T, SA, n, r - 1, I, &buckets[6 * ALPHABET_SIZE], threads, thread_state); if (threads > 1 && n >= 65536) { libsais16_clear_lms_suffixes_omp(SA, n, ALPHABET_SIZE, &buckets[6 * ALPHABET_SIZE], &buckets[7 * ALPHABET_SIZE], threads); } libsais16_final_bwt_aux_scan_right_to_left_16u_omp(T, SA, n, r - 1, I, &buckets[7 * ALPHABET_SIZE], threads, thread_state); return 0; } else { libsais16_final_bwt_scan_left_to_right_16u_omp(T, SA, n, &buckets[6 * ALPHABET_SIZE], threads, thread_state); if (threads > 1 && n >= 65536) { libsais16_clear_lms_suffixes_omp(SA, n, ALPHABET_SIZE, &buckets[6 * ALPHABET_SIZE], &buckets[7 * ALPHABET_SIZE], threads); } return libsais16_final_bwt_scan_right_to_left_16u_omp(T, SA, n, &buckets[7 * ALPHABET_SIZE], threads, thread_state); } } static void libsais16_induce_final_order_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { libsais16_final_sorting_scan_left_to_right_32s_omp(T, SA, n, &buckets[4 * k], threads, thread_state); libsais16_final_sorting_scan_right_to_left_32s_omp(T, SA, n, &buckets[5 * k], threads, thread_state); } static void libsais16_induce_final_order_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { libsais16_final_sorting_scan_left_to_right_32s_omp(T, SA, n, &buckets[2 * k], threads, thread_state); libsais16_final_sorting_scan_right_to_left_32s_omp(T, SA, n, &buckets[3 * k], threads, thread_state); } static void libsais16_induce_final_order_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { libsais16_final_sorting_scan_left_to_right_32s_omp(T, SA, n, &buckets[1 * k], threads, thread_state); libsais16_final_sorting_scan_right_to_left_32s_omp(T, SA, n, &buckets[0 * k], threads, thread_state); } static void libsais16_induce_final_order_32s_1k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { libsais16_count_suffixes_32s(T, n, k, buckets); libsais16_initialize_buckets_start_32s_1k(k, buckets); libsais16_final_sorting_scan_left_to_right_32s_omp(T, SA, n, buckets, threads, thread_state); libsais16_count_suffixes_32s(T, n, k, buckets); libsais16_initialize_buckets_end_32s_1k(k, buckets); libsais16_final_sorting_scan_right_to_left_32s_omp(T, SA, n, buckets, threads, thread_state); } static sa_sint_t libsais16_renumber_unique_and_nonunique_lms_suffixes_32s(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t f, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT SAm = &SA[m]; sa_sint_t i, j; for (i = (sa_sint_t)omp_block_start, j = (sa_sint_t)omp_block_start + (sa_sint_t)omp_block_size - 2 * (sa_sint_t)prefetch_distance - 3; i < j; i += 4) { libsais16_prefetch(&SA[i + 3 * prefetch_distance]); libsais16_prefetchw(&SAm[((sa_uint_t)SA[i + 2 * prefetch_distance + 0]) >> 1]); libsais16_prefetchw(&SAm[((sa_uint_t)SA[i + 2 * prefetch_distance + 1]) >> 1]); 
libsais16_prefetchw(&SAm[((sa_uint_t)SA[i + 2 * prefetch_distance + 2]) >> 1]); libsais16_prefetchw(&SAm[((sa_uint_t)SA[i + 2 * prefetch_distance + 3]) >> 1]); sa_uint_t q0 = (sa_uint_t)SA[i + prefetch_distance + 0]; const sa_sint_t * Tq0 = &T[q0]; libsais16_prefetchw(SAm[q0 >> 1] < 0 ? Tq0 : NULL); sa_uint_t q1 = (sa_uint_t)SA[i + prefetch_distance + 1]; const sa_sint_t * Tq1 = &T[q1]; libsais16_prefetchw(SAm[q1 >> 1] < 0 ? Tq1 : NULL); sa_uint_t q2 = (sa_uint_t)SA[i + prefetch_distance + 2]; const sa_sint_t * Tq2 = &T[q2]; libsais16_prefetchw(SAm[q2 >> 1] < 0 ? Tq2 : NULL); sa_uint_t q3 = (sa_uint_t)SA[i + prefetch_distance + 3]; const sa_sint_t * Tq3 = &T[q3]; libsais16_prefetchw(SAm[q3 >> 1] < 0 ? Tq3 : NULL); sa_uint_t p0 = (sa_uint_t)SA[i + 0]; sa_sint_t s0 = SAm[p0 >> 1]; if (s0 < 0) { T[p0] |= SAINT_MIN; f++; s0 = i + 0 + SAINT_MIN + f; } SAm[p0 >> 1] = s0 - f; sa_uint_t p1 = (sa_uint_t)SA[i + 1]; sa_sint_t s1 = SAm[p1 >> 1]; if (s1 < 0) { T[p1] |= SAINT_MIN; f++; s1 = i + 1 + SAINT_MIN + f; } SAm[p1 >> 1] = s1 - f; sa_uint_t p2 = (sa_uint_t)SA[i + 2]; sa_sint_t s2 = SAm[p2 >> 1]; if (s2 < 0) { T[p2] |= SAINT_MIN; f++; s2 = i + 2 + SAINT_MIN + f; } SAm[p2 >> 1] = s2 - f; sa_uint_t p3 = (sa_uint_t)SA[i + 3]; sa_sint_t s3 = SAm[p3 >> 1]; if (s3 < 0) { T[p3] |= SAINT_MIN; f++; s3 = i + 3 + SAINT_MIN + f; } SAm[p3 >> 1] = s3 - f; } for (j += 2 * (sa_sint_t)prefetch_distance + 3; i < j; i += 1) { sa_uint_t p = (sa_uint_t)SA[i]; sa_sint_t s = SAm[p >> 1]; if (s < 0) { T[p] |= SAINT_MIN; f++; s = i + SAINT_MIN + f; } SAm[p >> 1] = s - f; } return f; } static void libsais16_compact_unique_and_nonunique_lms_suffixes_32s(sa_sint_t * RESTRICT SA, sa_sint_t m, fast_sint_t * pl, fast_sint_t * pr, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT SAl = &SA[0]; sa_sint_t * RESTRICT SAr = &SA[0]; fast_sint_t i, j, l = *pl - 1, r = *pr - 1; for (i = (fast_sint_t)m + omp_block_start + omp_block_size - 1, j = (fast_sint_t)m + omp_block_start + 3; i >= j; i -= 4) { libsais16_prefetch(&SA[i - prefetch_distance]); sa_sint_t p0 = SA[i - 0]; SAl[l] = p0 & SAINT_MAX; l -= p0 < 0; SAr[r] = p0 - 1; r -= p0 > 0; sa_sint_t p1 = SA[i - 1]; SAl[l] = p1 & SAINT_MAX; l -= p1 < 0; SAr[r] = p1 - 1; r -= p1 > 0; sa_sint_t p2 = SA[i - 2]; SAl[l] = p2 & SAINT_MAX; l -= p2 < 0; SAr[r] = p2 - 1; r -= p2 > 0; sa_sint_t p3 = SA[i - 3]; SAl[l] = p3 & SAINT_MAX; l -= p3 < 0; SAr[r] = p3 - 1; r -= p3 > 0; } for (j -= 3; i >= j; i -= 1) { sa_sint_t p = SA[i]; SAl[l] = p & SAINT_MAX; l -= p < 0; SAr[r] = p - 1; r -= p > 0; } *pl = l + 1; *pr = r + 1; } #if defined(_OPENMP) static sa_sint_t libsais16_count_unique_suffixes(sa_sint_t * RESTRICT SA, sa_sint_t m, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT SAm = &SA[m]; fast_sint_t i, j; sa_sint_t f0 = 0, f1 = 0, f2 = 0, f3 = 0; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4) { libsais16_prefetch(&SA[i + 2 * prefetch_distance]); libsais16_prefetch(&SAm[((sa_uint_t)SA[i + prefetch_distance + 0]) >> 1]); libsais16_prefetch(&SAm[((sa_uint_t)SA[i + prefetch_distance + 1]) >> 1]); libsais16_prefetch(&SAm[((sa_uint_t)SA[i + prefetch_distance + 2]) >> 1]); libsais16_prefetch(&SAm[((sa_uint_t)SA[i + prefetch_distance + 3]) >> 1]); f0 += SAm[((sa_uint_t)SA[i + 0]) >> 1] < 0; f1 += SAm[((sa_uint_t)SA[i + 1]) >> 1] < 0; f2 += SAm[((sa_uint_t)SA[i + 2]) >> 1] < 0; f3 += SAm[((sa_uint_t)SA[i + 
3]) >> 1] < 0; } for (j += prefetch_distance + 3; i < j; i += 1) { f0 += SAm[((sa_uint_t)SA[i]) >> 1] < 0; } return f0 + f1 + f2 + f3; } #endif static sa_sint_t libsais16_renumber_unique_and_nonunique_lms_suffixes_32s_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t f = 0; #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && m >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (m / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : m - omp_block_start; if (omp_num_threads == 1) { f = libsais16_renumber_unique_and_nonunique_lms_suffixes_32s(T, SA, m, 0, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.count = libsais16_count_unique_suffixes(SA, m, omp_block_start, omp_block_size); } #pragma omp barrier { fast_sint_t t, count = 0; for (t = 0; t < omp_thread_num; ++t) { count += thread_state[t].state.count; } if (omp_thread_num == omp_num_threads - 1) { f = (sa_sint_t)(count + thread_state[omp_thread_num].state.count); } libsais16_renumber_unique_and_nonunique_lms_suffixes_32s(T, SA, m, (sa_sint_t)count, omp_block_start, omp_block_size); } } #endif } return f; } static void libsais16_compact_unique_and_nonunique_lms_suffixes_32s_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t fs, sa_sint_t f, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 131072 && m < fs) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (((fast_sint_t)n >> 1) / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : ((fast_sint_t)n >> 1) - omp_block_start; if (omp_num_threads == 1) { fast_sint_t l = m, r = (fast_sint_t)n + (fast_sint_t)fs; libsais16_compact_unique_and_nonunique_lms_suffixes_32s(SA, m, &l, &r, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.position = (fast_sint_t)m + ((fast_sint_t)n >> 1) + omp_block_start + omp_block_size; thread_state[omp_thread_num].state.count = (fast_sint_t)m + omp_block_start + omp_block_size; libsais16_compact_unique_and_nonunique_lms_suffixes_32s(SA, m, &thread_state[omp_thread_num].state.position, &thread_state[omp_thread_num].state.count, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { fast_sint_t t, position; for (position = m, t = omp_num_threads - 1; t >= 0; --t) { fast_sint_t omp_block_end = t < omp_num_threads - 1 ? 
omp_block_stride * (t + 1) : ((fast_sint_t)n >> 1); fast_sint_t count = ((fast_sint_t)m + ((fast_sint_t)n >> 1) + omp_block_end - thread_state[t].state.position); if (count > 0) { position -= count; memcpy(&SA[position], &SA[thread_state[t].state.position], (size_t)count * sizeof(sa_sint_t)); } } for (position = (fast_sint_t)n + (fast_sint_t)fs, t = omp_num_threads - 1; t >= 0; --t) { fast_sint_t omp_block_end = t < omp_num_threads - 1 ? omp_block_stride * (t + 1) : ((fast_sint_t)n >> 1); fast_sint_t count = ((fast_sint_t)m + omp_block_end - thread_state[t].state.count); if (count > 0) { position -= count; memcpy(&SA[position], &SA[thread_state[t].state.count], (size_t)count * sizeof(sa_sint_t)); } } } } #endif } memcpy(&SA[(fast_sint_t)n + (fast_sint_t)fs - (fast_sint_t)m], &SA[(fast_sint_t)m - (fast_sint_t)f], (size_t)f * sizeof(sa_sint_t)); } static sa_sint_t libsais16_compact_lms_suffixes_32s_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t fs, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t f = libsais16_renumber_unique_and_nonunique_lms_suffixes_32s_omp(T, SA, m, threads, thread_state); libsais16_compact_unique_and_nonunique_lms_suffixes_32s_omp(SA, n, m, fs, f, threads, thread_state); return f; } static void libsais16_merge_unique_lms_suffixes_32s(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, fast_sint_t l, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; const sa_sint_t * RESTRICT SAnm = &SA[(fast_sint_t)n - (fast_sint_t)m - 1 + l]; sa_sint_t i, j; fast_sint_t tmp = *SAnm++; for (i = (sa_sint_t)omp_block_start, j = (sa_sint_t)omp_block_start + (sa_sint_t)omp_block_size - 6; i < j; i += 4) { libsais16_prefetch(&T[i + prefetch_distance]); sa_sint_t c0 = T[i + 0]; if (c0 < 0) { T[i + 0] = c0 & SAINT_MAX; SA[tmp] = i + 0; i++; tmp = *SAnm++; } sa_sint_t c1 = T[i + 1]; if (c1 < 0) { T[i + 1] = c1 & SAINT_MAX; SA[tmp] = i + 1; i++; tmp = *SAnm++; } sa_sint_t c2 = T[i + 2]; if (c2 < 0) { T[i + 2] = c2 & SAINT_MAX; SA[tmp] = i + 2; i++; tmp = *SAnm++; } sa_sint_t c3 = T[i + 3]; if (c3 < 0) { T[i + 3] = c3 & SAINT_MAX; SA[tmp] = i + 3; i++; tmp = *SAnm++; } } for (j += 6; i < j; i += 1) { sa_sint_t c = T[i]; if (c < 0) { T[i] = c & SAINT_MAX; SA[tmp] = i; i++; tmp = *SAnm++; } } } static void libsais16_merge_nonunique_lms_suffixes_32s(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, fast_sint_t l, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; const sa_sint_t * RESTRICT SAnm = &SA[(fast_sint_t)n - (fast_sint_t)m - 1 + l]; fast_sint_t i, j; sa_sint_t tmp = *SAnm++; for (i = omp_block_start, j = omp_block_start + omp_block_size - 3; i < j; i += 4) { libsais16_prefetch(&SA[i + prefetch_distance]); if (SA[i + 0] == 0) { SA[i + 0] = tmp; tmp = *SAnm++; } if (SA[i + 1] == 0) { SA[i + 1] = tmp; tmp = *SAnm++; } if (SA[i + 2] == 0) { SA[i + 2] = tmp; tmp = *SAnm++; } if (SA[i + 3] == 0) { SA[i + 3] = tmp; tmp = *SAnm++; } } for (j += 3; i < j; i += 1) { if (SA[i] == 0) { SA[i] = tmp; tmp = *SAnm++; } } } static void libsais16_merge_unique_lms_suffixes_32s_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads 
= omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start; if (omp_num_threads == 1) { libsais16_merge_unique_lms_suffixes_32s(T, SA, n, m, 0, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.count = libsais16_count_negative_marked_suffixes(T, omp_block_start, omp_block_size); } #pragma omp barrier { fast_sint_t t, count = 0; for (t = 0; t < omp_thread_num; ++t) { count += thread_state[t].state.count; } libsais16_merge_unique_lms_suffixes_32s(T, SA, n, m, count, omp_block_start, omp_block_size); } } #endif } } static void libsais16_merge_nonunique_lms_suffixes_32s_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t f, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && m >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (m / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : m - omp_block_start; if (omp_num_threads == 1) { libsais16_merge_nonunique_lms_suffixes_32s(SA, n, m, f, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.count = libsais16_count_zero_marked_suffixes(SA, omp_block_start, omp_block_size); } #pragma omp barrier { fast_sint_t t, count = f; for (t = 0; t < omp_thread_num; ++t) { count += thread_state[t].state.count; } libsais16_merge_nonunique_lms_suffixes_32s(SA, n, m, count, omp_block_start, omp_block_size); } } #endif } } static void libsais16_merge_compacted_lms_suffixes_32s_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t f, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { libsais16_merge_unique_lms_suffixes_32s_omp(T, SA, n, m, threads, thread_state); libsais16_merge_nonunique_lms_suffixes_32s_omp(SA, n, m, f, threads, thread_state); } static void libsais16_reconstruct_compacted_lms_suffixes_32s_2k_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, sa_sint_t fs, sa_sint_t f, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { if (f > 0) { memmove(&SA[n - m - 1], &SA[n + fs - m], (size_t)f * sizeof(sa_sint_t)); libsais16_count_and_gather_compacted_lms_suffixes_32s_2k_omp(T, SA, n, k, buckets, threads, thread_state); libsais16_reconstruct_lms_suffixes_omp(SA, n, m - f, threads); memcpy(&SA[n - m - 1 + f], &SA[0], ((size_t)m - (size_t)f) * sizeof(sa_sint_t)); memset(&SA[0], 0, (size_t)m * sizeof(sa_sint_t)); libsais16_merge_compacted_lms_suffixes_32s_omp(T, SA, n, m, f, threads, thread_state); } else { libsais16_count_and_gather_lms_suffixes_32s_2k(T, SA, n, k, buckets, 0, n); libsais16_reconstruct_lms_suffixes_omp(SA, n, m, threads); } } static void libsais16_reconstruct_compacted_lms_suffixes_32s_1k_omp(sa_sint_t * RESTRICT T, sa_sint_t * 
RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t fs, sa_sint_t f, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { if (f > 0) { memmove(&SA[n - m - 1], &SA[n + fs - m], (size_t)f * sizeof(sa_sint_t)); libsais16_gather_compacted_lms_suffixes_32s(T, SA, n); libsais16_reconstruct_lms_suffixes_omp(SA, n, m - f, threads); memcpy(&SA[n - m - 1 + f], &SA[0], ((size_t)m - (size_t)f) * sizeof(sa_sint_t)); memset(&SA[0], 0, (size_t)m * sizeof(sa_sint_t)); libsais16_merge_compacted_lms_suffixes_32s_omp(T, SA, n, m, f, threads, thread_state); } else { libsais16_gather_lms_suffixes_32s(T, SA, n); libsais16_reconstruct_lms_suffixes_omp(SA, n, m, threads); } } static sa_sint_t libsais16_main_32s(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t fs, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { fs = fs < (SAINT_MAX - n) ? fs : (SAINT_MAX - n); if (k > 0 && fs / k >= 6) { sa_sint_t alignment = (fs - 1024) / k >= 6 ? 1024 : 16; sa_sint_t * RESTRICT buckets = (fs - alignment) / k >= 6 ? (sa_sint_t *)libsais16_align_up(&SA[n + fs - 6 * k - alignment], (size_t)alignment * sizeof(sa_sint_t)) : &SA[n + fs - 6 * k]; sa_sint_t m = libsais16_count_and_gather_lms_suffixes_32s_4k_omp(T, SA, n, k, buckets, threads, thread_state); if (m > 1) { memset(SA, 0, ((size_t)n - (size_t)m) * sizeof(sa_sint_t)); sa_sint_t first_lms_suffix = SA[n - m]; sa_sint_t left_suffixes_count = libsais16_initialize_buckets_for_lms_suffixes_radix_sort_32s_6k(T, k, buckets, first_lms_suffix); libsais16_radix_sort_lms_suffixes_32s_6k_omp(T, SA, n, m, &buckets[4 * k], threads, thread_state); libsais16_radix_sort_set_markers_32s_6k_omp(SA, k, &buckets[4 * k], threads); if (threads > 1 && n >= 65536) { memset(&SA[(fast_sint_t)n - (fast_sint_t)m], 0, (size_t)m * sizeof(sa_sint_t)); } libsais16_initialize_buckets_for_partial_sorting_32s_6k(T, k, buckets, first_lms_suffix, left_suffixes_count); libsais16_induce_partial_order_32s_6k_omp(T, SA, n, k, buckets, first_lms_suffix, left_suffixes_count, threads, thread_state); sa_sint_t names = libsais16_renumber_and_mark_distinct_lms_suffixes_32s_4k_omp(SA, n, m, threads, thread_state); if (names < m) { sa_sint_t f = libsais16_compact_lms_suffixes_32s_omp(T, SA, n, m, fs, threads, thread_state); if (libsais16_main_32s(SA + n + fs - m + f, SA, m - f, names - f, fs + n - 2 * m + f, threads, thread_state) != 0) { return -2; } libsais16_reconstruct_compacted_lms_suffixes_32s_2k_omp(T, SA, n, k, m, fs, f, buckets, threads, thread_state); } else { libsais16_count_lms_suffixes_32s_2k(T, n, k, buckets); } libsais16_initialize_buckets_start_and_end_32s_4k(k, buckets); libsais16_place_lms_suffixes_histogram_32s_4k(SA, n, k, m, buckets); libsais16_induce_final_order_32s_4k(T, SA, n, k, buckets, threads, thread_state); } else { SA[0] = SA[n - 1]; libsais16_initialize_buckets_start_and_end_32s_6k(k, buckets); libsais16_place_lms_suffixes_histogram_32s_6k(SA, n, k, m, buckets); libsais16_induce_final_order_32s_6k(T, SA, n, k, buckets, threads, thread_state); } return 0; } else if (k > 0 && fs / k >= 4) { sa_sint_t alignment = (fs - 1024) / k >= 4 ? 1024 : 16; sa_sint_t * RESTRICT buckets = (fs - alignment) / k >= 4 ? 
(sa_sint_t *)libsais16_align_up(&SA[n + fs - 4 * k - alignment], (size_t)alignment * sizeof(sa_sint_t)) : &SA[n + fs - 4 * k]; sa_sint_t m = libsais16_count_and_gather_lms_suffixes_32s_2k_omp(T, SA, n, k, buckets, threads, thread_state); if (m > 1) { libsais16_initialize_buckets_for_radix_and_partial_sorting_32s_4k(T, k, buckets, SA[n - m]); libsais16_radix_sort_lms_suffixes_32s_2k_omp(T, SA, n, m, &buckets[1], threads, thread_state); libsais16_radix_sort_set_markers_32s_4k_omp(SA, k, &buckets[1], threads); libsais16_place_lms_suffixes_interval_32s_4k(SA, n, k, m - 1, buckets); libsais16_induce_partial_order_32s_4k_omp(T, SA, n, k, buckets, threads, thread_state); sa_sint_t names = libsais16_renumber_and_mark_distinct_lms_suffixes_32s_4k_omp(SA, n, m, threads, thread_state); if (names < m) { sa_sint_t f = libsais16_compact_lms_suffixes_32s_omp(T, SA, n, m, fs, threads, thread_state); if (libsais16_main_32s(SA + n + fs - m + f, SA, m - f, names - f, fs + n - 2 * m + f, threads, thread_state) != 0) { return -2; } libsais16_reconstruct_compacted_lms_suffixes_32s_2k_omp(T, SA, n, k, m, fs, f, buckets, threads, thread_state); } else { libsais16_count_lms_suffixes_32s_2k(T, n, k, buckets); } } else { SA[0] = SA[n - 1]; } libsais16_initialize_buckets_start_and_end_32s_4k(k, buckets); libsais16_place_lms_suffixes_histogram_32s_4k(SA, n, k, m, buckets); libsais16_induce_final_order_32s_4k(T, SA, n, k, buckets, threads, thread_state); return 0; } else if (k > 0 && fs / k >= 2) { sa_sint_t alignment = (fs - 1024) / k >= 2 ? 1024 : 16; sa_sint_t * RESTRICT buckets = (fs - alignment) / k >= 2 ? (sa_sint_t *)libsais16_align_up(&SA[n + fs - 2 * k - alignment], (size_t)alignment * sizeof(sa_sint_t)) : &SA[n + fs - 2 * k]; sa_sint_t m = libsais16_count_and_gather_lms_suffixes_32s_2k_omp(T, SA, n, k, buckets, threads, thread_state); if (m > 1) { libsais16_initialize_buckets_for_lms_suffixes_radix_sort_32s_2k(T, k, buckets, SA[n - m]); libsais16_radix_sort_lms_suffixes_32s_2k_omp(T, SA, n, m, &buckets[1], threads, thread_state); libsais16_place_lms_suffixes_interval_32s_2k(SA, n, k, m - 1, buckets); libsais16_initialize_buckets_start_and_end_32s_2k(k, buckets); libsais16_induce_partial_order_32s_2k_omp(T, SA, n, k, buckets, threads, thread_state); sa_sint_t names = libsais16_renumber_and_mark_distinct_lms_suffixes_32s_1k_omp(T, SA, n, m, threads); if (names < m) { sa_sint_t f = libsais16_compact_lms_suffixes_32s_omp(T, SA, n, m, fs, threads, thread_state); if (libsais16_main_32s(SA + n + fs - m + f, SA, m - f, names - f, fs + n - 2 * m + f, threads, thread_state) != 0) { return -2; } libsais16_reconstruct_compacted_lms_suffixes_32s_2k_omp(T, SA, n, k, m, fs, f, buckets, threads, thread_state); } else { libsais16_count_lms_suffixes_32s_2k(T, n, k, buckets); } } else { SA[0] = SA[n - 1]; } libsais16_initialize_buckets_end_32s_2k(k, buckets); libsais16_place_lms_suffixes_histogram_32s_2k(SA, n, k, m, buckets); libsais16_initialize_buckets_start_and_end_32s_2k(k, buckets); libsais16_induce_final_order_32s_2k(T, SA, n, k, buckets, threads, thread_state); return 0; } else { sa_sint_t * buffer = fs < k ? (sa_sint_t *)libsais16_alloc_aligned((size_t)k * sizeof(sa_sint_t), 4096) : (sa_sint_t *)NULL; sa_sint_t alignment = fs - 1024 >= k ? 1024 : 16; sa_sint_t * RESTRICT buckets = fs - alignment >= k ? (sa_sint_t *)libsais16_align_up(&SA[n + fs - k - alignment], (size_t)alignment * sizeof(sa_sint_t)) : fs >= k ? 
&SA[n + fs - k] : buffer; if (buckets == NULL) { return -2; } memset(SA, 0, (size_t)n * sizeof(sa_sint_t)); libsais16_count_suffixes_32s(T, n, k, buckets); libsais16_initialize_buckets_end_32s_1k(k, buckets); sa_sint_t m = libsais16_radix_sort_lms_suffixes_32s_1k(T, SA, n, buckets); if (m > 1) { libsais16_induce_partial_order_32s_1k_omp(T, SA, n, k, buckets, threads, thread_state); sa_sint_t names = libsais16_renumber_and_mark_distinct_lms_suffixes_32s_1k_omp(T, SA, n, m, threads); if (names < m) { if (buffer != NULL) { libsais16_free_aligned(buffer); buckets = NULL; } sa_sint_t f = libsais16_compact_lms_suffixes_32s_omp(T, SA, n, m, fs, threads, thread_state); if (libsais16_main_32s(SA + n + fs - m + f, SA, m - f, names - f, fs + n - 2 * m + f, threads, thread_state) != 0) { return -2; } libsais16_reconstruct_compacted_lms_suffixes_32s_1k_omp(T, SA, n, m, fs, f, threads, thread_state); if (buckets == NULL) { buckets = buffer = (sa_sint_t *)libsais16_alloc_aligned((size_t)k * sizeof(sa_sint_t), 4096); } if (buckets == NULL) { return -2; } } libsais16_count_suffixes_32s(T, n, k, buckets); libsais16_initialize_buckets_end_32s_1k(k, buckets); libsais16_place_lms_suffixes_interval_32s_1k(T, SA, k, m, buckets); } libsais16_induce_final_order_32s_1k(T, SA, n, k, buckets, threads, thread_state); libsais16_free_aligned(buffer); return 0; } } static sa_sint_t libsais16_main_16u(const uint16_t * T, sa_sint_t * SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t bwt, sa_sint_t r, sa_sint_t * RESTRICT I, sa_sint_t fs, sa_sint_t * freq, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { fs = fs < (SAINT_MAX - n) ? fs : (SAINT_MAX - n); sa_sint_t m = libsais16_count_and_gather_lms_suffixes_16u_omp(T, SA, n, buckets, threads, thread_state); libsais16_initialize_buckets_start_and_end_16u(buckets, freq); if (m > 0) { sa_sint_t first_lms_suffix = SA[n - m]; sa_sint_t left_suffixes_count = libsais16_initialize_buckets_for_lms_suffixes_radix_sort_16u(T, buckets, first_lms_suffix); if (threads > 1 && n >= 65536) { memset(SA, 0, ((size_t)n - (size_t)m) * sizeof(sa_sint_t)); } libsais16_radix_sort_lms_suffixes_16u_omp(T, SA, n, m, buckets, threads, thread_state); if (threads > 1 && n >= 65536) { memset(&SA[(fast_sint_t)n - (fast_sint_t)m], 0, (size_t)m * sizeof(sa_sint_t)); } libsais16_initialize_buckets_for_partial_sorting_16u(T, buckets, first_lms_suffix, left_suffixes_count); libsais16_induce_partial_order_16u_omp(T, SA, n, buckets, first_lms_suffix, left_suffixes_count, threads, thread_state); sa_sint_t names = libsais16_renumber_and_gather_lms_suffixes_16u_omp(SA, n, m, fs, threads, thread_state); if (names < m) { if (libsais16_main_32s(SA + n + fs - m, SA, m, names, fs + n - 2 * m, threads, thread_state) != 0) { return -2; } libsais16_gather_lms_suffixes_16u_omp(T, SA, n, threads, thread_state); libsais16_reconstruct_lms_suffixes_omp(SA, n, m, threads); } libsais16_place_lms_suffixes_interval_16u(SA, n, m, buckets); } else { memset(SA, 0, (size_t)n * sizeof(sa_sint_t)); } return libsais16_induce_final_order_16u_omp(T, SA, n, bwt, r, I, buckets, threads, thread_state); } static sa_sint_t libsais16_main(const uint16_t * T, sa_sint_t * SA, sa_sint_t n, sa_sint_t bwt, sa_sint_t r, sa_sint_t * I, sa_sint_t fs, sa_sint_t * freq, sa_sint_t threads) { LIBSAIS_THREAD_STATE * RESTRICT thread_state = threads > 1 ? 
libsais16_alloc_thread_state(threads) : NULL; sa_sint_t * RESTRICT buckets = (sa_sint_t *)libsais16_alloc_aligned(8 * ALPHABET_SIZE * sizeof(sa_sint_t), 4096); sa_sint_t index = buckets != NULL && (thread_state != NULL || threads == 1) ? libsais16_main_16u(T, SA, n, buckets, bwt, r, I, fs, freq, threads, thread_state) : -2; libsais16_free_aligned(buckets); libsais16_free_thread_state(thread_state); return index; } static sa_sint_t libsais16_main_ctx(const LIBSAIS_CONTEXT * ctx, const uint16_t * T, sa_sint_t * SA, sa_sint_t n, sa_sint_t bwt, sa_sint_t r, sa_sint_t * I, sa_sint_t fs, sa_sint_t * freq) { return ctx != NULL && (ctx->buckets != NULL && (ctx->thread_state != NULL || ctx->threads == 1)) ? libsais16_main_16u(T, SA, n, ctx->buckets, bwt, r, I, fs, freq, (sa_sint_t)ctx->threads, ctx->thread_state) : -2; } static void libsais16_bwt_copy_16u(uint16_t * RESTRICT U, sa_sint_t * RESTRICT A, sa_sint_t n) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = 0, j = (fast_sint_t)n - 7; i < j; i += 8) { libsais16_prefetch(&A[i + prefetch_distance]); U[i + 0] = (uint16_t)A[i + 0]; U[i + 1] = (uint16_t)A[i + 1]; U[i + 2] = (uint16_t)A[i + 2]; U[i + 3] = (uint16_t)A[i + 3]; U[i + 4] = (uint16_t)A[i + 4]; U[i + 5] = (uint16_t)A[i + 5]; U[i + 6] = (uint16_t)A[i + 6]; U[i + 7] = (uint16_t)A[i + 7]; } for (j += 7; i < j; i += 1) { U[i] = (uint16_t)A[i]; } } #if defined(_OPENMP) static void libsais16_bwt_copy_16u_omp(uint16_t * RESTRICT U, sa_sint_t * RESTRICT A, sa_sint_t n, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); fast_sint_t omp_block_stride = ((fast_sint_t)n / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : (fast_sint_t)n - omp_block_start; #else UNUSED(threads); fast_sint_t omp_block_start = 0; fast_sint_t omp_block_size = (fast_sint_t)n; #endif libsais16_bwt_copy_16u(U + omp_block_start, A + omp_block_start, (sa_sint_t)omp_block_size); } } #endif void * libsais16_create_ctx(void) { return (void *)libsais16_create_ctx_main(1); } void libsais16_free_ctx(void * ctx) { libsais16_free_ctx_main((LIBSAIS_CONTEXT *)ctx); } int32_t libsais16(const uint16_t * T, int32_t * SA, int32_t n, int32_t fs, int32_t * freq) { if ((T == NULL) || (SA == NULL) || (n < 0) || (fs < 0)) { return -1; } else if (n < 2) { if (n == 1) { SA[0] = 0; } return 0; } return libsais16_main(T, SA, n, 0, 0, NULL, fs, freq, 1); } int32_t libsais16_ctx(const void * ctx, const uint16_t * T, int32_t * SA, int32_t n, int32_t fs, int32_t * freq) { if ((ctx == NULL) || (T == NULL) || (SA == NULL) || (n < 0) || (fs < 0)) { return -1; } else if (n < 2) { if (n == 1) { SA[0] = 0; } return 0; } return libsais16_main_ctx((const LIBSAIS_CONTEXT *)ctx, T, SA, n, 0, 0, NULL, fs, freq); } int32_t libsais16_bwt(const uint16_t * T, uint16_t * U, int32_t * A, int32_t n, int32_t fs, int32_t * freq) { if ((T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || (fs < 0)) { return -1; } else if (n <= 1) { if (n == 1) { U[0] = T[0]; } return n; } sa_sint_t index = libsais16_main(T, A, n, 1, 0, NULL, fs, freq, 1); if (index >= 0) { index++; U[0] = T[n - 1]; libsais16_bwt_copy_16u(U + 1, A, index - 1); libsais16_bwt_copy_16u(U + index, A + index, n - index); } return index; } int32_t libsais16_bwt_aux(const uint16_t * T, uint16_t * U, int32_t * A, int32_t n, int32_t fs, int32_t * freq, int32_t r, int32_t * I) { if ((T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || (fs < 0) || (r < 2) || ((r & (r - 1)) != 0) || (I == NULL)) { return -1; } else if (n <= 1) { if (n == 1) { U[0] = T[0]; } I[0] = n; return 0; } if (libsais16_main(T, A, n, 1, r, I, fs, freq, 1) != 0) { return -2; } U[0] = T[n - 1]; libsais16_bwt_copy_16u(U + 1, A, I[0] - 1); libsais16_bwt_copy_16u(U + I[0], A + I[0], n - I[0]); return 0; } int32_t libsais16_bwt_ctx(const void * ctx, const uint16_t * T, uint16_t * U, int32_t * A, int32_t n, int32_t fs, int32_t * freq) { if ((ctx == NULL) || (T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || (fs < 0)) { return -1; } else if (n <= 1) { if (n == 1) { U[0] = T[0]; } return n; } sa_sint_t index = libsais16_main_ctx((const LIBSAIS_CONTEXT *)ctx, T, A, n, 1, 0, NULL, fs, freq); if (index >= 0) { index++; U[0] = T[n - 1]; #if defined(_OPENMP) libsais16_bwt_copy_16u_omp(U + 1, A, index - 1, (sa_sint_t)((const LIBSAIS_CONTEXT *)ctx)->threads); libsais16_bwt_copy_16u_omp(U + index, A + index, n - index, (sa_sint_t)((const LIBSAIS_CONTEXT *)ctx)->threads); #else libsais16_bwt_copy_16u(U + 1, A, index - 1); libsais16_bwt_copy_16u(U + index, A + index, n - index); #endif } return index; } int32_t libsais16_bwt_aux_ctx(const void * ctx, const uint16_t * T, uint16_t * U, int32_t * A, int32_t n, int32_t fs, int32_t * freq, int32_t r, int32_t * I) { if ((ctx == NULL) || (T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || (fs < 0) || (r < 2) || ((r & (r - 1)) != 0) || (I == NULL)) { return -1; } else if (n <= 1) { if (n == 1) { U[0] = T[0]; } I[0] = n; return 0; } if (libsais16_main_ctx((const LIBSAIS_CONTEXT *)ctx, T, A, n, 1, r, I, fs, freq) != 0) { return -2; } U[0] = T[n - 1]; #if defined(_OPENMP) libsais16_bwt_copy_16u_omp(U + 1, A, I[0] - 1, (sa_sint_t)((const LIBSAIS_CONTEXT *)ctx)->threads); libsais16_bwt_copy_16u_omp(U + 
I[0], A + I[0], n - I[0], (sa_sint_t)((const LIBSAIS_CONTEXT *)ctx)->threads); #else libsais16_bwt_copy_16u(U + 1, A, I[0] - 1); libsais16_bwt_copy_16u(U + I[0], A + I[0], n - I[0]); #endif return 0; } #if defined(_OPENMP) void * libsais16_create_ctx_omp(int32_t threads) { if (threads < 0) { return NULL; } threads = threads > 0 ? threads : omp_get_max_threads(); return (void *)libsais16_create_ctx_main(threads); } int32_t libsais16_omp(const uint16_t * T, int32_t * SA, int32_t n, int32_t fs, int32_t * freq, int32_t threads) { if ((T == NULL) || (SA == NULL) || (n < 0) || (fs < 0) || (threads < 0)) { return -1; } else if (n < 2) { if (n == 1) { SA[0] = 0; } return 0; } threads = threads > 0 ? threads : omp_get_max_threads(); return libsais16_main(T, SA, n, 0, 0, NULL, fs, freq, threads); } int32_t libsais16_bwt_omp(const uint16_t * T, uint16_t * U, int32_t * A, int32_t n, int32_t fs, int32_t * freq, int32_t threads) { if ((T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || (fs < 0) || (threads < 0)) { return -1; } else if (n <= 1) { if (n == 1) { U[0] = T[0]; } return n; } threads = threads > 0 ? threads : omp_get_max_threads(); sa_sint_t index = libsais16_main(T, A, n, 1, 0, NULL, fs, freq, threads); if (index >= 0) { index++; U[0] = T[n - 1]; libsais16_bwt_copy_16u_omp(U + 1, A, index - 1, threads); libsais16_bwt_copy_16u_omp(U + index, A + index, n - index, threads); } return index; } int32_t libsais16_bwt_aux_omp(const uint16_t * T, uint16_t * U, int32_t * A, int32_t n, int32_t fs, int32_t * freq, int32_t r, int32_t * I, int32_t threads) { if ((T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || (fs < 0) || (r < 2) || ((r & (r - 1)) != 0) || (I == NULL) || (threads < 0)) { return -1; } else if (n <= 1) { if (n == 1) { U[0] = T[0];} I[0] = n; return 0; } threads = threads > 0 ? threads : omp_get_max_threads(); if (libsais16_main(T, A, n, 1, r, I, fs, freq, threads) != 0) { return -2; } U[0] = T[n - 1]; libsais16_bwt_copy_16u_omp(U + 1, A, I[0] - 1, threads); libsais16_bwt_copy_16u_omp(U + I[0], A + I[0], n - I[0], threads); return 0; } #endif static LIBSAIS_UNBWT_CONTEXT * libsais16_unbwt_create_ctx_main(sa_sint_t threads) { LIBSAIS_UNBWT_CONTEXT * RESTRICT ctx = (LIBSAIS_UNBWT_CONTEXT *)libsais16_alloc_aligned(sizeof(LIBSAIS_UNBWT_CONTEXT), 64); sa_uint_t * RESTRICT bucket2 = (sa_uint_t *)libsais16_alloc_aligned(ALPHABET_SIZE * sizeof(sa_uint_t), 4096); uint16_t * RESTRICT fastbits = (uint16_t *)libsais16_alloc_aligned((1 + (1 << UNBWT_FASTBITS)) * sizeof(uint16_t), 4096); sa_uint_t * RESTRICT buckets = threads > 1 ? 
(sa_uint_t *)libsais16_alloc_aligned((size_t)threads * ALPHABET_SIZE * sizeof(sa_uint_t), 4096) : NULL; if (ctx != NULL && bucket2 != NULL && fastbits != NULL && (buckets != NULL || threads == 1)) { ctx->bucket2 = bucket2; ctx->fastbits = fastbits; ctx->buckets = buckets; ctx->threads = threads; return ctx; } libsais16_free_aligned(buckets); libsais16_free_aligned(fastbits); libsais16_free_aligned(bucket2); libsais16_free_aligned(ctx); return NULL; } static void libsais16_unbwt_free_ctx_main(LIBSAIS_UNBWT_CONTEXT * ctx) { if (ctx != NULL) { libsais16_free_aligned(ctx->buckets); libsais16_free_aligned(ctx->fastbits); libsais16_free_aligned(ctx->bucket2); libsais16_free_aligned(ctx); } } static void libsais16_unbwt_compute_histogram(const uint16_t * RESTRICT T, fast_sint_t n, sa_uint_t * RESTRICT count) { fast_sint_t i; for (i = 0; i < n; i += 1) { count[T[i]]++; } } static void libsais16_unbwt_calculate_fastbits(sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift) { fast_uint_t v, w, sum; for (v = 0, sum = 1, w = 0; w < ALPHABET_SIZE; ++w) { fast_uint_t prev = sum; sum += bucket2[w]; bucket2[w] = (sa_uint_t)prev; if (prev != sum) { for (; v <= ((sum - 1) >> shift); ++v) { fastbits[v] = (uint16_t)w; } } } } static void libsais16_unbwt_calculate_P(const uint16_t * RESTRICT T, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, fast_uint_t index, fast_sint_t omp_block_start, fast_sint_t omp_block_end) { { fast_sint_t i = omp_block_start, j = (fast_sint_t)index; if (omp_block_end < j) { j = omp_block_end; } for (; i < j; ++i) { fast_uint_t c = T[i]; P[bucket2[c]++] = (sa_uint_t)i; } } { fast_sint_t i = (fast_sint_t)index, j = omp_block_end; if (omp_block_start > i) { i = omp_block_start; } for (T -= 1, i += 1; i <= j; ++i) { fast_uint_t c = T[i]; P[bucket2[c]++] = (sa_uint_t)i; } } } static void libsais16_unbwt_init_single(const uint16_t * RESTRICT T, sa_uint_t * RESTRICT P, sa_sint_t n, const sa_sint_t * freq, const sa_uint_t * RESTRICT I, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits) { fast_uint_t index = I[0]; fast_uint_t shift = 0; while ((n >> shift) > (1 << UNBWT_FASTBITS)) { shift++; } if (freq != NULL) { memcpy(bucket2, freq, ALPHABET_SIZE * sizeof(sa_uint_t)); } else { memset(bucket2, 0, ALPHABET_SIZE * sizeof(sa_uint_t)); libsais16_unbwt_compute_histogram(T, n, bucket2); } libsais16_unbwt_calculate_fastbits(bucket2, fastbits, shift); libsais16_unbwt_calculate_P(T, P, bucket2, index, 0, n); } #if defined(_OPENMP) static void libsais16_unbwt_init_parallel(const uint16_t * RESTRICT T, sa_uint_t * RESTRICT P, sa_sint_t n, const sa_sint_t * freq, const sa_uint_t * RESTRICT I, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, sa_uint_t * RESTRICT buckets, sa_sint_t threads) { fast_uint_t index = I[0]; fast_uint_t shift = 0; while ((n >> shift) > (1 << UNBWT_FASTBITS)) { shift++; } #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536) { fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); if (omp_num_threads == 1) { libsais16_unbwt_init_single(T, P, n, freq, I, bucket2, fastbits); } else { { sa_uint_t * RESTRICT bucket2_local = buckets + omp_thread_num * ALPHABET_SIZE; fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : n - omp_block_start; memset(bucket2_local, 0, ALPHABET_SIZE * sizeof(sa_uint_t)); libsais16_unbwt_compute_histogram(T + omp_block_start, omp_block_size, bucket2_local); } #pragma omp barrier { sa_uint_t * RESTRICT bucket2_temp = buckets; fast_sint_t omp_block_stride = (ALPHABET_SIZE / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : ALPHABET_SIZE - omp_block_start; memset(bucket2 + omp_block_start, 0, omp_block_size * sizeof(sa_uint_t)); fast_sint_t t; for (t = 0; t < omp_num_threads; ++t, bucket2_temp += ALPHABET_SIZE) { fast_sint_t c; for (c = omp_block_start; c < omp_block_start + omp_block_size; c += 1) { sa_uint_t A = bucket2[c], B = bucket2_temp[c]; bucket2[c] = A + B; bucket2_temp[c] = A; } } } #pragma omp barrier #pragma omp master { libsais16_unbwt_calculate_fastbits(bucket2, fastbits, shift); } #pragma omp barrier { sa_uint_t * RESTRICT bucket2_local = buckets + omp_thread_num * ALPHABET_SIZE; fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start; fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_uint_t A = bucket2[c], B = bucket2_local[c]; bucket2_local[c] = A + B; } libsais16_unbwt_calculate_P(T, P, bucket2_local, index, omp_block_start, omp_block_start + omp_block_size); } #pragma omp barrier #pragma omp master { memcpy(bucket2, buckets + (omp_num_threads - 1) * ALPHABET_SIZE, ALPHABET_SIZE * sizeof(sa_uint_t)); } } } } #endif static void libsais16_unbwt_decode_1(uint16_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t * i0, fast_uint_t k) { uint16_t * RESTRICT U0 = U; fast_uint_t i, p0 = *i0; for (i = 0; i != k; ++i) { uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = c0; } *i0 = p0; } static void libsais16_unbwt_decode_2(uint16_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t k) { uint16_t * RESTRICT U0 = U; uint16_t * RESTRICT U1 = U0 + r; fast_uint_t i, p0 = *i0, p1 = *i1; for (i = 0; i != k; ++i) { uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = c0; uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = c1; } *i0 = p0; *i1 = p1; } static void libsais16_unbwt_decode_3(uint16_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t k) { uint16_t * RESTRICT U0 = U; uint16_t * RESTRICT U1 = U0 + r; uint16_t * RESTRICT U2 = U1 + r; fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2; for (i = 0; i != k; ++i) { uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = c0; uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = c1; uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = c2; } 
*i0 = p0; *i1 = p1; *i2 = p2; } static void libsais16_unbwt_decode_4(uint16_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t * i3, fast_uint_t k) { uint16_t * RESTRICT U0 = U; uint16_t * RESTRICT U1 = U0 + r; uint16_t * RESTRICT U2 = U1 + r; uint16_t * RESTRICT U3 = U2 + r; fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2, p3 = *i3; for (i = 0; i != k; ++i) { uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = c0; uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = c1; uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = c2; uint16_t c3 = fastbits[p3 >> shift]; if (bucket2[c3] <= p3) { do { c3++; } while (bucket2[c3] <= p3); } p3 = P[p3]; U3[i] = c3; } *i0 = p0; *i1 = p1; *i2 = p2; *i3 = p3; } static void libsais16_unbwt_decode_5(uint16_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t * i3, fast_uint_t * i4, fast_uint_t k) { uint16_t * RESTRICT U0 = U; uint16_t * RESTRICT U1 = U0 + r; uint16_t * RESTRICT U2 = U1 + r; uint16_t * RESTRICT U3 = U2 + r; uint16_t * RESTRICT U4 = U3 + r; fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2, p3 = *i3, p4 = *i4; for (i = 0; i != k; ++i) { uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = c0; uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = c1; uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = c2; uint16_t c3 = fastbits[p3 >> shift]; if (bucket2[c3] <= p3) { do { c3++; } while (bucket2[c3] <= p3); } p3 = P[p3]; U3[i] = c3; uint16_t c4 = fastbits[p4 >> shift]; if (bucket2[c4] <= p4) { do { c4++; } while (bucket2[c4] <= p4); } p4 = P[p4]; U4[i] = c4; } *i0 = p0; *i1 = p1; *i2 = p2; *i3 = p3; *i4 = p4; } static void libsais16_unbwt_decode_6(uint16_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t * i3, fast_uint_t * i4, fast_uint_t * i5, fast_uint_t k) { uint16_t * RESTRICT U0 = U; uint16_t * RESTRICT U1 = U0 + r; uint16_t * RESTRICT U2 = U1 + r; uint16_t * RESTRICT U3 = U2 + r; uint16_t * RESTRICT U4 = U3 + r; uint16_t * RESTRICT U5 = U4 + r; fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2, p3 = *i3, p4 = *i4, p5 = *i5; for (i = 0; i != k; ++i) { uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = c0; uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = c1; uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = c2; uint16_t c3 = fastbits[p3 >> shift]; if (bucket2[c3] <= p3) { do { c3++; } while (bucket2[c3] <= p3); } p3 = P[p3]; U3[i] = c3; uint16_t c4 = fastbits[p4 >> shift]; if (bucket2[c4] <= p4) { do { c4++; } while (bucket2[c4] <= p4); } p4 = P[p4]; U4[i] = c4; uint16_t c5 = 
fastbits[p5 >> shift]; if (bucket2[c5] <= p5) { do { c5++; } while (bucket2[c5] <= p5); } p5 = P[p5]; U5[i] = c5; } *i0 = p0; *i1 = p1; *i2 = p2; *i3 = p3; *i4 = p4; *i5 = p5; } static void libsais16_unbwt_decode_7(uint16_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t * i3, fast_uint_t * i4, fast_uint_t * i5, fast_uint_t * i6, fast_uint_t k) { uint16_t * RESTRICT U0 = U; uint16_t * RESTRICT U1 = U0 + r; uint16_t * RESTRICT U2 = U1 + r; uint16_t * RESTRICT U3 = U2 + r; uint16_t * RESTRICT U4 = U3 + r; uint16_t * RESTRICT U5 = U4 + r; uint16_t * RESTRICT U6 = U5 + r; fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2, p3 = *i3, p4 = *i4, p5 = *i5, p6 = *i6; for (i = 0; i != k; ++i) { uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = c0; uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = c1; uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = c2; uint16_t c3 = fastbits[p3 >> shift]; if (bucket2[c3] <= p3) { do { c3++; } while (bucket2[c3] <= p3); } p3 = P[p3]; U3[i] = c3; uint16_t c4 = fastbits[p4 >> shift]; if (bucket2[c4] <= p4) { do { c4++; } while (bucket2[c4] <= p4); } p4 = P[p4]; U4[i] = c4; uint16_t c5 = fastbits[p5 >> shift]; if (bucket2[c5] <= p5) { do { c5++; } while (bucket2[c5] <= p5); } p5 = P[p5]; U5[i] = c5; uint16_t c6 = fastbits[p6 >> shift]; if (bucket2[c6] <= p6) { do { c6++; } while (bucket2[c6] <= p6); } p6 = P[p6]; U6[i] = c6; } *i0 = p0; *i1 = p1; *i2 = p2; *i3 = p3; *i4 = p4; *i5 = p5; *i6 = p6; } static void libsais16_unbwt_decode_8(uint16_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t * i3, fast_uint_t * i4, fast_uint_t * i5, fast_uint_t * i6, fast_uint_t * i7, fast_uint_t k) { uint16_t * RESTRICT U0 = U; uint16_t * RESTRICT U1 = U0 + r; uint16_t * RESTRICT U2 = U1 + r; uint16_t * RESTRICT U3 = U2 + r; uint16_t * RESTRICT U4 = U3 + r; uint16_t * RESTRICT U5 = U4 + r; uint16_t * RESTRICT U6 = U5 + r; uint16_t * RESTRICT U7 = U6 + r; fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2, p3 = *i3, p4 = *i4, p5 = *i5, p6 = *i6, p7 = *i7; for (i = 0; i != k; ++i) { uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = c0; uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = c1; uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = c2; uint16_t c3 = fastbits[p3 >> shift]; if (bucket2[c3] <= p3) { do { c3++; } while (bucket2[c3] <= p3); } p3 = P[p3]; U3[i] = c3; uint16_t c4 = fastbits[p4 >> shift]; if (bucket2[c4] <= p4) { do { c4++; } while (bucket2[c4] <= p4); } p4 = P[p4]; U4[i] = c4; uint16_t c5 = fastbits[p5 >> shift]; if (bucket2[c5] <= p5) { do { c5++; } while (bucket2[c5] <= p5); } p5 = P[p5]; U5[i] = c5; uint16_t c6 = fastbits[p6 >> shift]; if (bucket2[c6] <= p6) { do { c6++; } while (bucket2[c6] <= p6); } p6 = P[p6]; U6[i] = c6; uint16_t c7 = fastbits[p7 >> shift]; if (bucket2[c7] <= p7) { do { c7++; } while (bucket2[c7] <= p7); } p7 = P[p7]; U7[i] = c7; } 
*i0 = p0; *i1 = p1; *i2 = p2; *i3 = p3; *i4 = p4; *i5 = p5; *i6 = p6; *i7 = p7; } static void libsais16_unbwt_decode(uint16_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_sint_t n, sa_sint_t r, const sa_uint_t * RESTRICT I, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_sint_t blocks, fast_uint_t reminder) { fast_uint_t shift = 0; while ((n >> shift) > (1 << UNBWT_FASTBITS)) { shift++; } fast_uint_t offset = 0; while (blocks > 8) { fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3], i4 = I[4], i5 = I[5], i6 = I[6], i7 = I[7]; libsais16_unbwt_decode_8(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, &i6, &i7, (fast_uint_t)r); I += 8; blocks -= 8; offset += 8 * (fast_uint_t)r; } if (blocks == 1) { fast_uint_t i0 = I[0]; libsais16_unbwt_decode_1(U + offset, P, bucket2, fastbits, shift, &i0, reminder); } else if (blocks == 2) { fast_uint_t i0 = I[0], i1 = I[1]; libsais16_unbwt_decode_2(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, reminder); libsais16_unbwt_decode_1(U + offset + reminder, P, bucket2, fastbits, shift, &i0, ((fast_uint_t)r) - reminder); } else if (blocks == 3) { fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2]; libsais16_unbwt_decode_3(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, reminder); libsais16_unbwt_decode_2(U + offset + reminder, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, ((fast_uint_t)r) - reminder); } else if (blocks == 4) { fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3]; libsais16_unbwt_decode_4(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, reminder); libsais16_unbwt_decode_3(U + offset + reminder, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, ((fast_uint_t)r) - reminder); } else if (blocks == 5) { fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3], i4 = I[4]; libsais16_unbwt_decode_5(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, reminder); libsais16_unbwt_decode_4(U + offset + reminder, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, ((fast_uint_t)r) - reminder); } else if (blocks == 6) { fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3], i4 = I[4], i5 = I[5]; libsais16_unbwt_decode_6(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, reminder); libsais16_unbwt_decode_5(U + offset + reminder, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, ((fast_uint_t)r) - reminder); } else if (blocks == 7) { fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3], i4 = I[4], i5 = I[5], i6 = I[6]; libsais16_unbwt_decode_7(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, &i6, reminder); libsais16_unbwt_decode_6(U + offset + reminder, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, ((fast_uint_t)r) - reminder); } else { fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3], i4 = I[4], i5 = I[5], i6 = I[6], i7 = I[7]; libsais16_unbwt_decode_8(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, &i6, &i7, reminder); libsais16_unbwt_decode_7(U + offset + reminder, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, &i6, ((fast_uint_t)r) - reminder); } } static void libsais16_unbwt_decode_omp(uint16_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_sint_t n, sa_sint_t r, const sa_uint_t * RESTRICT I, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, sa_sint_t threads) { fast_sint_t 
blocks = 1 + (((fast_sint_t)n - 1) / (fast_sint_t)r); fast_uint_t reminder = (fast_uint_t)n - ((fast_uint_t)r * ((fast_uint_t)blocks - 1)); #if defined(_OPENMP) fast_sint_t max_threads = blocks < threads ? blocks : threads; #pragma omp parallel num_threads(max_threads) if(max_threads > 1 && n >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = blocks / omp_num_threads; fast_sint_t omp_block_reminder = blocks % omp_num_threads; fast_sint_t omp_block_size = omp_block_stride + (omp_thread_num < omp_block_reminder); fast_sint_t omp_block_start = omp_block_stride * omp_thread_num + (omp_thread_num < omp_block_reminder ? omp_thread_num : omp_block_reminder); libsais16_unbwt_decode(U + r * omp_block_start, P, n, r, I + omp_block_start, bucket2, fastbits, omp_block_size, omp_thread_num < omp_num_threads - 1 ? (fast_uint_t)r : reminder); } } static sa_sint_t libsais16_unbwt_core(const uint16_t * RESTRICT T, uint16_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_sint_t n, const sa_sint_t * freq, sa_sint_t r, const sa_uint_t * RESTRICT I, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, sa_uint_t * RESTRICT buckets, sa_sint_t threads) { #if defined(_OPENMP) if (threads > 1 && n >= 262144) { libsais16_unbwt_init_parallel(T, P, n, freq, I, bucket2, fastbits, buckets, threads); } else #else UNUSED(buckets); #endif { libsais16_unbwt_init_single(T, P, n, freq, I, bucket2, fastbits); } libsais16_unbwt_decode_omp(U, P, n, r, I, bucket2, fastbits, threads); return 0; } static sa_sint_t libsais16_unbwt_main(const uint16_t * T, uint16_t * U, sa_uint_t * P, sa_sint_t n, const sa_sint_t * freq, sa_sint_t r, const sa_uint_t * I, sa_sint_t threads) { fast_uint_t shift = 0; while ((n >> shift) > (1 << UNBWT_FASTBITS)) { shift++; } sa_uint_t * RESTRICT bucket2 = (sa_uint_t *)libsais16_alloc_aligned(ALPHABET_SIZE * sizeof(sa_uint_t), 4096); uint16_t * RESTRICT fastbits = (uint16_t *)libsais16_alloc_aligned(((size_t)1 + (size_t)(n >> shift)) * sizeof(uint16_t), 4096); sa_uint_t * RESTRICT buckets = threads > 1 && n >= 262144 ? (sa_uint_t *)libsais16_alloc_aligned((size_t)threads * ALPHABET_SIZE * sizeof(sa_uint_t), 4096) : NULL; sa_sint_t index = bucket2 != NULL && fastbits != NULL && (buckets != NULL || threads == 1 || n < 262144) ? libsais16_unbwt_core(T, U, P, n, freq, r, I, bucket2, fastbits, buckets, threads) : -2; libsais16_free_aligned(buckets); libsais16_free_aligned(fastbits); libsais16_free_aligned(bucket2); return index; } static sa_sint_t libsais16_unbwt_main_ctx(const LIBSAIS_UNBWT_CONTEXT * ctx, const uint16_t * T, uint16_t * U, sa_uint_t * P, sa_sint_t n, const sa_sint_t * freq, sa_sint_t r, const sa_uint_t * I) { return ctx != NULL && ctx->bucket2 != NULL && ctx->fastbits != NULL && (ctx->buckets != NULL || ctx->threads == 1) ? 
libsais16_unbwt_core(T, U, P, n, freq, r, I, ctx->bucket2, ctx->fastbits, ctx->buckets, (sa_sint_t)ctx->threads) : -2; } void * libsais16_unbwt_create_ctx(void) { return (void *)libsais16_unbwt_create_ctx_main(1); } void libsais16_unbwt_free_ctx(void * ctx) { libsais16_unbwt_free_ctx_main((LIBSAIS_UNBWT_CONTEXT *)ctx); } int32_t libsais16_unbwt(const uint16_t * T, uint16_t * U, int32_t * A, int32_t n, const int32_t * freq, int32_t i) { return libsais16_unbwt_aux(T, U, A, n, freq, n, &i); } int32_t libsais16_unbwt_ctx(const void * ctx, const uint16_t * T, uint16_t * U, int32_t * A, int32_t n, const int32_t * freq, int32_t i) { return libsais16_unbwt_aux_ctx(ctx, T, U, A, n, freq, n, &i); } int32_t libsais16_unbwt_aux(const uint16_t * T, uint16_t * U, int32_t * A, int32_t n, const int32_t * freq, int32_t r, const int32_t * I) { if ((T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || ((r != n) && ((r < 2) || ((r & (r - 1)) != 0))) || (I == NULL)) { return -1; } else if (n <= 1) { if (I[0] != n) { return -1; } if (n == 1) { U[0] = T[0]; } return 0; } fast_sint_t t; for (t = 0; t <= (n - 1) / r; ++t) { if (I[t] <= 0 || I[t] > n) { return -1; } } return libsais16_unbwt_main(T, U, (sa_uint_t *)A, n, freq, r, (const sa_uint_t *)I, 1); } int32_t libsais16_unbwt_aux_ctx(const void * ctx, const uint16_t * T, uint16_t * U, int32_t * A, int32_t n, const int32_t * freq, int32_t r, const int32_t * I) { if ((T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || ((r != n) && ((r < 2) || ((r & (r - 1)) != 0))) || (I == NULL)) { return -1; } else if (n <= 1) { if (I[0] != n) { return -1; } if (n == 1) { U[0] = T[0]; } return 0; } fast_sint_t t; for (t = 0; t <= (n - 1) / r; ++t) { if (I[t] <= 0 || I[t] > n) { return -1; } } return libsais16_unbwt_main_ctx((const LIBSAIS_UNBWT_CONTEXT *)ctx, T, U, (sa_uint_t *)A, n, freq, r, (const sa_uint_t *)I); } #if defined(_OPENMP) void * libsais16_unbwt_create_ctx_omp(int32_t threads) { if (threads < 0) { return NULL; } threads = threads > 0 ? threads : omp_get_max_threads(); return (void *)libsais16_unbwt_create_ctx_main(threads); } int32_t libsais16_unbwt_omp(const uint16_t * T, uint16_t * U, int32_t * A, int32_t n, const int32_t * freq, int32_t i, int32_t threads) { return libsais16_unbwt_aux_omp(T, U, A, n, freq, n, &i, threads); } int32_t libsais16_unbwt_aux_omp(const uint16_t * T, uint16_t * U, int32_t * A, int32_t n, const int32_t * freq, int32_t r, const int32_t * I, int32_t threads) { if ((T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || ((r != n) && ((r < 2) || ((r & (r - 1)) != 0))) || (I == NULL) || (threads < 0)) { return -1; } else if (n <= 1) { if (I[0] != n) { return -1; } if (n == 1) { U[0] = T[0]; } return 0; } fast_sint_t t; for (t = 0; t <= (n - 1) / r; ++t) { if (I[t] <= 0 || I[t] > n) { return -1; } } threads = threads > 0 ? threads : omp_get_max_threads(); return libsais16_unbwt_main(T, U, (sa_uint_t *)A, n, freq, r, (const sa_uint_t *)I, threads); } #endif
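/*
 * Illustrative usage sketch (not part of the original source): round-trip a
 * short 16-bit string through the Burrows-Wheeler transform and the inverse
 * transform entry point defined above.  Only libsais16_unbwt() appears in the
 * code above; the forward transform libsais16_bwt() and its "returns the
 * primary index" contract are assumed from the public libsais16 API, and the
 * (n+1)-element temporary array is assumed sufficient for both directions.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

extern int32_t libsais16_bwt(const uint16_t * T, uint16_t * U, int32_t * A,
                             int32_t n, int32_t fs, int32_t * freq); /* assumed prototype */
extern int32_t libsais16_unbwt(const uint16_t * T, uint16_t * U, int32_t * A,
                               int32_t n, const int32_t * freq, int32_t i);

int main(void)
{
    const uint16_t text[] = { 'b', 'a', 'n', 'a', 'n', 'a' };
    const int32_t n = (int32_t)(sizeof(text) / sizeof(text[0]));

    uint16_t *bwt      = (uint16_t *)malloc((size_t)n * sizeof(uint16_t));
    uint16_t *restored = (uint16_t *)malloc((size_t)n * sizeof(uint16_t));
    int32_t  *tmp      = (int32_t *)malloc(((size_t)n + 1) * sizeof(int32_t));
    if (bwt == NULL || restored == NULL || tmp == NULL) { return 1; }

    int32_t i = libsais16_bwt(text, bwt, tmp, n, 0, NULL);   /* primary index */
    if (i < 0) { return 1; }

    /* r == n (a single block); the optional frequency table is omitted (NULL). */
    int32_t rc = libsais16_unbwt(bwt, restored, tmp, n, NULL, i);
    printf("unbwt rc=%d, round-trip ok=%d\n", rc,
           rc == 0 && memcmp(text, restored, (size_t)n * sizeof(uint16_t)) == 0);

    free(bwt); free(restored); free(tmp);
    return 0;
}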
3d25pt_var.c
/*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#ifdef _OPENMP
#include <omp.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 8;
  tile_size[3] = 64;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;

  // initialize variables
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] =
                coef[0][i][j][k] * A[(t)%2][i  ][j  ][k  ] +
                coef[1][i][j][k] * (A[(t)%2][i-1][j  ][k  ] + A[(t)%2][i+1][j  ][k  ]) +
                coef[2][i][j][k] * (A[(t)%2][i  ][j-1][k  ] + A[(t)%2][i  ][j+1][k  ]) +
                coef[3][i][j][k] * (A[(t)%2][i  ][j  ][k-1] + A[(t)%2][i  ][j  ][k+1]) +
                coef[4][i][j][k] * (A[(t)%2][i-2][j  ][k  ] + A[(t)%2][i+2][j  ][k  ]) +
                coef[5][i][j][k] * (A[(t)%2][i  ][j-2][k  ] + A[(t)%2][i  ][j+2][k  ]) +
                coef[6][i][j][k] * (A[(t)%2][i  ][j  ][k-2] + A[(t)%2][i  ][j  ][k+2]) +
                coef[7][i][j][k] * (A[(t)%2][i-3][j  ][k  ] + A[(t)%2][i+3][j  ][k  ]) +
                coef[8][i][j][k] * (A[(t)%2][i  ][j-3][k  ] + A[(t)%2][i  ][j+3][k  ]) +
                coef[9][i][j][k] * (A[(t)%2][i  ][j  ][k-3] + A[(t)%2][i  ][j  ][k+3]) +
                coef[10][i][j][k]* (A[(t)%2][i-4][j  ][k  ] + A[(t)%2][i+4][j  ][k  ]) +
                coef[11][i][j][k]* (A[(t)%2][i  ][j-4][k  ] + A[(t)%2][i  ][j+4][k  ]) +
                coef[12][i][j][k]* (A[(t)%2][i  ][j  ][k-4] + A[(t)%2][i  ][j  ][k+4]);
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
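/*
 * Illustrative sketch (not part of the original benchmark): the timed loop
 * above is the serial reference.  Within one time step every (i,j,k) update
 * writes only the A[(t+1)%2] buffer and reads only A[t%2], so the three
 * spatial loops are independent and can be threaded directly.  A minimal
 * OpenMP variant of a single time step, written as a helper that could
 * replace the body of the t-loop, looks like this.
 */
static void step_omp(double ****A, double ****coef, int t, int Nz, int Ny, int Nx)
{
  int i, j, k;
#pragma omp parallel for private(j, k)
  for (i = 4; i < Nz-4; i++) {
    for (j = 4; j < Ny-4; j++) {
      for (k = 4; k < Nx-4; k++) {
        A[(t+1)%2][i][j][k] =
            coef[0][i][j][k] * A[(t)%2][i  ][j  ][k  ] +
            coef[1][i][j][k] * (A[(t)%2][i-1][j  ][k  ] + A[(t)%2][i+1][j  ][k  ]) +
            coef[2][i][j][k] * (A[(t)%2][i  ][j-1][k  ] + A[(t)%2][i  ][j+1][k  ]) +
            coef[3][i][j][k] * (A[(t)%2][i  ][j  ][k-1] + A[(t)%2][i  ][j  ][k+1]) +
            coef[4][i][j][k] * (A[(t)%2][i-2][j  ][k  ] + A[(t)%2][i+2][j  ][k  ]) +
            coef[5][i][j][k] * (A[(t)%2][i  ][j-2][k  ] + A[(t)%2][i  ][j+2][k  ]) +
            coef[6][i][j][k] * (A[(t)%2][i  ][j  ][k-2] + A[(t)%2][i  ][j  ][k+2]) +
            coef[7][i][j][k] * (A[(t)%2][i-3][j  ][k  ] + A[(t)%2][i+3][j  ][k  ]) +
            coef[8][i][j][k] * (A[(t)%2][i  ][j-3][k  ] + A[(t)%2][i  ][j+3][k  ]) +
            coef[9][i][j][k] * (A[(t)%2][i  ][j  ][k-3] + A[(t)%2][i  ][j  ][k+3]) +
            coef[10][i][j][k]* (A[(t)%2][i-4][j  ][k  ] + A[(t)%2][i+4][j  ][k  ]) +
            coef[11][i][j][k]* (A[(t)%2][i  ][j-4][k  ] + A[(t)%2][i  ][j+4][k  ]) +
            coef[12][i][j][k]* (A[(t)%2][i  ][j  ][k-4] + A[(t)%2][i  ][j  ][k+4]);
      }
    }
  }
}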
channel.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC H H AAA N N N N EEEEE L % % C H H A A NN N NN N E L % % C HHHHH AAAAA N N N N N N RRR L % % C H H A A N NN N NN E L % % CCCC H H A A N N N N EEEEE LLLLL % % % % % % MagickCore Image Channel Methods % % % % Software Design % % John Cristy % % December 2003 % % % % % % Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/cache-private.h" #include "magick/channel.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite-private.h" #include "magick/exception-private.h" #include "magick/enhance.h" #include "magick/image.h" #include "magick/list.h" #include "magick/log.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/resource_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m b i n e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CombineImages() combines one or more images into a single image. The % grayscale value of the pixels of each image in the sequence is assigned in % order to the specified channels of the combined image. The typical % ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc. % % The format of the CombineImages method is: % % Image *CombineImages(const Image *image,const ChannelType channel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CombineImages(const Image *image,const ChannelType channel, ExceptionInfo *exception) { #define CombineImageTag "Combine/Image" CacheView *combine_view; const Image *next; Image *combine_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Ensure the image are the same size. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); for (next=image; next != (Image *) NULL; next=GetNextImageInList(next)) { if ((next->columns != image->columns) || (next->rows != image->rows)) ThrowImageException(OptionError,"ImagesAreNotTheSameSize"); } combine_image=CloneImage(image,0,0,MagickTrue,exception); if (combine_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(combine_image,DirectClass) == MagickFalse) { InheritException(exception,&combine_image->exception); combine_image=DestroyImage(combine_image); return((Image *) NULL); } if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(combine_image,RGBColorspace); if ((channel & OpacityChannel) != 0) combine_image->matte=MagickTrue; (void) SetImageBackgroundColor(combine_image); /* Combine images. */ status=MagickTrue; progress=0; combine_view=AcquireAuthenticCacheView(combine_image,exception); for (y=0; y < (ssize_t) combine_image->rows; y++) { CacheView *image_view; const Image *next; PixelPacket *pixels; register const PixelPacket *restrict p; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns, 1,exception); if (pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } next=image; if (((channel & RedChannel) != 0) && (next != (Image *) NULL)) { image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelRed(q,PixelIntensityToQuantum(image,p)); p++; q++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (((channel & GreenChannel) != 0) && (next != (Image *) NULL)) { image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelGreen(q,PixelIntensityToQuantum(image,p)); p++; q++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (((channel & BlueChannel) != 0) && (next != (Image *) NULL)) { image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelBlue(q,PixelIntensityToQuantum(image,p)); p++; q++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (((channel & OpacityChannel) != 0) && (next != (Image *) NULL)) { image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelAlpha(q,PixelIntensityToQuantum(image,p)); p++; q++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && (next != (Image *) NULL)) { IndexPacket *indexes; image_view=AcquireVirtualCacheView(next,exception); 
p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; indexes=GetCacheViewAuthenticIndexQueue(combine_view); for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelIndex(indexes+x,PixelIntensityToQuantum(image,p)); p++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,CombineImageTag,progress++, combine_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } combine_view=DestroyCacheView(combine_view); if (IsGrayColorspace(combine_image->colorspace) != MagickFalse) (void) TransformImageColorspace(combine_image,RGBColorspace); if (status == MagickFalse) combine_image=DestroyImage(combine_image); return(combine_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e A l p h a C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageAlphaChannel() returns MagickFalse if the image alpha channel is % not activated. That is, the image is RGB rather than RGBA or CMYK rather % than CMYKA. % % The format of the GetImageAlphaChannel method is: % % MagickBooleanType GetImageAlphaChannel(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image) { assert(image != (const Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); return(image->matte); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e p a r a t e I m a g e C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SeparateImageChannel() separates a channel from the image and returns it as % a grayscale image. A channel is a particular color component of each pixel % in the image. % % The format of the SeparateImageChannel method is: % % MagickBooleanType SeparateImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: the image. % % o channel: Identify which channel to extract: RedChannel, GreenChannel, % BlueChannel, OpacityChannel, CyanChannel, MagentaChannel, % YellowChannel, or BlackChannel. % */ MagickExport MagickBooleanType SeparateImageChannel(Image *image, const ChannelType channel) { #define SeparateImageTag "Separate/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (channel == GrayChannels) image->matte=MagickTrue; /* Separate image channels. 
*/ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); switch (channel) { case RedChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelGreen(q,GetPixelRed(q)); SetPixelBlue(q,GetPixelRed(q)); q++; } break; } case GreenChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelGreen(q)); SetPixelBlue(q,GetPixelGreen(q)); q++; } break; } case BlueChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelBlue(q)); SetPixelGreen(q,GetPixelBlue(q)); q++; } break; } case OpacityChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelOpacity(q)); SetPixelGreen(q,GetPixelOpacity(q)); SetPixelBlue(q,GetPixelOpacity(q)); q++; } break; } case BlackChannel: { if ((image->storage_class != PseudoClass) && (image->colorspace != CMYKColorspace)) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelIndex(indexes+x)); SetPixelGreen(q,GetPixelIndex(indexes+x)); SetPixelBlue(q,GetPixelIndex(indexes+x)); q++; } break; } case TrueAlphaChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelAlpha(q)); SetPixelGreen(q,GetPixelAlpha(q)); SetPixelBlue(q,GetPixelAlpha(q)); q++; } break; } case GrayChannels: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelAlpha(q,PixelIntensityToQuantum(image,q)); q++; } break; } default: break; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SeparateImageChannel) #endif proceed=SetImageProgress(image,SeparateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); if (channel != GrayChannels) image->matte=MagickFalse; if (IssRGBColorspace(image->colorspace) == MagickFalse) (void) SetImageColorspace(image,GRAYColorspace); else (void) TransformImageColorspace(image,GRAYColorspace); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e p a r a t e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SeparateImages() returns a separate grayscale image for each channel % specified. % % The format of the SeparateImages method is: % % MagickBooleanType SeparateImages(const Image *image, % const ChannelType channel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: Identify which channels to extract: RedChannel, GreenChannel, % BlueChannel, OpacityChannel, CyanChannel, MagentaChannel, % YellowChannel, or BlackChannel. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *SeparateImages(const Image *image,const ChannelType channel, ExceptionInfo *exception) { Image *images, *separate_image; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); images=NewImageList(); if ((channel & RedChannel) != 0) { separate_image=CloneImage(image,0,0,MagickTrue,exception); (void) SeparateImageChannel(separate_image,RedChannel); AppendImageToList(&images,separate_image); } if ((channel & GreenChannel) != 0) { separate_image=CloneImage(image,0,0,MagickTrue,exception); (void) SeparateImageChannel(separate_image,GreenChannel); AppendImageToList(&images,separate_image); } if ((channel & BlueChannel) != 0) { separate_image=CloneImage(image,0,0,MagickTrue,exception); (void) SeparateImageChannel(separate_image,BlueChannel); AppendImageToList(&images,separate_image); } if (((channel & BlackChannel) != 0) && (image->colorspace == CMYKColorspace)) { separate_image=CloneImage(image,0,0,MagickTrue,exception); (void) SeparateImageChannel(separate_image,BlackChannel); AppendImageToList(&images,separate_image); } if ((channel & AlphaChannel) != 0) { separate_image=CloneImage(image,0,0,MagickTrue,exception); (void) SeparateImageChannel(separate_image,TrueAlphaChannel); AppendImageToList(&images,separate_image); } return(images); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e A l p h a C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha % channel. % % The format of the SetImageAlphaChannel method is: % % MagickBooleanType SetImageAlphaChannel(Image *image, % const AlphaChannelType alpha_type) % % A description of each parameter follows: % % o image: the image. % % o alpha_type: The alpha channel type: ActivateAlphaChannel, % CopyAlphaChannel, DeactivateAlphaChannel, ExtractAlphaChannel, % OpaqueAlphaChannel, ResetAlphaChannel, SetAlphaChannel, % ShapeAlphaChannel, and TransparentAlphaChannel. % */ MagickExport MagickBooleanType SetImageAlphaChannel(Image *image, const AlphaChannelType alpha_type) { MagickBooleanType status; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); status=MagickTrue; switch (alpha_type) { case ActivateAlphaChannel: { image->matte=MagickTrue; break; } case BackgroundAlphaChannel: { CacheView *image_view; ExceptionInfo *exception; IndexPacket index; MagickBooleanType status; MagickPixelPacket background; PixelPacket pixel; ssize_t y; /* Set transparent pixels to background color. 
*/ if (image->matte == MagickFalse) break; if (SetImageStorageClass(image,DirectClass) == MagickFalse) break; GetMagickPixelPacket(image,&background); SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *) NULL,&background); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); index=0; SetPixelPacket(image,&background,&pixel,&index); status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (q->opacity == TransparentOpacity) { SetPixelRed(q,pixel.red); SetPixelGreen(q,pixel.green); SetPixelBlue(q,pixel.blue); } q++; } if (image->colorspace == CMYKColorspace) { indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,index); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } case CopyAlphaChannel: case ShapeAlphaChannel: { /* Special usage case for SeparateImageChannel(): copy grayscale color to the alpha channel. */ status=SeparateImageChannel(image,GrayChannels); image->matte=MagickTrue; /* make sure transparency is now on! */ if (alpha_type == ShapeAlphaChannel) { MagickPixelPacket background; /* Reset all color channels to background color. */ GetMagickPixelPacket(image,&background); SetMagickPixelPacket(image,&(image->background_color),(IndexPacket *) NULL,&background); (void) LevelColorsImage(image,&background,&background,MagickTrue); } break; } case DeactivateAlphaChannel: { image->matte=MagickFalse; break; } case ExtractAlphaChannel: { status=SeparateImageChannel(image,TrueAlphaChannel); image->matte=MagickFalse; break; } case RemoveAlphaChannel: case FlattenAlphaChannel: { CacheView *image_view; ExceptionInfo *exception; IndexPacket index; MagickBooleanType status; MagickPixelPacket background; PixelPacket pixel; ssize_t y; /* Flatten image pixels over the background pixels. 
*/ if (image->matte == MagickFalse) break; if (SetImageStorageClass(image,DirectClass) == MagickFalse) break; GetMagickPixelPacket(image,&background); SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *) NULL,&background); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); index=0; SetPixelPacket(image,&background,&pixel,&index); status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma, opacity; gamma=1.0-QuantumScale*QuantumScale*q->opacity*pixel.opacity; opacity=(double) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); q->red=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->red, (MagickRealType) q->opacity,(MagickRealType) pixel.red, (MagickRealType) pixel.opacity)); q->green=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->green, (MagickRealType) q->opacity,(MagickRealType) pixel.green, (MagickRealType) pixel.opacity)); q->blue=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->blue, (MagickRealType) q->opacity,(MagickRealType) pixel.blue, (MagickRealType) pixel.opacity)); q->opacity=ClampToQuantum(opacity); q++; } if (image->colorspace == CMYKColorspace) { indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,index); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } case ResetAlphaChannel: /* deprecated */ case OpaqueAlphaChannel: { status=SetImageOpacity(image,OpaqueOpacity); break; } case SetAlphaChannel: { if (image->matte == MagickFalse) status=SetImageOpacity(image,OpaqueOpacity); break; } case TransparentAlphaChannel: { status=SetImageOpacity(image,TransparentOpacity); break; } case UndefinedAlphaChannel: break; } if (status == MagickFalse) return(status); return(SyncImagePixelCache(image,&image->exception)); }
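/*
 * Illustrative usage sketch (not part of channel.c): given an Image that the
 * caller has already read through the MagickCore API (image acquisition,
 * error handling and cleanup are elided and assumed), extract the colour
 * channels as a list of grayscale images and force the alpha channel opaque
 * using the entry points defined above.  The sketch reuses the MagickCore
 * types and channel constants already included at the top of this file.
 */
static Image *ExampleExtractRGBChannels(const Image *image,
  ExceptionInfo *exception)
{
  /*
    One grayscale image per requested channel, appended in R, G, B order.
    The caller owns the returned list and should destroy it when done.
  */
  return(SeparateImages(image,(ChannelType) (RedChannel | GreenChannel |
    BlueChannel),exception));
}

static MagickBooleanType ExampleForceOpaqueAlpha(Image *image)
{
  /*
    Activate the matte channel if it is not active, then set every pixel
    fully opaque.
  */
  if (GetImageAlphaChannel(image) == MagickFalse)
    (void) SetImageAlphaChannel(image,ActivateAlphaChannel);
  return(SetImageAlphaChannel(image,OpaqueAlphaChannel));
}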
axpy_int_simdlen8.c
//axpy.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>

#define N_RUNS 1000
#define N 1200

// read timer in second
double read_timer() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time + (double) tm.millitm / 1000.0;
}

//Create two vectors and fill them with random numbers
void init(int *X, int *Y) {
    for (int i = 0; i<N; i++) {
        X[i] = (int)rand()/(int)(RAND_MAX/10.0);
        Y[i] = (int)rand()/(int)(RAND_MAX/10.0);
    }
}

//Our sum function- what it does is pretty straight-forward.
void axpy(int *X, int *Y, int a) {
#pragma omp simd simdlen(8)
    for (int i = 0; i<N; i++) {
        Y[i] += a * X[i];
    }
}

// Debug functions
void axpy_serial(int *X, int *Y, int a) {
    for (int i = 0; i<N; i++) {
        Y[i] += a * X[i];
    }
}

void print_vector(int *vector) {
    printf("[");
    for (int i = 0; i<8; i++) {
        printf("%d ", vector[i]);
    }
    puts("]");
}

int check(int *A, int *B){
    int difference = 0;
    for(int i = 0;i<N; i++){
        difference += A[i]- B[i];
    }
    return difference;
}

int main(int argc, char **argv) {
    //Set everything up
    int *X = malloc(sizeof(int)*N);
    int *Y = malloc(sizeof(int)*N);
    int *Y_serial = malloc(sizeof(int)*N);
    int a = 3;
    srand(time(NULL));
    init(X, Y);
    for (int i = 0; i<N; i++) Y_serial[i] = Y[i];
    print_vector(Y);
    print_vector(X);
    printf("%d\n", a);
    puts("=\n");

    double start = read_timer();
    for (int i = 0; i<N_RUNS; i++) axpy(X, Y, a);
    double t = (read_timer() - start);

    double start_serial = read_timer();
    for (int i = 0; i<N_RUNS; i++) axpy_serial(X, Y_serial, a);
    double t_serial = (read_timer() - start_serial);

    print_vector(Y);
    puts("---------------------------------");
    print_vector(Y_serial);

    // AXPY performs 2*N operations (one multiply and one add per element) per call
    double gflops = (2.0 * N * N_RUNS) / (1.0e9 * t);
    double gflops_serial = (2.0 * N * N_RUNS) / (1.0e9 * t_serial);

    printf("==================================================================\n");
    printf("Performance:\t\t\tRuntime (s)\t GFLOPS\n");
    printf("------------------------------------------------------------------\n");
    printf("AXPY (SIMD):\t\t%4f\t%4f\n", t, gflops);
    printf("AXPY (Serial):\t\t%4f\t%4f\n", t_serial, gflops_serial);
    printf("Correctness check: %d\n", check(Y,Y_serial));

    free(X);
    free(Y);
    free(Y_serial);
    return 0;
}
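/*
 * Illustrative variant (not part of the original file): the axpy() kernel
 * above vectorizes the loop within a single thread via "omp simd simdlen(8)".
 * The combined construct below additionally spreads the iterations across
 * threads; it is a sketch of the same kernel, reusing the N defined above.
 */
void axpy_parallel_simd(int *X, int *Y, int a) {
#pragma omp parallel for simd simdlen(8)
    for (int i = 0; i < N; i++) {
        Y[i] += a * X[i];
    }
}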
lapack_wrapper.c
/* Copyright (C) 2015 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. */ #include <lapack_wrapper.h> #define min(a,b) ((a)>(b)?(b):(a)) #ifdef MKL_LAPACKE MKL_Complex16 lapack_make_complex_double( double re, double im ) { MKL_Complex16 z; z.real = re; z.imag = im; return z; } #endif int phonopy_zheev(double *w, lapack_complex_double *a, const int n, const char uplo) { lapack_int info; info = LAPACKE_zheev(LAPACK_ROW_MAJOR,'V', uplo, (lapack_int)n, a, (lapack_int)n, w); return (int)info; } int phonopy_pinv(double *data_out, const double *data_in, const int m, const int n, const double cutoff) { int i, j, k; lapack_int info; double *s, *a, *u, *vt, *superb; a = (double*)malloc(sizeof(double) * m * n); s = (double*)malloc(sizeof(double) * min(m,n)); u = (double*)malloc(sizeof(double) * m * m); vt = (double*)malloc(sizeof(double) * n * n); superb = (double*)malloc(sizeof(double) * (min(m,n) - 1)); for (i = 0; i < m * n; i++) { a[i] = data_in[i]; } info = LAPACKE_dgesvd(LAPACK_ROW_MAJOR, 'A', 'A', (lapack_int)m, (lapack_int)n, a, (lapack_int)n, s, u, (lapack_int)m, vt, (lapack_int)n, superb); for (i = 0; i < n * m; i++) { data_out[i] = 0; } for (i = 0; i < m; i++) { for (j = 0; j < n; j++) { for (k = 0; k < min(m,n); k++) { if (s[k] > cutoff) { data_out[j * m + i] += u[i * m + k] / s[k] * vt[k * n + j]; } } } } free(a); free(s); free(u); free(vt); free(superb); return (int)info; } void phonopy_pinv_mt(double *data_out, int *info_out, const double *data_in, const int num_thread, const int *row_nums, const int max_row_num, const int column_num, const double cutoff) { int i; #pragma omp parallel for for (i = 0; i < num_thread; i++) { info_out[i] = phonopy_pinv(data_out + i * max_row_num * column_num, data_in + i * max_row_num * column_num, row_nums[i], column_num, cutoff); } } int phonopy_dsyev(double *data, double *eigvals, const int size, const int algorithm) { lapack_int info; info = 0; switch (algorithm) { case 0: /* dsyev */ info = 
LAPACKE_dsyev(LAPACK_COL_MAJOR, 'V', 'U', (lapack_int)size, data, (lapack_int)size, eigvals); break; case 1: /* dsyevd */ info = LAPACKE_dsyevd(LAPACK_COL_MAJOR, 'V', 'U', (lapack_int)size, data, (lapack_int)size, eigvals); break; } return (int)info; }
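/*
 * Illustrative usage sketch (not part of the original wrapper): diagonalize a
 * small real symmetric matrix with phonopy_dsyev() as defined above.
 * Algorithm 0 selects LAPACKE_dsyev; eigenvalues come back in ascending order
 * and the input matrix is overwritten with the eigenvectors (jobz='V').  For
 * [[2, 1], [1, 2]] the eigenvalues are 1 and 3; row- vs column-major layout
 * is irrelevant here because the matrix is symmetric.
 */
#include <stdio.h>

int main(void)
{
  double a[4] = { 2.0, 1.0,
                  1.0, 2.0 };   /* symmetric 2x2 matrix, dense storage */
  double w[2] = { 0.0, 0.0 };

  int info = phonopy_dsyev(a, w, 2, 0);
  printf("info = %d, eigenvalues = %f %f\n", info, w[0], w[1]); /* expect 1.0 and 3.0 */
  return 0;
}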
ComputeMaxNeighboursWorklet.h
//============================================================================ // Copyright (c) Kitware, Inc. // All rights reserved. // See LICENSE.txt for details. // This software is distributed WITHOUT ANY WARRANTY; without even // the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR // PURPOSE. See the above copyright notice for more information. // // Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS). // Copyright 2014 UT-Battelle, LLC. // Copyright 2014 Los Alamos National Security. // // Under the terms of Contract DE-NA0003525 with NTESS, // the U.S. Government retains certain rights in this software. // // Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National // Laboratory (LANL), the U.S. Government retains certain rights in // this software. //============================================================================ // Copyright (c) 2018, The Regents of the University of California, through // Lawrence Berkeley National Laboratory (subject to receipt of any required approvals // from the U.S. Dept. of Energy). All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // (1) Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // (2) Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // (3) Neither the name of the University of California, Lawrence Berkeley National // Laboratory, U.S. Dept. of Energy nor the names of its contributors may be // used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. // IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, // INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE // OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED // OF THE POSSIBILITY OF SUCH DAMAGE. // //============================================================================= // // This code is an extension of the algorithm presented in the paper: // Parallel Peak Pruning for Scalable SMP Contour Tree Computation. // Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens. // Proceedings of the IEEE Symposium on Large Data Analysis and Visualization // (LDAV), October 2016, Baltimore, Maryland. // // The PPP2 algorithm and software were jointly developed by // Hamish Carr (University of Leeds), Gunther H. 
Weber (LBNL), and // Oliver Ruebel (LBNL) //============================================================================== #ifndef vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_compute_max_neighbour_worklet_h #define vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_compute_max_neighbour_worklet_h #include <vtkm/worklet/WorkletMapField.h> namespace vtkm { namespace worklet { namespace contourtree_augmented { namespace mesh_dem_contourtree_mesh_inc { // Worklet to update all of the edges so that the far end resets to the result of the ascent in the previous step class ComputeMaxNeighboursWorklet : public vtkm::worklet::WorkletMapField { public: typedef void ControlSignature(WholeArrayIn firstNeighbour, // (input) firstNeighbour WholeArrayOut nNeighbours); // (output) typedef void ExecutionSignature(_1, InputIndex, _2); typedef _1 InputDomain; // Default Constructor VTKM_EXEC_CONT ComputeMaxNeighboursWorklet(const vtkm::Id neighboursSize) : NeighboursSize(neighboursSize) { } template <typename OutFieldPortalType, typename InFieldPortalType> VTKM_EXEC void operator()(const InFieldPortalType& firstNeighbourPortal, vtkm::Id startVtxNo, const OutFieldPortalType& nNeighboursPortal) const { if (startVtxNo < firstNeighbourPortal.GetNumberOfValues() - 1) { nNeighboursPortal.Set(startVtxNo, firstNeighbourPortal.Get(startVtxNo + 1) - firstNeighbourPortal.Get(startVtxNo)); } else { nNeighboursPortal.Set(startVtxNo, NeighboursSize - firstNeighbourPortal.Get(nNeighboursPortal.GetNumberOfValues() - 1)); } // In serial this worklet implements the following operation // #pragma omp parallel for // for (indexVector::size_type startVtxNo = 0; startVtxNo < firstNeighbour.size()-1; ++startVtxNo) // { // nNeighbours[startVtxNo] = firstNeighbour[startVtxNo+1] - firstNeighbour[startVtxNo]; // } // nNeighbours[nNeighbours.size() - 1] = neighbours.size() - firstNeighbour[nNeighbours.size() - 1]; // // // NOTE: In the above we change the loop to run for the full length of the array and instead // // then do a conditional assign for the last element directly within the loop, rather // // than shortcutting the loop and doing a special assigne after the loop. This allows // // us to process all elements on the device in parallel rather than having to pull // // data back into the control area to do the last assignement } private: vtkm::Id NeighboursSize; }; // ComputeMaxNeighboursWorklet } // namespace mesh_dem_contourtree_mesh_inc } // namespace contourtree_augmented } // namespace worklet } // namespace vtkm #endif
evaluation.c
#include "common.h" static void clear_buffers(uint64_t* restrict A, uint64_t* restrict B, const int s) { #ifdef _OPENMP #pragma omp parallel for #endif for(int i=0;i<s;i++) A[i] = B[i] = 0; } #ifdef _OPENMP static int top_down_step(const int level, const int nodes, const int num_frontier, const int degree, const int* restrict adjacency, int* restrict frontier, int* restrict next, int* restrict distance, char* restrict bitmap) { int count = 0; int local_frontier[nodes]; #pragma omp parallel private(local_frontier) { int local_count = 0; #pragma omp for nowait for(int i=0;i<num_frontier;i++){ int v = frontier[i]; for(int j=0;j<degree;j++){ int n = *(adjacency + v * degree + j); // adjacency[v][j]; if(bitmap[n] == NOT_VISITED){ bitmap[n] = VISITED; distance[n] = level; local_frontier[local_count++] = n; } } } // end for i #pragma omp critical { memcpy(&next[count], local_frontier, local_count*sizeof(int)); count += local_count; } } return count; } #else static int top_down_step(const int level, const int nodes, const int num_frontier, const int degree, const int* restrict adjacency, int* restrict frontier, int* restrict next, int* restrict distance, char* restrict bitmap) { int count = 0; for(int i=0;i<num_frontier;i++){ int v = frontier[i]; for(int j=0;j<degree;j++){ int n = *(adjacency + v * degree + j); // int n = adjacency[v][j]; if(bitmap[n] == NOT_VISITED){ bitmap[n] = VISITED; distance[n] = level; next[count++] = n; } } } return count; } #endif static bool bfs(const int nodes, const int degree, const int adjacency[nodes][degree], const int based_nodes, const int height, const int based_height, const int groups, int *diameter, double *ASPL) { char *bitmap = malloc(sizeof(char) * nodes); int *frontier = malloc(sizeof(int) * nodes); int *distance = malloc(sizeof(int) * nodes); int *next = malloc(sizeof(int) * nodes); bool reached = true; double sum = 0.0; *diameter = 0; for(int s=rank;s<based_nodes;s+=procs){ int num_frontier = 1, level = 0; for(int i=0;i<nodes;i++) bitmap[i] = NOT_VISITED; frontier[0] = s; distance[s] = level; bitmap[s] = VISITED; while(1){ num_frontier = top_down_step(level++, nodes, num_frontier, degree, (int *)adjacency, frontier, next, distance, bitmap); if(num_frontier == 0) break; int *tmp = frontier; frontier = next; next = tmp; } *diameter = MAX(*diameter, level-1); for(int i=0;i<nodes;i++){ if(i == s) continue; if(bitmap[i] == NOT_VISITED) reached = false; sum += (distance[i] + 1) * groups; } } free(bitmap); free(frontier); free(distance); free(next); MPI_Allreduce(MPI_IN_PLACE, &reached, 1, MPI_C_BOOL, MPI_LAND, MPI_COMM_WORLD); if(!reached) return false; MPI_Allreduce(MPI_IN_PLACE, diameter, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); MPI_Allreduce(MPI_IN_PLACE, &sum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); *ASPL = sum / ((((double)nodes-1)*nodes)); return true; } static bool matrix_op(const int nodes, const int degree, const int* restrict adjacency, const int based_nodes, const int height, const int based_height, const int groups, int *diameter, double *ASPL) { unsigned int elements = (based_nodes+(UINT64_BITS-1))/UINT64_BITS; unsigned int chunk = (elements+(procs-1))/procs; size_t s = nodes*chunk*sizeof(uint64_t); uint64_t* A = malloc(s); // uint64_t A[nodes][chunk]; uint64_t* B = malloc(s); // uint64_t B[nodes][chunk]; int parsize = (elements+(chunk-1))/chunk; double sum = 0.0; *diameter = 1; for(int t=rank;t<parsize;t+=procs){ uint64_t kk, l; clear_buffers(A, B, nodes*chunk); for(l=0; l<UINT64_BITS*chunk && UINT64_BITS*t*chunk+l<based_nodes; l++){ unsigned int 
offset = (UINT64_BITS*t*chunk+l)*chunk+l/UINT64_BITS; A[offset] = B[offset] = (0x1ULL<<(l%UINT64_BITS)); } for(kk=0;kk<nodes;kk++){ #ifdef _OPENMP #pragma omp parallel for #endif for(int i=0;i<nodes;i++){ for(int j=0;j<degree;j++){ int n = *(adjacency + i * degree + j); // int n = adjacency[i][j]; for(int k=0;k<chunk;k++) B[i*chunk+k] |= A[n*chunk+k]; } } uint64_t num = 0; #ifdef _OPENMP #pragma omp parallel for reduction(+:num) #endif for(int i=0;i<chunk*nodes;i++) num += POPCNT(B[i]); if(num == (uint64_t)nodes*l) break; // swap A <-> B uint64_t* tmp = A; A = B; B = tmp; sum += ((double)nodes * l - num) * groups; } *diameter = MAX(*diameter, kk+1); } MPI_Allreduce(MPI_IN_PLACE, diameter, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); MPI_Allreduce(MPI_IN_PLACE, &sum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD); sum += (double)nodes * (nodes - 1); if(*diameter > nodes){ // PRINT_R0("This graph is not connected graph.\n"); return false; } *ASPL = sum / (((double)nodes-1)*nodes); free(A); free(B); return true; } bool evaluation(const int nodes, const int degree, const int groups, const int* restrict adjacency, const int based_nodes, const int height, const int based_height, int *diameter, double *ASPL, const bool enable_bfs) { timer_start(TIMER_APSP); bool flag; if(enable_bfs) flag = bfs(nodes, degree, (const int (*)[degree])adjacency, based_nodes, height, based_height, groups, diameter, ASPL); else flag = matrix_op(nodes, degree, adjacency, based_nodes, height, based_height, groups, diameter, ASPL); timer_stop(TIMER_APSP); return flag; }
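/*
 * Illustrative call sketch (not part of the original file): evaluate the
 * diameter and average shortest path length (ASPL) of a 6-node ring of
 * degree 2 with evaluation() as defined above.  The adjacency argument is a
 * flattened nodes x degree table of neighbour indices.  This assumes the
 * surrounding application has already initialised MPI and set up the
 * rank/procs globals and timers declared in common.h; run on a single
 * process, the expected output is diameter 3 and ASPL 1.8 (distances
 * 1,1,2,2,3 from each vertex of the ring).
 */
#include <stdio.h>
#include <stdbool.h>

static void example_ring_aspl(void)
{
  enum { NODES = 6, DEGREE = 2 };
  int adjacency[NODES][DEGREE];
  for(int i=0;i<NODES;i++){
    adjacency[i][0] = (i + NODES - 1) % NODES; /* left neighbour  */
    adjacency[i][1] = (i + 1) % NODES;         /* right neighbour */
  }

  int diameter = 0;
  double ASPL = 0.0;
  bool ok = evaluation(NODES, DEGREE, /* groups */ 1, (const int *)adjacency,
                       /* based_nodes */ NODES, /* height */ 1,
                       /* based_height */ 1, &diameter, &ASPL,
                       /* enable_bfs */ true);
  if(ok) printf("diameter = %d, ASPL = %f\n", diameter, ASPL);
}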
is.c
/************************************************************************* * * * N A S P A R A L L E L B E N C H M A R K S 3.3 * * * * O p e n M P V E R S I O N * * * * I S * * * ************************************************************************* * * * This benchmark is an OpenMP version of the NPB IS code. * * It is described in NAS Technical Report 99-011. * * * * Permission to use, copy, distribute and modify this software * * for any purpose with or without fee is hereby granted. We * * request, however, that all derived work reference the NAS * * Parallel Benchmarks 3.3. This software is provided "as is" * * without express or implied warranty. * * * * Information on NPB 3.3, including the technical report, the * * original specifications, source code, results and information * * on how to submit new results, is available at: * * * * http://www.nas.nasa.gov/Software/NPB/ * * * * Send comments or suggestions to npb@nas.nasa.gov * * * * NAS Parallel Benchmarks Group * * NASA Ames Research Center * * Mail Stop: T27A-1 * * Moffett Field, CA 94035-1000 * * * * E-mail: npb@nas.nasa.gov * * Fax: (650) 604-3957 * * * ************************************************************************* * * * Author: M. Yarrow * * H. Jin * * * *************************************************************************/ #include "npbparams.h" #include <stdlib.h> #include <stdio.h> #ifdef _OPENMP #include <omp.h> #endif #include "sim_api.h" /*****************************************************************/ /* For serial IS, buckets are not really req'd to solve NPB1 IS */ /* spec, but their use on some machines improves performance, on */ /* other machines the use of buckets compromises performance, */ /* probably because it is extra computation which is not req'd. */ /* (Note: Mechanism not understood, probably cache related) */ /* Example: SP2-66MhzWN: 50% speedup with buckets */ /* Example: SGI Indy5000: 50% slowdown with buckets */ /* Example: SGI O2000: 400% slowdown with buckets (Wow!) 
*/ /*****************************************************************/ /* To disable the use of buckets, comment out the following line */ #define USE_BUCKETS /* Uncomment below for cyclic schedule */ /*#define SCHED_CYCLIC*/ /******************/ /* default values */ /******************/ #ifndef CLASS #define CLASS 'S' #endif /*************/ /* CLASS S */ /*************/ #if CLASS == 'S' #define TOTAL_KEYS_LOG_2 16 #define MAX_KEY_LOG_2 11 #define NUM_BUCKETS_LOG_2 9 #endif /*************/ /* CLASS W */ /*************/ #if CLASS == 'W' #define TOTAL_KEYS_LOG_2 20 #define MAX_KEY_LOG_2 16 #define NUM_BUCKETS_LOG_2 10 #endif /*************/ /* CLASS A */ /*************/ #if CLASS == 'A' #define TOTAL_KEYS_LOG_2 23 #define MAX_KEY_LOG_2 19 #define NUM_BUCKETS_LOG_2 10 #endif /*************/ /* CLASS B */ /*************/ #if CLASS == 'B' #define TOTAL_KEYS_LOG_2 25 #define MAX_KEY_LOG_2 21 #define NUM_BUCKETS_LOG_2 10 #endif /*************/ /* CLASS C */ /*************/ #if CLASS == 'C' #define TOTAL_KEYS_LOG_2 27 #define MAX_KEY_LOG_2 23 #define NUM_BUCKETS_LOG_2 10 #endif /*************/ /* CLASS D */ /*************/ #if CLASS == 'D' #define TOTAL_KEYS_LOG_2 31 #define MAX_KEY_LOG_2 27 #define NUM_BUCKETS_LOG_2 10 #endif #if CLASS == 'D' #define TOTAL_KEYS (1L << TOTAL_KEYS_LOG_2) #else #define TOTAL_KEYS (1 << TOTAL_KEYS_LOG_2) #endif #define MAX_KEY (1 << MAX_KEY_LOG_2) #define NUM_BUCKETS (1 << NUM_BUCKETS_LOG_2) #define NUM_KEYS TOTAL_KEYS #define SIZE_OF_BUFFERS NUM_KEYS #define MAX_ITERATIONS 10 #define TEST_ARRAY_SIZE 5 /*************************************/ /* Typedef: if necessary, change the */ /* size of int here by changing the */ /* int type to, say, long */ /*************************************/ #if CLASS == 'D' typedef long INT_TYPE; #else typedef int INT_TYPE; #endif /********************/ /* Some global info */ /********************/ INT_TYPE *key_buff_ptr_global; /* used by full_verify to get */ /* copies of rank info */ int passed_verification; /************************************/ /* These are the three main arrays. 
*/ /* See SIZE_OF_BUFFERS def above */ /************************************/ INT_TYPE key_array[SIZE_OF_BUFFERS], key_buff1[MAX_KEY], key_buff2[SIZE_OF_BUFFERS], partial_verify_vals[TEST_ARRAY_SIZE], **key_buff1_aptr = NULL; #ifdef USE_BUCKETS INT_TYPE **bucket_size, bucket_ptrs[NUM_BUCKETS]; #pragma omp threadprivate(bucket_ptrs) #endif /**********************/ /* Partial verif info */ /**********************/ INT_TYPE test_index_array[TEST_ARRAY_SIZE], test_rank_array[TEST_ARRAY_SIZE], S_test_index_array[TEST_ARRAY_SIZE] = {48427,17148,23627,62548,4431}, S_test_rank_array[TEST_ARRAY_SIZE] = {0,18,346,64917,65463}, W_test_index_array[TEST_ARRAY_SIZE] = {357773,934767,875723,898999,404505}, W_test_rank_array[TEST_ARRAY_SIZE] = {1249,11698,1039987,1043896,1048018}, A_test_index_array[TEST_ARRAY_SIZE] = {2112377,662041,5336171,3642833,4250760}, A_test_rank_array[TEST_ARRAY_SIZE] = {104,17523,123928,8288932,8388264}, B_test_index_array[TEST_ARRAY_SIZE] = {41869,812306,5102857,18232239,26860214}, B_test_rank_array[TEST_ARRAY_SIZE] = {33422937,10244,59149,33135281,99}, C_test_index_array[TEST_ARRAY_SIZE] = {44172927,72999161,74326391,129606274,21736814}, C_test_rank_array[TEST_ARRAY_SIZE] = {61147,882988,266290,133997595,133525895}, D_test_index_array[TEST_ARRAY_SIZE] = {1317351170,995930646,1157283250,1503301535,1453734525}, D_test_rank_array[TEST_ARRAY_SIZE] = {1,36538729,1978098519,2145192618,2147425337}; /***********************/ /* function prototypes */ /***********************/ double randlc( double *X, double *A ); void full_verify( void ); void c_print_results( char *name, char class, int n1, int n2, int n3, int niter, double t, double mops, char *optype, int passed_verification, char *npbversion, char *compiletime, char *cc, char *clink, char *c_lib, char *c_inc, char *cflags, char *clinkflags ); void timer_clear( int n ); void timer_start( int n ); void timer_stop( int n ); double timer_read( int n ); /* * FUNCTION RANDLC (X, A) * * This routine returns a uniform pseudorandom double precision number in the * range (0, 1) by using the linear congruential generator * * x_{k+1} = a x_k (mod 2^46) * * where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers * before repeating. The argument A is the same as 'a' in the above formula, * and X is the same as x_0. A and X must be odd double precision integers * in the range (1, 2^46). The returned value RANDLC is normalized to be * between 0 and 1, i.e. RANDLC = 2^(-46) * x_1. X is updated to contain * the new seed x_1, so that subsequent calls to RANDLC using the same * arguments will generate a continuous sequence. * * This routine should produce the same results on any computer with at least * 48 mantissa bits in double precision floating point data. On Cray systems, * double precision should be disabled. * * David H. Bailey October 26, 1990 * * IMPLICIT DOUBLE PRECISION (A-H, O-Z) * SAVE KS, R23, R46, T23, T46 * DATA KS/0/ * * If this is the first call to RANDLC, compute R23 = 2 ^ -23, R46 = 2 ^ -46, * T23 = 2 ^ 23, and T46 = 2 ^ 46. These are computed in loops, rather than * by merely using the ** operator, in order to insure that the results are * exact on all systems. This code assumes that 0.5D0 is represented exactly. 
*/ /*****************************************************************/ /************* R A N D L C ************/ /************* ************/ /************* portable random number generator ************/ /*****************************************************************/ static int KS=0; static double R23, R46, T23, T46; #pragma omp threadprivate(KS, R23, R46, T23, T46) double randlc( double *X, double *A ) { double T1, T2, T3, T4; double A1; double A2; double X1; double X2; double Z; int i, j; if (KS == 0) { R23 = 1.0; R46 = 1.0; T23 = 1.0; T46 = 1.0; for (i=1; i<=23; i++) { R23 = 0.50 * R23; T23 = 2.0 * T23; } for (i=1; i<=46; i++) { R46 = 0.50 * R46; T46 = 2.0 * T46; } KS = 1; } /* Break A into two parts such that A = 2^23 * A1 + A2 and set X = N. */ T1 = R23 * *A; j = T1; A1 = j; A2 = *A - T23 * A1; /* Break X into two parts such that X = 2^23 * X1 + X2, compute Z = A1 * X2 + A2 * X1 (mod 2^23), and then X = 2^23 * Z + A2 * X2 (mod 2^46). */ T1 = R23 * *X; j = T1; X1 = j; X2 = *X - T23 * X1; T1 = A1 * X2 + A2 * X1; j = R23 * T1; T2 = j; Z = T1 - T23 * T2; T3 = T23 * Z + A2 * X2; j = R46 * T3; T4 = j; *X = T3 - T46 * T4; return(R46 * *X); } /*****************************************************************/ /************ F I N D _ M Y _ S E E D ************/ /************ ************/ /************ returns parallel random number seq seed ************/ /*****************************************************************/ /* * Create a random number sequence of total length nn residing * on np number of processors. Each processor will therefore have a * subsequence of length nn/np. This routine returns that random * number which is the first random number for the subsequence belonging * to processor rank kn, and which is used as seed for proc kn ran # gen. 
*/ double find_my_seed( int kn, /* my processor rank, 0<=kn<=num procs */ int np, /* np = num procs */ long nn, /* total num of ran numbers, all procs */ double s, /* Ran num seed, for ex.: 314159265.00 */ double a ) /* Ran num gen mult, try 1220703125.00 */ { double t1,t2; long mq,nq,kk,ik; if ( kn == 0 ) return s; mq = (nn/4 + np - 1) / np; nq = mq * 4 * kn; /* number of rans to be skipped */ t1 = s; t2 = a; kk = nq; while ( kk > 1 ) { ik = kk / 2; if( 2 * ik == kk ) { (void)randlc( &t2, &t2 ); kk = ik; } else { (void)randlc( &t1, &t2 ); kk = kk - 1; } } (void)randlc( &t1, &t2 ); return( t1 ); } /*****************************************************************/ /************* C R E A T E _ S E Q ************/ /*****************************************************************/ void create_seq( double seed, double a ) { double x, s; INT_TYPE i, k; #pragma omp parallel private(x,s,i,k) { INT_TYPE k1, k2; double an = a; int myid, num_procs; INT_TYPE mq; #ifdef _OPENMP myid = omp_get_thread_num(); num_procs = omp_get_num_threads(); #else myid = 0; num_procs = 1; #endif mq = (NUM_KEYS + num_procs - 1) / num_procs; k1 = mq * myid; k2 = k1 + mq; if ( k2 > NUM_KEYS ) k2 = NUM_KEYS; KS = 0; s = find_my_seed( myid, num_procs, (long)4*NUM_KEYS, seed, an ); k = MAX_KEY/4; for (i=k1; i<k2; i++) { x = randlc(&s, &an); x += randlc(&s, &an); x += randlc(&s, &an); x += randlc(&s, &an); key_array[i] = k*x; } } /*omp parallel*/ } /*****************************************************************/ /***************** Allocate Working Buffer ****************/ /*****************************************************************/ void *alloc_mem( size_t size ) { void *p; p = (void *)malloc(size); if (!p) { perror("Memory allocation error"); exit(1); } return p; } void alloc_key_buff( void ) { INT_TYPE i; int num_procs; #ifdef _OPENMP num_procs = omp_get_max_threads(); #else num_procs = 1; #endif #ifdef USE_BUCKETS bucket_size = (INT_TYPE **)alloc_mem(sizeof(INT_TYPE *) * num_procs); for (i = 0; i < num_procs; i++) { bucket_size[i] = (INT_TYPE *)alloc_mem(sizeof(INT_TYPE) * NUM_BUCKETS); } #pragma omp parallel for for( i=0; i<NUM_KEYS; i++ ) key_buff2[i] = 0; #else /*USE_BUCKETS*/ key_buff1_aptr = (INT_TYPE **)alloc_mem(sizeof(INT_TYPE *) * num_procs); key_buff1_aptr[0] = key_buff1; for (i = 1; i < num_procs; i++) { key_buff1_aptr[i] = (INT_TYPE *)alloc_mem(sizeof(INT_TYPE) * MAX_KEY); } #endif /*USE_BUCKETS*/ } /*****************************************************************/ /************* F U L L _ V E R I F Y ************/ /*****************************************************************/ void full_verify( void ) { INT_TYPE i, j; INT_TYPE k, k1, k2; /* Now, finally, sort the keys: */ /* Copy keys into work array; keys in key_array will be reassigned. */ #ifdef USE_BUCKETS /* Buckets are already sorted. Sorting keys within each bucket */ #ifdef SCHED_CYCLIC #pragma omp parallel for private(i,j,k,k1) schedule(static,1) #else #pragma omp parallel for private(i,j,k,k1) schedule(dynamic) #endif for( j=0; j< NUM_BUCKETS; j++ ) { k1 = (j > 0)? bucket_ptrs[j-1] : 0; for ( i = k1; i < bucket_ptrs[j]; i++ ) { k = --key_buff_ptr_global[key_buff2[i]]; key_array[k] = key_buff2[i]; } } #else #pragma omp parallel private(i,j,k,k1,k2) { #pragma omp for for( i=0; i<NUM_KEYS; i++ ) key_buff2[i] = key_array[i]; /* This is actual sorting. 
Each thread is responsible for a subset of key values */ j = omp_get_num_threads(); j = (MAX_KEY + j - 1) / j; k1 = j * omp_get_thread_num(); k2 = k1 + j; if (k2 > MAX_KEY) k2 = MAX_KEY; for( i=0; i<NUM_KEYS; i++ ) { if (key_buff2[i] >= k1 && key_buff2[i] < k2) { k = --key_buff_ptr_global[key_buff2[i]]; key_array[k] = key_buff2[i]; } } } /*omp parallel*/ #endif /* Confirm keys correctly sorted: count incorrectly sorted keys, if any */ j = 0; #pragma omp parallel for reduction(+:j) for( i=1; i<NUM_KEYS; i++ ) if( key_array[i-1] > key_array[i] ) j++; if( j != 0 ) printf( "Full_verify: number of keys out of sort: %ld\n", (long)j ); else passed_verification++; } /*****************************************************************/ /************* R A N K ****************/ /*****************************************************************/ void rank( int iteration ) { INT_TYPE i, k; INT_TYPE *key_buff_ptr, *key_buff_ptr2; #ifdef USE_BUCKETS int shift = MAX_KEY_LOG_2 - NUM_BUCKETS_LOG_2; INT_TYPE num_bucket_keys = (1L << shift); #endif key_array[iteration] = iteration; key_array[iteration+MAX_ITERATIONS] = MAX_KEY - iteration; /* Determine where the partial verify test keys are, load into */ /* top of array bucket_size */ for( i=0; i<TEST_ARRAY_SIZE; i++ ) partial_verify_vals[i] = key_array[test_index_array[i]]; /* Setup pointers to key buffers */ #ifdef USE_BUCKETS key_buff_ptr2 = key_buff2; #else key_buff_ptr2 = key_array; #endif key_buff_ptr = key_buff1; #pragma omp parallel private(i, k) { INT_TYPE *work_buff, m, k1, k2; int myid = 0, num_procs = 1; #ifdef _OPENMP myid = omp_get_thread_num(); num_procs = omp_get_num_threads(); #endif /* Bucket sort is known to improve cache performance on some */ /* cache based systems. But the actual performance may depend */ /* on cache size, problem size. */ #ifdef USE_BUCKETS work_buff = bucket_size[myid]; /* Initialize */ for( i=0; i<NUM_BUCKETS; i++ ) work_buff[i] = 0; /* Determine the number of keys in each bucket */ #pragma omp for schedule(static) for( i=0; i<NUM_KEYS; i++ ) work_buff[key_array[i] >> shift]++; /* Accumulative bucket sizes are the bucket pointers. These are global sizes accumulated upon to each bucket */ bucket_ptrs[0] = 0; for( k=0; k< myid; k++ ) bucket_ptrs[0] += bucket_size[k][0]; for( i=1; i< NUM_BUCKETS; i++ ) { bucket_ptrs[i] = bucket_ptrs[i-1]; for( k=0; k< myid; k++ ) bucket_ptrs[i] += bucket_size[k][i]; for( k=myid; k< num_procs; k++ ) bucket_ptrs[i] += bucket_size[k][i-1]; } /* Sort into appropriate bucket */ #pragma omp for schedule(static) for( i=0; i<NUM_KEYS; i++ ) { k = key_array[i]; key_buff2[bucket_ptrs[k >> shift]++] = k; } /* The bucket pointers now point to the final accumulated sizes */ if (myid < num_procs-1) { for( i=0; i< NUM_BUCKETS; i++ ) for( k=myid+1; k< num_procs; k++ ) bucket_ptrs[i] += bucket_size[k][i]; } /* Now, buckets are sorted. We only need to sort keys inside each bucket, which can be done in parallel. 
Because the distribution of the number of keys in the buckets is Gaussian, the use of a dynamic schedule should improve load balance, thus, performance */ #ifdef SCHED_CYCLIC #pragma omp for schedule(static,1) #else #pragma omp for schedule(dynamic) #endif for( i=0; i< NUM_BUCKETS; i++ ) { /* Clear the work array section associated with each bucket */ k1 = i * num_bucket_keys; k2 = k1 + num_bucket_keys; for ( k = k1; k < k2; k++ ) key_buff_ptr[k] = 0; /* Ranking of all keys occurs in this section: */ /* In this section, the keys themselves are used as their own indexes to determine how many of each there are: their individual population */ m = (i > 0)? bucket_ptrs[i-1] : 0; for ( k = m; k < bucket_ptrs[i]; k++ ) key_buff_ptr[key_buff_ptr2[k]]++; /* Now they have individual key */ /* population */ /* To obtain ranks of each key, successively add the individual key population, not forgetting to add m, the total of lesser keys, to the first key population */ key_buff_ptr[k1] += m; for ( k = k1+1; k < k2; k++ ) key_buff_ptr[k] += key_buff_ptr[k-1]; } #else /*USE_BUCKETS*/ work_buff = key_buff1_aptr[myid]; /* Clear the work array */ for( i=0; i<MAX_KEY; i++ ) work_buff[i] = 0; /* Ranking of all keys occurs in this section: */ /* In this section, the keys themselves are used as their own indexes to determine how many of each there are: their individual population */ #pragma omp for nowait schedule(static) for( i=0; i<NUM_KEYS; i++ ) work_buff[key_buff_ptr2[i]]++; /* Now they have individual key */ /* population */ /* To obtain ranks of each key, successively add the individual key population */ for( i=0; i<MAX_KEY-1; i++ ) work_buff[i+1] += work_buff[i]; #pragma omp barrier /* Accumulate the global key population */ for( k=1; k<num_procs; k++ ) { #pragma omp for nowait schedule(static) for( i=0; i<MAX_KEY; i++ ) key_buff_ptr[i] += key_buff1_aptr[k][i]; } #endif /*USE_BUCKETS*/ } /*omp parallel*/ /* This is the partial verify test section */ /* Observe that test_rank_array vals are */ /* shifted differently for different cases */ for( i=0; i<TEST_ARRAY_SIZE; i++ ) { k = partial_verify_vals[i]; /* test vals were put here */ if( 0 < k && k <= NUM_KEYS-1 ) { INT_TYPE key_rank = key_buff_ptr[k-1]; int failed = 0; switch( CLASS ) { case 'S': if( i <= 2 ) { if( key_rank != test_rank_array[i]+iteration ) failed = 1; else passed_verification++; } else { if( key_rank != test_rank_array[i]-iteration ) failed = 1; else passed_verification++; } break; case 'W': if( i < 2 ) { if( key_rank != test_rank_array[i]+(iteration-2) ) failed = 1; else passed_verification++; } else { if( key_rank != test_rank_array[i]-iteration ) failed = 1; else passed_verification++; } break; case 'A': if( i <= 2 ) { if( key_rank != test_rank_array[i]+(iteration-1) ) failed = 1; else passed_verification++; } else { if( key_rank != test_rank_array[i]-(iteration-1) ) failed = 1; else passed_verification++; } break; case 'B': if( i == 1 || i == 2 || i == 4 ) { if( key_rank != test_rank_array[i]+iteration ) failed = 1; else passed_verification++; } else { if( key_rank != test_rank_array[i]-iteration ) failed = 1; else passed_verification++; } break; case 'C': if( i <= 2 ) { if( key_rank != test_rank_array[i]+iteration ) failed = 1; else passed_verification++; } else { if( key_rank != test_rank_array[i]-iteration ) failed = 1; else passed_verification++; } break; case 'D': if( i < 2 ) { if( key_rank != test_rank_array[i]+iteration ) failed = 1; else passed_verification++; } else { if( key_rank != test_rank_array[i]-iteration ) failed = 1; 
else passed_verification++; } break; } if( failed == 1 ) printf( "Failed partial verification: " "iteration %d, test key %d\n", iteration, (int)i ); } } /* Make copies of rank info for use by full_verify: these variables in rank are local; making them global slows down the code, probably since they cannot be made register by compiler */ if( iteration == MAX_ITERATIONS ) key_buff_ptr_global = key_buff_ptr; } /*****************************************************************/ /************* M A I N ****************/ /*****************************************************************/ int main( int argc, char **argv ) { int i, iteration, timer_on; double timecounter; FILE *fp; /* Initialize timers */ timer_on = 0; if ((fp = fopen("timer.flag", "r")) != NULL) { fclose(fp); timer_on = 1; } timer_clear( 0 ); if (timer_on) { timer_clear( 1 ); timer_clear( 2 ); timer_clear( 3 ); } if (timer_on) timer_start( 3 ); /* Initialize the verification arrays if a valid class */ for( i=0; i<TEST_ARRAY_SIZE; i++ ) switch( CLASS ) { case 'S': test_index_array[i] = S_test_index_array[i]; test_rank_array[i] = S_test_rank_array[i]; break; case 'A': test_index_array[i] = A_test_index_array[i]; test_rank_array[i] = A_test_rank_array[i]; break; case 'W': test_index_array[i] = W_test_index_array[i]; test_rank_array[i] = W_test_rank_array[i]; break; case 'B': test_index_array[i] = B_test_index_array[i]; test_rank_array[i] = B_test_rank_array[i]; break; case 'C': test_index_array[i] = C_test_index_array[i]; test_rank_array[i] = C_test_rank_array[i]; break; case 'D': test_index_array[i] = D_test_index_array[i]; test_rank_array[i] = D_test_rank_array[i]; break; }; /* Printout initial NPB info */ printf ( "\n\n NAS Parallel Benchmarks (NPB3.3-OMP) - IS Benchmark\n\n" ); printf( " Size: %ld (class %c)\n", (long)TOTAL_KEYS, CLASS ); printf( " Iterations: %d\n", MAX_ITERATIONS ); #ifdef _OPENMP printf( " Number of available threads: %d\n", omp_get_max_threads() ); #endif printf( "\n" ); if (timer_on) timer_start( 1 ); /* Generate random number sequence and subsequent keys on all procs */ create_seq( 314159265.00, /* Random number gen seed */ 1220703125.00 ); /* Random number gen mult */ alloc_key_buff(); if (timer_on) timer_stop( 1 ); /* Do one interation for free (i.e., untimed) to guarantee initialization of all data and code pages and respective tables */ rank( 1 ); /* Start verification counter */ passed_verification = 0; if( CLASS != 'S' ) printf( "\n iteration\n" ); /* Start timer */ timer_start( 0 ); parmacs_roi_begin(); /* This is the main iteration */ for( iteration=1; iteration<=MAX_ITERATIONS; iteration++ ) { if( CLASS != 'S' ) printf( " %d\n", iteration ); parmacs_iter_begin(iteration); rank( iteration ); parmacs_iter_end(iteration); } /* End of timing, obtain maximum time of all processors */ parmacs_roi_end(); timer_stop( 0 ); timecounter = timer_read( 0 ); /* This tests that keys are in sequence: sorting of last ranked key seq occurs here, but is an untimed operation */ if (timer_on) timer_start( 2 ); full_verify(); if (timer_on) timer_stop( 2 ); if (timer_on) timer_stop( 3 ); /* The final printout */ if( passed_verification != 5*MAX_ITERATIONS + 1 ) passed_verification = 0; c_print_results( "IS", CLASS, (int)(TOTAL_KEYS/64), 64, 0, MAX_ITERATIONS, timecounter, ((double) (MAX_ITERATIONS*TOTAL_KEYS)) /timecounter/1000000., "keys ranked", passed_verification, NPBVERSION, COMPILETIME, CC, CLINK, C_LIB, C_INC, CFLAGS, CLINKFLAGS ); /* Print additional timers */ if (timer_on) { double t_total, t_percent; t_total = 
timer_read( 3 ); printf("\nAdditional timers -\n"); printf(" Total execution: %8.3f\n", t_total); if (t_total == 0.0) t_total = 1.0; timecounter = timer_read(1); t_percent = timecounter/t_total * 100.; printf(" Initialization : %8.3f (%5.2f%%)\n", timecounter, t_percent); timecounter = timer_read(0); t_percent = timecounter/t_total * 100.; printf(" Benchmarking : %8.3f (%5.2f%%)\n", timecounter, t_percent); timecounter = timer_read(2); t_percent = timecounter/t_total * 100.; printf(" Sorting : %8.3f (%5.2f%%)\n", timecounter, t_percent); } return 0; /**************************/ } /* E N D P R O G R A M */ /**************************/
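/*****************************************************************/
/*  Illustrative check (not part of the benchmark): find_my_seed */
/*  jumps to the state a^nq * s (mod 2^46) using O(log nq)       */
/*  squarings of the multiplier, which must equal the state      */
/*  reached by walking the randlc() stream one step at a time.   */
/*  A minimal sketch, assuming it is compiled together with the  */
/*  randlc() and find_my_seed() definitions above; the values of */
/*  nn, np and kn below are arbitrary.                           */
/*****************************************************************/
#include <stdio.h>

static void check_skip_ahead( void )
{
    const double s  = 314159265.00;    /* benchmark seed            */
    const double a  = 1220703125.00;   /* benchmark multiplier      */
    const long   nn = 1L << 20;        /* total stream length       */
    const int    np = 4;               /* pretend thread count      */
    const int    kn = 2;               /* rank whose seed we check  */

    /* Rank kn owns nn/np numbers drawn four at a time, so its stream
       starts after skip = ceil(nn/4/np) * 4 * kn sequential draws.  */
    long   skip = ((nn/4 + np - 1) / np) * 4L * kn;
    double x = s, mult = a;
    long   i;

    for (i = 0; i < skip; i++)
        (void)randlc( &x, &mult );     /* one LCG step per call     */

    /* Both numbers below should be identical: the mod-2^46 arithmetic
       in randlc() is exact in double precision.                      */
    printf( "sequential seed: %.1f  jumped seed: %.1f\n",
            x, find_my_seed( kn, np, nn, s, a ) );
}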
par_mgr.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Two-grid system solver * *****************************************************************************/ #include "_hypre_parcsr_ls.h" #include "par_amg.h" #include "par_mgr.h" /* Create */ void * hypre_MGRCreate() { hypre_ParMGRData *mgr_data; mgr_data = hypre_CTAlloc(hypre_ParMGRData, 1, HYPRE_MEMORY_HOST); /* block data */ (mgr_data -> block_size) = 1; (mgr_data -> block_num_coarse_indexes) = NULL; (mgr_data -> point_marker_array) = NULL; (mgr_data -> block_cf_marker) = NULL; /* general data */ (mgr_data -> max_num_coarse_levels) = 10; (mgr_data -> A_array) = NULL; (mgr_data -> P_array) = NULL; (mgr_data -> RT_array) = NULL; (mgr_data -> RAP) = NULL; (mgr_data -> CF_marker_array) = NULL; (mgr_data -> coarse_indices_lvls) = NULL; (mgr_data -> A_ff_array) = NULL; (mgr_data -> F_fine_array) = NULL; (mgr_data -> U_fine_array) = NULL; (mgr_data -> aff_solver) = NULL; (mgr_data -> fine_grid_solver_setup) = NULL; (mgr_data -> fine_grid_solver_solve) = NULL; (mgr_data -> F_array) = NULL; (mgr_data -> U_array) = NULL; (mgr_data -> residual) = NULL; (mgr_data -> rel_res_norms) = NULL; (mgr_data -> Vtemp) = NULL; (mgr_data -> Ztemp) = NULL; (mgr_data -> Utemp) = NULL; (mgr_data -> Ftemp) = NULL; (mgr_data -> num_iterations) = 0; (mgr_data -> num_interp_sweeps) = 1; (mgr_data -> num_restrict_sweeps) = 1; (mgr_data -> trunc_factor) = 0.0; (mgr_data -> max_row_sum) = 0.9; (mgr_data -> strong_threshold) = 0.25; (mgr_data -> S_commpkg_switch) = 1.0; (mgr_data -> P_max_elmts) = 0; (mgr_data -> coarse_grid_solver) = NULL; (mgr_data -> coarse_grid_solver_setup) = NULL; (mgr_data -> coarse_grid_solver_solve) = NULL; (mgr_data -> global_smoother) = NULL; (mgr_data -> use_default_cgrid_solver) = 1; (mgr_data -> use_default_fsolver) = -1; // set to -1 to avoid printing when not used (mgr_data -> omega) = 1.; (mgr_data -> max_iter) = 20; (mgr_data -> tol) = 1.0e-7; (mgr_data -> relax_type) = 0; (mgr_data -> relax_order) = 1; // not fully utilized. Only used to compute L1-norms. 
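  /* Per-level options (interpolation, restriction, F-relaxation) start out
     unset here; they are filled in later, either explicitly by the user
     through the MGR set routines or with defaults at setup time. */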
(mgr_data -> interp_type) = NULL; (mgr_data -> restrict_type) = NULL; (mgr_data -> num_relax_sweeps) = 1; (mgr_data -> relax_weight) = 1.0; (mgr_data -> logging) = 0; (mgr_data -> print_level) = 0; (mgr_data -> l1_norms) = NULL; (mgr_data -> reserved_coarse_size) = 0; (mgr_data -> reserved_coarse_indexes) = NULL; (mgr_data -> reserved_Cpoint_local_indexes) = NULL; (mgr_data -> diaginv) = NULL; (mgr_data -> global_smooth_iters) = 1; (mgr_data -> global_smooth_type) = 0; (mgr_data -> set_non_Cpoints_to_F) = 0; (mgr_data -> idx_array) = NULL; (mgr_data -> Frelax_method) = NULL; (mgr_data -> VcycleRelaxVtemp) = NULL; (mgr_data -> VcycleRelaxZtemp) = NULL; (mgr_data -> FrelaxVcycleData) = NULL; (mgr_data -> Frelax_num_functions) = NULL; (mgr_data -> max_local_lvls) = 10; (mgr_data -> use_non_galerkin_cg) = NULL; (mgr_data -> print_coarse_system) = 0; (mgr_data -> set_c_points_method) = 0; (mgr_data -> lvl_to_keep_cpoints) = 0; (mgr_data -> cg_convergence_factor) = 0.0; return (void *) mgr_data; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ /* Destroy */ HYPRE_Int hypre_MGRDestroy( void *data ) { hypre_ParMGRData * mgr_data = (hypre_ParMGRData*) data; HYPRE_Int i; HYPRE_Int num_coarse_levels = (mgr_data -> num_coarse_levels); /* block info data */ if ((mgr_data -> block_cf_marker)) { for (i=0; i < (mgr_data -> max_num_coarse_levels); i++) { if ((mgr_data -> block_cf_marker)[i]) { hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST); } } hypre_TFree((mgr_data -> block_cf_marker), HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker) = NULL; } if(mgr_data -> block_num_coarse_indexes) { hypre_TFree(mgr_data -> block_num_coarse_indexes, HYPRE_MEMORY_HOST); (mgr_data -> block_num_coarse_indexes) = NULL; } /* final residual vector */ if((mgr_data -> residual)) { hypre_ParVectorDestroy( (mgr_data -> residual) ); (mgr_data -> residual) = NULL; } if((mgr_data -> rel_res_norms)) { hypre_TFree( (mgr_data -> rel_res_norms) , HYPRE_MEMORY_HOST); (mgr_data -> rel_res_norms) = NULL; } /* temp vectors for solve phase */ if((mgr_data -> Vtemp)) { hypre_ParVectorDestroy( (mgr_data -> Vtemp) ); (mgr_data -> Vtemp) = NULL; } if((mgr_data -> Ztemp)) { hypre_ParVectorDestroy( (mgr_data -> Ztemp) ); (mgr_data -> Ztemp) = NULL; } if((mgr_data -> Utemp)) { hypre_ParVectorDestroy( (mgr_data -> Utemp) ); (mgr_data -> Utemp) = NULL; } if((mgr_data -> Ftemp)) { hypre_ParVectorDestroy( (mgr_data -> Ftemp) ); (mgr_data -> Ftemp) = NULL; } /* coarse grid solver */ if((mgr_data -> use_default_cgrid_solver)) { if((mgr_data -> coarse_grid_solver)) hypre_BoomerAMGDestroy( (mgr_data -> coarse_grid_solver) ); (mgr_data -> coarse_grid_solver) = NULL; } /* l1_norms */ if ((mgr_data -> l1_norms)) { for (i=0; i < (num_coarse_levels); i++) { hypre_SeqVectorDestroy((mgr_data -> l1_norms)[i]); } hypre_TFree((mgr_data -> l1_norms), HYPRE_MEMORY_HOST); } /* coarse_indices_lvls */ if ((mgr_data -> coarse_indices_lvls)) { for (i=0; i < (num_coarse_levels); i++) if ((mgr_data -> coarse_indices_lvls)[i]) hypre_TFree((mgr_data -> coarse_indices_lvls)[i], HYPRE_MEMORY_HOST); hypre_TFree((mgr_data -> coarse_indices_lvls), HYPRE_MEMORY_HOST); } /* linear system and cf marker array */ if(mgr_data -> A_array || mgr_data -> P_array || mgr_data -> RT_array || mgr_data -> CF_marker_array) { for (i=1; i < num_coarse_levels+1; i++) { hypre_ParVectorDestroy((mgr_data -> F_array)[i]); hypre_ParVectorDestroy((mgr_data -> U_array)[i]); if 
((mgr_data -> P_array)[i-1]) hypre_ParCSRMatrixDestroy((mgr_data -> P_array)[i-1]); if ((mgr_data -> RT_array)[i-1]) hypre_ParCSRMatrixDestroy((mgr_data -> RT_array)[i-1]); hypre_TFree((mgr_data -> CF_marker_array)[i-1], HYPRE_MEMORY_HOST); } for (i=1; i < (num_coarse_levels); i++) { if ((mgr_data -> A_array)[i]) hypre_ParCSRMatrixDestroy((mgr_data -> A_array)[i]); } } /* AMG for Frelax */ if(mgr_data -> A_ff_array || mgr_data -> F_fine_array || mgr_data -> U_fine_array) { for (i=1; i < num_coarse_levels+1; i++) { if (mgr_data -> F_fine_array[i]) hypre_ParVectorDestroy((mgr_data -> F_fine_array)[i]); if (mgr_data -> U_fine_array[i]) hypre_ParVectorDestroy((mgr_data -> U_fine_array)[i]); } for (i=1; i < (num_coarse_levels); i++) { if ((mgr_data -> A_ff_array)[i]) hypre_ParCSRMatrixDestroy((mgr_data -> A_ff_array)[i]); } if (mgr_data -> use_default_fsolver) { hypre_ParCSRMatrixDestroy((mgr_data -> A_ff_array)[0]); } hypre_TFree(mgr_data -> F_fine_array, HYPRE_MEMORY_HOST); (mgr_data -> F_fine_array) = NULL; hypre_TFree(mgr_data -> U_fine_array, HYPRE_MEMORY_HOST); (mgr_data -> U_fine_array) = NULL; hypre_TFree(mgr_data -> A_ff_array, HYPRE_MEMORY_HOST); (mgr_data -> A_ff_array) = NULL; } if(mgr_data -> aff_solver) { for (i = 1; i < (num_coarse_levels); i++) { if ((mgr_data -> aff_solver)[i]) hypre_BoomerAMGDestroy((mgr_data -> aff_solver)[i]); } if (mgr_data -> use_default_fsolver) { if ((mgr_data -> aff_solver)[0]) hypre_BoomerAMGDestroy((mgr_data -> aff_solver)[0]); } hypre_TFree(mgr_data -> aff_solver, HYPRE_MEMORY_HOST); (mgr_data -> aff_solver) = NULL; } if((mgr_data -> F_array)) { hypre_TFree((mgr_data -> F_array), HYPRE_MEMORY_HOST); (mgr_data -> F_array) = NULL; } if((mgr_data -> U_array)) { hypre_TFree((mgr_data -> U_array), HYPRE_MEMORY_HOST); (mgr_data -> U_array) = NULL; } if((mgr_data -> A_array)) { hypre_TFree((mgr_data -> A_array), HYPRE_MEMORY_HOST); (mgr_data -> A_array) = NULL; } if((mgr_data -> P_array)) { hypre_TFree((mgr_data -> P_array), HYPRE_MEMORY_HOST); (mgr_data -> P_array) = NULL; } if((mgr_data -> RT_array)) { hypre_TFree((mgr_data -> RT_array), HYPRE_MEMORY_HOST); (mgr_data -> RT_array) = NULL; } if((mgr_data -> CF_marker_array)) { hypre_TFree((mgr_data -> CF_marker_array), HYPRE_MEMORY_HOST); (mgr_data -> CF_marker_array) = NULL; } if((mgr_data -> reserved_Cpoint_local_indexes)) { hypre_TFree((mgr_data -> reserved_Cpoint_local_indexes), HYPRE_MEMORY_HOST); (mgr_data -> reserved_Cpoint_local_indexes) = NULL; } if (mgr_data -> restrict_type) { hypre_TFree(mgr_data -> restrict_type, HYPRE_MEMORY_HOST); (mgr_data -> restrict_type) = NULL; } if (mgr_data -> interp_type) { hypre_TFree(mgr_data -> interp_type, HYPRE_MEMORY_HOST); (mgr_data -> interp_type) = NULL; } /* Frelax_method */ if (mgr_data -> Frelax_method) { hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_method) = NULL; } /* Frelax_num_functions */ if (mgr_data -> Frelax_num_functions) { hypre_TFree(mgr_data -> Frelax_num_functions, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_num_functions) = NULL; } /* data for V-cycle F-relaxation */ if((mgr_data -> VcycleRelaxVtemp)) { hypre_ParVectorDestroy( (mgr_data -> VcycleRelaxVtemp) ); (mgr_data -> VcycleRelaxVtemp) = NULL; } if((mgr_data -> VcycleRelaxZtemp)) { hypre_ParVectorDestroy( (mgr_data -> VcycleRelaxZtemp) ); (mgr_data -> VcycleRelaxZtemp) = NULL; } if (mgr_data -> FrelaxVcycleData) { for (i = 0; i < num_coarse_levels; i++) { if ((mgr_data -> FrelaxVcycleData)[i]) { hypre_MGRDestroyFrelaxVcycleData((mgr_data -> 
FrelaxVcycleData)[i]); (mgr_data -> FrelaxVcycleData)[i] = NULL; } } hypre_TFree(mgr_data -> FrelaxVcycleData, HYPRE_MEMORY_HOST); (mgr_data -> FrelaxVcycleData) = NULL; } /* data for reserved coarse nodes */ if(mgr_data -> reserved_coarse_indexes) { hypre_TFree(mgr_data -> reserved_coarse_indexes, HYPRE_MEMORY_HOST); (mgr_data -> reserved_coarse_indexes) = NULL; } /* index array for setting Cpoints by global block */ if ((mgr_data -> set_c_points_method) == 1) { hypre_TFree(mgr_data -> idx_array, HYPRE_MEMORY_HOST); (mgr_data -> idx_array) = NULL; } /* array for setting option to use non-Galerkin coarse grid */ if (mgr_data -> use_non_galerkin_cg) { hypre_TFree(mgr_data -> use_non_galerkin_cg, HYPRE_MEMORY_HOST); (mgr_data -> use_non_galerkin_cg) = NULL; } /* coarse level matrix - RAP */ if ((mgr_data -> RAP)) hypre_ParCSRMatrixDestroy((mgr_data -> RAP)); if ((mgr_data -> diaginv)) hypre_TFree((mgr_data -> diaginv), HYPRE_MEMORY_HOST); if ((mgr_data -> global_smoother)) { if (mgr_data -> global_smooth_type == 8) { HYPRE_EuclidDestroy((mgr_data -> global_smoother)); } else if (mgr_data -> global_smooth_type == 16) { HYPRE_ILUDestroy((mgr_data -> global_smoother)); } } /* mgr data */ hypre_TFree(mgr_data, HYPRE_MEMORY_HOST); return hypre_error_flag; } /* Create data for V-cycle F-relaxtion */ void * hypre_MGRCreateFrelaxVcycleData() { hypre_ParAMGData *vdata = hypre_CTAlloc(hypre_ParAMGData, 1, HYPRE_MEMORY_HOST); hypre_ParAMGDataAArray(vdata) = NULL; hypre_ParAMGDataPArray(vdata) = NULL; hypre_ParAMGDataFArray(vdata) = NULL; hypre_ParAMGDataCFMarkerArray(vdata) = NULL; hypre_ParAMGDataVtemp(vdata) = NULL; hypre_ParAMGDataAMat(vdata) = NULL; hypre_ParAMGDataBVec(vdata) = NULL; hypre_ParAMGDataZtemp(vdata) = NULL; hypre_ParAMGDataCommInfo(vdata) = NULL; hypre_ParAMGDataUArray(vdata) = NULL; hypre_ParAMGDataNewComm(vdata) = hypre_MPI_COMM_NULL; hypre_ParAMGDataNumLevels(vdata) = 0; hypre_ParAMGDataMaxLevels(vdata) = 10; hypre_ParAMGDataNumFunctions(vdata) = 1; hypre_ParAMGDataSCommPkgSwitch(vdata) = 1.0; hypre_ParAMGDataRelaxOrder(vdata) = 1; hypre_ParAMGDataMaxCoarseSize(vdata) = 9; hypre_ParAMGDataMinCoarseSize(vdata) = 0; hypre_ParAMGDataUserCoarseRelaxType(vdata) = 9; return (void *) vdata; } /* Destroy data for V-cycle F-relaxation */ HYPRE_Int hypre_MGRDestroyFrelaxVcycleData( void *data ) { hypre_ParAMGData * vdata = (hypre_ParAMGData*) data; HYPRE_Int i; HYPRE_Int num_levels = hypre_ParAMGDataNumLevels(vdata); MPI_Comm new_comm = hypre_ParAMGDataNewComm(vdata); hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata)[0], HYPRE_MEMORY_HOST); for (i=1; i < num_levels + 1; i++) { if (hypre_ParAMGDataAArray(vdata)[i]) hypre_ParCSRMatrixDestroy(hypre_ParAMGDataAArray(vdata)[i]); if (hypre_ParAMGDataPArray(vdata)[i-1]) hypre_ParCSRMatrixDestroy(hypre_ParAMGDataPArray(vdata)[i-1]); hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata)[i-1], HYPRE_MEMORY_HOST); hypre_ParVectorDestroy(hypre_ParAMGDataFArray(vdata)[i]); hypre_ParVectorDestroy(hypre_ParAMGDataUArray(vdata)[i]); hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata)[i], HYPRE_MEMORY_HOST); } /* see comments in par_coarsen.c regarding special case for CF_marker */ if (num_levels <= 1) { hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata)[0], HYPRE_MEMORY_HOST); } /* Points to VcycleRelaxVtemp of mgr_data, which is already destroyed */ //hypre_ParVectorDestroy(hypre_ParAMGDataVtemp(vdata)); hypre_TFree(hypre_ParAMGDataFArray(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataUArray(vdata), HYPRE_MEMORY_HOST); 
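   /* Free the remaining per-level pointer arrays owned by this V-cycle data;
      the level matrices, interpolation operators, vectors and CF markers
      themselves were already destroyed in the loop above. */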
hypre_TFree(hypre_ParAMGDataAArray(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataPArray(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata), HYPRE_MEMORY_HOST); //hypre_TFree(hypre_ParAMGDataGridRelaxType(vdata), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata), HYPRE_MEMORY_HOST); /* Points to VcycleRelaxZtemp of mgr_data, which is already destroyed */ /* if (hypre_ParAMGDataZtemp(vdata)) hypre_ParVectorDestroy(hypre_ParAMGDataZtemp(vdata)); */ if (hypre_ParAMGDataAMat(vdata)) hypre_TFree(hypre_ParAMGDataAMat(vdata), HYPRE_MEMORY_HOST); if (hypre_ParAMGDataBVec(vdata)) hypre_TFree(hypre_ParAMGDataBVec(vdata), HYPRE_MEMORY_HOST); if (hypre_ParAMGDataCommInfo(vdata)) hypre_TFree(hypre_ParAMGDataCommInfo(vdata), HYPRE_MEMORY_HOST); if (new_comm != hypre_MPI_COMM_NULL) { hypre_MPI_Comm_free (&new_comm); } hypre_TFree(vdata, HYPRE_MEMORY_HOST); return hypre_error_flag; } /* Set C-point variables for each reduction level */ /* Currently not implemented */ HYPRE_Int hypre_MGRSetReductionLevelCpoints( void *mgr_vdata, HYPRE_Int nlevels, HYPRE_Int *num_coarse_points, HYPRE_Int **level_coarse_indexes) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_coarse_levels) = nlevels; (mgr_data -> num_coarse_per_level) = num_coarse_points; (mgr_data -> level_coarse_indexes) = level_coarse_indexes; return hypre_error_flag; } /* Initialize some data */ /* Set whether non-coarse points on each level should be explicitly tagged as F-points */ HYPRE_Int hypre_MGRSetNonCpointsToFpoints( void *mgr_vdata, HYPRE_Int nonCptToFptFlag) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> set_non_Cpoints_to_F) = nonCptToFptFlag; return hypre_error_flag; } /* Set whether the reserved C points are reduced before the coarse grid solve */ HYPRE_Int hypre_MGRSetReservedCpointsLevelToKeep(void *mgr_vdata, HYPRE_Int level) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> lvl_to_keep_cpoints) = level; return hypre_error_flag; } /* Set Cpoints by contiguous blocks, i.e. p1, p2, ..., pn, s1, s2, ..., sn, ... 
*/ HYPRE_Int hypre_MGRSetCpointsByContiguousBlock( void *mgr_vdata, HYPRE_Int block_size, HYPRE_Int max_num_levels, HYPRE_BigInt *begin_idx_array, HYPRE_Int *block_num_coarse_points, HYPRE_Int **block_coarse_indexes) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; if((mgr_data -> idx_array) != NULL) { hypre_TFree(mgr_data -> idx_array, HYPRE_MEMORY_HOST); (mgr_data -> idx_array) = NULL; } HYPRE_BigInt *index_array = hypre_CTAlloc(HYPRE_BigInt, block_size, HYPRE_MEMORY_HOST); if (begin_idx_array != NULL) { for (i = 0; i < block_size; i++) { index_array[i] = *(begin_idx_array+i); } } hypre_MGRSetCpointsByBlock(mgr_data, block_size, max_num_levels, block_num_coarse_points, block_coarse_indexes); (mgr_data -> idx_array) = index_array; (mgr_data -> set_c_points_method) = 1; return hypre_error_flag; } /* Initialize/ set local block data information */ HYPRE_Int hypre_MGRSetCpointsByBlock( void *mgr_vdata, HYPRE_Int block_size, HYPRE_Int max_num_levels, HYPRE_Int *block_num_coarse_points, HYPRE_Int **block_coarse_indexes) { HYPRE_Int i,j; HYPRE_Int **block_cf_marker = NULL; HYPRE_Int *block_num_coarse_indexes = NULL; hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; /* free block cf_marker data if not previously destroyed */ if((mgr_data -> block_cf_marker) != NULL) { for (i=0; i < (mgr_data -> max_num_coarse_levels); i++) { if ((mgr_data -> block_cf_marker)[i]) { hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker)[i] = NULL; } } hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker) = NULL; } if((mgr_data -> block_num_coarse_indexes)) { hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST); (mgr_data -> block_num_coarse_indexes) = NULL; } /* store block cf_marker */ block_cf_marker = hypre_CTAlloc(HYPRE_Int *, max_num_levels, HYPRE_MEMORY_HOST); for (i = 0; i < max_num_levels; i++) { block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST); memset(block_cf_marker[i], FMRK, block_size*sizeof(HYPRE_Int)); } for (i = 0; i < max_num_levels; i++) { for(j=0; j<block_num_coarse_points[i]; j++) { (block_cf_marker[i])[block_coarse_indexes[i][j]] = CMRK; } } /* store block_num_coarse_points */ if(max_num_levels > 0) { block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST); for(i=0; i<max_num_levels; i++) block_num_coarse_indexes[i] = block_num_coarse_points[i]; } /* set block data */ (mgr_data -> max_num_coarse_levels) = max_num_levels; (mgr_data -> block_size) = block_size; (mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes; (mgr_data -> block_cf_marker) = block_cf_marker; (mgr_data -> set_c_points_method) = 0; return hypre_error_flag; } HYPRE_Int hypre_MGRSetCpointsByPointMarkerArray( void *mgr_vdata, HYPRE_Int block_size, HYPRE_Int max_num_levels, HYPRE_Int *lvl_num_coarse_points, HYPRE_Int **lvl_coarse_indexes, HYPRE_Int *point_marker_array) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i,j; HYPRE_Int **block_cf_marker = NULL; HYPRE_Int *block_num_coarse_indexes = NULL; /* free block cf_marker data if not previously destroyed */ if((mgr_data -> block_cf_marker) != NULL) { for (i=0; i < (mgr_data -> max_num_coarse_levels); i++) { if ((mgr_data -> block_cf_marker)[i]) { hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker)[i] = NULL; } } hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker) = NULL; } 
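  /* Also release any previously stored per-level coarse-point counts
     before installing the new block data below. */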
if((mgr_data -> block_num_coarse_indexes)) { hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST); (mgr_data -> block_num_coarse_indexes) = NULL; } /* store block cf_marker */ block_cf_marker = hypre_CTAlloc(HYPRE_Int *, max_num_levels, HYPRE_MEMORY_HOST); for (i = 0; i < max_num_levels; i++) { block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST); memset(block_cf_marker[i], FMRK, block_size*sizeof(HYPRE_Int)); } for (i = 0; i < max_num_levels; i++) { for(j=0; j<lvl_num_coarse_points[i]; j++) { block_cf_marker[i][j] = lvl_coarse_indexes[i][j]; } } /* store block_num_coarse_points */ if(max_num_levels > 0) { block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST); for(i=0; i<max_num_levels; i++) block_num_coarse_indexes[i] = lvl_num_coarse_points[i]; } /* set block data */ (mgr_data -> max_num_coarse_levels) = max_num_levels; (mgr_data -> block_size) = block_size; (mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes; (mgr_data -> block_cf_marker) = block_cf_marker; (mgr_data -> point_marker_array) = point_marker_array; (mgr_data -> set_c_points_method) = 2; return hypre_error_flag; } /*Set number of points that remain part of the coarse grid throughout the hierarchy */ HYPRE_Int hypre_MGRSetReservedCoarseNodes(void *mgr_vdata, HYPRE_Int reserved_coarse_size, HYPRE_BigInt *reserved_cpt_index) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_BigInt *reserved_coarse_indexes = NULL; HYPRE_Int i; if (!mgr_data) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! MGR object empty!\n"); return hypre_error_flag; } if(reserved_coarse_size < 0) { hypre_error_in_arg(2); return hypre_error_flag; } /* free data not previously destroyed */ if((mgr_data -> reserved_coarse_indexes)) { hypre_TFree((mgr_data -> reserved_coarse_indexes), HYPRE_MEMORY_HOST); (mgr_data -> reserved_coarse_indexes) = NULL; } /* set reserved coarse nodes */ if(reserved_coarse_size > 0) { reserved_coarse_indexes = hypre_CTAlloc(HYPRE_BigInt, reserved_coarse_size, HYPRE_MEMORY_HOST); for(i=0; i<reserved_coarse_size; i++) reserved_coarse_indexes[i] = reserved_cpt_index[i]; } (mgr_data -> reserved_coarse_size) = reserved_coarse_size; (mgr_data -> reserved_coarse_indexes) = reserved_coarse_indexes; return hypre_error_flag; } /* Set CF marker array */ HYPRE_Int hypre_MGRCoarsen(hypre_ParCSRMatrix *S, hypre_ParCSRMatrix *A, HYPRE_Int fixed_coarse_size, HYPRE_Int *fixed_coarse_indexes, HYPRE_Int debug_flag, HYPRE_Int **CF_marker_ptr, HYPRE_Int cflag) { HYPRE_Int *CF_marker = NULL; HYPRE_Int *cindexes = fixed_coarse_indexes; HYPRE_Int i, row, nc; HYPRE_Int nloc = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); /* If this is the last level, coarsen onto fixed coarse set */ if(cflag) { if(*CF_marker_ptr != NULL) { hypre_TFree(*CF_marker_ptr, HYPRE_MEMORY_HOST); } CF_marker = hypre_CTAlloc(HYPRE_Int, nloc, HYPRE_MEMORY_HOST); memset(CF_marker, FMRK, nloc*sizeof(HYPRE_Int)); /* first mark fixed coarse set */ nc = fixed_coarse_size; for(i = 0; i < nc; i++) { CF_marker[cindexes[i]] = CMRK; } } else { /* First coarsen to get initial CF splitting. * This is then followed by updating the CF marker to pass * coarse information to the next levels. NOTE: It may be * convenient to implement this way (allows the use of multiple * coarsening strategies without changing too much code), * but not necessarily the best option, compared to initializing * CF_marker first and then coarsening on subgraph which excludes * the initialized coarse nodes. 
*/ hypre_BoomerAMGCoarsen(S, A, 0, debug_flag, &CF_marker); /* Update CF_marker to correct Cpoints marked as Fpoints. */ nc = fixed_coarse_size; for(i = 0; i < nc; i++) { CF_marker[cindexes[i]] = CMRK; } /* set F-points to FMRK. This is necessary since the different coarsening schemes differentiate * between type of F-points (example Ruge coarsening). We do not need that distinction here. */ for (row = 0; row <nloc; row++) { if(CF_marker[row] == CMRK) continue; CF_marker[row] = FMRK; } #if 0 /* IMPORTANT: Update coarse_indexes array to define the positions of the fixed coarse points * in the next level. */ nc = 0; index_i = 0; for (row = 0; row <nloc; row++) { /* loop through new c-points */ if(CF_marker[row] == CMRK) nc++; else if(CF_marker[row] == S_CMRK) { /* previously marked c-point is part of fixed coarse set. Track its current local index */ cindexes[index_i++] = nc; /* reset c-point from S_CMRK to CMRK */ cf_marker[row] = CMRK; nc++; } /* set F-points to FMRK. This is necessary since the different coarsening schemes differentiate * between type of F-points (example Ruge coarsening). We do not need that distinction here. */ else { CF_marker[row] = FMRK; } } /* check if this should be last level */ if( nc == fixed_coarse_size) last_level = 1; //printf(" nc = %d and fixed coarse size = %d \n", nc, fixed_coarse_size); #endif } /* set CF_marker */ *CF_marker_ptr = CF_marker; return hypre_error_flag; } /* Interpolation for MGR - Adapted from BoomerAMGBuildInterp */ HYPRE_Int hypre_MGRBuildP( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int method, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Real *a_diag; hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *tmp_map_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter,jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; // HYPRE_Int jj_begin_row,jj_begin_row_offd; // HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; //HYPRE_BigInt *fine_to_coarse_offd; HYPRE_Int *coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; //HYPRE_BigInt my_first_cpt; HYPRE_Int num_cols_P_offd; HYPRE_Int i,i1; HYPRE_Int j,jl,jj; HYPRE_Int start; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_threads = hypre_NumThreads(); #ifdef 
HYPRE_NO_GLOBAL_PARTITION //my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else //my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; } if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. 
*--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc} *--------------------------------------------------------------------*/ else { for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if ((CF_marker[i1] >= 0) && (method > 0)) { jj_count[j]++; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if ((CF_marker_offd[i1] >= 0) && (method > 0)) { jj_count_offd[j]++; } } } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ for (i=0; i < num_threads-1; i++) { coarse_counter[i+1] += coarse_counter[i]; jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. 
*-----------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) coarse_shift = coarse_counter[j-1]; size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { fine_to_coarse[i] += coarse_shift; } } /* index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) big_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]+ my_first_cpt; } comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); */ if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); } if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif //for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { if (CF_marker[i] < 0) { for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if ( i==i1 ) /* diagonal of A only */ { a_diag[i] = 1.0/A_diag_data[jj]; } } } } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE #endif #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (jl < rest) { ns = jl*size+jl; ne = (jl+1)*size+jl+1; } else { ns = jl*size+rest; ne = (jl+1)*size+rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl-1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl-1]; P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); if (num_cols_A_offd) P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); else P_marker_offd = NULL; for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_A_offd; i++) { P_marker_offd[i] = -1; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
*--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if ((CF_marker[i1] >= 0) && (method > 0)) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; /* if(method == 0) { P_diag_data[jj_counter] = 0.0; } */ if (method == 1) { P_diag_data[jj_counter] = - A_diag_data[jj]; } else if (method == 2) { P_diag_data[jj_counter] = - A_diag_data[jj]*a_diag[i]; } jj_counter++; } } /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. *-----------------------------------------------------------*/ if ((CF_marker_offd[i1] >= 0) && (method > 0)) { P_marker_offd[i1] = jj_counter_offd; /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/ P_offd_j[jj_counter_offd] = i1; /* if(method == 0) { P_offd_data[jj_counter_offd] = 0.0; } */ if (method == 1) { P_offd_data[jj_counter_offd] = - A_offd_data[jj]; } else if (method == 2) { P_offd_data[jj_counter_offd] = - A_offd_data[jj]*a_diag[i]; } jj_counter_offd++; } } } } P_offd_i[i+1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } hypre_TFree(a_diag, HYPRE_MEMORY_HOST); P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < num_cols_A_offd; i++) P_marker[i] = 0; num_cols_P_offd = 0; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { num_cols_P_offd++; P_marker[index] = 1; } } col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < num_cols_P_offd; i++) { while (P_marker[index]==0) index++; tmp_map_offd[i] = index++; } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); 
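   /* Reading aid (derived from the loops above, not hypre documentation):
    * with the C/F splitting, the interpolation operator assembled here has
    * the block form P = [ Wfc ; I ], where the F-point block is
    *    Wfc = 0                  (method == 0, pure injection),
    *    Wfc = -A_fc              (method == 1),
    *    Wfc = -D_ff^{-1} A_fc    (method == 2, D_ff = diag of A at F-points),
    * and every C-point row carries a single unit entry. */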
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); //hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); return(0); } /* Interpolation for MGR - Dynamic Row Sum method */ HYPRE_Int hypre_MGRBuildPDRS( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int blk_size, HYPRE_Int reserved_coarse_size, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Real *a_diag; hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *tmp_map_offd; HYPRE_Int *CF_marker_offd = NULL; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter,jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; // HYPRE_Int jj_begin_row,jj_begin_row_offd; // HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; //HYPRE_BigInt *fine_to_coarse_offd; HYPRE_Int *coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; //HYPRE_BigInt my_first_cpt; HYPRE_Int num_cols_P_offd; HYPRE_Int i,i1; HYPRE_Int j,jl,jj; HYPRE_Int start; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_threads = hypre_NumThreads(); #ifdef HYPRE_NO_GLOBAL_PARTITION //my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else //my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; } if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = 
hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc} *--------------------------------------------------------------------*/ else { for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if (CF_marker[i1] >= 0) { jj_count[j]++; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } } /*-------------------------------------------------------------------- * Set up the indexes for the DRS method *--------------------------------------------------------------------*/ } } /*----------------------------------------------------------------------- * Allocate arrays. 
*-----------------------------------------------------------------------*/ for (i=0; i < num_threads-1; i++) { coarse_counter[i+1] += coarse_counter[i]; jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. *-----------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) coarse_shift = coarse_counter[j-1]; size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) fine_to_coarse[i] += coarse_shift; } /*index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); */ if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); } if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif //for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; /*----------------------------------------------------------------------- * Loop over fine grid points. 
*-----------------------------------------------------------------------*/ a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if ( i==i1 ) /* diagonal of A only */ { a_diag[i] = 1.0/A_diag_data[jj]; } } } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE #endif #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (jl < rest) { ns = jl*size+jl; ne = (jl+1)*size+jl+1; } else { ns = jl*size+rest; ne = (jl+1)*size+rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl-1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl-1]; P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); if (num_cols_A_offd) P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); else P_marker_offd = NULL; for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_A_offd; i++) { P_marker_offd[i] = -1; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = - A_diag_data[jj]*a_diag[i]; jj_counter++; } } /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. 
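 * (As in the diagonal part, the weight is actually set directly to
 *  -a_offd(i,i1)/a_ii below rather than first being zeroed.)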
*-----------------------------------------------------------*/ if (CF_marker_offd[i1] >= 0) { P_marker_offd[i1] = jj_counter_offd; /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/ P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = - A_offd_data[jj]*a_diag[i]; jj_counter_offd++; } } } } P_offd_i[i+1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } hypre_TFree(a_diag, HYPRE_MEMORY_HOST); P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < num_cols_A_offd; i++) P_marker[i] = 0; num_cols_P_offd = 0; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { num_cols_P_offd++; P_marker[index] = 1; } } tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < num_cols_P_offd; i++) { while (P_marker[index]==0) index++; tmp_map_offd[i] = index++; } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); // hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); return(0); } /* Scale ParCSR matrix A = scalar * A * A: the target CSR matrix * vector: array of real numbers */ HYPRE_Int hypre_ParCSRMatrixLeftScale(HYPRE_Real *vector, hypre_ParCSRMatrix *A) { HYPRE_Int i, j, n_local; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); n_local = hypre_CSRMatrixNumRows(A_diag); for (i = 0; i < n_local; i++) { HYPRE_Real factor = vector[i]; for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { A_diag_data[j] *= factor; } for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { A_offd_data[j] *= factor; } } return(0); } /************************************************************ * Available methods: * 0: inv(A_FF) approximated by its diagonal inverse * 1: inv(A_FF) 
 *    approximated by a sparse approximate inverse
 *************************************************************/
HYPRE_Int
hypre_MGRComputeNonGalerkinCoarseGrid(hypre_ParCSRMatrix *A,
                                      hypre_ParCSRMatrix *P,
                                      hypre_ParCSRMatrix *RT,
                                      HYPRE_Int bsize,
                                      HYPRE_Int ordering,
                                      HYPRE_Int method,
                                      HYPRE_Int Pmax,
                                      HYPRE_Int keep_stencil,
                                      HYPRE_Int *CF_marker,
                                      hypre_ParCSRMatrix **A_h_ptr)
{
   HYPRE_Int *c_marker, *f_marker;
   HYPRE_Int n_local_fine_grid, i, i1, jj;
   hypre_ParCSRMatrix *A_cc;
   hypre_ParCSRMatrix *A_ff;
   hypre_ParCSRMatrix *A_fc;
   hypre_ParCSRMatrix *A_cf;
   hypre_ParCSRMatrix *A_h;
   hypre_ParCSRMatrix *A_h_correction;
   HYPRE_Int max_elmts = Pmax;
   // HYPRE_Real wall_time = 0.;
   hypre_ParCSRMatrix *P_mod = NULL;
   HYPRE_Int my_id;
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);

   hypre_MPI_Comm_rank(comm, &my_id);
   n_local_fine_grid = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   c_marker = hypre_CTAlloc(HYPRE_Int, n_local_fine_grid, HYPRE_MEMORY_HOST);
   f_marker = hypre_CTAlloc(HYPRE_Int, n_local_fine_grid, HYPRE_MEMORY_HOST);
   for (i = 0; i < n_local_fine_grid; i++)
   {
      HYPRE_Int point_type = CF_marker[i];
      assert(point_type == 1 || point_type == -1);
      c_marker[i] = point_type;
      f_marker[i] = -point_type;
   }

   // get the A_cc sub-block
   hypre_MGRGetSubBlock(A, c_marker, c_marker, 0, &A_cc);

   if (method == 0)
   {
      if (keep_stencil)
      {
         //wall_time = time_getWallclockSeconds();
         hypre_MGRGetSubBlock(A, c_marker, f_marker, 0, &A_cf);
         hypre_MGRGetSubBlock(A, f_marker, c_marker, 0, &A_fc);
         hypre_MGRGetSubBlock(A, f_marker, f_marker, 0, &A_ff);

         // extract the diagonal of A_ff and compute D_ff_inv
         hypre_CSRMatrix *A_ff_diag = hypre_ParCSRMatrixDiag(A_ff);
         HYPRE_Real *A_ff_diag_data = hypre_CSRMatrixData(A_ff_diag);
         HYPRE_Int *A_ff_diag_i = hypre_CSRMatrixI(A_ff_diag);
         HYPRE_Int *A_ff_diag_j = hypre_CSRMatrixJ(A_ff_diag);
         HYPRE_Int n_local_fpoints = hypre_CSRMatrixNumRows(A_ff_diag);

         HYPRE_Real *D_ff_inv;
         D_ff_inv = hypre_CTAlloc(HYPRE_Real, n_local_fpoints, HYPRE_MEMORY_HOST);
         for (i = 0; i < n_local_fpoints; i++)
         {
            for (jj = A_ff_diag_i[i]; jj < A_ff_diag_i[i+1]; jj++)
            {
               i1 = A_ff_diag_j[jj];
               if (i == i1) /* diagonal entry of A_ff */
               {
                  D_ff_inv[i] = -1.0/A_ff_diag_data[jj];
               }
            }
         }

         // extract the diagonal of A_cf
         hypre_CSRMatrix *A_cf_diag = hypre_ParCSRMatrixDiag(A_cf);
         HYPRE_Real *A_cf_diag_data = hypre_CSRMatrixData(A_cf_diag);
         HYPRE_Int *A_cf_diag_i = hypre_CSRMatrixI(A_cf_diag);
         HYPRE_Int *A_cf_diag_j = hypre_CSRMatrixJ(A_cf_diag);
         n_local_fpoints = hypre_CSRMatrixNumRows(A_cf_diag);

         HYPRE_Real *D_cf;
         D_cf = hypre_CTAlloc(HYPRE_Real, n_local_fpoints, HYPRE_MEMORY_HOST);
         for (i = 0; i < n_local_fpoints; i++)
         {
            jj = A_cf_diag_i[i];          /* first stored entry of row i of A_cf */
            i1 = A_cf_diag_j[jj];
            D_cf[i] = A_cf_diag_data[jj];
         }

         // compute the triple product
         hypre_ParCSRMatrixLeftScale(D_ff_inv, A_fc);
         hypre_ParCSRMatrixLeftScale(D_cf, A_fc);
         A_h_correction = A_fc;

         hypre_TFree(D_cf, HYPRE_MEMORY_HOST);
         hypre_TFree(D_ff_inv, HYPRE_MEMORY_HOST);
         hypre_ParCSRMatrixDestroy(A_ff);
         hypre_ParCSRMatrixDestroy(A_cf);
         //wall_time = time_getWallclockSeconds() - wall_time;
         //hypre_printf("Compute triple product D_cf * D_ff_inv * A_fc time: %1.5f\n", wall_time);
      }
      else
      {
         //wall_time = time_getWallclockSeconds();
         P_mod = hypre_ParCSRMatrixCompleteClone(P);
         hypre_ParCSRMatrixCopy(P, P_mod, 1);
         HYPRE_Int n_local_rows = hypre_ParCSRMatrixNumRows(P_mod);
         hypre_CSRMatrix *P_mod_diag = hypre_ParCSRMatrixDiag(P_mod);
         HYPRE_Int *P_mod_diag_i = hypre_CSRMatrixI(P_mod_diag);
         HYPRE_Real *P_mod_diag_data = hypre_CSRMatrixData(P_mod_diag);
         for (i = 0; i < n_local_rows; i++)
         {
            if (CF_marker[i] >= 0)
            {
               HYPRE_Int ii = P_mod_diag_i[i];
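               /* Each C-point row of P carries a single identity entry. Zeroing it
                  here leaves only the F-point (W) part in P_mod, so the Galerkin
                  product RT * A * P_mod below produces just the correction term
                  that is later added to A_cc to form the coarse operator. */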
P_mod_diag_data[ii] = 0.0; } } hypre_BoomerAMGBuildCoarseOperator(RT, A, P_mod, &A_h_correction); //wall_time = time_getWallclockSeconds() - wall_time; //hypre_printf("Compute triple product time new: %1.5f\n", wall_time); hypre_ParCSRMatrixDestroy(P_mod); } } else { // Approximate inverse for ideal interploation hypre_MGRGetSubBlock(A, c_marker, f_marker, 0, &A_cf); hypre_MGRGetSubBlock(A, f_marker, c_marker, 0, &A_fc); hypre_MGRGetSubBlock(A, f_marker, f_marker, 0, &A_ff); hypre_ParCSRMatrix *A_ff_inv = NULL; hypre_ParCSRMatrix *minus_Wp = NULL; hypre_MGRApproximateInverse(A_ff, &A_ff_inv); minus_Wp = hypre_ParMatmul(A_ff_inv, A_fc); A_h_correction = hypre_ParMatmul(A_cf, minus_Wp); hypre_ParCSRMatrixDestroy(minus_Wp); hypre_ParCSRMatrixDestroy(A_ff); hypre_ParCSRMatrixDestroy(A_fc); hypre_ParCSRMatrixDestroy(A_cf); } // perform dropping for A_h_correction // specific to multiphase poromechanics // we only keep the diagonal of each block //wall_time = time_getWallclockSeconds(); HYPRE_Int n_local_cpoints = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_h_correction)); hypre_CSRMatrix *A_h_correction_diag = hypre_ParCSRMatrixDiag(A_h_correction); HYPRE_Real *A_h_correction_diag_data = hypre_CSRMatrixData(A_h_correction_diag); HYPRE_Int *A_h_correction_diag_i = hypre_CSRMatrixI(A_h_correction_diag); HYPRE_Int *A_h_correction_diag_j = hypre_CSRMatrixJ(A_h_correction_diag); HYPRE_Int ncol_diag = hypre_CSRMatrixNumCols(A_h_correction_diag); hypre_CSRMatrix *A_h_correction_offd = hypre_ParCSRMatrixOffd(A_h_correction); HYPRE_Real *A_h_correction_offd_data = hypre_CSRMatrixData(A_h_correction_offd); HYPRE_Int *A_h_correction_offd_i = hypre_CSRMatrixI(A_h_correction_offd); HYPRE_Int *A_h_correction_offd_j = hypre_CSRMatrixJ(A_h_correction_offd); if (Pmax > 0) { if (ordering == 0) // interleaved ordering { HYPRE_Int *A_h_correction_diag_i_new = hypre_CTAlloc(HYPRE_Int, n_local_cpoints+1, HYPRE_MEMORY_HOST); HYPRE_Int *A_h_correction_diag_j_new = hypre_CTAlloc(HYPRE_Int, (bsize + max_elmts)*n_local_cpoints, HYPRE_MEMORY_HOST); HYPRE_Complex *A_h_correction_diag_data_new = hypre_CTAlloc(HYPRE_Complex, (bsize + max_elmts)*n_local_cpoints, HYPRE_MEMORY_HOST); HYPRE_Int num_nonzeros_diag_new = 0; HYPRE_Int *A_h_correction_offd_i_new = hypre_CTAlloc(HYPRE_Int, n_local_cpoints+1, HYPRE_MEMORY_HOST); HYPRE_Int *A_h_correction_offd_j_new = hypre_CTAlloc(HYPRE_Int, max_elmts*n_local_cpoints, HYPRE_MEMORY_HOST); HYPRE_Complex *A_h_correction_offd_data_new = hypre_CTAlloc(HYPRE_Complex, max_elmts*n_local_cpoints, HYPRE_MEMORY_HOST); HYPRE_Int num_nonzeros_offd_new = 0; for (i = 0; i < n_local_cpoints; i++) { HYPRE_Int max_num_nonzeros = A_h_correction_diag_i[i+1] - A_h_correction_diag_i[i] + A_h_correction_offd_i[i+1] - A_h_correction_offd_i[i]; HYPRE_Int *aux_j = hypre_CTAlloc(HYPRE_Int, max_num_nonzeros, HYPRE_MEMORY_HOST); HYPRE_Real *aux_data = hypre_CTAlloc(HYPRE_Real, max_num_nonzeros, HYPRE_MEMORY_HOST); HYPRE_Int row_start = i - (i % bsize); HYPRE_Int row_stop = row_start + bsize - 1; HYPRE_Int cnt = 0; for (jj = A_h_correction_offd_i[i]; jj < A_h_correction_offd_i[i+1]; jj++) { aux_j[cnt] = A_h_correction_offd_j[jj] + ncol_diag; aux_data[cnt] = A_h_correction_offd_data[jj]; cnt++; } for (jj = A_h_correction_diag_i[i]; jj < A_h_correction_diag_i[i+1]; jj++) { aux_j[cnt] = A_h_correction_diag_j[jj]; aux_data[cnt] = A_h_correction_diag_data[jj]; cnt++; } hypre_qsort2_abs(aux_j, aux_data, 0, cnt-1); for (jj = A_h_correction_diag_i[i]; jj < A_h_correction_diag_i[i+1]; jj++) { i1 = 
A_h_correction_diag_j[jj]; if (i1 >= row_start && i1 <= row_stop) { // copy data to new arrays A_h_correction_diag_j_new[num_nonzeros_diag_new] = i1; A_h_correction_diag_data_new[num_nonzeros_diag_new] = A_h_correction_diag_data[jj]; ++num_nonzeros_diag_new; } else { // Do nothing } } if (max_elmts > 0) { for (jj = 0; jj < hypre_min(max_elmts, cnt); jj++) { HYPRE_Int col_idx = aux_j[jj]; HYPRE_Real col_value = aux_data[jj]; if (col_idx < ncol_diag && (col_idx < row_start || col_idx > row_stop)) { A_h_correction_diag_j_new[num_nonzeros_diag_new] = col_idx; A_h_correction_diag_data_new[num_nonzeros_diag_new] = col_value; ++num_nonzeros_diag_new; } else if (col_idx >= ncol_diag) { A_h_correction_offd_j_new[num_nonzeros_offd_new] = col_idx - ncol_diag; A_h_correction_offd_data_new[num_nonzeros_offd_new] = col_value; ++num_nonzeros_offd_new; } } } A_h_correction_diag_i_new[i+1] = num_nonzeros_diag_new; A_h_correction_offd_i_new[i+1] = num_nonzeros_offd_new; hypre_TFree(aux_j, HYPRE_MEMORY_HOST); hypre_TFree(aux_data, HYPRE_MEMORY_HOST); } hypre_TFree(A_h_correction_diag_i, HYPRE_MEMORY_HOST); hypre_TFree(A_h_correction_diag_j, HYPRE_MEMORY_HOST); hypre_TFree(A_h_correction_diag_data, HYPRE_MEMORY_HOST); hypre_CSRMatrixI(A_h_correction_diag) = A_h_correction_diag_i_new; hypre_CSRMatrixJ(A_h_correction_diag) = A_h_correction_diag_j_new; hypre_CSRMatrixData(A_h_correction_diag) = A_h_correction_diag_data_new; hypre_CSRMatrixNumNonzeros(A_h_correction_diag) = num_nonzeros_diag_new; if (A_h_correction_offd_i) hypre_TFree(A_h_correction_offd_i, HYPRE_MEMORY_HOST); if (A_h_correction_offd_j) hypre_TFree(A_h_correction_offd_j, HYPRE_MEMORY_HOST); if (A_h_correction_offd_data) hypre_TFree(A_h_correction_offd_data, HYPRE_MEMORY_HOST); hypre_CSRMatrixI(A_h_correction_offd) = A_h_correction_offd_i_new; hypre_CSRMatrixJ(A_h_correction_offd) = A_h_correction_offd_j_new; hypre_CSRMatrixData(A_h_correction_offd) = A_h_correction_offd_data_new; hypre_CSRMatrixNumNonzeros(A_h_correction_offd) = num_nonzeros_offd_new; } else { hypre_printf("Error!! 
Block ordering is not supported at the moment\n"); exit(-1); } } //hypre_MGRParCSRMatrixTruncate(A_h_correction, max_elmts); //wall_time = time_getWallclockSeconds() - wall_time; //hypre_printf("Filter A_h_correction time: %1.5f\n", wall_time); //hypre_ParCSRMatrixPrintIJ(A_h_correction,1,1,"A_h_correction_filtered"); // coarse grid / schur complement hypre_ParcsrAdd(1.0, A_cc, 1.0, A_h_correction, &A_h); *A_h_ptr = A_h; //hypre_ParCSRMatrixPrintIJ(A_h,1,1,"A_h"); hypre_ParCSRMatrixDestroy(A_cc); hypre_ParCSRMatrixDestroy(A_h_correction); hypre_TFree(c_marker, HYPRE_MEMORY_HOST); hypre_TFree(f_marker, HYPRE_MEMORY_HOST); return hypre_error_flag; } HYPRE_Int hypre_MGRComputeAlgebraicFixedStress(hypre_ParCSRMatrix *A, HYPRE_BigInt *mgr_idx_array, HYPRE_Solver A_ff_solver) { HYPRE_Int *U_marker, *S_marker, *P_marker; HYPRE_Int n_fine, i; HYPRE_BigInt ibegin; hypre_ParCSRMatrix *A_up; hypre_ParCSRMatrix *A_uu; hypre_ParCSRMatrix *A_su; hypre_ParCSRMatrix *A_pu; hypre_ParVector *e1_vector; hypre_ParVector *e2_vector; hypre_ParVector *e3_vector; hypre_ParVector *e4_vector; hypre_ParVector *e5_vector; n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); ibegin = hypre_ParCSRMatrixFirstRowIndex(A); hypre_assert(ibegin == mgr_idx_array[0]); U_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); S_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { U_marker[i] = -1; S_marker[i] = -1; P_marker[i] = -1; } // create C and F markers for (i = 0; i < n_fine; i++) { if (i < mgr_idx_array[1] - ibegin) { U_marker[i] = 1; } else if (i >= (mgr_idx_array[1] - ibegin) && i < (mgr_idx_array[2] - ibegin)) { S_marker[i] = 1; } else { P_marker[i] = 1; } } // Get A_up hypre_MGRGetSubBlock(A, U_marker, P_marker, 0, &A_up); // GetA_uu hypre_MGRGetSubBlock(A, U_marker, U_marker, 0, &A_uu); // Get A_su hypre_MGRGetSubBlock(A, S_marker, U_marker, 0, &A_su); // Get A_pu hypre_MGRGetSubBlock(A, P_marker, U_marker, 0, &A_pu); e1_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_up), hypre_ParCSRMatrixGlobalNumCols(A_up), hypre_ParCSRMatrixColStarts(A_up)); hypre_ParVectorInitialize(e1_vector); hypre_ParVectorSetPartitioningOwner(e1_vector,0); hypre_ParVectorSetConstantValues(e1_vector, 1.0); e2_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_uu), hypre_ParCSRMatrixGlobalNumRows(A_uu), hypre_ParCSRMatrixRowStarts(A_uu)); hypre_ParVectorInitialize(e2_vector); hypre_ParVectorSetPartitioningOwner(e2_vector,0); hypre_ParVectorSetConstantValues(e2_vector, 0.0); e3_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_uu), hypre_ParCSRMatrixGlobalNumRows(A_uu), hypre_ParCSRMatrixRowStarts(A_uu)); hypre_ParVectorInitialize(e3_vector); hypre_ParVectorSetPartitioningOwner(e3_vector,0); hypre_ParVectorSetConstantValues(e3_vector, 0.0); e4_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_su), hypre_ParCSRMatrixGlobalNumRows(A_su), hypre_ParCSRMatrixRowStarts(A_su)); hypre_ParVectorInitialize(e4_vector); hypre_ParVectorSetPartitioningOwner(e4_vector,0); hypre_ParVectorSetConstantValues(e4_vector, 0.0); e5_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_pu), hypre_ParCSRMatrixGlobalNumRows(A_pu), hypre_ParCSRMatrixRowStarts(A_pu)); hypre_ParVectorInitialize(e5_vector); hypre_ParVectorSetPartitioningOwner(e5_vector,0); hypre_ParVectorSetConstantValues(e5_vector, 0.0); // compute e2 = A_up * e1 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_up, e1_vector, 0.0, e2_vector, e2_vector); // solve e3 = 
A_uu^-1 * e2 hypre_BoomerAMGSolve(A_ff_solver, A_uu, e2_vector, e3_vector); // compute e4 = A_su * e3 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_su, e3_vector, 0.0, e4_vector, e4_vector); // compute e4 = A_su * e3 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_su, e3_vector, 0.0, e4_vector, e4_vector); // print e4 hypre_ParVectorPrintIJ(e4_vector,1,"Dsp"); // compute e5 = A_pu * e3 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_pu, e3_vector, 0.0, e5_vector, e5_vector); hypre_ParVectorPrintIJ(e5_vector,1,"Dpp"); hypre_ParVectorDestroy(e1_vector); hypre_ParVectorDestroy(e2_vector); hypre_ParVectorDestroy(e3_vector); hypre_ParCSRMatrixDestroy(A_uu); hypre_ParCSRMatrixDestroy(A_up); hypre_ParCSRMatrixDestroy(A_pu); hypre_ParCSRMatrixDestroy(A_su); hypre_TFree(U_marker, HYPRE_MEMORY_HOST); hypre_TFree(S_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); return hypre_error_flag; } HYPRE_Int hypre_MGRApproximateInverse(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **A_inv) { HYPRE_Int print_level, mr_max_row_nnz, mr_max_iter, nsh_max_row_nnz, nsh_max_iter, mr_col_version; HYPRE_Real mr_tol, nsh_tol; HYPRE_Real *droptol = hypre_CTAlloc(HYPRE_Real, 2, HYPRE_MEMORY_HOST); hypre_ParCSRMatrix *approx_A_inv = NULL; print_level = 0; nsh_max_iter = 2; nsh_max_row_nnz = 2; // default 1000 mr_max_iter = 1; mr_tol = 1.0e-3; mr_max_row_nnz = 2; // default 800 mr_col_version = 0; nsh_tol = 1.0e-3; droptol[0] = 1.0e-2; droptol[1] = 1.0e-2; hypre_ILUParCSRInverseNSH(A, &approx_A_inv, droptol, mr_tol, nsh_tol, DIVIDE_TOL, mr_max_row_nnz, nsh_max_row_nnz, mr_max_iter, nsh_max_iter, mr_col_version, print_level); *A_inv = approx_A_inv; if (droptol) hypre_TFree(droptol, HYPRE_MEMORY_HOST); return hypre_error_flag; } HYPRE_Int hypre_MGRBuildInterpApproximateInverseExp(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *S, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_ptr) { HYPRE_Int *C_marker; HYPRE_Int *F_marker; hypre_ParCSRMatrix *A_fc; hypre_ParCSRMatrix *minus_Wp; MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int jj_counter,jj_counter_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); HYPRE_Int *fine_to_coarse = NULL; HYPRE_Int coarse_counter; HYPRE_BigInt total_global_cpts; HYPRE_Int num_cols_P_offd; // HYPRE_BigInt my_first_cpt; HYPRE_Int i, jj; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; // HYPRE_Int num_threads; // HYPRE_Real wall_time; /* for debugging instrumentation */ C_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); F_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); // create C and F markers for (i = 0; i < n_fine; i++) { C_marker[i] = (CF_marker[i] == 1)? 1: -1; F_marker[i] = (CF_marker[i] == 1) ? 
-1: 1; } // Get A_FC hypre_MGRGetSubBlock(A, F_marker, C_marker, 0, &A_fc); // compute -Wp minus_Wp = hypre_ParMatmul(S, A_fc); hypre_CSRMatrix *minus_Wp_diag = hypre_ParCSRMatrixDiag(minus_Wp); HYPRE_Real *minus_Wp_diag_data = hypre_CSRMatrixData(minus_Wp_diag); HYPRE_Int *minus_Wp_diag_i = hypre_CSRMatrixI(minus_Wp_diag); HYPRE_Int *minus_Wp_diag_j = hypre_CSRMatrixJ(minus_Wp_diag); hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp); HYPRE_Real *minus_Wp_offd_data = hypre_CSRMatrixData(minus_Wp_offd); HYPRE_Int *minus_Wp_offd_i = hypre_CSRMatrixI(minus_Wp_offd); HYPRE_Int *minus_Wp_offd_j = hypre_CSRMatrixJ(minus_Wp_offd); hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); // num_threads = hypre_NumThreads(); #ifdef HYPRE_NO_GLOBAL_PARTITION // my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else // my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ HYPRE_Int row_counter = 0; coarse_counter = 0; for (i = 0; i < n_fine; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] > 0) { jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } else { /*-------------------------------------------------------------------- * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc} *--------------------------------------------------------------------*/ for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter+1]; jj++) { jj_counter++; } if (num_procs > 1) { for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter+1]; jj++) { jj_counter_offd++; } } row_counter++; } } /*----------------------------------------------------------------------- * Allocate arrays. 
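 * (After the counting pass above, jj_counter and jj_counter_offd hold the
 *  number of nonzeros in the diagonal and off-diagonal parts of P.)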
*-----------------------------------------------------------------------*/ P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. *-----------------------------------------------------------------------*/ row_counter = 0; for (i = 0; i < n_fine; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter+1]; jj++) { P_diag_j[jj_counter] = minus_Wp_diag_j[jj]; P_diag_data[jj_counter] = - minus_Wp_diag_data[jj]; jj_counter++; } /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; if (num_procs > 1) { for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter+1]; jj++) { P_offd_j[jj_counter_offd] = minus_Wp_offd_j[jj]; P_offd_data[jj_counter_offd] = - minus_Wp_offd_data[jj]; jj_counter_offd++; } } row_counter++; } P_offd_i[i+1] = jj_counter_offd; } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; num_cols_P_offd = hypre_CSRMatrixNumCols(minus_Wp_offd); HYPRE_BigInt *col_map_offd_tmp = hypre_ParCSRMatrixColMapOffd(minus_Wp); if (P_offd_size) { col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_P_offd; i++) { col_map_offd_P[i] = col_map_offd_tmp[i]; } } if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_MatvecCommPkgCreate(P); *P_ptr = P; hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(C_marker, HYPRE_MEMORY_HOST); hypre_TFree(F_marker, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDestroy(A_fc); hypre_ParCSRMatrixDestroy(minus_Wp); return 0; } HYPRE_Int hypre_MGRBuildInterpApproximateInverse(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int debug_flag, 
hypre_ParCSRMatrix **P_ptr) { HYPRE_Int *C_marker; HYPRE_Int *F_marker; hypre_ParCSRMatrix *A_ff; hypre_ParCSRMatrix *A_fc; hypre_ParCSRMatrix *A_ff_inv; hypre_ParCSRMatrix *minus_Wp; MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int jj_counter,jj_counter_offd; //HYPRE_Int jj_begin_row,jj_begin_row_offd; //HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); HYPRE_Int *fine_to_coarse = NULL; //HYPRE_Int *coarse_counter; HYPRE_Int coarse_counter; HYPRE_BigInt total_global_cpts; HYPRE_Int num_cols_P_offd; // HYPRE_BigInt my_first_cpt; HYPRE_Int i,jj; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; // HYPRE_Int num_threads; // HYPRE_Real wall_time; /* for debugging instrumentation */ C_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); F_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); // create C and F markers for (i = 0; i < n_fine; i++) { C_marker[i] = (CF_marker[i] == 1)? 1: -1; F_marker[i] = (CF_marker[i] == 1) ? -1: 1; } // Get A_FF hypre_MGRGetSubBlock(A, F_marker, F_marker, 0, &A_ff); // Get A_FC hypre_MGRGetSubBlock(A, F_marker, C_marker, 0, &A_fc); hypre_MGRApproximateInverse(A_ff, &A_ff_inv); hypre_ParCSRMatrixPrintIJ(A_ff_inv, 1, 1, "A_ff_inv"); hypre_ParCSRMatrixPrintIJ(A_fc, 1, 1, "A_fc"); minus_Wp = hypre_ParMatmul(A_ff_inv, A_fc); hypre_ParCSRMatrixPrintIJ(minus_Wp, 1, 1, "Wp"); hypre_CSRMatrix *minus_Wp_diag = hypre_ParCSRMatrixDiag(minus_Wp); HYPRE_Real *minus_Wp_diag_data = hypre_CSRMatrixData(minus_Wp_diag); HYPRE_Int *minus_Wp_diag_i = hypre_CSRMatrixI(minus_Wp_diag); HYPRE_Int *minus_Wp_diag_j = hypre_CSRMatrixJ(minus_Wp_diag); hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp); HYPRE_Real *minus_Wp_offd_data = hypre_CSRMatrixData(minus_Wp_offd); HYPRE_Int *minus_Wp_offd_i = hypre_CSRMatrixI(minus_Wp_offd); HYPRE_Int *minus_Wp_offd_j = hypre_CSRMatrixJ(minus_Wp_offd); //hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp); //HYPRE_Int num_cols_minus_Wp_offd = hypre_CSRMatrixNumCols(minus_Wp_offd); hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); // num_threads = hypre_NumThreads(); #ifdef HYPRE_NO_GLOBAL_PARTITION // my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else // my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. 
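 * (fine_to_coarse[i] records the local coarse index of C-point i and
 *  remains -1 for F-points.)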
*-----------------------------------------------------------------------*/ //coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); //jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); //jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ HYPRE_Int row_counter = 0; coarse_counter = 0; for (i = 0; i < n_fine; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] > 0) { //jj_count[j]++; //fine_to_coarse[i] = coarse_counter[j]; //coarse_counter[j]++; jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } else { /*-------------------------------------------------------------------- * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc} *--------------------------------------------------------------------*/ for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter+1]; jj++) { //jj_count[j]++; jj_counter++; } if (num_procs > 1) { for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter+1]; jj++) { //jj_count_offd[j]++; jj_counter_offd++; } } row_counter++; } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ /* for (i=0; i < num_threads-1; i++) { coarse_counter[i+1] += coarse_counter[i]; jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; */ P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_DEVICE); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; /* if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } */ /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. 
*-----------------------------------------------------------------------*/ /* if (num_procs > 1) { if (debug_flag==4) wall_time = time_getWallclockSeconds(); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_minus_Wp_offd, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { fine_to_coarse[i] += my_first_cpt; } comm_pkg = hypre_ParCSRMatrixCommPkg(minus_Wp); if (!comm_pkg) { hypre_MatvecCommPkgCreate(minus_Wp); comm_pkg = hypre_ParCSRMatrixCommPkg(minus_Wp); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); } if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; } */ row_counter = 0; for (i = 0; i < n_fine; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
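 * (Each F-point row copies the corresponding row of minus_Wp = inv(A_FF)*A_FC
 *  with flipped sign, i.e. approximate ideal interpolation -inv(A_FF)*A_FC.)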
*--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter+1]; jj++) { //P_marker[row_counter] = jj_counter; P_diag_j[jj_counter] = minus_Wp_diag_j[jj]; P_diag_data[jj_counter] = - minus_Wp_diag_data[jj]; jj_counter++; } /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; if (num_procs > 1) { for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter+1]; jj++) { //P_marker_offd[row_counter] = jj_counter_offd; P_offd_j[jj_counter_offd] = minus_Wp_offd_j[jj]; P_offd_data[jj_counter_offd] = - minus_Wp_offd_data[jj]; jj_counter_offd++; } } row_counter++; } P_offd_i[i+1] = jj_counter_offd; } //hypre_printf("Num rows of Wp = %d\n", row_counter); //P_offd_i[row_counter] = jj_counter_offd; P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; num_cols_P_offd = hypre_CSRMatrixNumCols(minus_Wp_offd); HYPRE_BigInt *col_map_offd_tmp = hypre_ParCSRMatrixColMapOffd(minus_Wp); if (P_offd_size) { col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_P_offd; i++) { col_map_offd_P[i] = col_map_offd_tmp[i]; } } /* num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_minus_Wp_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < num_cols_minus_Wp_offd; i++) P_marker[i] = 0; num_cols_P_offd = 0; for (i=0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker[index]) { num_cols_P_offd++; P_marker[index] = 1; } } col_map_offd_P = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < num_cols_P_offd; i++) { while (P_marker[index]==0) index++; col_map_offd_P[i] = index++; } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(col_map_offd_P, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } */ if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_MatvecCommPkgCreate(P); //hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse_offd); *P_ptr = P; //hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); //hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); //if (fine_to_coarse_offd) hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); //hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); //hypre_TFree(jj_count, HYPRE_MEMORY_HOST); //hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); hypre_TFree(C_marker, HYPRE_MEMORY_HOST); hypre_TFree(F_marker, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDestroy(A_ff); hypre_ParCSRMatrixDestroy(A_fc); hypre_ParCSRMatrixDestroy(A_ff_inv); hypre_ParCSRMatrixDestroy(minus_Wp); return 0; } /* Setup interpolation operator */ HYPRE_Int hypre_MGRBuildInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, 
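                                          /* strength matrix for classical interpolation;
                                             for interp_type 99 it is reused as an approximation
                                             to inv(A_FF) (see hypre_MGRBuildInterpApproximateInverseExp) */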
HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P, HYPRE_Int interp_type, HYPRE_Int numsweeps) { //HYPRE_Int i; hypre_ParCSRMatrix *P_ptr = NULL; //HYPRE_Real jac_trunc_threshold = trunc_factor; //HYPRE_Real jac_trunc_threshold_minus = 0.5*jac_trunc_threshold; /* Interpolation for each level */ if (interp_type <3) { hypre_MGRBuildP( A,CF_marker,num_cpts_global,interp_type,debug_flag,&P_ptr); /* Could do a few sweeps of Jacobi to further improve P */ //for(i=0; i<numsweeps; i++) // hypre_BoomerAMGJacobiInterp(A, &P_ptr, S,1, NULL, CF_marker, 0, jac_trunc_threshold, jac_trunc_threshold_minus ); //hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts); } else if (interp_type == 4) { hypre_MGRBuildInterpApproximateInverse(A, CF_marker, num_cpts_global, debug_flag, &P_ptr); hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts); } else if (interp_type == 99) { hypre_MGRBuildInterpApproximateInverseExp(A, S, CF_marker, num_cpts_global, debug_flag, &P_ptr); hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts); } else { /* Classical modified interpolation */ hypre_BoomerAMGBuildInterp(A, CF_marker, S, num_cpts_global,1, NULL,debug_flag, trunc_factor, max_elmts, col_offd_S_to_A, &P_ptr); /* Do k steps of Jacobi build W for P = [-W I]. * Note that BoomerAMGJacobiInterp assumes you have some initial P, * hence we need to initialize P as above, before calling this routine. * If numsweeps = 0, the following step is skipped and P is returned as is. * Looping here is equivalent to improving P by Jacobi interpolation */ //for(i=0; i<numsweeps; i++) // hypre_BoomerAMGJacobiInterp(A, &P_ptr, S,1, NULL, CF_marker, // 0, jac_trunc_threshold, // jac_trunc_threshold_minus ); } /* set pointer to P */ *P = P_ptr; return hypre_error_flag; } /* Setup restriction operator */ HYPRE_Int hypre_MGRBuildRestrict(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Real S_commpkg_switch, HYPRE_Real strong_threshold, HYPRE_Real max_row_sum, hypre_ParCSRMatrix **R, HYPRE_Int restrict_type, HYPRE_Int numsweeps) { // HYPRE_Int i; hypre_ParCSRMatrix *R_ptr = NULL; hypre_ParCSRMatrix *AT = NULL; hypre_ParCSRMatrix *ST = NULL; HYPRE_Int *col_offd_ST_to_AT = NULL; // HYPRE_Real jac_trunc_threshold = trunc_factor; // HYPRE_Real jac_trunc_threshold_minus = 0.5*jac_trunc_threshold; /* Build AT (transpose A) */ if (restrict_type > 0) { hypre_ParCSRMatrixTranspose(A, &AT, 1); } if (restrict_type > 5) { /* Build new strength matrix */ hypre_BoomerAMGCreateS(AT, strong_threshold, max_row_sum, 1, NULL, &ST); /* use appropriate communication package for Strength matrix */ if (strong_threshold > S_commpkg_switch) hypre_BoomerAMGCreateSCommPkg(AT, ST, &col_offd_ST_to_AT); } /* Interpolation for each level */ if (restrict_type == 0) { hypre_MGRBuildP(A, CF_marker, num_cpts_global, restrict_type, debug_flag, &R_ptr); } else if (restrict_type == 1 || restrict_type == 2) { hypre_MGRBuildP(AT, CF_marker, num_cpts_global, restrict_type, debug_flag, &R_ptr); /* Could do a few sweeps of Jacobi to further improve P */ //for(i=0; i<numsweeps; i++) // hypre_BoomerAMGJacobiInterp(A, &R_ptr, S,1, NULL, CF_marker, 0, jac_trunc_threshold, jac_trunc_threshold_minus ); //hypre_BoomerAMGInterpTruncation(R_ptr, trunc_factor, max_elmts); } else if 
(restrict_type == 4) { hypre_MGRBuildInterpApproximateInverse(A, CF_marker, num_cpts_global, debug_flag, &R_ptr); hypre_BoomerAMGInterpTruncation(R_ptr, trunc_factor, max_elmts); } else { /* Classical modified interpolation */ hypre_BoomerAMGBuildInterp(AT, CF_marker, ST, num_cpts_global,1, NULL,debug_flag, trunc_factor, max_elmts, col_offd_ST_to_AT, &R_ptr); /* Do k steps of Jacobi build W for P = [-W I]. * Note that BoomerAMGJacobiInterp assumes you have some initial P, * hence we need to initialize P as above, before calling this routine. * If numsweeps = 0, the following step is skipped and P is returned as is. * Looping here is equivalent to improving P by Jacobi interpolation */ // for(i=0; i<numsweeps; i++) // hypre_BoomerAMGJacobiInterp(A, &R_ptr, S,1, NULL, CF_marker, 0, // jac_trunc_threshold, jac_trunc_threshold_minus); } /* set pointer to P */ *R = R_ptr; /* Free memory */ if (restrict_type > 0) { hypre_ParCSRMatrixDestroy(AT); } if (restrict_type > 5) { hypre_ParCSRMatrixDestroy(ST); if (col_offd_ST_to_AT) hypre_TFree(col_offd_ST_to_AT, HYPRE_MEMORY_HOST); } return hypre_error_flag; } void hypre_blas_smat_inv_n4 (HYPRE_Real *a) { const HYPRE_Real a11 = a[0], a12 = a[1], a13 = a[2], a14 = a[3]; const HYPRE_Real a21 = a[4], a22 = a[5], a23 = a[6], a24 = a[7]; const HYPRE_Real a31 = a[8], a32 = a[9], a33 = a[10], a34 = a[11]; const HYPRE_Real a41 = a[12], a42 = a[13], a43 = a[14], a44 = a[15]; const HYPRE_Real M11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42; const HYPRE_Real M12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43; const HYPRE_Real M13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42; const HYPRE_Real M14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33; const HYPRE_Real M21 = a21*a34*a43 + a23*a31*a44 + a24*a33*a41 - a21*a33*a44 - a23*a34*a41 - a24*a31*a43; const HYPRE_Real M22 = a11*a33*a44 + a13*a34*a41 + a14*a31*a43 - a11*a34*a43 - a13*a31*a44 - a14*a33*a41; const HYPRE_Real M23 = a11*a24*a43 + a13*a21*a44 + a14*a23*a41 - a11*a23*a44 - a13*a24*a41 - a14*a21*a43; const HYPRE_Real M24 = a11*a23*a34 + a13*a24*a31 + a14*a21*a33 - a11*a24*a33 - a13*a21*a34 - a14*a23*a31; const HYPRE_Real M31 = a21*a32*a44 + a22*a34*a41 + a24*a31*a42 - a21*a34*a42 - a22*a31*a44 - a24*a32*a41; const HYPRE_Real M32 = a11*a34*a42 + a12*a31*a44 + a14*a32*a41 - a11*a32*a44 - a12*a34*a41 - a14*a31*a42; const HYPRE_Real M33 = a11*a22*a44 + a12*a24*a41 + a14*a21*a42 - a11*a24*a42 - a12*a21*a44 - a14*a22*a41; const HYPRE_Real M34 = a11*a24*a32 + a12*a21*a34 + a14*a22*a31 - a11*a22*a34 - a12*a24*a31 - a14*a21*a32; const HYPRE_Real M41 = a21*a33*a42 + a22*a31*a43 + a23*a32*a41 - a21*a32*a43 - a22*a33*a41 - a23*a31*a42; const HYPRE_Real M42 = a11*a32*a43 + a12*a33*a41 + a13*a31*a42 - a11*a33*a42 - a12*a31*a43 - a13*a32*a41; const HYPRE_Real M43 = a11*a23*a42 + a12*a21*a43 + a13*a22*a41 - a11*a22*a43 - a12*a23*a41 - a13*a21*a42; const HYPRE_Real M44 = a11*a22*a33 + a12*a23*a31 + a13*a21*a32 - a11*a23*a32 - a12*a21*a33 - a13*a22*a31; const HYPRE_Real det = a11*M11 + a12*M21 + a13*M31 + a14*M41; HYPRE_Real det_inv; //if ( fabs(det) < 1e-22 ) { //hypre_printf("### WARNING: Matrix is nearly singular! 
det = %e\n", det); /* printf("##----------------------------------------------\n"); printf("## %12.5e %12.5e %12.5e \n", a0, a1, a2); printf("## %12.5e %12.5e %12.5e \n", a3, a4, a5); printf("## %12.5e %12.5e %12.5e \n", a5, a6, a7); printf("##----------------------------------------------\n"); getchar(); */ //} det_inv = 1.0/det; a[0] = M11*det_inv; a[1] = M12*det_inv; a[2] = M13*det_inv; a[3] = M14*det_inv; a[4] = M21*det_inv; a[5] = M22*det_inv; a[6] = M23*det_inv; a[7] = M24*det_inv; a[8] = M31*det_inv; a[9] = M32*det_inv; a[10] = M33*det_inv; a[11] = M34*det_inv; a[12] = M41*det_inv; a[13] = M42*det_inv; a[14] = M43*det_inv; a[15] = M44*det_inv; } void hypre_blas_mat_inv(HYPRE_Real *a, HYPRE_Int n) { HYPRE_Int i,j,k,l,u,kn,in; HYPRE_Real alinv; if (n == 4) { hypre_blas_smat_inv_n4(a); } else { for (k=0; k<n; ++k) { kn = k*n; l = kn+k; //if (fabs(a[l]) < SMALLREAL) { // printf("### WARNING: Diagonal entry is close to zero!"); // printf("### WARNING: diag_%d=%e\n", k, a[l]); // a[l] = SMALLREAL; //} alinv = 1.0/a[l]; a[l] = alinv; for (j=0; j<k; ++j) { u = kn+j; a[u] *= alinv; } for (j=k+1; j<n; ++j) { u = kn+j; a[u] *= alinv; } for (i=0; i<k; ++i) { in = i*n; for (j=0; j<n; ++j) if (j!=k) { u = in+j; a[u] -= a[in+k]*a[kn+j]; } // end if (j!=k) } for (i=k+1; i<n; ++i) { in = i*n; for (j=0; j<n; ++j) if (j!=k) { u = in+j; a[u] -= a[in+k]*a[kn+j]; } // end if (j!=k) } for (i=0; i<k; ++i) { u=i*n+k; a[u] *= -alinv; } for (i=k+1; i<n; ++i) { u=i*n+k; a[u] *= -alinv; } } // end for (k=0; k<n; ++k) }// end if } HYPRE_Int hypre_block_jacobi_scaling(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **B_ptr, void *mgr_vdata, HYPRE_Int debug_flag) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int num_procs, my_id; HYPRE_Int blk_size = (mgr_data -> block_size); HYPRE_Int reserved_coarse_size = (mgr_data -> reserved_coarse_size); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_ParCSRMatrix *B; hypre_CSRMatrix *B_diag; HYPRE_Real *B_diag_data; HYPRE_Int *B_diag_i; HYPRE_Int *B_diag_j; hypre_CSRMatrix *B_offd; HYPRE_Int i,ii; HYPRE_Int j,jj; HYPRE_Int k; HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int n_block, left_size,inv_size; // HYPRE_Real wall_time; /* for debugging instrumentation */ HYPRE_Int bidx,bidxm1,bidxp1; HYPRE_Real * diaginv; const HYPRE_Int nb2 = blk_size*blk_size; HYPRE_Int block_scaling_error = 0; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); // HYPRE_Int num_threads = hypre_NumThreads(); //printf("n = %d\n",n); if (my_id == num_procs) { n_block = (n - reserved_coarse_size) / blk_size; left_size = n - blk_size*n_block; } else { n_block = n / blk_size; left_size = n - blk_size*n_block; } inv_size = nb2*n_block + left_size*left_size; //printf("inv_size = %d\n",inv_size); hypre_blockRelax_setup(A,blk_size,reserved_coarse_size,&(mgr_data -> diaginv)); // if (debug_flag==4) wall_time = time_getWallclockSeconds(); /*----------------------------------------------------------------------- * First Pass: Determine size of B and fill in *-----------------------------------------------------------------------*/ B_diag_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST); B_diag_j = hypre_CTAlloc(HYPRE_Int, inv_size, HYPRE_MEMORY_HOST); B_diag_data = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST); B_diag_i[n] = inv_size; //B_offd_i = 
hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST); //B_offd_j = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST); //B_offd_data = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_HOST); //B_offd_i[n] = 1; /*----------------------------------------------------------------- * Get all the diagonal sub-blocks *-----------------------------------------------------------------*/ diaginv = hypre_CTAlloc(HYPRE_Real, nb2, HYPRE_MEMORY_HOST); //printf("n_block = %d\n",n_block); for (i = 0;i < n_block; i++) { bidxm1 = i*blk_size; bidxp1 = (i+1)*blk_size; for (k = 0;k < blk_size; k++) { for (j = 0;j < blk_size; j++) { bidx = k*blk_size + j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++) { jj = A_diag_j[ii]; if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL) { bidx = k*blk_size + jj - bidxm1; //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx); diaginv[bidx] = A_diag_data[ii]; } } } /* for (k = 0;k < blk_size; k++) */ /* { */ /* for (j = 0;j < blk_size; j++) */ /* { */ /* bidx = k*blk_size + j; */ /* printf("diaginv[%d] = %e\n",bidx,diaginv[bidx]); */ /* } */ /* } */ hypre_blas_mat_inv(diaginv, blk_size); for (k = 0;k < blk_size; k++) { B_diag_i[i*blk_size+k] = i*nb2 + k*blk_size; //B_offd_i[i*nb2+k] = 0; for (j = 0;j < blk_size; j++) { bidx = i*nb2 + k*blk_size + j; B_diag_j[bidx] = i*blk_size + j; B_diag_data[bidx] = diaginv[k*blk_size + j]; } } } //printf("Before create\n"); B = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixGlobalNumCols(A), hypre_ParCSRMatrixRowStarts(A), hypre_ParCSRMatrixColStarts(A), 0, inv_size, 0); //printf("After create\n"); B_diag = hypre_ParCSRMatrixDiag(B); hypre_CSRMatrixData(B_diag) = B_diag_data; hypre_CSRMatrixI(B_diag) = B_diag_i; hypre_CSRMatrixJ(B_diag) = B_diag_j; B_offd = hypre_ParCSRMatrixOffd(B); hypre_CSRMatrixData(B_offd) = NULL; hypre_CSRMatrixI(B_offd) = NULL; hypre_CSRMatrixJ(B_offd) = NULL; /* hypre_ParCSRMatrixOwnsRowStarts(B) = 0; */ *B_ptr = B; return(block_scaling_error); } HYPRE_Int hypre_blockRelax_solve (hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Real blk_size, HYPRE_Int n_block, HYPRE_Int left_size, HYPRE_Int method, HYPRE_Real *diaginv, hypre_ParVector *Vtemp) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Real *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Real *f_data = hypre_VectorData(f_local); hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp); HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local); HYPRE_Real *Vext_data = NULL; HYPRE_Real *v_buf_data; HYPRE_Int i, j, k; HYPRE_Int ii, jj; HYPRE_Int bidx,bidx1; HYPRE_Int relax_error = 0; HYPRE_Int num_sends; HYPRE_Int index, start; HYPRE_Int num_procs, my_id; HYPRE_Real *res; const HYPRE_Int nb2 = blk_size*blk_size; 
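   /* Block relaxation sweep with precomputed block-diagonal inverses
    * (see hypre_blockRelax_setup): for every block B of blk_size rows,
    *    res_B = f_B - (A*u)_B,   u_B += diaginv_B * res_B,
    * where diaginv stores the blk_size x blk_size inverses row-major,
    * e.g. for blk_size = 2, block i occupies diaginv[4*i .. 4*i+3].
    * method selects Jacobi (0, reads the old iterate in Vtemp) or
    * Gauss-Seidel (1, reads the freshest u values) for the local part;
    * off-process couplings always use the received values in Vext_data. */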
hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); // HYPRE_Int num_threads = hypre_NumThreads(); res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; //printf("u_old[%d] = %e\n",i,Vtemp_data[i]); } if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax points block by block *-----------------------------------------------------------------*/ for (i = 0;i < n_block; i++) { for (j = 0;j < blk_size; j++) { bidx = i*blk_size +j; res[j] = f_data[bidx]; for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx+1]; jj++) { ii = A_diag_j[jj]; if (method == 0) { // Jacobi for diagonal part res[j] -= A_diag_data[jj] * Vtemp_data[ii]; } else if (method == 1) { // Gauss-Seidel for diagonal part res[j] -= A_diag_data[jj] * u_data[ii]; } else { // Default do Jacobi for diagonal part res[j] -= A_diag_data[jj] * Vtemp_data[ii]; } //printf("%d: Au= %e * %e =%e\n",ii,A_diag_data[jj],Vtemp_data[ii], res[j]); } for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx+1]; jj++) { // always do Jacobi for off-diagonal part ii = A_offd_j[jj]; res[j] -= A_offd_data[jj] * Vext_data[ii]; } //printf("%d: res = %e\n",bidx,res[j]); } for (j = 0;j < blk_size; j++) { bidx1 = i*blk_size +j; for (k = 0;k < blk_size; k++) { bidx = i*nb2 +j*blk_size+k; u_data[bidx1] += res[k]*diaginv[bidx]; //printf("u[%d] = %e, diaginv[%d] = %e\n",bidx1,u_data[bidx1],bidx,diaginv[bidx]); } //printf("u[%d] = %e\n",bidx1,u_data[bidx1]); } } if (num_procs > 1) { hypre_TFree(Vext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } hypre_TFree(res, HYPRE_MEMORY_HOST); return(relax_error); } HYPRE_Int hypre_block_gs (hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Real blk_size, HYPRE_Int n_block, HYPRE_Int left_size, HYPRE_Real *diaginv, hypre_ParVector *Vtemp) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle 
*comm_handle; HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Real *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Real *f_data = hypre_VectorData(f_local); hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp); HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local); HYPRE_Real *Vext_data = NULL; HYPRE_Real *v_buf_data; HYPRE_Int i, j, k; HYPRE_Int ii, jj; HYPRE_Int bidx,bidx1; HYPRE_Int relax_error = 0; HYPRE_Int num_sends; HYPRE_Int index, start; HYPRE_Int num_procs, my_id; HYPRE_Real *res; const HYPRE_Int nb2 = blk_size*blk_size; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); //HYPRE_Int num_threads = hypre_NumThreads(); res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. 
*-----------------------------------------------------------------*/ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; //printf("u_old[%d] = %e\n",i,Vtemp_data[i]); } if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax points block by block *-----------------------------------------------------------------*/ for (i = 0;i < n_block; i++) { for (j = 0;j < blk_size; j++) { bidx = i*blk_size +j; res[j] = f_data[bidx]; for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx+1]; jj++) { ii = A_diag_j[jj]; //res[j] -= A_diag_data[jj] * Vtemp_data[ii]; //printf("my_id = %d, %d: Au = %e * %e\n",my_id,ii,A_diag_data[jj],Vtemp_data[ii]); res[j] -= A_diag_data[jj] * u_data[ii]; //printf("%d: Au= %e * %e =%e\n",ii,A_diag_data[jj],Vtemp_data[ii], res[j]); } for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx+1]; jj++) { ii = A_offd_j[jj]; res[j] -= A_offd_data[jj] * Vext_data[ii]; } //printf("%d: res = %e\n",bidx,res[j]); } for (j = 0;j < blk_size; j++) { bidx1 = i*blk_size +j; for (k = 0;k < blk_size; k++) { bidx = i*nb2 +j*blk_size+k; u_data[bidx1] += res[k]*diaginv[bidx]; //printf("u[%d] = %e, diaginv[%d] = %e\n",bidx1,u_data[bidx1],bidx,diaginv[bidx]); } //printf("u[%d] = %e\n",bidx1,u_data[bidx1]); } } if (num_procs > 1) { hypre_TFree(Vext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } hypre_TFree(res, HYPRE_MEMORY_HOST); return(relax_error); } /*Block smoother*/ HYPRE_Int hypre_blockRelax_setup(hypre_ParCSRMatrix *A, HYPRE_Int blk_size, HYPRE_Int reserved_coarse_size, HYPRE_Real **diaginvptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int i, j,k; HYPRE_Int ii, jj; HYPRE_Int bidx,bidxm1,bidxp1; HYPRE_Int num_procs, my_id; const HYPRE_Int nb2 = blk_size*blk_size; HYPRE_Int n_block; HYPRE_Int left_size,inv_size; HYPRE_Real *diaginv = *diaginvptr; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); //HYPRE_Int num_threads = hypre_NumThreads(); if (my_id == num_procs) { n_block = (n - reserved_coarse_size) / blk_size; left_size = n - blk_size*n_block; } else { n_block = n / blk_size; left_size = n - blk_size*n_block; } inv_size = nb2*n_block + left_size*left_size; if (diaginv !=NULL) { hypre_TFree(diaginv, HYPRE_MEMORY_HOST); diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST); } else { diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST); } /*----------------------------------------------------------------- * Get all the diagonal sub-blocks *-----------------------------------------------------------------*/ for (i = 0;i < n_block; i++) { bidxm1 = i*blk_size; bidxp1 = (i+1)*blk_size; //printf("bidxm1 = %d,bidxp1 = %d\n",bidxm1,bidxp1); for (k = 0;k < blk_size; k++) { for (j = 0;j < blk_size; j++) { bidx = i*nb2 + k*blk_size + j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++) { jj = A_diag_j[ii]; if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL) { bidx = i*nb2 + k*blk_size + jj - bidxm1; //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx); diaginv[bidx] = A_diag_data[ii]; } } } } for (i = 0;i < left_size; i++) { 
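      /* Rows past n_block*blk_size do not fill a complete block; gather them
       * into one trailing left_size x left_size dense block stored after the
       * n_block regular blocks in diaginv, to be inverted with the rest. */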
bidxm1 =n_block*nb2 + i*blk_size; bidxp1 =n_block*nb2 + (i+1)*blk_size; for (j = 0;j < left_size; j++) { bidx = n_block*nb2 + i*blk_size +j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[n_block*blk_size + i]; ii < A_diag_i[n_block*blk_size+i+1]; ii++) { jj = A_diag_j[ii]; if (jj > n_block*blk_size) { bidx = n_block*nb2 + i*blk_size + jj - n_block*blk_size; diaginv[bidx] = A_diag_data[ii]; } } } /*----------------------------------------------------------------- * compute the inverses of all the diagonal sub-blocks *-----------------------------------------------------------------*/ if (blk_size > 1) { for (i = 0;i < n_block; i++) { hypre_blas_mat_inv(diaginv+i*nb2, blk_size); } hypre_blas_mat_inv(diaginv+(HYPRE_Int)(blk_size*nb2),left_size); } else { for (i = 0;i < n; i++) { // FIX-ME: zero-diagonal should be tested previously if (fabs(diaginv[i]) < SMALLREAL) diaginv[i] = 0.0; else diaginv[i] = 1.0 / diaginv[i]; } } *diaginvptr = diaginv; return 1; } HYPRE_Int hypre_blockRelax(hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Int blk_size, HYPRE_Int reserved_coarse_size, HYPRE_Int method, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int i, j,k; HYPRE_Int ii, jj; HYPRE_Int bidx,bidxm1,bidxp1; HYPRE_Int relax_error = 0; HYPRE_Int num_procs, my_id; const HYPRE_Int nb2 = blk_size*blk_size; HYPRE_Int n_block; HYPRE_Int left_size,inv_size; HYPRE_Real *diaginv; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); //HYPRE_Int num_threads = hypre_NumThreads(); if (my_id == num_procs) { n_block = (n - reserved_coarse_size) / blk_size; left_size = n - blk_size*n_block; } else { n_block = n / blk_size; left_size = n - blk_size*n_block; } inv_size = nb2*n_block + left_size*left_size; diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------- * Get all the diagonal sub-blocks *-----------------------------------------------------------------*/ for (i = 0;i < n_block; i++) { bidxm1 = i*blk_size; bidxp1 = (i+1)*blk_size; //printf("bidxm1 = %d,bidxp1 = %d\n",bidxm1,bidxp1); for (k = 0;k < blk_size; k++) { for (j = 0;j < blk_size; j++) { bidx = i*nb2 + k*blk_size + j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++) { jj = A_diag_j[ii]; if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL) { bidx = i*nb2 + k*blk_size + jj - bidxm1; //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx); diaginv[bidx] = A_diag_data[ii]; } } } } for (i = 0;i < left_size; i++) { bidxm1 =n_block*nb2 + i*blk_size; bidxp1 =n_block*nb2 + (i+1)*blk_size; for (j = 0;j < left_size; j++) { bidx = n_block*nb2 + i*blk_size +j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[n_block*blk_size + i]; ii < A_diag_i[n_block*blk_size+i+1]; ii++) { jj = A_diag_j[ii]; if (jj > n_block*blk_size) { bidx = n_block*nb2 + i*blk_size + jj - n_block*blk_size; diaginv[bidx] = A_diag_data[ii]; } } } /* for (i = 0;i < n_block; i++) { for (j = 0;j < blk_size; j++) { for (k = 0;k < blk_size; k ++) { bidx = i*nb2 + j*blk_size + k; printf("%e\t",diaginv[bidx]); } printf("\n"); } printf("\n"); } */ /*----------------------------------------------------------------- * compute the inverses of all 
the diagonal sub-blocks *-----------------------------------------------------------------*/ if (blk_size > 1) { for (i = 0;i < n_block; i++) { hypre_blas_mat_inv(diaginv+i*nb2, blk_size); } hypre_blas_mat_inv(diaginv+(HYPRE_Int)(blk_size*nb2),left_size); /* for (i = 0;i < n_block; i++) { for (j = 0;j < blk_size; j++) { for (k = 0;k < blk_size; k ++) { bidx = i*nb2 + j*blk_size + k; printf("%e\t",diaginv[bidx]); } printf("\n"); } printf("\n"); } */ } else { for (i = 0;i < n; i++) { // FIX-ME: zero-diagonal should be tested previously if (fabs(diaginv[i]) < SMALLREAL) diaginv[i] = 0.0; else diaginv[i] = 1.0 / diaginv[i]; } } hypre_blockRelax_solve(A,f,u,blk_size,n_block,left_size,method,diaginv,Vtemp); /*----------------------------------------------------------------- * Free temperary memeory *-----------------------------------------------------------------*/ hypre_TFree(diaginv, HYPRE_MEMORY_HOST); return(relax_error); } /* set coarse grid solver */ HYPRE_Int hypre_MGRSetFSolver( void *mgr_vdata, HYPRE_Int (*fine_grid_solver_solve)(void*,void*,void*,void*), HYPRE_Int (*fine_grid_solver_setup)(void*,void*,void*,void*), void *fsolver ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); HYPRE_Solver **aff_solver = (mgr_data -> aff_solver); if (aff_solver == NULL) aff_solver = hypre_CTAlloc(HYPRE_Solver*, max_num_coarse_levels, HYPRE_MEMORY_HOST); /* only allow to set F-solver for the first level */ aff_solver[0] = (HYPRE_Solver *) fsolver; (mgr_data -> fine_grid_solver_solve) = fine_grid_solver_solve; (mgr_data -> fine_grid_solver_setup) = fine_grid_solver_setup; (mgr_data -> aff_solver) = aff_solver; (mgr_data -> use_default_fsolver) = 0; return hypre_error_flag; } /* set coarse grid solver */ HYPRE_Int hypre_MGRSetCoarseSolver( void *mgr_vdata, HYPRE_Int (*coarse_grid_solver_solve)(void*,void*,void*,void*), HYPRE_Int (*coarse_grid_solver_setup)(void*,void*,void*,void*), void *coarse_grid_solver ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } (mgr_data -> coarse_grid_solver_solve) = coarse_grid_solver_solve; (mgr_data -> coarse_grid_solver_setup) = coarse_grid_solver_setup; (mgr_data -> coarse_grid_solver) = (HYPRE_Solver) coarse_grid_solver; (mgr_data -> use_default_cgrid_solver) = 0; return hypre_error_flag; } HYPRE_Int hypre_MGRSetAffInv( void *mgr_vdata, hypre_ParCSRMatrix *A_ff_inv ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> A_ff_inv) = A_ff_inv; return hypre_error_flag; } /* Set the maximum number of coarse levels. * maxcoarselevs = 1 yields the default 2-grid scheme. */ HYPRE_Int hypre_MGRSetMaxCoarseLevels( void *mgr_vdata, HYPRE_Int maxcoarselevs ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> max_num_coarse_levels) = maxcoarselevs; return hypre_error_flag; } /* Set the system block size */ HYPRE_Int hypre_MGRSetBlockSize( void *mgr_vdata, HYPRE_Int bsize ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> block_size) = bsize; return hypre_error_flag; } /* Set the relaxation type for the fine levels of the reduction. * Currently supports the following flavors of relaxation types * as described in the documentation: * relax_types 0 - 8, 13, 14, 18, 19, 98. * See par_relax.c and par_relax_more.c for more details. 
* */ HYPRE_Int hypre_MGRSetRelaxType( void *mgr_vdata, HYPRE_Int relax_type ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> relax_type) = relax_type; return hypre_error_flag; } /* Set the number of relaxation sweeps */ HYPRE_Int hypre_MGRSetNumRelaxSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_relax_sweeps) = nsweeps; return hypre_error_flag; } /* Set the F-relaxation strategy: 0=single level, 1=multi level */ HYPRE_Int hypre_MGRSetFRelaxMethod( void *mgr_vdata, HYPRE_Int relax_method ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if((mgr_data -> Frelax_method) != NULL) { hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_method) = NULL; } HYPRE_Int *Frelax_method = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); for (i=0; i < max_num_coarse_levels; i++) { Frelax_method[i] = relax_method; } (mgr_data -> Frelax_method) = Frelax_method; return hypre_error_flag; } /* Set the F-relaxation strategy: 0=single level, 1=multi level */ HYPRE_Int hypre_MGRSetLevelFRelaxMethod( void *mgr_vdata, HYPRE_Int *relax_method ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if((mgr_data -> Frelax_method) != NULL) { hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_method) = NULL; } HYPRE_Int *Frelax_method = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (relax_method != NULL) { for (i=0; i < max_num_coarse_levels; i++) { Frelax_method[i] = relax_method[i]; } } else { for (i = 0; i < max_num_coarse_levels; i++) { Frelax_method[i] = 0; } } (mgr_data -> Frelax_method) = Frelax_method; return hypre_error_flag; } /* Coarse grid method: 0=Galerkin RAP, 1=non-Galerkin with dropping*/ HYPRE_Int hypre_MGRSetCoarseGridMethod( void *mgr_vdata, HYPRE_Int *cg_method ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if((mgr_data -> use_non_galerkin_cg) != NULL) { hypre_TFree(mgr_data -> use_non_galerkin_cg, HYPRE_MEMORY_HOST); (mgr_data -> use_non_galerkin_cg) = NULL; } HYPRE_Int *use_non_galerkin_cg = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (cg_method != NULL) { for (i=0; i < max_num_coarse_levels; i++) { use_non_galerkin_cg[i] = cg_method[i]; } } else { for (i = 0; i < max_num_coarse_levels; i++) { use_non_galerkin_cg[i] = 0; } } (mgr_data -> use_non_galerkin_cg) = use_non_galerkin_cg; return hypre_error_flag; } /* Set the F-relaxation number of functions for each level */ HYPRE_Int hypre_MGRSetLevelFRelaxNumFunctions( void *mgr_vdata, HYPRE_Int *num_functions ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if((mgr_data -> Frelax_num_functions) != NULL) { hypre_TFree(mgr_data -> Frelax_num_functions, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_num_functions) = NULL; } HYPRE_Int *Frelax_num_functions = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (num_functions != NULL) { for (i=0; i < max_num_coarse_levels; i++) { Frelax_num_functions[i] = num_functions[i]; } } else { for (i = 0; i < max_num_coarse_levels; i++) { Frelax_num_functions[i] = 1; } } (mgr_data -> 
Frelax_num_functions) = Frelax_num_functions; return hypre_error_flag; } /* Set the type of the restriction type * for computing restriction operator */ HYPRE_Int hypre_MGRSetLevelRestrictType( void *mgr_vdata, HYPRE_Int *restrict_type) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> restrict_type) != NULL) { hypre_TFree((mgr_data -> restrict_type), HYPRE_MEMORY_HOST); (mgr_data -> restrict_type) = NULL; } HYPRE_Int *level_restrict_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (restrict_type != NULL) { for (i=0; i < max_num_coarse_levels; i++) { level_restrict_type[i] = *(restrict_type + i); } } else { for (i=0; i < max_num_coarse_levels; i++) { level_restrict_type[i] = 0; } } (mgr_data -> restrict_type) = level_restrict_type; return hypre_error_flag; } /* Set the type of the restriction type * for computing restriction operator */ HYPRE_Int hypre_MGRSetRestrictType( void *mgr_vdata, HYPRE_Int restrict_type) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> restrict_type) != NULL) { hypre_TFree((mgr_data -> restrict_type), HYPRE_MEMORY_HOST); (mgr_data -> restrict_type) = NULL; } HYPRE_Int *level_restrict_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); for (i=0; i < max_num_coarse_levels; i++) { level_restrict_type[i] = restrict_type; } (mgr_data -> restrict_type) = level_restrict_type; return hypre_error_flag; } /* Set the number of Jacobi interpolation iterations * for computing interpolation operator */ HYPRE_Int hypre_MGRSetNumRestrictSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_restrict_sweeps) = nsweeps; return hypre_error_flag; } /* Set the type of the interpolation * for computing interpolation operator */ HYPRE_Int hypre_MGRSetInterpType( void *mgr_vdata, HYPRE_Int interpType) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> interp_type) != NULL) { hypre_TFree((mgr_data -> interp_type), HYPRE_MEMORY_HOST); (mgr_data -> interp_type) = NULL; } HYPRE_Int *level_interp_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); for (i=0; i < max_num_coarse_levels; i++) { level_interp_type[i] = interpType; } (mgr_data -> interp_type) = level_interp_type; return hypre_error_flag; } /* Set the type of the interpolation * for computing interpolation operator */ HYPRE_Int hypre_MGRSetLevelInterpType( void *mgr_vdata, HYPRE_Int *interpType) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); if ((mgr_data -> interp_type) != NULL) { hypre_TFree((mgr_data -> interp_type), HYPRE_MEMORY_HOST); (mgr_data -> interp_type) = NULL; } HYPRE_Int *level_interp_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST); if (interpType != NULL) { for (i=0; i < max_num_coarse_levels; i++) { level_interp_type[i] = *(interpType + i); } } else { for (i=0; i < max_num_coarse_levels; i++) { level_interp_type[i] = 2; } } (mgr_data -> interp_type) = level_interp_type; return hypre_error_flag; } /* Set the number of Jacobi interpolation iterations * for computing interpolation operator */ HYPRE_Int 
hypre_MGRSetNumInterpSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_interp_sweeps) = nsweeps; return hypre_error_flag; } /* Set print level for mgr solver */ HYPRE_Int hypre_MGRSetPrintLevel( void *mgr_vdata, HYPRE_Int print_level ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> print_level) = print_level; return hypre_error_flag; } /* Set print level for mgr solver */ HYPRE_Int hypre_MGRSetLogging( void *mgr_vdata, HYPRE_Int logging ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> logging) = logging; return hypre_error_flag; } /* Set max number of iterations for mgr solver */ HYPRE_Int hypre_MGRSetMaxIter( void *mgr_vdata, HYPRE_Int max_iter ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> max_iter) = max_iter; return hypre_error_flag; } /* Set convergence tolerance for mgr solver */ HYPRE_Int hypre_MGRSetTol( void *mgr_vdata, HYPRE_Real tol ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> tol) = tol; return hypre_error_flag; } /* Set max number of iterations for mgr global smoother */ HYPRE_Int hypre_MGRSetMaxGlobalsmoothIters( void *mgr_vdata, HYPRE_Int max_iter ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> global_smooth_iters) = max_iter; return hypre_error_flag; } /* Set global smoothing type for mgr solver */ HYPRE_Int hypre_MGRSetGlobalsmoothType( void *mgr_vdata, HYPRE_Int iter_type ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> global_smooth_type) = iter_type; return hypre_error_flag; } /* Set the maximum number of non-zero entries for restriction and interpolation operator if classical AMG interpolation is used */ HYPRE_Int hypre_MGRSetPMaxElmts( void *mgr_vdata, HYPRE_Int P_max_elmts) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> P_max_elmts) = P_max_elmts; return hypre_error_flag; } /* Get number of iterations for MGR solver */ HYPRE_Int hypre_MGRGetNumIterations( void *mgr_vdata, HYPRE_Int *num_iterations ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } *num_iterations = mgr_data->num_iterations; return hypre_error_flag; } /* Get residual norms for MGR solver */ HYPRE_Int hypre_MGRGetFinalRelativeResidualNorm( void *mgr_vdata, HYPRE_Real *res_norm ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } *res_norm = mgr_data->final_rel_residual_norm; return hypre_error_flag; } HYPRE_Int hypre_MGRGetCoarseGridConvergenceFactor( void *mgr_vdata , HYPRE_Real *conv_factor ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } *conv_factor = (mgr_data -> cg_convergence_factor); return hypre_error_flag; } /* Build A_FF matrix from A given a CF_marker array */ HYPRE_Int hypre_MGRGetSubBlock( hypre_ParCSRMatrix *A, HYPRE_Int *row_cf_marker, HYPRE_Int *col_cf_marker, HYPRE_Int debug_flag, hypre_ParCSRMatrix **A_block_ptr ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix 
*A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); //HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int *coarse_dof_func_ptr = NULL; HYPRE_BigInt *num_row_cpts_global = NULL; HYPRE_BigInt *num_col_cpts_global = NULL; hypre_ParCSRMatrix *Ablock; HYPRE_BigInt *col_map_offd_Ablock; HYPRE_Int *tmp_map_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; hypre_CSRMatrix *Ablock_diag; hypre_CSRMatrix *Ablock_offd; HYPRE_Real *Ablock_diag_data; HYPRE_Int *Ablock_diag_i; HYPRE_Int *Ablock_diag_j; HYPRE_Real *Ablock_offd_data; HYPRE_Int *Ablock_offd_i; HYPRE_Int *Ablock_offd_j; HYPRE_Int Ablock_diag_size, Ablock_offd_size; HYPRE_Int *Ablock_marker; HYPRE_Int ii_counter; HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; HYPRE_Int start_indexing = 0; /* start indexing for Aff_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; HYPRE_Int *coarse_counter; HYPRE_Int *col_coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_row_cpts; HYPRE_BigInt total_global_col_cpts; HYPRE_Int num_cols_Ablock_offd; // HYPRE_BigInt my_first_row_cpt, my_first_col_cpt; HYPRE_Int i,i1; HYPRE_Int j,jl,jj; HYPRE_Int start; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); // HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_threads = hypre_NumThreads(); /* get the number of coarse rows */ hypre_BoomerAMGCoarseParms(comm, local_numrows, 1, NULL, row_cf_marker, &coarse_dof_func_ptr, &num_row_cpts_global); hypre_TFree(coarse_dof_func_ptr, HYPRE_MEMORY_HOST); coarse_dof_func_ptr = NULL; //hypre_printf("my_id = %d, cpts_this = %d, cpts_next = %d\n", my_id, num_row_cpts_global[0], num_row_cpts_global[1]); #ifdef HYPRE_NO_GLOBAL_PARTITION // my_first_row_cpt = num_row_cpts_global[0]; if (my_id == (num_procs -1)) total_global_row_cpts = num_row_cpts_global[1]; hypre_MPI_Bcast(&total_global_row_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else // my_first_row_cpt = num_row_cpts_global[my_id]; total_global_row_cpts = num_row_cpts_global[num_procs]; #endif /* get the number of coarse rows */ hypre_BoomerAMGCoarseParms(comm, local_numrows, 1, NULL, col_cf_marker, &coarse_dof_func_ptr, &num_col_cpts_global); hypre_TFree(coarse_dof_func_ptr, HYPRE_MEMORY_HOST); coarse_dof_func_ptr = NULL; //hypre_printf("my_id = %d, cpts_this = %d, cpts_next = %d\n", my_id, num_col_cpts_global[0], num_col_cpts_global[1]); #ifdef HYPRE_NO_GLOBAL_PARTITION // my_first_col_cpt = num_col_cpts_global[0]; if (my_id == (num_procs -1)) total_global_col_cpts = num_col_cpts_global[1]; hypre_MPI_Bcast(&total_global_col_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else // my_first_col_cpt = num_col_cpts_global[my_id]; total_global_col_cpts = num_col_cpts_global[num_procs]; #endif /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; } // if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_cols_A_offd) CF_marker_offd = 
hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = col_cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); /*----------------------------------------------------------------------- * First Pass: Determine size of Ablock and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); col_coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a F-point, we loop through the columns and select * the F-columns. Also set up mapping vector. *--------------------------------------------------------------------*/ if (col_cf_marker[i] > 0) { fine_to_coarse[i] = col_coarse_counter[j]; col_coarse_counter[j]++; } if (row_cf_marker[i] > 0) { //fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if (col_cf_marker[i1] > 0) { jj_count[j]++; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (CF_marker_offd[i1] > 0) { jj_count_offd[j]++; } } } } } } /*----------------------------------------------------------------------- * Allocate arrays. 
*-----------------------------------------------------------------------*/ for (i=0; i < num_threads-1; i++) { jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; coarse_counter[i+1] += coarse_counter[i]; col_coarse_counter[i+1] += col_coarse_counter[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; ii_counter = coarse_counter[i]; Ablock_diag_size = jj_counter; Ablock_diag_i = hypre_CTAlloc(HYPRE_Int, ii_counter+1, HYPRE_MEMORY_HOST); Ablock_diag_j = hypre_CTAlloc(HYPRE_Int, Ablock_diag_size, HYPRE_MEMORY_HOST); Ablock_diag_data = hypre_CTAlloc(HYPRE_Real, Ablock_diag_size, HYPRE_MEMORY_HOST); Ablock_diag_i[ii_counter] = jj_counter; Ablock_offd_size = jj_counter_offd; Ablock_offd_i = hypre_CTAlloc(HYPRE_Int, ii_counter+1, HYPRE_MEMORY_HOST); Ablock_offd_j = hypre_CTAlloc(HYPRE_Int, Ablock_offd_size, HYPRE_MEMORY_HOST); Ablock_offd_data = hypre_CTAlloc(HYPRE_Real, Ablock_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; //----------------------------------------------------------------------- // Send and receive fine_to_coarse info. //----------------------------------------------------------------------- // if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) coarse_shift = col_coarse_counter[j-1]; size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) fine_to_coarse[i] += coarse_shift; } // if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif // for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_col_cpt; #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,jl,i1,jj,ns,ne,size,rest,jj_counter,jj_counter_offd,ii_counter) HYPRE_SMP_SCHEDULE #endif #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (jl < rest) { ns = jl*size+jl; ne = (jl+1)*size+jl+1; } else { ns = jl*size+rest; ne = (jl+1)*size+rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl-1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl-1]; ii_counter = 0; for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a F-point, we loop through the columns and select * the F-columns. Also set up mapping vector. 
*--------------------------------------------------------------------*/ if (row_cf_marker[i] > 0) { // Diagonal part of Ablock // Ablock_diag_i[ii_counter] = jj_counter; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if (col_cf_marker[i1] > 0) { Ablock_diag_j[jj_counter] = fine_to_coarse[i1]; Ablock_diag_data[jj_counter] = A_diag_data[jj]; jj_counter++; } } // Off-Diagonal part of Ablock // Ablock_offd_i[ii_counter] = jj_counter_offd; if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (CF_marker_offd[i1] > 0) { Ablock_offd_j[jj_counter_offd] = i1; Ablock_offd_data[jj_counter_offd] = A_offd_data[jj]; jj_counter_offd++; } } } ii_counter++; } } Ablock_offd_i[ii_counter] = jj_counter_offd; Ablock_diag_i[ii_counter] = jj_counter; } Ablock = hypre_ParCSRMatrixCreate(comm, total_global_row_cpts, total_global_col_cpts, num_row_cpts_global, num_col_cpts_global, 0, Ablock_diag_i[ii_counter], Ablock_offd_i[ii_counter]); Ablock_diag = hypre_ParCSRMatrixDiag(Ablock); hypre_CSRMatrixData(Ablock_diag) = Ablock_diag_data; hypre_CSRMatrixI(Ablock_diag) = Ablock_diag_i; hypre_CSRMatrixJ(Ablock_diag) = Ablock_diag_j; Ablock_offd = hypre_ParCSRMatrixOffd(Ablock); hypre_CSRMatrixData(Ablock_offd) = Ablock_offd_data; hypre_CSRMatrixI(Ablock_offd) = Ablock_offd_i; hypre_CSRMatrixJ(Ablock_offd) = Ablock_offd_j; num_cols_Ablock_offd = 0; if (Ablock_offd_size) { Ablock_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < num_cols_A_offd; i++) Ablock_marker[i] = 0; num_cols_Ablock_offd = 0; for (i=0; i < Ablock_offd_size; i++) { index = Ablock_offd_j[i]; if (!Ablock_marker[index]) { num_cols_Ablock_offd++; Ablock_marker[index] = 1; } } col_map_offd_Ablock = hypre_CTAlloc(HYPRE_BigInt, num_cols_Ablock_offd, HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_Ablock_offd, HYPRE_MEMORY_HOST); index = 0; for (i=0; i < num_cols_Ablock_offd; i++) { while (Ablock_marker[index]==0) index++; tmp_map_offd[i] = index++; } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i=0; i < Ablock_offd_size; i++) Ablock_offd_j[i] = hypre_BinarySearch(tmp_map_offd, Ablock_offd_j[i], num_cols_Ablock_offd); hypre_TFree(Ablock_marker, HYPRE_MEMORY_HOST); } if (num_cols_Ablock_offd) { hypre_ParCSRMatrixColMapOffd(Ablock) = col_map_offd_Ablock; hypre_CSRMatrixNumCols(Ablock_offd) = num_cols_Ablock_offd; } hypre_GetCommPkgRTFromCommPkgA(Ablock, A, fine_to_coarse, tmp_map_offd); #ifdef HYPRE_NO_GLOBAL_PARTITION /* Create the assumed partition */ if (hypre_ParCSRMatrixAssumedPartition(Ablock) == NULL) { hypre_ParCSRMatrixCreateAssumedPartition(Ablock); } #endif *A_block_ptr= Ablock; hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(col_coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); return(0); } /* Build A_FF matrix from A given a CF_marker array */ HYPRE_Int hypre_MGRBuildAffNew( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_Int debug_flag, hypre_ParCSRMatrix **A_ff_ptr ) { HYPRE_Int i; HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); /* create a copy of the CF_marker array 
and switch C-points to F-points */ HYPRE_Int *CF_marker_copy = hypre_CTAlloc(HYPRE_Int, local_numrows, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < local_numrows; i++) { CF_marker_copy[i] = -CF_marker[i]; } hypre_MGRGetSubBlock(A, CF_marker_copy, CF_marker_copy, debug_flag, A_ff_ptr); /* Free copy of CF marker */ hypre_TFree(CF_marker_copy, HYPRE_MEMORY_HOST); return(0); } /********************************************************************************* * This routine assumes that the 'toVector' is larger than the 'fromVector' and * the CF_marker is of the same length as the toVector. There must be n 'point_type' * values in the CF_marker, where n is the length of the 'fromVector'. * It adds the values of the 'fromVector' to the 'toVector' where the marker is the * same as the 'point_type' *********************************************************************************/ HYPRE_Int hypre_MGRAddVectorP ( HYPRE_Int *CF_marker, HYPRE_Int point_type, HYPRE_Real a, hypre_ParVector *fromVector, HYPRE_Real b, hypre_ParVector **toVector ) { hypre_Vector *fromVectorLocal = hypre_ParVectorLocalVector(fromVector); HYPRE_Real *fromVectorData = hypre_VectorData(fromVectorLocal); hypre_Vector *toVectorLocal = hypre_ParVectorLocalVector(*toVector); HYPRE_Real *toVectorData = hypre_VectorData(toVectorLocal); HYPRE_Int n = hypre_ParVectorActualLocalSize(*toVector); HYPRE_Int i, j; j = 0; for (i = 0; i < n; i++) { if (CF_marker[i] == point_type) { toVectorData[i] = b * toVectorData[i] + a * fromVectorData[j]; j++; } } return 0; } /************************************************************************************* * This routine assumes that the 'fromVector' is larger than the 'toVector' and * the CF_marker is of the same length as the fromVector. There must be n 'point_type' * values in the CF_marker, where n is the length of the 'toVector'. 
* It adds the values of the 'fromVector' where the marker is the * same as the 'point_type' to the 'toVector' *************************************************************************************/ HYPRE_Int hypre_MGRAddVectorR ( HYPRE_Int *CF_marker, HYPRE_Int point_type, HYPRE_Real a, hypre_ParVector *fromVector, HYPRE_Real b, hypre_ParVector **toVector ) { hypre_Vector *fromVectorLocal = hypre_ParVectorLocalVector(fromVector); HYPRE_Real *fromVectorData = hypre_VectorData(fromVectorLocal); hypre_Vector *toVectorLocal = hypre_ParVectorLocalVector(*toVector); HYPRE_Real *toVectorData = hypre_VectorData(toVectorLocal); HYPRE_Int n = hypre_ParVectorActualLocalSize(fromVector); HYPRE_Int i, j; j = 0; for (i = 0; i < n; i++) { if (CF_marker[i] == point_type) { toVectorData[j] = b * toVectorData[j] + a * fromVectorData[i]; j++; } } return 0; } HYPRE_Int hypre_MGRBuildAff( MPI_Comm comm, HYPRE_Int local_num_variables, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int *CF_marker, HYPRE_Int **coarse_dof_func_ptr, HYPRE_BigInt **coarse_pnts_global_ptr, hypre_ParCSRMatrix *A, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_f_ptr, hypre_ParCSRMatrix **A_ff_ptr ) { HYPRE_Int *CF_marker_copy = hypre_CTAlloc(HYPRE_Int, local_num_variables, HYPRE_MEMORY_HOST); HYPRE_Int i; for (i = 0; i < local_num_variables; i++) { CF_marker_copy[i] = -CF_marker[i]; } hypre_BoomerAMGCoarseParms(comm, local_num_variables, 1, NULL, CF_marker_copy, coarse_dof_func_ptr, coarse_pnts_global_ptr); hypre_MGRBuildP(A, CF_marker_copy, (*coarse_pnts_global_ptr), 0, debug_flag, P_f_ptr); hypre_BoomerAMGBuildCoarseOperator(*P_f_ptr, A, *P_f_ptr, A_ff_ptr); hypre_TFree(CF_marker_copy, HYPRE_MEMORY_HOST); return 0; } /* Get pointer to coarse grid matrix for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridMatrix( void *mgr_vdata, hypre_ParCSRMatrix **RAP ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> RAP == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," Coarse grid matrix is NULL. Please make sure MGRSetup() is called \n"); return hypre_error_flag; } *RAP = mgr_data->RAP; return hypre_error_flag; } /* Get pointer to coarse grid solution for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridSolution( void *mgr_vdata, hypre_ParVector **sol ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> U_array == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," MGR solution array is NULL. Please make sure MGRSetup() and MGRSolve() are called \n"); return hypre_error_flag; } *sol = mgr_data->U_array[mgr_data->num_coarse_levels]; return hypre_error_flag; } /* Get pointer to coarse grid solution for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridRHS( void *mgr_vdata, hypre_ParVector **rhs ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> F_array == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," MGR RHS array is NULL. 
Please make sure MGRSetup() and MGRSolve() are called \n"); return hypre_error_flag; } *rhs = mgr_data->F_array[mgr_data->num_coarse_levels]; return hypre_error_flag; } /* Print coarse grid linear system (for debugging)*/ HYPRE_Int hypre_MGRPrintCoarseSystem( void *mgr_vdata, HYPRE_Int print_flag) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; mgr_data->print_coarse_system = print_flag; return hypre_error_flag; } /* Print solver params */ HYPRE_Int hypre_MGRWriteSolverParams(void *mgr_vdata) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i, j; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); hypre_printf("MGR Setup parameters: \n"); hypre_printf("Block size: %d\n", (mgr_data -> block_size)); hypre_printf("Max number of coarse levels: %d\n", (mgr_data -> max_num_coarse_levels)); hypre_printf("Relax type: %d\n", (mgr_data -> relax_type)); hypre_printf("Set non-Cpoints to F-points: %d\n", (mgr_data -> set_non_Cpoints_to_F)); hypre_printf("Set Cpoints method: %d\n", (mgr_data -> set_c_points_method)); for (i = 0; i < max_num_coarse_levels; i++) { hypre_printf("Lev = %d, Interpolation type: %d\n", i, (mgr_data -> interp_type)[i]); hypre_printf("Lev = %d, Restriction type: %d\n", i, (mgr_data -> restrict_type)[i]); hypre_printf("Lev = %d, F-relaxation method: %d\n", i, (mgr_data -> Frelax_method)[i]); hypre_printf("Lev = %d, Use non-Galerkin coarse grid: %d\n", i, (mgr_data -> use_non_galerkin_cg)[i]); HYPRE_Int lvl_num_coarse_points = (mgr_data -> block_num_coarse_indexes)[i]; hypre_printf("Lev = %d, Number of Cpoints: %d\n", i, lvl_num_coarse_points); hypre_printf("Cpoints indices: "); for (j = 0; j < lvl_num_coarse_points; j++) { if ((mgr_data -> block_cf_marker)[i][j] == 1) { hypre_printf("%d ", j); } } hypre_printf("\n"); } hypre_printf("Number of Reserved Cpoints: %d\n", (mgr_data -> reserved_coarse_size)); hypre_printf("Keep reserved Cpoints to level: %d\n", (mgr_data -> lvl_to_keep_cpoints)); hypre_printf("\n MGR Solver Parameters: \n"); hypre_printf("Number of relax sweeps: %d\n", (mgr_data -> num_relax_sweeps)); hypre_printf("Number of interpolation sweeps: %d\n", (mgr_data -> num_interp_sweeps)); hypre_printf("Number of restriction sweeps: %d\n", (mgr_data -> num_restrict_sweeps)); hypre_printf("Global smoother type: %d\n", (mgr_data ->global_smooth_type)); hypre_printf("Number of global smoother sweeps: %d\n", (mgr_data ->global_smooth_iters)); hypre_printf("Max number of iterations: %d\n", (mgr_data -> max_iter)); hypre_printf("Stopping tolerance: %e\n", (mgr_data -> tol)); hypre_printf("Use default coarse grid solver: %d\n", (mgr_data -> use_default_cgrid_solver)); if((mgr_data -> use_default_fsolver) >= 0) { hypre_printf("Use default AMG solver for full AMG F-relaxation: %d\n", (mgr_data -> use_default_fsolver)); } return hypre_error_flag; }
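The block smoothers above (hypre_blockRelax_setup and hypre_blockRelax_solve, together with hypre_blockRelax, which fuses the two) reduce to a single kernel: extract and invert each blk_size x blk_size diagonal sub-block once, then sweep over the blocks, multiplying each block residual by its stored inverse. The standalone sketch below illustrates that kernel for a serial CSR matrix; all names in it (csr_t, dense_inv, extract_block_inverses, block_jacobi_sweep) are local to the example rather than hypre APIs, and it assumes the matrix dimension is a multiple of the block size and that every diagonal block is nonsingular.

/*
 * Minimal, self-contained sketch of the block-smoother kernel:
 *   setup  -- extract and invert the blk_size x blk_size diagonal blocks;
 *   solve  -- u += inv(D_b) * (f - A*u_old), block by block.
 * Illustrative only; not hypre code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { int n; const int *ia, *ja; const double *a; } csr_t;

/* In-place Gauss-Jordan inverse of an nb x nb row-major block (no pivoting,
 * which is adequate for the small, well-conditioned blocks assumed here). */
static void dense_inv(double *A, int nb)
{
   double *I = calloc((size_t)nb * nb, sizeof *I);
   for (int i = 0; i < nb; i++) I[i * nb + i] = 1.0;
   for (int k = 0; k < nb; k++) {
      double piv = A[k * nb + k];
      for (int j = 0; j < nb; j++) { A[k * nb + j] /= piv; I[k * nb + j] /= piv; }
      for (int i = 0; i < nb; i++) {
         if (i == k) continue;
         double m = A[i * nb + k];
         for (int j = 0; j < nb; j++) {
            A[i * nb + j] -= m * A[k * nb + j];
            I[i * nb + j] -= m * I[k * nb + j];
         }
      }
   }
   memcpy(A, I, (size_t)nb * nb * sizeof *A);
   free(I);
}

/* Setup: store inv(D_b) for every diagonal block (nb*nb entries per block). */
static void extract_block_inverses(const csr_t *A, int nb, double *diaginv)
{
   int nblk = A->n / nb;
   for (int b = 0; b < nblk; b++) {
      double *D = diaginv + b * nb * nb;
      memset(D, 0, (size_t)nb * nb * sizeof *D);
      for (int k = 0; k < nb; k++) {
         int row = b * nb + k;
         for (int p = A->ia[row]; p < A->ia[row + 1]; p++) {
            int col = A->ja[p];
            if (col >= b * nb && col < (b + 1) * nb) D[k * nb + (col - b * nb)] = A->a[p];
         }
      }
      dense_inv(D, nb);
   }
}

/* Solve: one block-Jacobi sweep, u += inv(D_b) * (f - A*u_old) per block. */
static void block_jacobi_sweep(const csr_t *A, const double *f, double *u,
                               int nb, const double *diaginv)
{
   int n = A->n, nblk = n / nb;
   double *uold = malloc((size_t)n * sizeof *uold);
   double *res  = malloc((size_t)nb * sizeof *res);
   memcpy(uold, u, (size_t)n * sizeof *uold);
   for (int b = 0; b < nblk; b++) {
      for (int j = 0; j < nb; j++) {
         int row = b * nb + j;
         res[j] = f[row];
         for (int p = A->ia[row]; p < A->ia[row + 1]; p++) res[j] -= A->a[p] * uold[A->ja[p]];
      }
      for (int j = 0; j < nb; j++)
         for (int k = 0; k < nb; k++) u[b * nb + j] += diaginv[b * nb * nb + j * nb + k] * res[k];
   }
   free(uold);
   free(res);
}

int main(void)
{
   /* Two 2x2 blocks: A = [[4,1],[1,3]] (+) [[2,1],[1,2]], f = ones. */
   const int    ia[] = {0, 2, 4, 6, 8};
   const int    ja[] = {0, 1, 0, 1, 2, 3, 2, 3};
   const double a[]  = {4, 1, 1, 3, 2, 1, 1, 2};
   csr_t A = {4, ia, ja, a};
   double f[4] = {1, 1, 1, 1}, u[4] = {0, 0, 0, 0}, diaginv[8];

   extract_block_inverses(&A, 2, diaginv);
   for (int sweep = 0; sweep < 5; sweep++) block_jacobi_sweep(&A, f, u, 2, diaginv);
   for (int i = 0; i < 4; i++) printf("u[%d] = %g\n", i, u[i]);
   return 0;
}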
nested_loop.c
#include <stdio.h>
#include "assert.h"
#include <unistd.h>

#define TRIALS 1
#define N 960

int main()
{
  int fail = 0;
  double A[N], B[N], C[N];

  for (int i = 0; i < N; i++) {
    A[i] = 0.0;
    B[i] = 0.0;
    C[i] = 1.0;
  }

  int nte = 32;       // number of teams
  int tl = 64;        // thread limit per team
  int blockSize = tl; // one distribute chunk per team

  for (int t = 0; t < TRIALS; t++) {
    #pragma omp target
    #pragma omp teams num_teams(nte) thread_limit(tl)
    {
      // The outer loop is distributed across teams in chunks of blockSize;
      // each chunk is then worked on by the team's threads in parallel.
      #pragma omp distribute
      for (int j = 0; j < 256; j += blockSize) {
        #pragma omp parallel for
        for (int i = j; i < j + blockSize; i++) {
          A[i] += B[i] + C[i];
        }
      }
    }
  }

  // Only the first 256 entries are touched by the loops above, so only those
  // are verified. Each trial adds B[i] + C[i] = 1.0 to A[i].
  for (int i = 0; i < 256; i++) {
    if (A[i] != TRIALS) {
      printf("Error at A[%d], h = %lf, d = %lf\n", i,
             (double)(0.0 + 1.0) * TRIALS, A[i]);
      fail = 1;
    }
  }

  if (fail)
    printf("Failed\n");
  else
    printf("Succeeded\n");

  return 0;
}
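The target region in nested_loop.c blocks the iteration space by hand: distribute hands one blockSize chunk to each team and the inner parallel for spreads that chunk over the team's threads. The same decomposition can usually be expressed with a single combined construct, as in the sketch below; this is an illustrative rewrite, not part of the test suite, and the dist_schedule chunk of 64 mirrors the manual blockSize.

// combined_construct_sketch.c -- illustrative rewrite of the kernel in
// nested_loop.c using the combined construct; not part of the test suite.
#include <stdio.h>

#define TRIALS 1
#define N 256

int main(void)
{
   double A[N], B[N], C[N];
   for (int i = 0; i < N; i++) { A[i] = 0.0; B[i] = 0.0; C[i] = 1.0; }

   for (int t = 0; t < TRIALS; t++) {
      // One directive replaces the nested teams/distribute/parallel for:
      // each team receives a static chunk of 64 iterations (the old blockSize).
      #pragma omp target teams distribute parallel for \
              num_teams(32) thread_limit(64) dist_schedule(static, 64)
      for (int i = 0; i < N; i++)
         A[i] += B[i] + C[i];
   }

   int fail = 0;
   for (int i = 0; i < N; i++)
      if (A[i] != TRIALS) fail = 1;
   printf("%s\n", fail ? "Failed" : "Succeeded");
   return fail;
}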
callback.h
#ifndef _BSD_SOURCE #define _BSD_SOURCE #endif #define _DEFAULT_SOURCE #include <stdio.h> #ifndef __STDC_FORMAT_MACROS #define __STDC_FORMAT_MACROS #endif #include <inttypes.h> #include <omp.h> #include <ompt.h> #include "ompt-signal.h" // Used to detect architecture #include "../../src/kmp_platform.h" static const char* ompt_thread_type_t_values[] = { NULL, "ompt_thread_initial", "ompt_thread_worker", "ompt_thread_other" }; static const char* ompt_task_status_t_values[] = { NULL, "ompt_task_complete", "ompt_task_yield", "ompt_task_cancel", "ompt_task_others" }; static const char* ompt_cancel_flag_t_values[] = { "ompt_cancel_parallel", "ompt_cancel_sections", "ompt_cancel_do", "ompt_cancel_taskgroup", "ompt_cancel_activated", "ompt_cancel_detected", "ompt_cancel_discarded_task" }; static void format_task_type(int type, char *buffer) { char *progress = buffer; if (type & ompt_task_initial) progress += sprintf(progress, "ompt_task_initial"); if (type & ompt_task_implicit) progress += sprintf(progress, "ompt_task_implicit"); if (type & ompt_task_explicit) progress += sprintf(progress, "ompt_task_explicit"); if (type & ompt_task_target) progress += sprintf(progress, "ompt_task_target"); if (type & ompt_task_undeferred) progress += sprintf(progress, "|ompt_task_undeferred"); if (type & ompt_task_untied) progress += sprintf(progress, "|ompt_task_untied"); if (type & ompt_task_final) progress += sprintf(progress, "|ompt_task_final"); if (type & ompt_task_mergeable) progress += sprintf(progress, "|ompt_task_mergeable"); if (type & ompt_task_merged) progress += sprintf(progress, "|ompt_task_merged"); } static ompt_set_callback_t ompt_set_callback; static ompt_get_callback_t ompt_get_callback; static ompt_get_state_t ompt_get_state; static ompt_get_task_info_t ompt_get_task_info; static ompt_get_thread_data_t ompt_get_thread_data; static ompt_get_parallel_info_t ompt_get_parallel_info; static ompt_get_unique_id_t ompt_get_unique_id; static ompt_get_num_procs_t ompt_get_num_procs; static ompt_get_num_places_t ompt_get_num_places; static ompt_get_place_proc_ids_t ompt_get_place_proc_ids; static ompt_get_place_num_t ompt_get_place_num; static ompt_get_partition_place_nums_t ompt_get_partition_place_nums; static ompt_get_proc_id_t ompt_get_proc_id; static ompt_enumerate_states_t ompt_enumerate_states; static ompt_enumerate_mutex_impls_t ompt_enumerate_mutex_impls; static void print_ids(int level) { int task_type, thread_num; omp_frame_t *frame; ompt_data_t *task_parallel_data; ompt_data_t *task_data; int exists_task = ompt_get_task_info(level, &task_type, &task_data, &frame, &task_parallel_data, &thread_num); char buffer[2048]; format_task_type(task_type, buffer); if (frame) printf("%" PRIu64 ": task level %d: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", exit_frame=%p, reenter_frame=%p, " "task_type=%s=%d, thread_num=%d\n", ompt_get_thread_data()->value, level, exists_task ? task_parallel_data->value : 0, exists_task ? 
task_data->value : 0, frame->exit_frame, frame->enter_frame, buffer, task_type, thread_num); } #define get_frame_address(level) __builtin_frame_address(level) #define print_frame(level) \ printf("%" PRIu64 ": __builtin_frame_address(%d)=%p\n", \ ompt_get_thread_data()->value, level, get_frame_address(level)) // clang (version 5.0 and above) adds an intermediate function call with debug flag (-g) #if defined(TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN) #if defined(DEBUG) && defined(__clang__) && __clang_major__ >= 5 #define print_frame_from_outlined_fn(level) print_frame(level+1) #else #define print_frame_from_outlined_fn(level) print_frame(level) #endif #if defined(__clang__) && __clang_major__ >= 5 #warning "Clang 5.0 and later add an additional wrapper for outlined functions when compiling with debug information." #warning "Please define -DDEBUG iff you manually pass in -g to make the tests succeed!" #endif #endif // This macro helps to define a label at the current position that can be used // to get the current address in the code. // // For print_current_address(): // To reliably determine the offset between the address of the label and the // actual return address, we insert a NOP instruction as a jump target as the // compiler would otherwise insert an instruction that we can't control. The // instruction length is target dependent and is explained below. // // (The empty block between "#pragma omp ..." and the __asm__ statement is a // workaround for a bug in the Intel Compiler.) #define define_ompt_label(id) \ {} \ __asm__("nop"); \ ompt_label_##id: // This macro helps to get the address of a label that is inserted by the above // macro define_ompt_label(). The address is obtained with a GNU extension // (&&label) that has been tested with gcc, clang and icc. #define get_ompt_label_address(id) (&& ompt_label_##id) // This macro prints the exact address that a previously called runtime function // returns to. #define print_current_address(id) \ define_ompt_label(id) \ print_possible_return_addresses(get_ompt_label_address(id)) #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // On X86 the NOP instruction is 1 byte long. In addition, the comiler inserts // a MOV instruction for non-void runtime functions which is 3 bytes long. #define print_possible_return_addresses(addr) \ printf("%" PRIu64 ": current_address=%p or %p for non-void functions\n", \ ompt_get_thread_data()->value, ((char *)addr) - 1, ((char *)addr) - 4) #elif KMP_ARCH_PPC64 // On Power the NOP instruction is 4 bytes long. In addition, the compiler // inserts an LD instruction which accounts for another 4 bytes. In contrast to // X86 this instruction is always there, even for void runtime functions. #define print_possible_return_addresses(addr) \ printf("%" PRIu64 ": current_address=%p\n", ompt_get_thread_data()->value, \ ((char *)addr) - 8) #elif KMP_ARCH_AARCH64 // On AArch64 the NOP instruction is 4 bytes long, can be followed by inserted // store instruction (another 4 bytes long). #define print_possible_return_addresses(addr) \ printf("%" PRIu64 ": current_address=%p or %p\n", ompt_get_thread_data()->value, \ ((char *)addr) - 4, ((char *)addr) - 8) #else #error Unsupported target architecture, cannot determine address offset! #endif // This macro performs a somewhat similar job to print_current_address(), except // that it discards a certain number of nibbles from the address and only prints // the most significant bits / nibbles. This can be used for cases where the // return address can only be approximated. 
// // To account for overflows (ie the most significant bits / nibbles have just // changed as we are a few bytes above the relevant power of two) the addresses // of the "current" and of the "previous block" are printed. #define print_fuzzy_address(id) \ define_ompt_label(id) \ print_fuzzy_address_blocks(get_ompt_label_address(id)) // If you change this define you need to adapt all capture patterns in the tests // to include or discard the new number of nibbles! #define FUZZY_ADDRESS_DISCARD_NIBBLES 2 #define FUZZY_ADDRESS_DISCARD_BYTES (1 << ((FUZZY_ADDRESS_DISCARD_NIBBLES) * 4)) #define print_fuzzy_address_blocks(addr) \ printf("%" PRIu64 ": fuzzy_address=0x%" PRIx64 " or 0x%" PRIx64 " (%p)\n", \ ompt_get_thread_data()->value, \ ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES - 1, \ ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES, addr) static void on_ompt_callback_mutex_acquire( ompt_mutex_kind_t kind, unsigned int hint, unsigned int impl, omp_wait_id_t wait_id, const void *codeptr_ra) { switch(kind) { case ompt_mutex_lock: printf("%" PRIu64 ": ompt_event_wait_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; case ompt_mutex_nest_lock: printf("%" PRIu64 ": ompt_event_wait_nest_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; case ompt_mutex_critical: printf("%" PRIu64 ": ompt_event_wait_critical: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; case ompt_mutex_atomic: printf("%" PRIu64 ": ompt_event_wait_atomic: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; case ompt_mutex_ordered: printf("%" PRIu64 ": ompt_event_wait_ordered: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; default: break; } } static void on_ompt_callback_mutex_acquired( ompt_mutex_kind_t kind, omp_wait_id_t wait_id, const void *codeptr_ra) { switch(kind) { case ompt_mutex_lock: printf("%" PRIu64 ": ompt_event_acquired_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_nest_lock: printf("%" PRIu64 ": ompt_event_acquired_nest_lock_first: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_critical: printf("%" PRIu64 ": ompt_event_acquired_critical: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_atomic: printf("%" PRIu64 ": ompt_event_acquired_atomic: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_ordered: printf("%" PRIu64 ": ompt_event_acquired_ordered: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; default: break; } } static void on_ompt_callback_mutex_released( ompt_mutex_kind_t kind, omp_wait_id_t wait_id, const void *codeptr_ra) { switch(kind) { case ompt_mutex_lock: printf("%" PRIu64 ": ompt_event_release_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_nest_lock: printf("%" PRIu64 ": ompt_event_release_nest_lock_last: 
wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_critical: printf("%" PRIu64 ": ompt_event_release_critical: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_atomic: printf("%" PRIu64 ": ompt_event_release_atomic: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_ordered: printf("%" PRIu64 ": ompt_event_release_ordered: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; default: break; } } static void on_ompt_callback_nest_lock( ompt_scope_endpoint_t endpoint, omp_wait_id_t wait_id, const void *codeptr_ra) { switch(endpoint) { case ompt_scope_begin: printf("%" PRIu64 ": ompt_event_acquired_nest_lock_next: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_scope_end: printf("%" PRIu64 ": ompt_event_release_nest_lock_prev: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; } } static void on_ompt_callback_sync_region( ompt_sync_region_kind_t kind, ompt_scope_endpoint_t endpoint, ompt_data_t *parallel_data, ompt_data_t *task_data, const void *codeptr_ra) { switch(endpoint) { case ompt_scope_begin: switch(kind) { case ompt_sync_region_barrier: printf("%" PRIu64 ": ompt_event_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); print_ids(0); break; case ompt_sync_region_taskwait: printf("%" PRIu64 ": ompt_event_taskwait_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; case ompt_sync_region_taskgroup: printf("%" PRIu64 ": ompt_event_taskgroup_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; } break; case ompt_scope_end: switch(kind) { case ompt_sync_region_barrier: printf("%" PRIu64 ": ompt_event_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra); break; case ompt_sync_region_taskwait: printf("%" PRIu64 ": ompt_event_taskwait_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra); break; case ompt_sync_region_taskgroup: printf("%" PRIu64 ": ompt_event_taskgroup_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra); break; } break; } } static void on_ompt_callback_sync_region_wait( ompt_sync_region_kind_t kind, ompt_scope_endpoint_t endpoint, ompt_data_t *parallel_data, ompt_data_t *task_data, const void *codeptr_ra) { switch(endpoint) { case ompt_scope_begin: switch(kind) { case ompt_sync_region_barrier: printf("%" PRIu64 ": ompt_event_wait_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; case ompt_sync_region_taskwait: printf("%" PRIu64 ": ompt_event_wait_taskwait_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", 
ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; case ompt_sync_region_taskgroup: printf("%" PRIu64 ": ompt_event_wait_taskgroup_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; } break; case ompt_scope_end: switch(kind) { case ompt_sync_region_barrier: printf("%" PRIu64 ": ompt_event_wait_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra); break; case ompt_sync_region_taskwait: printf("%" PRIu64 ": ompt_event_wait_taskwait_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra); break; case ompt_sync_region_taskgroup: printf("%" PRIu64 ": ompt_event_wait_taskgroup_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra); break; } break; } } static void on_ompt_callback_flush( ompt_data_t *thread_data, const void *codeptr_ra) { printf("%" PRIu64 ": ompt_event_flush: codeptr_ra=%p\n", thread_data->value, codeptr_ra); } static void on_ompt_callback_cancel( ompt_data_t *task_data, int flags, const void *codeptr_ra) { const char* first_flag_value; const char* second_flag_value; if(flags & ompt_cancel_parallel) first_flag_value = ompt_cancel_flag_t_values[0]; else if(flags & ompt_cancel_sections) first_flag_value = ompt_cancel_flag_t_values[1]; else if(flags & ompt_cancel_do) first_flag_value = ompt_cancel_flag_t_values[2]; else if(flags & ompt_cancel_taskgroup) first_flag_value = ompt_cancel_flag_t_values[3]; if(flags & ompt_cancel_activated) second_flag_value = ompt_cancel_flag_t_values[4]; else if(flags & ompt_cancel_detected) second_flag_value = ompt_cancel_flag_t_values[5]; else if(flags & ompt_cancel_discarded_task) second_flag_value = ompt_cancel_flag_t_values[6]; printf("%" PRIu64 ": ompt_event_cancel: task_data=%" PRIu64 ", flags=%s|%s=%" PRIu32 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, task_data->value, first_flag_value, second_flag_value, flags, codeptr_ra); } static void on_ompt_callback_idle( ompt_scope_endpoint_t endpoint) { switch(endpoint) { case ompt_scope_begin: printf("%" PRIu64 ": ompt_event_idle_begin:\n", ompt_get_thread_data()->value); break; case ompt_scope_end: printf("%" PRIu64 ": ompt_event_idle_end:\n", ompt_get_thread_data()->value); break; } } static void on_ompt_callback_implicit_task( ompt_scope_endpoint_t endpoint, ompt_data_t *parallel_data, ompt_data_t *task_data, unsigned int team_size, unsigned int thread_num) { switch(endpoint) { case ompt_scope_begin: if(task_data->ptr) printf("%s\n", "0: task_data initially not null"); task_data->value = ompt_get_unique_id(); printf("%" PRIu64 ": ompt_event_implicit_task_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", team_size=%" PRIu32 ", thread_num=%" PRIu32 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, team_size, thread_num); break; case ompt_scope_end: printf("%" PRIu64 ": ompt_event_implicit_task_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", team_size=%" PRIu32 ", thread_num=%" PRIu32 "\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, team_size, thread_num); break; } } static void on_ompt_callback_lock_init( 
ompt_mutex_kind_t kind, unsigned int hint, unsigned int impl, omp_wait_id_t wait_id, const void *codeptr_ra) { switch(kind) { case ompt_mutex_lock: printf("%" PRIu64 ": ompt_event_init_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; case ompt_mutex_nest_lock: printf("%" PRIu64 ": ompt_event_init_nest_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; default: break; } } static void on_ompt_callback_lock_destroy( ompt_mutex_kind_t kind, omp_wait_id_t wait_id, const void *codeptr_ra) { switch(kind) { case ompt_mutex_lock: printf("%" PRIu64 ": ompt_event_destroy_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_nest_lock: printf("%" PRIu64 ": ompt_event_destroy_nest_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; default: break; } } static void on_ompt_callback_work( ompt_work_type_t wstype, ompt_scope_endpoint_t endpoint, ompt_data_t *parallel_data, ompt_data_t *task_data, uint64_t count, const void *codeptr_ra) { switch(endpoint) { case ompt_scope_begin: switch(wstype) { case ompt_work_loop: printf("%" PRIu64 ": ompt_event_loop_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_sections: printf("%" PRIu64 ": ompt_event_sections_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_single_executor: printf("%" PRIu64 ": ompt_event_single_in_block_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_single_other: printf("%" PRIu64 ": ompt_event_single_others_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_workshare: //impl break; case ompt_work_distribute: printf("%" PRIu64 ": ompt_event_distribute_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_taskloop: //impl printf("%" PRIu64 ": ompt_event_taskloop_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; } break; case ompt_scope_end: switch(wstype) { case ompt_work_loop: printf("%" PRIu64 ": ompt_event_loop_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_sections: printf("%" PRIu64 ": ompt_event_sections_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_single_executor: printf("%" PRIu64 ": 
ompt_event_single_in_block_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_single_other: printf("%" PRIu64 ": ompt_event_single_others_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_workshare: //impl break; case ompt_work_distribute: printf("%" PRIu64 ": ompt_event_distribute_end: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_taskloop: //impl printf("%" PRIu64 ": ompt_event_taskloop_end: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; } break; } } static void on_ompt_callback_master( ompt_scope_endpoint_t endpoint, ompt_data_t *parallel_data, ompt_data_t *task_data, const void *codeptr_ra) { switch(endpoint) { case ompt_scope_begin: printf("%" PRIu64 ": ompt_event_master_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; case ompt_scope_end: printf("%" PRIu64 ": ompt_event_master_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; } } static void on_ompt_callback_parallel_begin( ompt_data_t *encountering_task_data, const omp_frame_t *encountering_task_frame, ompt_data_t* parallel_data, uint32_t requested_team_size, ompt_invoker_t invoker, const void *codeptr_ra) { if(parallel_data->ptr) printf("0: parallel_data initially not null\n"); parallel_data->value = ompt_get_unique_id(); printf("%" PRIu64 ": ompt_event_parallel_begin: parent_task_id=%" PRIu64 ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, parallel_id=%" PRIu64 ", requested_team_size=%" PRIu32 ", codeptr_ra=%p, invoker=%d\n", ompt_get_thread_data()->value, encountering_task_data->value, encountering_task_frame->exit_frame, encountering_task_frame->enter_frame, parallel_data->value, requested_team_size, codeptr_ra, invoker); } static void on_ompt_callback_parallel_end( ompt_data_t *parallel_data, ompt_data_t *encountering_task_data, ompt_invoker_t invoker, const void *codeptr_ra) { printf("%" PRIu64 ": ompt_event_parallel_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", invoker=%d, codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, encountering_task_data->value, invoker, codeptr_ra); } static void on_ompt_callback_task_create( ompt_data_t *encountering_task_data, const omp_frame_t *encountering_task_frame, ompt_data_t* new_task_data, int type, int has_dependences, const void *codeptr_ra) { if(new_task_data->ptr) printf("0: new_task_data initially not null\n"); new_task_data->value = ompt_get_unique_id(); char buffer[2048]; format_task_type(type, buffer); //there is no parallel_begin callback for implicit parallel region //thus it is initialized in initial task if(type & ompt_task_initial) { ompt_data_t *parallel_data; ompt_get_parallel_info(0, &parallel_data, NULL); if(parallel_data->ptr) printf("%s\n", "0: parallel_data initially not null"); parallel_data->value = ompt_get_unique_id(); } 
printf("%" PRIu64 ": ompt_event_task_create: parent_task_id=%" PRIu64 ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, new_task_id=%" PRIu64 ", codeptr_ra=%p, task_type=%s=%d, has_dependences=%s\n", ompt_get_thread_data()->value, encountering_task_data ? encountering_task_data->value : 0, encountering_task_frame ? encountering_task_frame->exit_frame : NULL, encountering_task_frame ? encountering_task_frame->enter_frame : NULL, new_task_data->value, codeptr_ra, buffer, type, has_dependences ? "yes" : "no"); } static void on_ompt_callback_task_schedule( ompt_data_t *first_task_data, ompt_task_status_t prior_task_status, ompt_data_t *second_task_data) { printf("%" PRIu64 ": ompt_event_task_schedule: first_task_id=%" PRIu64 ", second_task_id=%" PRIu64 ", prior_task_status=%s=%d\n", ompt_get_thread_data()->value, first_task_data->value, second_task_data->value, ompt_task_status_t_values[prior_task_status], prior_task_status); if(prior_task_status == ompt_task_complete) { printf("%" PRIu64 ": ompt_event_task_end: task_id=%" PRIu64 "\n", ompt_get_thread_data()->value, first_task_data->value); } } static void on_ompt_callback_task_dependences( ompt_data_t *task_data, const ompt_task_dependence_t *deps, int ndeps) { printf("%" PRIu64 ": ompt_event_task_dependences: task_id=%" PRIu64 ", deps=%p, ndeps=%d\n", ompt_get_thread_data()->value, task_data->value, (void *)deps, ndeps); } static void on_ompt_callback_task_dependence( ompt_data_t *first_task_data, ompt_data_t *second_task_data) { printf("%" PRIu64 ": ompt_event_task_dependence_pair: first_task_id=%" PRIu64 ", second_task_id=%" PRIu64 "\n", ompt_get_thread_data()->value, first_task_data->value, second_task_data->value); } static void on_ompt_callback_thread_begin( ompt_thread_type_t thread_type, ompt_data_t *thread_data) { if(thread_data->ptr) printf("%s\n", "0: thread_data initially not null"); thread_data->value = ompt_get_unique_id(); printf("%" PRIu64 ": ompt_event_thread_begin: thread_type=%s=%d, thread_id=%" PRIu64 "\n", ompt_get_thread_data()->value, ompt_thread_type_t_values[thread_type], thread_type, thread_data->value); } static void on_ompt_callback_thread_end( ompt_data_t *thread_data) { printf("%" PRIu64 ": ompt_event_thread_end: thread_id=%" PRIu64 "\n", ompt_get_thread_data()->value, thread_data->value); } static int on_ompt_callback_control_tool( uint64_t command, uint64_t modifier, void *arg, const void *codeptr_ra) { omp_frame_t* omptTaskFrame; ompt_get_task_info(0, NULL, (ompt_data_t**) NULL, &omptTaskFrame, NULL, NULL); printf("%" PRIu64 ": ompt_event_control_tool: command=%" PRIu64 ", modifier=%" PRIu64 ", arg=%p, codeptr_ra=%p, current_task_frame.exit=%p, current_task_frame.reenter=%p \n", ompt_get_thread_data()->value, command, modifier, arg, codeptr_ra, omptTaskFrame->exit_frame, omptTaskFrame->enter_frame); return 0; //success } #define register_callback_t(name, type) \ do{ \ type f_##name = &on_##name; \ if (ompt_set_callback(name, (ompt_callback_t)f_##name) == \ ompt_set_never) \ printf("0: Could not register callback '" #name "'\n"); \ }while(0) #define register_callback(name) register_callback_t(name, name##_t) int ompt_initialize( ompt_function_lookup_t lookup, ompt_data_t *tool_data) { ompt_set_callback = (ompt_set_callback_t) lookup("ompt_set_callback"); ompt_get_callback = (ompt_get_callback_t) lookup("ompt_get_callback"); ompt_get_state = (ompt_get_state_t) lookup("ompt_get_state"); ompt_get_task_info = (ompt_get_task_info_t) lookup("ompt_get_task_info"); ompt_get_thread_data = 
(ompt_get_thread_data_t) lookup("ompt_get_thread_data"); ompt_get_parallel_info = (ompt_get_parallel_info_t) lookup("ompt_get_parallel_info"); ompt_get_unique_id = (ompt_get_unique_id_t) lookup("ompt_get_unique_id"); ompt_get_num_procs = (ompt_get_num_procs_t) lookup("ompt_get_num_procs"); ompt_get_num_places = (ompt_get_num_places_t) lookup("ompt_get_num_places"); ompt_get_place_proc_ids = (ompt_get_place_proc_ids_t) lookup("ompt_get_place_proc_ids"); ompt_get_place_num = (ompt_get_place_num_t) lookup("ompt_get_place_num"); ompt_get_partition_place_nums = (ompt_get_partition_place_nums_t) lookup("ompt_get_partition_place_nums"); ompt_get_proc_id = (ompt_get_proc_id_t) lookup("ompt_get_proc_id"); ompt_enumerate_states = (ompt_enumerate_states_t) lookup("ompt_enumerate_states"); ompt_enumerate_mutex_impls = (ompt_enumerate_mutex_impls_t) lookup("ompt_enumerate_mutex_impls"); register_callback(ompt_callback_mutex_acquire); register_callback_t(ompt_callback_mutex_acquired, ompt_callback_mutex_t); register_callback_t(ompt_callback_mutex_released, ompt_callback_mutex_t); register_callback(ompt_callback_nest_lock); register_callback(ompt_callback_sync_region); register_callback_t(ompt_callback_sync_region_wait, ompt_callback_sync_region_t); register_callback(ompt_callback_control_tool); register_callback(ompt_callback_flush); register_callback(ompt_callback_cancel); register_callback(ompt_callback_idle); register_callback(ompt_callback_implicit_task); register_callback_t(ompt_callback_lock_init, ompt_callback_mutex_acquire_t); register_callback_t(ompt_callback_lock_destroy, ompt_callback_mutex_t); register_callback(ompt_callback_work); register_callback(ompt_callback_master); register_callback(ompt_callback_parallel_begin); register_callback(ompt_callback_parallel_end); register_callback(ompt_callback_task_create); register_callback(ompt_callback_task_schedule); register_callback(ompt_callback_task_dependences); register_callback(ompt_callback_task_dependence); register_callback(ompt_callback_thread_begin); register_callback(ompt_callback_thread_end); printf("0: NULL_POINTER=%p\n", (void*)NULL); return 1; //success } void ompt_finalize(ompt_data_t *tool_data) { printf("0: ompt_event_runtime_shutdown\n"); } ompt_start_tool_result_t* ompt_start_tool( unsigned int omp_version, const char *runtime_version) { static ompt_start_tool_result_t ompt_start_tool_result = {&ompt_initialize,&ompt_finalize, 0}; return &ompt_start_tool_result; }
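
// Illustrative only (not part of the original test tool): a minimal OpenMP
// driver showing which of the callbacks registered above a few common
// constructs would trigger. It is a sketch, kept behind "#if 0" so it cannot
// clash with the real test sources that include this file.
#if 0
#include <omp.h>

int main(void) {
  omp_lock_t lock;
  omp_init_lock(&lock);               // ompt_callback_lock_init

  #pragma omp parallel num_threads(2) // parallel_begin, implicit_task begin
  {
    #pragma omp critical              // mutex_acquire/acquired/released (critical)
    { }

    omp_set_lock(&lock);              // wait_lock, then acquired_lock
    omp_unset_lock(&lock);            // release_lock

    #pragma omp barrier               // sync_region(_wait): barrier begin/end
  }                                   // implicit_task end, parallel_end

  omp_destroy_lock(&lock);            // ompt_callback_lock_destroy
  return 0;
}
#endif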
par_csr_matop.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #include "_hypre_utilities.h" #include "_hypre_parcsr_mv.h" #include "_hypre_lapack.h" #include "_hypre_blas.h" /*-------------------------------------------------------------------------- * hypre_ParMatmul_RowSizes: * * Computes sizes of C rows. Formerly part of hypre_ParMatmul but removed * so it can also be used for multiplication of Boolean matrices. * * Arrays computed: C_diag_i, C_offd_i. * * Arrays needed: (17, all HYPRE_Int*) * rownnz_A, * A_diag_i, A_diag_j, * A_offd_i, A_offd_j, * B_diag_i, B_diag_j, * B_offd_i, B_offd_j, * B_ext_i, B_ext_j, * col_map_offd_B, col_map_offd_B, * B_offd_i, B_offd_j, * B_ext_i, B_ext_j. * * Scalars computed: C_diag_size, C_offd_size. * * Scalars needed: * num_rownnz_A, num_rows_diag_A, num_cols_offd_A, allsquare, * first_col_diag_B, num_cols_diag_B, num_cols_offd_B, num_cols_offd_C *--------------------------------------------------------------------------*/ void hypre_ParMatmul_RowSizes( HYPRE_MemoryLocation memory_location, HYPRE_Int **C_diag_i, HYPRE_Int **C_offd_i, HYPRE_Int *rownnz_A, HYPRE_Int *A_diag_i, HYPRE_Int *A_diag_j, HYPRE_Int *A_offd_i, HYPRE_Int *A_offd_j, HYPRE_Int *B_diag_i, HYPRE_Int *B_diag_j, HYPRE_Int *B_offd_i, HYPRE_Int *B_offd_j, HYPRE_Int *B_ext_diag_i, HYPRE_Int *B_ext_diag_j, HYPRE_Int *B_ext_offd_i, HYPRE_Int *B_ext_offd_j, HYPRE_Int *map_B_to_C, HYPRE_Int *C_diag_size, HYPRE_Int *C_offd_size, HYPRE_Int num_rownnz_A, HYPRE_Int num_rows_diag_A, HYPRE_Int num_cols_offd_A, HYPRE_Int allsquare, HYPRE_Int num_cols_diag_B, HYPRE_Int num_cols_offd_B, HYPRE_Int num_cols_offd_C ) { HYPRE_Int *jj_count_diag_array; HYPRE_Int *jj_count_offd_array; HYPRE_Int start_indexing = 0; /* start indexing for C_data at 0 */ HYPRE_Int num_threads = hypre_NumThreads(); *C_diag_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A + 1, memory_location); *C_offd_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A + 1, memory_location); jj_count_diag_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Loop over rows of A *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int *B_marker = NULL; HYPRE_Int jj_row_begin_diag, jj_count_diag; HYPRE_Int jj_row_begin_offd, jj_count_offd; HYPRE_Int i1, ii1, i2, i3, jj2, jj3; HYPRE_Int size, rest, num_threads; HYPRE_Int ii, ns, ne; num_threads = hypre_NumActiveThreads(); size = num_rownnz_A / num_threads; rest = num_rownnz_A - size * num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii * size + ii; ne = (ii + 1) * size + ii + 1; } else { ns = ii * size + rest; ne = (ii + 1) * size + rest; } jj_count_diag = start_indexing; jj_count_offd = start_indexing; if (num_cols_diag_B || num_cols_offd_C) { B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B + num_cols_offd_C, HYPRE_MEMORY_HOST); } for (i1 = 0; i1 < num_cols_diag_B + num_cols_offd_C; i1++) { B_marker[i1] = -1; } for (i1 = ns; i1 < ne; i1++) { jj_row_begin_diag = jj_count_diag; jj_row_begin_offd = jj_count_offd; if (rownnz_A) { ii1 = rownnz_A[i1]; } else { ii1 
= i1; /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). *--------------------------------------------------------------------*/ if (allsquare) { B_marker[i1] = jj_count_diag; jj_count_diag++; } } /*----------------------------------------------------------------- * Loop over entries in row ii1 of A_offd. *-----------------------------------------------------------------*/ if (num_cols_offd_A) { for (jj2 = A_offd_i[ii1]; jj2 < A_offd_i[ii1 + 1]; jj2++) { i2 = A_offd_j[jj2]; /*----------------------------------------------------------- * Loop over entries in row i2 of B_ext. *-----------------------------------------------------------*/ for (jj3 = B_ext_offd_i[i2]; jj3 < B_ext_offd_i[i2 + 1]; jj3++) { i3 = num_cols_diag_B + B_ext_offd_j[jj3]; /*-------------------------------------------------------- * Check B_marker to see that C_{ii1,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_offd) { B_marker[i3] = jj_count_offd; jj_count_offd++; } } for (jj3 = B_ext_diag_i[i2]; jj3 < B_ext_diag_i[i2 + 1]; jj3++) { i3 = B_ext_diag_j[jj3]; if (B_marker[i3] < jj_row_begin_diag) { B_marker[i3] = jj_count_diag; jj_count_diag++; } } } } /*----------------------------------------------------------------- * Loop over entries in row ii1 of A_diag. *-----------------------------------------------------------------*/ for (jj2 = A_diag_i[ii1]; jj2 < A_diag_i[ii1 + 1]; jj2++) { i2 = A_diag_j[jj2]; /*----------------------------------------------------------- * Loop over entries in row i2 of B_diag. *-----------------------------------------------------------*/ for (jj3 = B_diag_i[i2]; jj3 < B_diag_i[i2 + 1]; jj3++) { i3 = B_diag_j[jj3]; /*-------------------------------------------------------- * Check B_marker to see that C_{ii1,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_diag) { B_marker[i3] = jj_count_diag; jj_count_diag++; } } /*----------------------------------------------------------- * Loop over entries in row i2 of B_offd. *-----------------------------------------------------------*/ if (num_cols_offd_B) { for (jj3 = B_offd_i[i2]; jj3 < B_offd_i[i2 + 1]; jj3++) { i3 = num_cols_diag_B + map_B_to_C[B_offd_j[jj3]]; /*-------------------------------------------------------- * Check B_marker to see that C_{ii1,i3} has not already * been accounted for. If it has not, mark it and increment * counter. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_offd) { B_marker[i3] = jj_count_offd; jj_count_offd++; } } } } /*-------------------------------------------------------------------- * Set C_diag_i and C_offd_i for this row. 
*--------------------------------------------------------------------*/ (*C_diag_i)[ii1] = jj_row_begin_diag; (*C_offd_i)[ii1] = jj_row_begin_offd; } jj_count_diag_array[ii] = jj_count_diag; jj_count_offd_array[ii] = jj_count_offd; hypre_TFree(B_marker, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* Correct diag_i and offd_i - phase 1 */ if (ii) { jj_count_diag = jj_count_diag_array[0]; jj_count_offd = jj_count_offd_array[0]; for (i1 = 1; i1 < ii; i1++) { jj_count_diag += jj_count_diag_array[i1]; jj_count_offd += jj_count_offd_array[i1]; } for (i1 = ns; i1 < ne; i1++) { ii1 = rownnz_A ? rownnz_A[i1] : i1; (*C_diag_i)[ii1] += jj_count_diag; (*C_offd_i)[ii1] += jj_count_offd; } } else { (*C_diag_i)[num_rows_diag_A] = 0; (*C_offd_i)[num_rows_diag_A] = 0; for (i1 = 0; i1 < num_threads; i1++) { (*C_diag_i)[num_rows_diag_A] += jj_count_diag_array[i1]; (*C_offd_i)[num_rows_diag_A] += jj_count_offd_array[i1]; } } /* Correct diag_i and offd_i - phase 2 */ if (rownnz_A != NULL) { #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (i1 = ns; i1 < (ne - 1); i1++) { for (ii1 = rownnz_A[i1] + 1; ii1 < rownnz_A[i1 + 1]; ii1++) { (*C_diag_i)[ii1] = (*C_diag_i)[rownnz_A[i1 + 1]]; (*C_offd_i)[ii1] = (*C_offd_i)[rownnz_A[i1 + 1]]; } } if (ii < (num_threads - 1)) { for (ii1 = rownnz_A[ne - 1] + 1; ii1 < rownnz_A[ne]; ii1++) { (*C_diag_i)[ii1] = (*C_diag_i)[rownnz_A[ne]]; (*C_offd_i)[ii1] = (*C_offd_i)[rownnz_A[ne]]; } } else { for (ii1 = rownnz_A[ne - 1] + 1; ii1 < num_rows_diag_A; ii1++) { (*C_diag_i)[ii1] = (*C_diag_i)[num_rows_diag_A]; (*C_offd_i)[ii1] = (*C_offd_i)[num_rows_diag_A]; } } } } /* end parallel loop */ *C_diag_size = (*C_diag_i)[num_rows_diag_A]; *C_offd_size = (*C_offd_i)[num_rows_diag_A]; #ifdef HYPRE_DEBUG HYPRE_Int i; for (i = 0; i < num_rows_diag_A; i++) { hypre_assert((*C_diag_i)[i] <= (*C_diag_i)[i + 1]); hypre_assert((*C_offd_i)[i] <= (*C_offd_i)[i + 1]); } #endif hypre_TFree(jj_count_diag_array, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd_array, HYPRE_MEMORY_HOST); /* End of First Pass */ } /*-------------------------------------------------------------------------- * hypre_ParMatmul: * * Multiplies two ParCSRMatrices A and B and returns the product in * ParCSRMatrix C. 
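 *
 * Minimal usage sketch (illustrative, assuming A and B are fully assembled
 * ParCSRMatrices with compatible partitionings; the returned matrix owns its
 * data and must be destroyed by the caller):
 *
 *    hypre_ParCSRMatrix *C = hypre_ParMatmul(A, B);
 *    ... use C ...
 *    hypre_ParCSRMatrixDestroy(C);
 *
 * On incompatible dimensions the routine sets a HYPRE error and returns NULL.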
*--------------------------------------------------------------------------*/ hypre_ParCSRMatrix* hypre_ParMatmul( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *B ) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MATMUL] -= hypre_MPI_Wtime(); #endif /* ParCSRMatrix A */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_BigInt nrows_A = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt ncols_A = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt *row_starts_A = hypre_ParCSRMatrixRowStarts(A); HYPRE_Int num_rownnz_A; HYPRE_Int *rownnz_A = NULL; /* ParCSRMatrix B */ HYPRE_BigInt nrows_B = hypre_ParCSRMatrixGlobalNumRows(B); HYPRE_BigInt ncols_B = hypre_ParCSRMatrixGlobalNumCols(B); HYPRE_BigInt first_col_diag_B = hypre_ParCSRMatrixFirstColDiag(B); HYPRE_BigInt *col_starts_B = hypre_ParCSRMatrixColStarts(B); HYPRE_BigInt last_col_diag_B; /* A_diag */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int *A_diag_ir = hypre_CSRMatrixRownnz(A_diag); HYPRE_Int num_rownnz_diag_A = hypre_CSRMatrixNumRownnz(A_diag); HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag); /* A_offd */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int *A_offd_ir = hypre_CSRMatrixRownnz(A_offd); HYPRE_Int num_rownnz_offd_A = hypre_CSRMatrixNumRownnz(A_offd); HYPRE_Int num_rows_offd_A = hypre_CSRMatrixNumRows(A_offd); HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); /* B_diag */ hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B); HYPRE_Complex *B_diag_data = hypre_CSRMatrixData(B_diag); HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag); HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag); HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag); HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag); /* B_offd */ hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B); HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); HYPRE_Complex *B_offd_data = hypre_CSRMatrixData(B_offd); HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd); HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd); HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); /* ParCSRMatrix C */ hypre_ParCSRMatrix *C; HYPRE_BigInt *col_map_offd_C; HYPRE_Int *map_B_to_C = NULL; /* C_diag */ hypre_CSRMatrix *C_diag; HYPRE_Complex *C_diag_data; HYPRE_Int *C_diag_i; HYPRE_Int *C_diag_j; HYPRE_Int C_offd_size; HYPRE_Int num_cols_offd_C = 0; /* C_offd */ hypre_CSRMatrix *C_offd; HYPRE_Complex *C_offd_data = NULL; HYPRE_Int *C_offd_i = NULL; HYPRE_Int *C_offd_j = NULL; HYPRE_Int C_diag_size; /* Bs_ext */ hypre_CSRMatrix *Bs_ext; HYPRE_Complex *Bs_ext_data; HYPRE_Int *Bs_ext_i; HYPRE_BigInt *Bs_ext_j; HYPRE_Complex *B_ext_diag_data; HYPRE_Int *B_ext_diag_i; HYPRE_Int *B_ext_diag_j; HYPRE_Int B_ext_diag_size; HYPRE_Complex *B_ext_offd_data; HYPRE_Int *B_ext_offd_i; HYPRE_Int *B_ext_offd_j; HYPRE_BigInt *B_big_offd_j = NULL; HYPRE_Int B_ext_offd_size; HYPRE_Int allsquare = 0; HYPRE_Int num_procs; HYPRE_Int *my_diag_array; HYPRE_Int *my_offd_array; HYPRE_Int max_num_threads; HYPRE_Complex zero = 0.0; HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A); HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B); /* RL: TODO 
cannot guarantee, maybe should never assert hypre_assert(memory_location_A == memory_location_B); */ /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D, * not sure if this is the right thing to do. * Also, need something like this in other places * TODO */ HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B); HYPRE_ANNOTATE_FUNC_BEGIN; max_num_threads = hypre_NumThreads(); my_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST); my_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST); if (ncols_A != nrows_B || num_cols_diag_A != num_rows_diag_B) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, " Error! Incompatible matrix dimensions!\n"); HYPRE_ANNOTATE_FUNC_END; return NULL; } /* if C=A*B is square globally and locally, then C_diag should be square also */ if ( num_rows_diag_A == num_cols_diag_B && nrows_A == ncols_B ) { allsquare = 1; } /* Set rownnz of A */ if (num_rownnz_diag_A != num_rows_diag_A && num_rownnz_offd_A != num_rows_offd_A ) { hypre_MergeOrderedArrays(num_rownnz_diag_A, A_diag_ir, num_rownnz_offd_A, A_offd_ir, &num_rownnz_A, &rownnz_A); } else { num_rownnz_A = hypre_max(num_rows_diag_A, num_rows_offd_A); } /*----------------------------------------------------------------------- * Extract B_ext, i.e. portion of B that is stored on neighbor procs * and needed locally for matrix matrix product *-----------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm, &num_procs); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime(); #endif if (num_procs > 1) { /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings within * hypre_ParCSRMatrixExtractBExt *--------------------------------------------------------------------*/ Bs_ext = hypre_ParCSRMatrixExtractBExt(B, A, 1); Bs_ext_data = hypre_CSRMatrixData(Bs_ext); Bs_ext_i = hypre_CSRMatrixI(Bs_ext); Bs_ext_j = hypre_CSRMatrixBigJ(Bs_ext); } B_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A + 1, HYPRE_MEMORY_HOST); B_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A + 1, HYPRE_MEMORY_HOST); B_ext_diag_size = 0; B_ext_offd_size = 0; last_col_diag_B = first_col_diag_B + (HYPRE_BigInt) num_cols_diag_B - 1; #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_UnorderedBigIntSet set; #pragma omp parallel { HYPRE_Int size, rest, ii; HYPRE_Int ns, ne; HYPRE_Int i1, i, j; HYPRE_Int my_offd_size, my_diag_size; HYPRE_Int cnt_offd, cnt_diag; HYPRE_Int num_threads = hypre_NumActiveThreads(); size = num_cols_offd_A / num_threads; rest = num_cols_offd_A - size * num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii * size + ii; ne = (ii + 1) * size + ii + 1; } else { ns = ii * size + rest; ne = (ii + 1) * size + rest; } my_diag_size = 0; my_offd_size = 0; for (i = ns; i < ne; i++) { B_ext_diag_i[i] = my_diag_size; B_ext_offd_i[i] = my_offd_size; for (j = Bs_ext_i[i]; j < Bs_ext_i[i + 1]; j++) { if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) { my_offd_size++; } else { my_diag_size++; } } } my_diag_array[ii] = my_diag_size; my_offd_array[ii] = my_offd_size; #pragma omp barrier if (ii) { my_diag_size = my_diag_array[0]; my_offd_size = my_offd_array[0]; for (i1 = 1; i1 < ii; i1++) { my_diag_size += my_diag_array[i1]; my_offd_size += my_offd_array[i1]; } for (i1 = ns; i1 < ne; i1++) { B_ext_diag_i[i1] += my_diag_size; B_ext_offd_i[i1] += my_offd_size; } } 
else { B_ext_diag_size = 0; B_ext_offd_size = 0; for (i1 = 0; i1 < num_threads; i1++) { B_ext_diag_size += my_diag_array[i1]; B_ext_offd_size += my_offd_array[i1]; } B_ext_diag_i[num_cols_offd_A] = B_ext_diag_size; B_ext_offd_i[num_cols_offd_A] = B_ext_offd_size; if (B_ext_diag_size) { B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST); B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST); } if (B_ext_offd_size) { B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST); B_big_offd_j = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size, HYPRE_MEMORY_HOST); B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size, HYPRE_MEMORY_HOST); } hypre_UnorderedBigIntSetCreate(&set, B_ext_offd_size + num_cols_offd_B, 16 * hypre_NumThreads()); } #pragma omp barrier cnt_offd = B_ext_offd_i[ns]; cnt_diag = B_ext_diag_i[ns]; for (i = ns; i < ne; i++) { for (j = Bs_ext_i[i]; j < Bs_ext_i[i + 1]; j++) { if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) { hypre_UnorderedBigIntSetPut(&set, Bs_ext_j[j]); B_big_offd_j[cnt_offd] = Bs_ext_j[j]; //Bs_ext_j[cnt_offd] = Bs_ext_j[j]; B_ext_offd_data[cnt_offd++] = Bs_ext_data[j]; } else { B_ext_diag_j[cnt_diag] = (HYPRE_Int)(Bs_ext_j[j] - first_col_diag_B); B_ext_diag_data[cnt_diag++] = Bs_ext_data[j]; } } } HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_B); for (i = i_begin; i < i_end; i++) { hypre_UnorderedBigIntSetPut(&set, col_map_offd_B[i]); } } /* omp parallel */ col_map_offd_C = hypre_UnorderedBigIntSetCopyToArray(&set, &num_cols_offd_C); hypre_UnorderedBigIntSetDestroy(&set); hypre_UnorderedBigIntMap col_map_offd_C_inverse; hypre_big_sort_and_create_inverse_map(col_map_offd_C, num_cols_offd_C, &col_map_offd_C, &col_map_offd_C_inverse); HYPRE_Int i, j; #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE for (i = 0; i < num_cols_offd_A; i++) { for (j = B_ext_offd_i[i]; j < B_ext_offd_i[i + 1]; j++) { //B_ext_offd_j[j] = hypre_UnorderedIntMapGet(&col_map_offd_C_inverse, B_ext_offd_j[j]); B_ext_offd_j[j] = hypre_UnorderedBigIntMapGet(&col_map_offd_C_inverse, B_big_offd_j[j]); } } if (num_cols_offd_C) { hypre_UnorderedBigIntMapDestroy(&col_map_offd_C_inverse); } hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST); hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST); if (num_cols_offd_B) { HYPRE_Int i; map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST); #pragma omp parallel private(i) { HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_C); HYPRE_Int cnt; if (i_end > i_begin) { cnt = hypre_BigLowerBound(col_map_offd_B, col_map_offd_B + (HYPRE_BigInt)num_cols_offd_B, col_map_offd_C[i_begin]) - col_map_offd_B; } for (i = i_begin; i < i_end && cnt < num_cols_offd_B; i++) { if (col_map_offd_C[i] == col_map_offd_B[cnt]) { map_B_to_C[cnt++] = i; } } } } if (num_procs > 1) { hypre_CSRMatrixDestroy(Bs_ext); Bs_ext = NULL; } #else /* !HYPRE_CONCURRENT_HOPSCOTCH */ HYPRE_BigInt *temp; #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int size, rest, ii; HYPRE_Int ns, ne; HYPRE_Int i1, i, j; HYPRE_Int my_offd_size, my_diag_size; HYPRE_Int cnt_offd, cnt_diag; HYPRE_Int num_threads = hypre_NumActiveThreads(); size = num_cols_offd_A / num_threads; rest = num_cols_offd_A - size * num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii * size + ii; ne = (ii + 1) * size + ii + 1; } else { ns = ii * size + rest; ne = (ii + 1) * size + rest; } my_diag_size = 0; my_offd_size = 0; 
for (i = ns; i < ne; i++) { B_ext_diag_i[i] = my_diag_size; B_ext_offd_i[i] = my_offd_size; for (j = Bs_ext_i[i]; j < Bs_ext_i[i + 1]; j++) { if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) { my_offd_size++; } else { my_diag_size++; } } } my_diag_array[ii] = my_diag_size; my_offd_array[ii] = my_offd_size; #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (ii) { my_diag_size = my_diag_array[0]; my_offd_size = my_offd_array[0]; for (i1 = 1; i1 < ii; i1++) { my_diag_size += my_diag_array[i1]; my_offd_size += my_offd_array[i1]; } for (i1 = ns; i1 < ne; i1++) { B_ext_diag_i[i1] += my_diag_size; B_ext_offd_i[i1] += my_offd_size; } } else { B_ext_diag_size = 0; B_ext_offd_size = 0; for (i1 = 0; i1 < num_threads; i1++) { B_ext_diag_size += my_diag_array[i1]; B_ext_offd_size += my_offd_array[i1]; } B_ext_diag_i[num_cols_offd_A] = B_ext_diag_size; B_ext_offd_i[num_cols_offd_A] = B_ext_offd_size; if (B_ext_diag_size) { B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST); B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST); } if (B_ext_offd_size) { B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST); B_big_offd_j = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size, HYPRE_MEMORY_HOST); B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size, HYPRE_MEMORY_HOST); } if (B_ext_offd_size || num_cols_offd_B) { temp = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size + num_cols_offd_B, HYPRE_MEMORY_HOST); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif cnt_offd = B_ext_offd_i[ns]; cnt_diag = B_ext_diag_i[ns]; for (i = ns; i < ne; i++) { for (j = Bs_ext_i[i]; j < Bs_ext_i[i + 1]; j++) { if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) { temp[cnt_offd] = Bs_ext_j[j]; B_big_offd_j[cnt_offd] = Bs_ext_j[j]; //Bs_ext_j[cnt_offd] = Bs_ext_j[j]; B_ext_offd_data[cnt_offd++] = Bs_ext_data[j]; } else { B_ext_diag_j[cnt_diag] = (HYPRE_Int)(Bs_ext_j[j] - first_col_diag_B); B_ext_diag_data[cnt_diag++] = Bs_ext_data[j]; } } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (ii == 0) { HYPRE_Int cnt; if (num_procs > 1) { hypre_CSRMatrixDestroy(Bs_ext); Bs_ext = NULL; } cnt = 0; if (B_ext_offd_size || num_cols_offd_B) { cnt = B_ext_offd_size; for (i = 0; i < num_cols_offd_B; i++) { temp[cnt++] = col_map_offd_B[i]; } if (cnt) { HYPRE_BigInt value; hypre_BigQsort0(temp, 0, cnt - 1); num_cols_offd_C = 1; value = temp[0]; for (i = 1; i < cnt; i++) { if (temp[i] > value) { value = temp[i]; temp[num_cols_offd_C++] = value; } } } if (num_cols_offd_C) { col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST); } for (i = 0; i < num_cols_offd_C; i++) { col_map_offd_C[i] = temp[i]; } hypre_TFree(temp, HYPRE_MEMORY_HOST); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (i = ns; i < ne; i++) { for (j = B_ext_offd_i[i]; j < B_ext_offd_i[i + 1]; j++) { B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, B_big_offd_j[j], //B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, Bs_ext_j[j], num_cols_offd_C); } } } /* end parallel region */ hypre_TFree(B_big_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST); hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST); if (num_cols_offd_B) { HYPRE_Int i, cnt; map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST); cnt = 0; for (i = 0; i < num_cols_offd_C; i++) { if (col_map_offd_C[i] == col_map_offd_B[cnt]) { map_B_to_C[cnt++] = i; if (cnt == num_cols_offd_B) { break; } } } } #endif /* 
!HYPRE_CONCURRENT_HOPSCOTCH */ #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime(); #endif HYPRE_ANNOTATE_REGION_BEGIN("%s", "First pass"); hypre_ParMatmul_RowSizes(memory_location_C, &C_diag_i, &C_offd_i, rownnz_A, A_diag_i, A_diag_j, A_offd_i, A_offd_j, B_diag_i, B_diag_j, B_offd_i, B_offd_j, B_ext_diag_i, B_ext_diag_j, B_ext_offd_i, B_ext_offd_j, map_B_to_C, &C_diag_size, &C_offd_size, num_rownnz_A, num_rows_diag_A, num_cols_offd_A, allsquare, num_cols_diag_B, num_cols_offd_B, num_cols_offd_C); HYPRE_ANNOTATE_REGION_END("%s", "First pass"); /*----------------------------------------------------------------------- * Allocate C_diag_data and C_diag_j arrays. * Allocate C_offd_data and C_offd_j arrays. *-----------------------------------------------------------------------*/ last_col_diag_B = first_col_diag_B + (HYPRE_BigInt)num_cols_diag_B - 1; C_diag_data = hypre_CTAlloc(HYPRE_Complex, C_diag_size, memory_location_C); C_diag_j = hypre_CTAlloc(HYPRE_Int, C_diag_size, memory_location_C); if (C_offd_size) { C_offd_data = hypre_CTAlloc(HYPRE_Complex, C_offd_size, memory_location_C); C_offd_j = hypre_CTAlloc(HYPRE_Int, C_offd_size, memory_location_C); } /*----------------------------------------------------------------------- * Second Pass: Fill in C_diag_data and C_diag_j. * Second Pass: Fill in C_offd_data and C_offd_j. *-----------------------------------------------------------------------*/ HYPRE_ANNOTATE_REGION_BEGIN("%s", "Second pass"); #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int *B_marker = NULL; HYPRE_Int ns, ne, size, rest, ii; HYPRE_Int i1, ii1, i2, i3, jj2, jj3; HYPRE_Int jj_row_begin_diag, jj_count_diag; HYPRE_Int jj_row_begin_offd, jj_count_offd; HYPRE_Int num_threads; HYPRE_Complex a_entry; /*, a_b_product;*/ num_threads = hypre_NumActiveThreads(); size = num_rownnz_A / num_threads; rest = num_rownnz_A - size * num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii * size + ii; ne = (ii + 1) * size + ii + 1; } else { ns = ii * size + rest; ne = (ii + 1) * size + rest; } jj_count_diag = C_diag_i[rownnz_A ? rownnz_A[ns] : ns]; jj_count_offd = C_offd_i[rownnz_A ? rownnz_A[ns] : ns]; if (num_cols_diag_B || num_cols_offd_C) { B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B + num_cols_offd_C, HYPRE_MEMORY_HOST); for (i1 = 0; i1 < num_cols_diag_B + num_cols_offd_C; i1++) { B_marker[i1] = -1; } } /*----------------------------------------------------------------------- * Loop over interior c-points. *-----------------------------------------------------------------------*/ for (i1 = ns; i1 < ne; i1++) { jj_row_begin_diag = jj_count_diag; jj_row_begin_offd = jj_count_offd; if (rownnz_A) { ii1 = rownnz_A[i1]; } else { ii1 = i1; /*-------------------------------------------------------------------- * Create diagonal entry, C_{i1,i1} *--------------------------------------------------------------------*/ if (allsquare) { B_marker[i1] = jj_count_diag; C_diag_data[jj_count_diag] = zero; C_diag_j[jj_count_diag] = i1; jj_count_diag++; } } /*----------------------------------------------------------------- * Loop over entries in row i1 of A_offd. *-----------------------------------------------------------------*/ if (num_cols_offd_A) { for (jj2 = A_offd_i[ii1]; jj2 < A_offd_i[ii1 + 1]; jj2++) { i2 = A_offd_j[jj2]; a_entry = A_offd_data[jj2]; /*----------------------------------------------------------- * Loop over entries in row i2 of B_ext. 
*-----------------------------------------------------------*/ for (jj3 = B_ext_offd_i[i2]; jj3 < B_ext_offd_i[i2 + 1]; jj3++) { i3 = num_cols_diag_B + B_ext_offd_j[jj3]; /*-------------------------------------------------------- * Check B_marker to see that C_{ii1,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_offd) { B_marker[i3] = jj_count_offd; C_offd_data[jj_count_offd] = a_entry * B_ext_offd_data[jj3]; C_offd_j[jj_count_offd] = i3 - num_cols_diag_B; jj_count_offd++; } else { C_offd_data[B_marker[i3]] += a_entry * B_ext_offd_data[jj3]; } } for (jj3 = B_ext_diag_i[i2]; jj3 < B_ext_diag_i[i2 + 1]; jj3++) { i3 = B_ext_diag_j[jj3]; if (B_marker[i3] < jj_row_begin_diag) { B_marker[i3] = jj_count_diag; C_diag_data[jj_count_diag] = a_entry * B_ext_diag_data[jj3]; C_diag_j[jj_count_diag] = i3; jj_count_diag++; } else { C_diag_data[B_marker[i3]] += a_entry * B_ext_diag_data[jj3]; } } } } /*----------------------------------------------------------------- * Loop over entries in row ii1 of A_diag. *-----------------------------------------------------------------*/ for (jj2 = A_diag_i[ii1]; jj2 < A_diag_i[ii1 + 1]; jj2++) { i2 = A_diag_j[jj2]; a_entry = A_diag_data[jj2]; /*----------------------------------------------------------- * Loop over entries in row i2 of B_diag. *-----------------------------------------------------------*/ for (jj3 = B_diag_i[i2]; jj3 < B_diag_i[i2 + 1]; jj3++) { i3 = B_diag_j[jj3]; /*-------------------------------------------------------- * Check B_marker to see that C_{ii1,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_diag) { B_marker[i3] = jj_count_diag; C_diag_data[jj_count_diag] = a_entry * B_diag_data[jj3]; C_diag_j[jj_count_diag] = i3; jj_count_diag++; } else { C_diag_data[B_marker[i3]] += a_entry * B_diag_data[jj3]; } } if (num_cols_offd_B) { for (jj3 = B_offd_i[i2]; jj3 < B_offd_i[i2 + 1]; jj3++) { i3 = num_cols_diag_B + map_B_to_C[B_offd_j[jj3]]; /*-------------------------------------------------------- * Check B_marker to see that C_{ii1,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. 
*--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_offd) { B_marker[i3] = jj_count_offd; C_offd_data[jj_count_offd] = a_entry * B_offd_data[jj3]; C_offd_j[jj_count_offd] = i3 - num_cols_diag_B; jj_count_offd++; } else { C_offd_data[B_marker[i3]] += a_entry * B_offd_data[jj3]; } } } } } hypre_TFree(B_marker, HYPRE_MEMORY_HOST); } /*end parallel region */ HYPRE_ANNOTATE_REGION_END("%s", "Second pass"); C = hypre_ParCSRMatrixCreate(comm, nrows_A, ncols_B, row_starts_A, col_starts_B, num_cols_offd_C, C_diag_size, C_offd_size); C_diag = hypre_ParCSRMatrixDiag(C); hypre_CSRMatrixData(C_diag) = C_diag_data; hypre_CSRMatrixI(C_diag) = C_diag_i; hypre_CSRMatrixJ(C_diag) = C_diag_j; hypre_CSRMatrixSetRownnz(C_diag); C_offd = hypre_ParCSRMatrixOffd(C); hypre_CSRMatrixI(C_offd) = C_offd_i; hypre_ParCSRMatrixOffd(C) = C_offd; if (num_cols_offd_C) { hypre_CSRMatrixData(C_offd) = C_offd_data; hypre_CSRMatrixJ(C_offd) = C_offd_j; hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C; } hypre_CSRMatrixSetRownnz(C_offd); hypre_CSRMatrixMemoryLocation(C_diag) = memory_location_C; hypre_CSRMatrixMemoryLocation(C_offd) = memory_location_C; /*----------------------------------------------------------------------- * Free various arrays *-----------------------------------------------------------------------*/ hypre_TFree(B_ext_diag_i, HYPRE_MEMORY_HOST); if (B_ext_diag_size) { hypre_TFree(B_ext_diag_j, HYPRE_MEMORY_HOST); hypre_TFree(B_ext_diag_data, HYPRE_MEMORY_HOST); } hypre_TFree(B_ext_offd_i, HYPRE_MEMORY_HOST); if (B_ext_offd_size) { hypre_TFree(B_ext_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(B_ext_offd_data, HYPRE_MEMORY_HOST); } if (num_cols_offd_B) { hypre_TFree(map_B_to_C, HYPRE_MEMORY_HOST); } hypre_TFree(rownnz_A, HYPRE_MEMORY_HOST); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MATMUL] += hypre_MPI_Wtime(); #endif HYPRE_ANNOTATE_FUNC_END; return C; } /* The following function was formerly part of hypre_ParCSRMatrixExtractBExt but the code was removed so it can be used for a corresponding function for Boolean matrices JSP: to allow communication overlapping, it returns comm_handle_idx and comm_handle_data. Before accessing B, they should be destroyed (including send_data contained in the comm_handle). 
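
   Illustrative completion sketch (an assumption about typical use; the
   non-overlapping wrapper hypre_ParCSRMatrixExtractBExt_Arrays further below
   follows exactly this pattern): after issuing the extraction and overlapping
   any independent local work,

      send_idx = (HYPRE_Int *) comm_handle_idx->send_data;
      hypre_ParCSRCommHandleDestroy(comm_handle_idx);      <- completes *pB_ext_j
      hypre_TFree(send_idx, HYPRE_MEMORY_HOST);
      if (data)
      {
         send_data = (HYPRE_Real *) comm_handle_data->send_data;
         hypre_ParCSRCommHandleDestroy(comm_handle_data);  <- completes *pB_ext_data
         hypre_TFree(send_data, HYPRE_MEMORY_HOST);
      }

   and only then read the B_ext arrays.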
*/ void hypre_ParCSRMatrixExtractBExt_Arrays_Overlap( HYPRE_Int ** pB_ext_i, HYPRE_BigInt ** pB_ext_j, HYPRE_Complex ** pB_ext_data, HYPRE_BigInt ** pB_ext_row_map, HYPRE_Int * num_nonzeros, HYPRE_Int data, HYPRE_Int find_row_map, MPI_Comm comm, hypre_ParCSRCommPkg * comm_pkg, HYPRE_Int num_cols_B, HYPRE_Int num_recvs, HYPRE_Int num_sends, HYPRE_BigInt first_col_diag, HYPRE_BigInt * row_starts, HYPRE_Int * recv_vec_starts, HYPRE_Int * send_map_starts, HYPRE_Int * send_map_elmts, HYPRE_Int * diag_i, HYPRE_Int * diag_j, HYPRE_Int * offd_i, HYPRE_Int * offd_j, HYPRE_BigInt * col_map_offd, HYPRE_Real * diag_data, HYPRE_Real * offd_data, hypre_ParCSRCommHandle **comm_handle_idx, hypre_ParCSRCommHandle **comm_handle_data, HYPRE_Int *CF_marker, HYPRE_Int *CF_marker_offd, HYPRE_Int skip_fine, /* 1 if only coarse points are needed */ HYPRE_Int skip_same_sign /* 1 if only points that have the same sign are needed */ // extended based long range interpolation: skip_fine = 1, skip_same_sign = 0 for S matrix, skip_fine = 1, skip_same_sign = 1 for A matrix // other interpolation: skip_fine = 0, skip_same_sign = 0 ) { hypre_ParCSRCommHandle *comm_handle, *row_map_comm_handle = NULL; hypre_ParCSRCommPkg *tmp_comm_pkg; HYPRE_Int *B_int_i; HYPRE_BigInt *B_int_j; HYPRE_Int *B_ext_i; HYPRE_BigInt * B_ext_j; HYPRE_Complex * B_ext_data; HYPRE_Complex * B_int_data; HYPRE_BigInt * B_int_row_map; HYPRE_BigInt * B_ext_row_map; HYPRE_Int num_procs, my_id; HYPRE_Int *jdata_recv_vec_starts; HYPRE_Int *jdata_send_map_starts; HYPRE_Int i, j, k; HYPRE_Int start_index; /*HYPRE_Int jrow;*/ HYPRE_Int num_rows_B_ext; HYPRE_Int *prefix_sum_workspace; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); HYPRE_BigInt first_row_index = row_starts[0]; num_rows_B_ext = recv_vec_starts[num_recvs]; if ( num_rows_B_ext < 0 ) /* no B_ext, no communication */ { *pB_ext_i = NULL; *pB_ext_j = NULL; if ( data ) { *pB_ext_data = NULL; } if ( find_row_map ) { *pB_ext_row_map = NULL; } *num_nonzeros = 0; return; }; B_int_i = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends] + 1, HYPRE_MEMORY_HOST); B_ext_i = hypre_CTAlloc(HYPRE_Int, num_rows_B_ext + 1, HYPRE_MEMORY_HOST); *pB_ext_i = B_ext_i; if ( find_row_map ) { B_int_row_map = hypre_CTAlloc( HYPRE_BigInt, send_map_starts[num_sends] + 1, HYPRE_MEMORY_HOST); B_ext_row_map = hypre_CTAlloc( HYPRE_BigInt, num_rows_B_ext + 1, HYPRE_MEMORY_HOST); *pB_ext_row_map = B_ext_row_map; }; /*-------------------------------------------------------------------------- * generate B_int_i through adding number of row-elements of offd and diag * for corresponding rows. 
B_int_i[j+1] contains the number of elements of * a row j (which is determined through send_map_elmts) *--------------------------------------------------------------------------*/ jdata_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST); jdata_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST); jdata_send_map_starts[0] = B_int_i[0] = 0; /*HYPRE_Int prefix_sum_workspace[(hypre_NumThreads() + 1)*num_sends];*/ prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, (hypre_NumThreads() + 1) * num_sends, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,j,k) #endif { /*HYPRE_Int counts[num_sends];*/ HYPRE_Int *counts; counts = hypre_TAlloc(HYPRE_Int, num_sends, HYPRE_MEMORY_HOST); for (i = 0; i < num_sends; i++) { HYPRE_Int j_begin, j_end; hypre_GetSimpleThreadPartition(&j_begin, &j_end, send_map_starts[i + 1] - send_map_starts[i]); j_begin += send_map_starts[i]; j_end += send_map_starts[i]; HYPRE_Int count = 0; if (skip_fine && skip_same_sign) { for (j = j_begin; j < j_end; j++) { HYPRE_Int jrow = send_map_elmts[j]; HYPRE_Int len = 0; if (diag_data[diag_i[jrow]] >= 0) { for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++) { if (diag_data[k] < 0 && CF_marker[diag_j[k]] >= 0) { len++; } } for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++) { if (offd_data[k] < 0) { len++; } } } else { for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++) { if (diag_data[k] > 0 && CF_marker[diag_j[k]] >= 0) { len++; } } for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++) { if (offd_data[k] > 0) { len++; } } } B_int_i[j + 1] = len; count += len; } } else if (skip_fine) { for (j = j_begin; j < j_end; j++) { HYPRE_Int jrow = send_map_elmts[j]; HYPRE_Int len = 0; for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++) { if (CF_marker[diag_j[k]] >= 0) { len++; } } for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++) { if (CF_marker_offd[offd_j[k]] >= 0) { len++; } } B_int_i[j + 1] = len; count += len; } } else { for (j = j_begin; j < j_end; j++) { HYPRE_Int jrow = send_map_elmts[j]; HYPRE_Int len = diag_i[jrow + 1] - diag_i[jrow]; len += offd_i[jrow + 1] - offd_i[jrow]; B_int_i[j + 1] = len; count += len; } } if (find_row_map) { for (j = j_begin; j < j_end; j++) { HYPRE_Int jrow = send_map_elmts[j]; B_int_row_map[j] = (HYPRE_BigInt)jrow + first_row_index; } } counts[i] = count; } hypre_prefix_sum_multiple(counts, jdata_send_map_starts + 1, num_sends, prefix_sum_workspace); #ifdef HYPRE_USING_OPENMP #pragma omp master #endif { for (i = 1; i < num_sends; i++) { jdata_send_map_starts[i + 1] += jdata_send_map_starts[i]; } /*-------------------------------------------------------------------------- * initialize communication *--------------------------------------------------------------------------*/ comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, &B_int_i[1], &(B_ext_i[1]) ); if ( find_row_map ) { /* scatter/gather B_int row numbers to form array of B_ext row numbers */ row_map_comm_handle = hypre_ParCSRCommHandleCreate (21, comm_pkg, B_int_row_map, B_ext_row_map ); } B_int_j = hypre_TAlloc(HYPRE_BigInt, jdata_send_map_starts[num_sends], HYPRE_MEMORY_HOST); if (data) { B_int_data = hypre_TAlloc(HYPRE_Complex, jdata_send_map_starts[num_sends], HYPRE_MEMORY_HOST); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (i = 0; i < num_sends; i++) { HYPRE_Int j_begin, j_end; hypre_GetSimpleThreadPartition(&j_begin, &j_end, send_map_starts[i + 1] - send_map_starts[i]); j_begin += send_map_starts[i]; j_end += send_map_starts[i]; HYPRE_Int count = 
counts[i] + jdata_send_map_starts[i]; if (data) { if (skip_same_sign && skip_fine) { for (j = j_begin; j < j_end; j++) { HYPRE_Int jrow = send_map_elmts[j]; /*HYPRE_Int count_begin = count;*/ if (diag_data[diag_i[jrow]] >= 0) { for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++) { if (diag_data[k] < 0 && CF_marker[diag_j[k]] >= 0) { B_int_j[count] = (HYPRE_BigInt)diag_j[k] + first_col_diag; B_int_data[count] = diag_data[k]; count++; } } for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++) { HYPRE_Int c = offd_j[k]; HYPRE_BigInt c_global = col_map_offd[c]; if (offd_data[k] < 0) { B_int_j[count] = c_global; B_int_data[count] = offd_data[k]; count++; } } } else { for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++) { if (diag_data[k] > 0 && CF_marker[diag_j[k]] >= 0) { B_int_j[count] = (HYPRE_BigInt)diag_j[k] + first_col_diag; B_int_data[count] = diag_data[k]; count++; } } for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++) { HYPRE_Int c = offd_j[k]; HYPRE_BigInt c_global = col_map_offd[c]; if (offd_data[k] > 0) { B_int_j[count] = c_global; B_int_data[count] = offd_data[k]; count++; } } } } } else { for (j = j_begin; j < j_end; ++j) { HYPRE_Int jrow = send_map_elmts[j]; for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++) { B_int_j[count] = (HYPRE_BigInt)diag_j[k] + first_col_diag; B_int_data[count] = diag_data[k]; count++; } for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++) { B_int_j[count] = col_map_offd[offd_j[k]]; B_int_data[count] = offd_data[k]; count++; } } } } // data else { if (skip_fine) { for (j = j_begin; j < j_end; j++) { HYPRE_Int jrow = send_map_elmts[j]; for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++) { if (CF_marker[diag_j[k]] >= 0) { B_int_j[count] = (HYPRE_BigInt)diag_j[k] + first_col_diag; count++; } } for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++) { if (CF_marker_offd[offd_j[k]] >= 0) { B_int_j[count] = col_map_offd[offd_j[k]]; count++; } } } } else { for (j = j_begin; j < j_end; ++j) { HYPRE_Int jrow = send_map_elmts[j]; for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++) { B_int_j[count] = (HYPRE_BigInt)diag_j[k] + first_col_diag; count++; } for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++) { B_int_j[count] = col_map_offd[offd_j[k]]; count++; } } } } // !data } /* for each send target */ hypre_TFree(counts, HYPRE_MEMORY_HOST); } /* omp parallel. JSP: this takes most of time in this function */ hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST); tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm; hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgSendProcs(comm_pkg); hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgRecvProcs(comm_pkg); hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = jdata_send_map_starts; hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; /*-------------------------------------------------------------------------- * after communication exchange B_ext_i[j+1] contains the number of elements * of a row j ! 
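 * (worked example, added for illustration: two received external rows with
 *  3 and 2 nonzeros arrive as counts B_ext_i = {0, 3, 2}; the running sum
 *  below turns this into row offsets {0, 3, 5}, so *num_nonzeros = 5)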
* evaluate B_ext_i and compute *num_nonzeros for B_ext *--------------------------------------------------------------------------*/ for (i = 0; i < num_recvs; i++) { for (j = recv_vec_starts[i]; j < recv_vec_starts[i + 1]; j++) { B_ext_i[j + 1] += B_ext_i[j]; } } *num_nonzeros = B_ext_i[num_rows_B_ext]; *pB_ext_j = hypre_TAlloc(HYPRE_BigInt, *num_nonzeros, HYPRE_MEMORY_HOST); B_ext_j = *pB_ext_j; if (data) { *pB_ext_data = hypre_TAlloc(HYPRE_Complex, *num_nonzeros, HYPRE_MEMORY_HOST); B_ext_data = *pB_ext_data; } for (i = 0; i < num_recvs; i++) { start_index = B_ext_i[recv_vec_starts[i]]; *num_nonzeros = B_ext_i[recv_vec_starts[i + 1]] - start_index; jdata_recv_vec_starts[i + 1] = B_ext_i[recv_vec_starts[i + 1]]; } hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = jdata_recv_vec_starts; *comm_handle_idx = hypre_ParCSRCommHandleCreate(21, tmp_comm_pkg, B_int_j, B_ext_j); if (data) { *comm_handle_data = hypre_ParCSRCommHandleCreate(1, tmp_comm_pkg, B_int_data, B_ext_data); } if (row_map_comm_handle) { hypre_ParCSRCommHandleDestroy(row_map_comm_handle); row_map_comm_handle = NULL; } hypre_TFree(jdata_send_map_starts, HYPRE_MEMORY_HOST); hypre_TFree(jdata_recv_vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST); hypre_TFree(B_int_i, HYPRE_MEMORY_HOST); if ( find_row_map ) { hypre_TFree(B_int_row_map, HYPRE_MEMORY_HOST); } /* end generic part */ } void hypre_ParCSRMatrixExtractBExt_Arrays( HYPRE_Int ** pB_ext_i, HYPRE_BigInt ** pB_ext_j, HYPRE_Complex ** pB_ext_data, HYPRE_BigInt ** pB_ext_row_map, HYPRE_Int * num_nonzeros, HYPRE_Int data, HYPRE_Int find_row_map, MPI_Comm comm, hypre_ParCSRCommPkg * comm_pkg, HYPRE_Int num_cols_B, HYPRE_Int num_recvs, HYPRE_Int num_sends, HYPRE_BigInt first_col_diag, HYPRE_BigInt * row_starts, HYPRE_Int * recv_vec_starts, HYPRE_Int * send_map_starts, HYPRE_Int * send_map_elmts, HYPRE_Int * diag_i, HYPRE_Int * diag_j, HYPRE_Int * offd_i, HYPRE_Int * offd_j, HYPRE_BigInt * col_map_offd, HYPRE_Real * diag_data, HYPRE_Real * offd_data ) { hypre_ParCSRCommHandle *comm_handle_idx, *comm_handle_data; hypre_ParCSRMatrixExtractBExt_Arrays_Overlap( pB_ext_i, pB_ext_j, pB_ext_data, pB_ext_row_map, num_nonzeros, data, find_row_map, comm, comm_pkg, num_cols_B, num_recvs, num_sends, first_col_diag, row_starts, recv_vec_starts, send_map_starts, send_map_elmts, diag_i, diag_j, offd_i, offd_j, col_map_offd, diag_data, offd_data, &comm_handle_idx, &comm_handle_data, NULL, NULL, 0, 0); HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_idx->send_data; hypre_ParCSRCommHandleDestroy(comm_handle_idx); hypre_TFree(send_idx, HYPRE_MEMORY_HOST); if (data) { HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_data->send_data; hypre_ParCSRCommHandleDestroy(comm_handle_data); hypre_TFree(send_data, HYPRE_MEMORY_HOST); } } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixExtractBExt : extracts rows from B which are located on * other processors and needed for multiplication with A locally. The rows * are returned as CSRMatrix. 
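 *
 * A minimal usage sketch (illustrative only; the local variable names below
 * are assumptions, not part of this file):
 *
 *    hypre_CSRMatrix *B_ext  = hypre_ParCSRMatrixExtractBExt(B, A, 1);
 *    HYPRE_Int       *Bext_i = hypre_CSRMatrixI(B_ext);      row pointers
 *    HYPRE_BigInt    *Bext_j = hypre_CSRMatrixBigJ(B_ext);   global column indices
 *    HYPRE_Complex   *Bext_a = hypre_CSRMatrixData(B_ext);   values (want_data = 1)
 *    ...
 *    hypre_CSRMatrixDestroy(B_ext);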
*--------------------------------------------------------------------------*/ hypre_CSRMatrix * hypre_ParCSRMatrixExtractBExt_Overlap( hypre_ParCSRMatrix *B, hypre_ParCSRMatrix *A, HYPRE_Int data, hypre_ParCSRCommHandle **comm_handle_idx, hypre_ParCSRCommHandle **comm_handle_data, HYPRE_Int *CF_marker, HYPRE_Int *CF_marker_offd, HYPRE_Int skip_fine, HYPRE_Int skip_same_sign ) { MPI_Comm comm = hypre_ParCSRMatrixComm(B); HYPRE_BigInt first_col_diag = hypre_ParCSRMatrixFirstColDiag(B); /*HYPRE_Int first_row_index = hypre_ParCSRMatrixFirstRowIndex(B);*/ HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(B); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int num_recvs; HYPRE_Int *recv_vec_starts; HYPRE_Int num_sends; HYPRE_Int *send_map_starts; HYPRE_Int *send_map_elmts; hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(B); HYPRE_Int *diag_i = hypre_CSRMatrixI(diag); HYPRE_Int *diag_j = hypre_CSRMatrixJ(diag); HYPRE_Real *diag_data = hypre_CSRMatrixData(diag); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(B); HYPRE_Int *offd_i = hypre_CSRMatrixI(offd); HYPRE_Int *offd_j = hypre_CSRMatrixJ(offd); HYPRE_Real *offd_data = hypre_CSRMatrixData(offd); HYPRE_Int num_cols_B, num_nonzeros; HYPRE_Int num_rows_B_ext; hypre_CSRMatrix *B_ext; HYPRE_Int *B_ext_i; HYPRE_BigInt *B_ext_j; HYPRE_Complex *B_ext_data; HYPRE_BigInt *idummy; /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!hypre_ParCSRMatrixCommPkg(A)) { hypre_MatvecCommPkgCreate(A); } comm_pkg = hypre_ParCSRMatrixCommPkg(A); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg); num_cols_B = hypre_ParCSRMatrixGlobalNumCols(B); num_rows_B_ext = recv_vec_starts[num_recvs]; hypre_ParCSRMatrixExtractBExt_Arrays_Overlap ( &B_ext_i, &B_ext_j, &B_ext_data, &idummy, &num_nonzeros, data, 0, comm, comm_pkg, num_cols_B, num_recvs, num_sends, first_col_diag, B->row_starts, recv_vec_starts, send_map_starts, send_map_elmts, diag_i, diag_j, offd_i, offd_j, col_map_offd, diag_data, offd_data, comm_handle_idx, comm_handle_data, CF_marker, CF_marker_offd, skip_fine, skip_same_sign ); B_ext = hypre_CSRMatrixCreate(num_rows_B_ext, num_cols_B, num_nonzeros); hypre_CSRMatrixMemoryLocation(B_ext) = HYPRE_MEMORY_HOST; hypre_CSRMatrixI(B_ext) = B_ext_i; hypre_CSRMatrixBigJ(B_ext) = B_ext_j; if (data) { hypre_CSRMatrixData(B_ext) = B_ext_data; } return B_ext; } hypre_CSRMatrix * hypre_ParCSRMatrixExtractBExt( hypre_ParCSRMatrix *B, hypre_ParCSRMatrix *A, HYPRE_Int want_data ) { #if 0 hypre_ParCSRCommHandle *comm_handle_idx, *comm_handle_data; hypre_CSRMatrix *B_ext = hypre_ParCSRMatrixExtractBExt_Overlap(B, A, want_data, &comm_handle_idx, &comm_handle_data, NULL, NULL, 0, 0); HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_idx->send_data; hypre_ParCSRCommHandleDestroy(comm_handle_idx); hypre_TFree(send_idx, HYPRE_MEMORY_HOST); if (want_data) { HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_data->send_data; hypre_ParCSRCommHandleDestroy(comm_handle_data); hypre_TFree(send_data, HYPRE_MEMORY_HOST); } #else hypre_assert( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(B)) == 
hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(B)) ); hypre_CSRMatrix *B_ext; void *request; if (!hypre_ParCSRMatrixCommPkg(A)) { hypre_MatvecCommPkgCreate(A); } hypre_ParcsrGetExternalRowsInit(B, hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)), hypre_ParCSRMatrixColMapOffd(A), hypre_ParCSRMatrixCommPkg(A), want_data, &request); B_ext = hypre_ParcsrGetExternalRowsWait(request); #endif return B_ext; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixTransposeHost *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixTransposeHost( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **AT_ptr, HYPRE_Int data ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int num_cols = hypre_ParCSRMatrixNumCols(A); HYPRE_BigInt first_row_index = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int num_sends, num_recvs, num_cols_offd_AT; HYPRE_Int i, j, k, index, counter, j_row; HYPRE_BigInt value; hypre_ParCSRMatrix *AT; hypre_CSRMatrix *AT_diag; hypre_CSRMatrix *AT_offd; hypre_CSRMatrix *AT_tmp; HYPRE_BigInt first_row_index_AT, first_col_diag_AT; HYPRE_Int local_num_rows_AT, local_num_cols_AT; HYPRE_Int *AT_tmp_i; HYPRE_Int *AT_tmp_j; HYPRE_BigInt *AT_big_j = NULL; HYPRE_Complex *AT_tmp_data; HYPRE_Int *AT_buf_i; HYPRE_BigInt *AT_buf_j; HYPRE_Complex *AT_buf_data; HYPRE_Int *AT_offd_i; HYPRE_Int *AT_offd_j; HYPRE_Complex *AT_offd_data; HYPRE_BigInt *col_map_offd_AT; HYPRE_BigInt row_starts_AT[2]; HYPRE_BigInt col_starts_AT[2]; HYPRE_Int num_procs, my_id; HYPRE_Int *recv_procs, *send_procs; HYPRE_Int *recv_vec_starts; HYPRE_Int *send_map_starts; HYPRE_Int *send_map_elmts; HYPRE_Int *tmp_recv_vec_starts; HYPRE_Int *tmp_send_map_starts; hypre_ParCSRCommPkg *tmp_comm_pkg; hypre_ParCSRCommHandle *comm_handle; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); num_cols_offd_AT = 0; counter = 0; AT_offd_j = NULL; AT_offd_data = NULL; col_map_offd_AT = NULL; HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A); /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } if (num_procs > 1) { hypre_CSRMatrixTranspose (A_offd, &AT_tmp, data); AT_tmp_i = hypre_CSRMatrixI(AT_tmp); AT_tmp_j = hypre_CSRMatrixJ(AT_tmp); if (data) { AT_tmp_data = hypre_CSRMatrixData(AT_tmp); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg); AT_buf_i = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends], HYPRE_MEMORY_HOST); if (AT_tmp_i[num_cols_offd]) { AT_big_j = hypre_CTAlloc(HYPRE_BigInt, AT_tmp_i[num_cols_offd], HYPRE_MEMORY_HOST); } for (i = 0; i < 
AT_tmp_i[num_cols_offd]; i++) { //AT_tmp_j[i] += first_row_index; AT_big_j[i] = (HYPRE_BigInt)AT_tmp_j[i] + first_row_index; } for (i = 0; i < num_cols_offd; i++) { AT_tmp_i[i] = AT_tmp_i[i + 1] - AT_tmp_i[i]; } comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg, AT_tmp_i, AT_buf_i); } hypre_CSRMatrixTranspose(A_diag, &AT_diag, data); AT_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols + 1, memory_location); if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; tmp_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST); tmp_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST); tmp_send_map_starts[0] = send_map_starts[0]; for (i = 0; i < num_sends; i++) { tmp_send_map_starts[i + 1] = tmp_send_map_starts[i]; for (j = send_map_starts[i]; j < send_map_starts[i + 1]; j++) { tmp_send_map_starts[i + 1] += AT_buf_i[j]; AT_offd_i[send_map_elmts[j] + 1] += AT_buf_i[j]; } } for (i = 0; i < num_cols; i++) { AT_offd_i[i + 1] += AT_offd_i[i]; } tmp_recv_vec_starts[0] = recv_vec_starts[0]; for (i = 0; i < num_recvs; i++) { tmp_recv_vec_starts[i + 1] = tmp_recv_vec_starts[i]; for (j = recv_vec_starts[i]; j < recv_vec_starts[i + 1]; j++) { tmp_recv_vec_starts[i + 1] += AT_tmp_i[j]; } } tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm; hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = recv_procs; hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = send_procs; hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = tmp_recv_vec_starts; hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = tmp_send_map_starts; AT_buf_j = hypre_CTAlloc(HYPRE_BigInt, tmp_send_map_starts[num_sends], HYPRE_MEMORY_HOST); comm_handle = hypre_ParCSRCommHandleCreate(22, tmp_comm_pkg, AT_big_j, AT_buf_j); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; hypre_TFree(AT_big_j, HYPRE_MEMORY_HOST); if (data) { AT_buf_data = hypre_CTAlloc(HYPRE_Complex, tmp_send_map_starts[num_sends], HYPRE_MEMORY_HOST); comm_handle = hypre_ParCSRCommHandleCreate(2, tmp_comm_pkg, AT_tmp_data, AT_buf_data); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } hypre_TFree(tmp_recv_vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(tmp_send_map_starts, HYPRE_MEMORY_HOST); hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST); hypre_CSRMatrixDestroy(AT_tmp); if (AT_offd_i[num_cols]) { AT_offd_j = hypre_CTAlloc(HYPRE_Int, AT_offd_i[num_cols], memory_location); AT_big_j = hypre_CTAlloc(HYPRE_BigInt, AT_offd_i[num_cols], HYPRE_MEMORY_HOST); if (data) { AT_offd_data = hypre_CTAlloc(HYPRE_Complex, AT_offd_i[num_cols], memory_location); } } else { AT_offd_j = NULL; AT_offd_data = NULL; } counter = 0; for (i = 0; i < num_sends; i++) { for (j = send_map_starts[i]; j < send_map_starts[i + 1]; j++) { j_row = send_map_elmts[j]; index = AT_offd_i[j_row]; for (k = 0; k < AT_buf_i[j]; k++) { if (data) { AT_offd_data[index] = AT_buf_data[counter]; } AT_big_j[index++] = AT_buf_j[counter++]; } AT_offd_i[j_row] = index; } } for (i = num_cols; i > 0; i--) { AT_offd_i[i] = AT_offd_i[i - 1]; } AT_offd_i[0] = 0; if (counter) { hypre_BigQsort0(AT_buf_j, 0, counter - 1); num_cols_offd_AT = 1; value = AT_buf_j[0]; for (i = 1; i < counter; i++) { if (value < AT_buf_j[i]) { AT_buf_j[num_cols_offd_AT++] = AT_buf_j[i]; value = AT_buf_j[i]; } } } if (num_cols_offd_AT) { col_map_offd_AT = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_AT, 
HYPRE_MEMORY_HOST); } else { col_map_offd_AT = NULL; } for (i = 0; i < num_cols_offd_AT; i++) { col_map_offd_AT[i] = AT_buf_j[i]; } hypre_TFree(AT_buf_i, HYPRE_MEMORY_HOST); hypre_TFree(AT_buf_j, HYPRE_MEMORY_HOST); if (data) { hypre_TFree(AT_buf_data, HYPRE_MEMORY_HOST); } for (i = 0; i < counter; i++) { AT_offd_j[i] = hypre_BigBinarySearch(col_map_offd_AT, AT_big_j[i], num_cols_offd_AT); } hypre_TFree(AT_big_j, HYPRE_MEMORY_HOST); } AT_offd = hypre_CSRMatrixCreate(num_cols, num_cols_offd_AT, counter); hypre_CSRMatrixMemoryLocation(AT_offd) = memory_location; hypre_CSRMatrixI(AT_offd) = AT_offd_i; hypre_CSRMatrixJ(AT_offd) = AT_offd_j; hypre_CSRMatrixData(AT_offd) = AT_offd_data; for (i = 0; i < 2; i++) { row_starts_AT[i] = col_starts[i]; col_starts_AT[i] = row_starts[i]; } first_row_index_AT = row_starts_AT[0]; first_col_diag_AT = col_starts_AT[0]; local_num_rows_AT = (HYPRE_Int)(row_starts_AT[1] - first_row_index_AT ); local_num_cols_AT = (HYPRE_Int)(col_starts_AT[1] - first_col_diag_AT); AT = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixComm(AT) = comm; hypre_ParCSRMatrixDiag(AT) = AT_diag; hypre_ParCSRMatrixOffd(AT) = AT_offd; hypre_ParCSRMatrixGlobalNumRows(AT) = hypre_ParCSRMatrixGlobalNumCols(A); hypre_ParCSRMatrixGlobalNumCols(AT) = hypre_ParCSRMatrixGlobalNumRows(A); hypre_ParCSRMatrixRowStarts(AT)[0] = row_starts_AT[0]; hypre_ParCSRMatrixRowStarts(AT)[1] = row_starts_AT[1]; hypre_ParCSRMatrixColStarts(AT)[0] = col_starts_AT[0]; hypre_ParCSRMatrixColStarts(AT)[1] = col_starts_AT[1]; hypre_ParCSRMatrixColMapOffd(AT) = col_map_offd_AT; hypre_ParCSRMatrixFirstRowIndex(AT) = first_row_index_AT; hypre_ParCSRMatrixFirstColDiag(AT) = first_col_diag_AT; hypre_ParCSRMatrixLastRowIndex(AT) = first_row_index_AT + local_num_rows_AT - 1; hypre_ParCSRMatrixLastColDiag(AT) = first_col_diag_AT + local_num_cols_AT - 1; hypre_ParCSRMatrixOwnsData(AT) = 1; hypre_ParCSRMatrixCommPkg(AT) = NULL; hypre_ParCSRMatrixCommPkgT(AT) = NULL; hypre_ParCSRMatrixRowindices(AT) = NULL; hypre_ParCSRMatrixRowvalues(AT) = NULL; hypre_ParCSRMatrixGetrowactive(AT) = 0; hypre_ParCSRMatrixOwnsAssumedPartition(AT) = 1; *AT_ptr = AT; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixTranspose *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixTranspose( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **AT_ptr, HYPRE_Int data ) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_GpuProfilingPushRange("ParCSRMatrixTranspose"); #endif #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) ); if (exec == HYPRE_EXEC_DEVICE) { hypre_ParCSRMatrixTransposeDevice(A, AT_ptr, data); } else #endif { hypre_ParCSRMatrixTransposeHost(A, AT_ptr, data); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_GpuProfilingPopRange(); #endif return hypre_error_flag; } /* ----------------------------------------------------------------------------- * generate a parallel spanning tree (for Maxwell Equation) * G_csr is the node to edge connectivity matrix * ----------------------------------------------------------------------------- */ void hypre_ParCSRMatrixGenSpanningTree( hypre_ParCSRMatrix *G_csr, HYPRE_Int **indices, HYPRE_Int G_type ) { HYPRE_BigInt nrows_G, ncols_G; HYPRE_Int *G_diag_i, *G_diag_j, *GT_diag_mat, i, j, k, edge; HYPRE_Int *nodes_marked, *edges_marked, *queue, 
queue_tail, queue_head, node; HYPRE_Int mypid, nprocs, n_children, *children, nsends, *send_procs, *recv_cnts; HYPRE_Int nrecvs, *recv_procs, n_proc_array, *proc_array, *pgraph_i, *pgraph_j; HYPRE_Int parent, proc, proc2, node2, found, *t_indices, tree_size, *T_diag_i; HYPRE_Int *T_diag_j, *counts, offset; MPI_Comm comm; hypre_ParCSRCommPkg *comm_pkg; hypre_CSRMatrix *G_diag; /* fetch G matrix (G_type = 0 ==> node to edge) */ if (G_type == 0) { nrows_G = hypre_ParCSRMatrixGlobalNumRows(G_csr); ncols_G = hypre_ParCSRMatrixGlobalNumCols(G_csr); G_diag = hypre_ParCSRMatrixDiag(G_csr); G_diag_i = hypre_CSRMatrixI(G_diag); G_diag_j = hypre_CSRMatrixJ(G_diag); } else { nrows_G = hypre_ParCSRMatrixGlobalNumCols(G_csr); ncols_G = hypre_ParCSRMatrixGlobalNumRows(G_csr); G_diag = hypre_ParCSRMatrixDiag(G_csr); T_diag_i = hypre_CSRMatrixI(G_diag); T_diag_j = hypre_CSRMatrixJ(G_diag); counts = hypre_TAlloc(HYPRE_Int, nrows_G, HYPRE_MEMORY_HOST); for (i = 0; i < nrows_G; i++) { counts[i] = 0; } for (i = 0; i < T_diag_i[ncols_G]; i++) { counts[T_diag_j[i]]++; } G_diag_i = hypre_TAlloc(HYPRE_Int, (nrows_G + 1), HYPRE_MEMORY_HOST); G_diag_j = hypre_TAlloc(HYPRE_Int, T_diag_i[ncols_G], HYPRE_MEMORY_HOST); G_diag_i[0] = 0; for (i = 1; i <= nrows_G; i++) { G_diag_i[i] = G_diag_i[i - 1] + counts[i - 1]; } for (i = 0; i < ncols_G; i++) { for (j = T_diag_i[i]; j < T_diag_i[i + 1]; j++) { k = T_diag_j[j]; offset = G_diag_i[k]++; G_diag_j[offset] = i; } } G_diag_i[0] = 0; for (i = 1; i <= nrows_G; i++) { G_diag_i[i] = G_diag_i[i - 1] + counts[i - 1]; } hypre_TFree(counts, HYPRE_MEMORY_HOST); } /* form G transpose in special form (2 nodes per edge max) */ GT_diag_mat = hypre_TAlloc(HYPRE_Int, 2 * ncols_G, HYPRE_MEMORY_HOST); for (i = 0; i < 2 * ncols_G; i++) { GT_diag_mat[i] = -1; } for (i = 0; i < nrows_G; i++) { for (j = G_diag_i[i]; j < G_diag_i[i + 1]; j++) { edge = G_diag_j[j]; if (GT_diag_mat[edge * 2] == -1) { GT_diag_mat[edge * 2] = i; } else { GT_diag_mat[edge * 2 + 1] = i; } } } /* BFS on the local matrix graph to find tree */ nodes_marked = hypre_TAlloc(HYPRE_Int, nrows_G, HYPRE_MEMORY_HOST); edges_marked = hypre_TAlloc(HYPRE_Int, ncols_G, HYPRE_MEMORY_HOST); for (i = 0; i < nrows_G; i++) { nodes_marked[i] = 0; } for (i = 0; i < ncols_G; i++) { edges_marked[i] = 0; } queue = hypre_TAlloc(HYPRE_Int, nrows_G, HYPRE_MEMORY_HOST); queue_head = 0; queue_tail = 1; queue[0] = 0; nodes_marked[0] = 1; while ((queue_tail - queue_head) > 0) { node = queue[queue_tail - 1]; queue_tail--; for (i = G_diag_i[node]; i < G_diag_i[node + 1]; i++) { edge = G_diag_j[i]; if (edges_marked[edge] == 0) { if (GT_diag_mat[2 * edge + 1] != -1) { node2 = GT_diag_mat[2 * edge]; if (node2 == node) { node2 = GT_diag_mat[2 * edge + 1]; } if (nodes_marked[node2] == 0) { nodes_marked[node2] = 1; edges_marked[edge] = 1; queue[queue_tail] = node2; queue_tail++; } } } } } hypre_TFree(nodes_marked, HYPRE_MEMORY_HOST); hypre_TFree(queue, HYPRE_MEMORY_HOST); hypre_TFree(GT_diag_mat, HYPRE_MEMORY_HOST); /* fetch the communication information from */ comm = hypre_ParCSRMatrixComm(G_csr); hypre_MPI_Comm_rank(comm, &mypid); hypre_MPI_Comm_size(comm, &nprocs); comm_pkg = hypre_ParCSRMatrixCommPkg(G_csr); if (nprocs == 1 && comm_pkg == NULL) { hypre_MatvecCommPkgCreate((hypre_ParCSRMatrix *) G_csr); comm_pkg = hypre_ParCSRMatrixCommPkg(G_csr); } /* construct processor graph based on node-edge connection */ /* (local edges connected to neighbor processor nodes) */ n_children = 0; nrecvs = nsends = 0; if (nprocs > 1) { nsends = 
hypre_ParCSRCommPkgNumSends(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); nrecvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); proc_array = NULL; if ((nsends + nrecvs) > 0) { n_proc_array = 0; proc_array = hypre_TAlloc(HYPRE_Int, (nsends + nrecvs), HYPRE_MEMORY_HOST); for (i = 0; i < nsends; i++) { proc_array[i] = send_procs[i]; } for (i = 0; i < nrecvs; i++) { proc_array[nsends + i] = recv_procs[i]; } hypre_qsort0(proc_array, 0, nsends + nrecvs - 1); n_proc_array = 1; for (i = 1; i < nrecvs + nsends; i++) if (proc_array[i] != proc_array[n_proc_array]) { proc_array[n_proc_array++] = proc_array[i]; } } pgraph_i = hypre_TAlloc(HYPRE_Int, (nprocs + 1), HYPRE_MEMORY_HOST); recv_cnts = hypre_TAlloc(HYPRE_Int, nprocs, HYPRE_MEMORY_HOST); hypre_MPI_Allgather(&n_proc_array, 1, HYPRE_MPI_INT, recv_cnts, 1, HYPRE_MPI_INT, comm); pgraph_i[0] = 0; for (i = 1; i <= nprocs; i++) { pgraph_i[i] = pgraph_i[i - 1] + recv_cnts[i - 1]; } pgraph_j = hypre_TAlloc(HYPRE_Int, pgraph_i[nprocs], HYPRE_MEMORY_HOST); hypre_MPI_Allgatherv(proc_array, n_proc_array, HYPRE_MPI_INT, pgraph_j, recv_cnts, pgraph_i, HYPRE_MPI_INT, comm); hypre_TFree(recv_cnts, HYPRE_MEMORY_HOST); /* BFS on the processor graph to determine parent and children */ nodes_marked = hypre_TAlloc(HYPRE_Int, nprocs, HYPRE_MEMORY_HOST); for (i = 0; i < nprocs; i++) { nodes_marked[i] = -1; } queue = hypre_TAlloc(HYPRE_Int, nprocs, HYPRE_MEMORY_HOST); queue_head = 0; queue_tail = 1; node = 0; queue[0] = node; while ((queue_tail - queue_head) > 0) { proc = queue[queue_tail - 1]; queue_tail--; for (i = pgraph_i[proc]; i < pgraph_i[proc + 1]; i++) { proc2 = pgraph_j[i]; if (nodes_marked[proc2] < 0) { nodes_marked[proc2] = proc; queue[queue_tail] = proc2; queue_tail++; } } } parent = nodes_marked[mypid]; n_children = 0; for (i = 0; i < nprocs; i++) if (nodes_marked[i] == mypid) { n_children++; } if (n_children == 0) {n_children = 0; children = NULL;} else { children = hypre_TAlloc(HYPRE_Int, n_children, HYPRE_MEMORY_HOST); n_children = 0; for (i = 0; i < nprocs; i++) if (nodes_marked[i] == mypid) { children[n_children++] = i; } } hypre_TFree(nodes_marked, HYPRE_MEMORY_HOST); hypre_TFree(queue, HYPRE_MEMORY_HOST); hypre_TFree(pgraph_i, HYPRE_MEMORY_HOST); hypre_TFree(pgraph_j, HYPRE_MEMORY_HOST); } /* first, connection with my parent : if the edge in my parent * * is incident to one of my nodes, then my parent will mark it */ found = 0; for (i = 0; i < nrecvs; i++) { proc = hypre_ParCSRCommPkgRecvProc(comm_pkg, i); if (proc == parent) { found = 1; break; } } /* but if all the edges connected to my parent are on my side, * * then I will just pick one of them as tree edge */ if (found == 0) { for (i = 0; i < nsends; i++) { proc = hypre_ParCSRCommPkgSendProc(comm_pkg, i); if (proc == parent) { k = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); edge = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, k); edges_marked[edge] = 1; break; } } } /* next, if my processor has an edge incident on one node in my * * child, put this edge on the tree. 
But if there is no such * * edge, then I will assume my child will pick up an edge */ for (j = 0; j < n_children; j++) { proc = children[j]; for (i = 0; i < nsends; i++) { proc2 = hypre_ParCSRCommPkgSendProc(comm_pkg, i); if (proc == proc2) { k = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); edge = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, k); edges_marked[edge] = 1; break; } } } if (n_children > 0) { hypre_TFree(children, HYPRE_MEMORY_HOST); } /* count the size of the tree */ tree_size = 0; for (i = 0; i < ncols_G; i++) if (edges_marked[i] == 1) { tree_size++; } t_indices = hypre_TAlloc(HYPRE_Int, (tree_size + 1), HYPRE_MEMORY_HOST); t_indices[0] = tree_size; tree_size = 1; for (i = 0; i < ncols_G; i++) if (edges_marked[i] == 1) { t_indices[tree_size++] = i; } (*indices) = t_indices; hypre_TFree(edges_marked, HYPRE_MEMORY_HOST); if (G_type != 0) { hypre_TFree(G_diag_i, HYPRE_MEMORY_HOST); hypre_TFree(G_diag_j, HYPRE_MEMORY_HOST); } } /* ----------------------------------------------------------------------------- * extract submatrices based on given indices * ----------------------------------------------------------------------------- */ void hypre_ParCSRMatrixExtractSubmatrices( hypre_ParCSRMatrix *A_csr, HYPRE_Int *indices2, hypre_ParCSRMatrix ***submatrices ) { HYPRE_Int nrows_A, nindices, *indices, *A_diag_i, *A_diag_j, mypid, nprocs; HYPRE_Int i, j, k, *proc_offsets1, *proc_offsets2, *exp_indices; HYPRE_BigInt *itmp_array; HYPRE_Int nnz11, nnz12, nnz21, nnz22, col, ncols_offd, nnz_offd, nnz_diag; HYPRE_Int nrows, nnz; HYPRE_BigInt global_nrows, global_ncols, *row_starts, *col_starts; HYPRE_Int *diag_i, *diag_j, row, *offd_i; HYPRE_Complex *A_diag_a, *diag_a; hypre_ParCSRMatrix *A11_csr, *A12_csr, *A21_csr, *A22_csr; hypre_CSRMatrix *A_diag, *diag, *offd; MPI_Comm comm; /* ----------------------------------------------------- * first make sure the incoming indices are in order * ----------------------------------------------------- */ nindices = indices2[0]; indices = &(indices2[1]); hypre_qsort0(indices, 0, nindices - 1); /* ----------------------------------------------------- * fetch matrix information * ----------------------------------------------------- */ nrows_A = (HYPRE_Int) hypre_ParCSRMatrixGlobalNumRows(A_csr); A_diag = hypre_ParCSRMatrixDiag(A_csr); A_diag_i = hypre_CSRMatrixI(A_diag); A_diag_j = hypre_CSRMatrixJ(A_diag); A_diag_a = hypre_CSRMatrixData(A_diag); comm = hypre_ParCSRMatrixComm(A_csr); hypre_MPI_Comm_rank(comm, &mypid); hypre_MPI_Comm_size(comm, &nprocs); if (nprocs > 1) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "ExtractSubmatrices: cannot handle nprocs > 1 yet.\n"); exit(1); } /* ----------------------------------------------------- * compute new matrix dimensions * ----------------------------------------------------- */ proc_offsets1 = hypre_TAlloc(HYPRE_Int, (nprocs + 1), HYPRE_MEMORY_HOST); proc_offsets2 = hypre_TAlloc(HYPRE_Int, (nprocs + 1), HYPRE_MEMORY_HOST); hypre_MPI_Allgather(&nindices, 1, HYPRE_MPI_INT, proc_offsets1, 1, HYPRE_MPI_INT, comm); k = 0; for (i = 0; i < nprocs; i++) { j = proc_offsets1[i]; proc_offsets1[i] = k; k += j; } proc_offsets1[nprocs] = k; itmp_array = hypre_ParCSRMatrixRowStarts(A_csr); for (i = 0; i <= nprocs; i++) { proc_offsets2[i] = itmp_array[i] - proc_offsets1[i]; } /* ----------------------------------------------------- * assign id's to row and col for later processing * ----------------------------------------------------- */ exp_indices = hypre_TAlloc(HYPRE_Int, nrows_A, HYPRE_MEMORY_HOST); for (i = 0; i < nrows_A; 
i++) { exp_indices[i] = -1; } for (i = 0; i < nindices; i++) { if (exp_indices[indices[i]] == -1) { exp_indices[indices[i]] = i; } else { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "ExtractSubmatrices: wrong index %d %d\n"); exit(1); } } k = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { exp_indices[i] = - k - 1; k++; } } /* ----------------------------------------------------- * compute number of nonzeros for each block * ----------------------------------------------------- */ nnz11 = nnz12 = nnz21 = nnz22 = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) { nnz11++; } else { nnz12++; } } } else { for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) { nnz21++; } else { nnz22++; } } } } /* ----------------------------------------------------- * create A11 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz11; /* This case is not yet implemented! */ global_nrows = 0; global_ncols = 0; row_starts = NULL; col_starts = NULL; A11_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows + 1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) { diag_j[nnz] = exp_indices[col]; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A11_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows + 1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) { offd_i[i] = 0; } offd = hypre_ParCSRMatrixOffd(A11_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* ----------------------------------------------------- * create A12 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz12; global_nrows = (HYPRE_BigInt)proc_offsets1[nprocs]; global_ncols = (HYPRE_BigInt)proc_offsets2[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs + 1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs + 1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets1[i]; col_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; } A12_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows + 1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { col = A_diag_j[j]; if (exp_indices[col] < 0) { diag_j[nnz] = - exp_indices[col] - 1; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } if (nnz > nnz_diag) { hypre_assert(0); hypre_error(HYPRE_ERROR_GENERIC); } diag = 
hypre_ParCSRMatrixDiag(A12_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows + 1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) { offd_i[i] = 0; } offd = hypre_ParCSRMatrixOffd(A12_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; hypre_TFree(row_starts, HYPRE_MEMORY_HOST); hypre_TFree(col_starts, HYPRE_MEMORY_HOST); /* ----------------------------------------------------- * create A21 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz21; global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs]; global_ncols = (HYPRE_BigInt)proc_offsets1[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs + 1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs + 1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; col_starts[i] = (HYPRE_BigInt)proc_offsets1[i]; } A21_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nrows_A - nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows + 1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) { diag_j[nnz] = exp_indices[col]; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A21_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows + 1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) { offd_i[i] = 0; } offd = hypre_ParCSRMatrixOffd(A21_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; hypre_TFree(row_starts, HYPRE_MEMORY_HOST); hypre_TFree(col_starts, HYPRE_MEMORY_HOST); /* ----------------------------------------------------- * create A22 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = 0; nnz_offd = 0; nnz_diag = nnz22; global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs]; global_ncols = (HYPRE_BigInt)proc_offsets2[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs + 1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs + 1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; col_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; } A22_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nrows_A - nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows + 1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { col = A_diag_j[j]; if (exp_indices[col] < 0) { diag_j[nnz] = - exp_indices[col] - 1; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A22_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; 
hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows + 1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) { offd_i[i] = 0; } offd = hypre_ParCSRMatrixOffd(A22_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; hypre_TFree(row_starts, HYPRE_MEMORY_HOST); hypre_TFree(col_starts, HYPRE_MEMORY_HOST); /* ----------------------------------------------------- * hand the matrices back to the caller and clean up * ----------------------------------------------------- */ (*submatrices)[0] = A11_csr; (*submatrices)[1] = A12_csr; (*submatrices)[2] = A21_csr; (*submatrices)[3] = A22_csr; hypre_TFree(proc_offsets1, HYPRE_MEMORY_HOST); hypre_TFree(proc_offsets2, HYPRE_MEMORY_HOST); hypre_TFree(exp_indices, HYPRE_MEMORY_HOST); } /* ----------------------------------------------------------------------------- * extract submatrices of a rectangular matrix * ----------------------------------------------------------------------------- */ void hypre_ParCSRMatrixExtractRowSubmatrices( hypre_ParCSRMatrix *A_csr, HYPRE_Int *indices2, hypre_ParCSRMatrix ***submatrices ) { HYPRE_Int nrows_A, nindices, *indices, *A_diag_i, *A_diag_j, mypid, nprocs; HYPRE_Int i, j, k, *proc_offsets1, *proc_offsets2, *exp_indices; HYPRE_Int nnz11, nnz21, col, ncols_offd, nnz_offd, nnz_diag; HYPRE_Int *A_offd_i, *A_offd_j; HYPRE_Int nrows, nnz; HYPRE_BigInt global_nrows, global_ncols, *row_starts, *col_starts, *itmp_array; HYPRE_Int *diag_i, *diag_j, row, *offd_i, *offd_j, nnz11_offd, nnz21_offd; HYPRE_Complex *A_diag_a, *diag_a, *offd_a; hypre_ParCSRMatrix *A11_csr, *A21_csr; hypre_CSRMatrix *A_diag, *diag, *A_offd, *offd; MPI_Comm comm; /* ----------------------------------------------------- * first make sure the incoming indices are in order * ----------------------------------------------------- */ nindices = indices2[0]; indices = &(indices2[1]); hypre_qsort0(indices, 0, nindices - 1); /* ----------------------------------------------------- * fetch matrix information * ----------------------------------------------------- */ nrows_A = (HYPRE_Int)hypre_ParCSRMatrixGlobalNumRows(A_csr); A_diag = hypre_ParCSRMatrixDiag(A_csr); A_diag_i = hypre_CSRMatrixI(A_diag); A_diag_j = hypre_CSRMatrixJ(A_diag); A_diag_a = hypre_CSRMatrixData(A_diag); A_offd = hypre_ParCSRMatrixOffd(A_csr); A_offd_i = hypre_CSRMatrixI(A_offd); A_offd_j = hypre_CSRMatrixJ(A_offd); comm = hypre_ParCSRMatrixComm(A_csr); hypre_MPI_Comm_rank(comm, &mypid); hypre_MPI_Comm_size(comm, &nprocs); /* ----------------------------------------------------- * compute new matrix dimensions * ----------------------------------------------------- */ proc_offsets1 = hypre_TAlloc(HYPRE_Int, (nprocs + 1), HYPRE_MEMORY_HOST); proc_offsets2 = hypre_TAlloc(HYPRE_Int, (nprocs + 1), HYPRE_MEMORY_HOST); hypre_MPI_Allgather(&nindices, 1, HYPRE_MPI_INT, proc_offsets1, 1, HYPRE_MPI_INT, comm); k = 0; for (i = 0; i < nprocs; i++) { j = proc_offsets1[i]; proc_offsets1[i] = k; k += j; } proc_offsets1[nprocs] = k; itmp_array = hypre_ParCSRMatrixRowStarts(A_csr); for (i = 0; i <= nprocs; i++) { proc_offsets2[i] = (HYPRE_Int)(itmp_array[i] - proc_offsets1[i]); } /* ----------------------------------------------------- * assign id's to row and col for later processing * ----------------------------------------------------- */ exp_indices = hypre_TAlloc(HYPRE_Int, nrows_A, HYPRE_MEMORY_HOST); for (i = 0; i < nrows_A; i++) { exp_indices[i] = -1; } for (i = 0; i < nindices; i++) { if (exp_indices[indices[i]] == -1) { 
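      /* first occurrence of this row index: record its position i in the
         extracted ordering; a repeated index is treated as an input error */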
exp_indices[indices[i]] = i; } else { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "ExtractRowSubmatrices: wrong index %d %d\n"); exit(1); } } k = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { exp_indices[i] = - k - 1; k++; } } /* ----------------------------------------------------- * compute number of nonzeros for each block * ----------------------------------------------------- */ nnz11 = nnz21 = nnz11_offd = nnz21_offd = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) { nnz11++; } } nnz11_offd += A_offd_i[i + 1] - A_offd_i[i]; } else { for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { col = A_diag_j[j]; if (exp_indices[col] < 0) { nnz21++; } } nnz21_offd += A_offd_i[i + 1] - A_offd_i[i]; } } /* ----------------------------------------------------- * create A11 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A_csr)); nnz_diag = nnz11; nnz_offd = nnz11_offd; global_nrows = (HYPRE_BigInt)proc_offsets1[nprocs]; itmp_array = hypre_ParCSRMatrixColStarts(A_csr); global_ncols = itmp_array[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs + 1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs + 1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets1[i]; col_starts[i] = itmp_array[i]; } A11_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows + 1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) { diag_j[nnz] = exp_indices[col]; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A11_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows + 1, HYPRE_MEMORY_HOST); offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd, HYPRE_MEMORY_HOST); offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd, HYPRE_MEMORY_HOST); nnz = 0; row = 0; offd_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { offd_j[nnz] = A_offd_j[j]; offd_a[nnz++] = A_diag_a[j]; } row++; offd_i[row] = nnz; } } offd = hypre_ParCSRMatrixOffd(A11_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = offd_j; hypre_CSRMatrixData(offd) = offd_a; hypre_TFree(row_starts, HYPRE_MEMORY_HOST); hypre_TFree(col_starts, HYPRE_MEMORY_HOST); /* ----------------------------------------------------- * create A21 matrix * ----------------------------------------------------- */ ncols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A_csr)); nnz_offd = nnz21_offd; nnz_diag = nnz21; global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs]; itmp_array = hypre_ParCSRMatrixColStarts(A_csr); global_ncols = itmp_array[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs + 1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs + 1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; col_starts[i] = 
itmp_array[i];
   }
   A21_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
                                      row_starts, col_starts, ncols_offd,
                                      nnz_diag, nnz_offd);
   nrows = nrows_A - nindices;
   diag_i = hypre_CTAlloc(HYPRE_Int, nrows + 1, HYPRE_MEMORY_HOST);
   diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST);
   diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST);
   nnz = 0;
   row = 0;
   diag_i[0] = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] < 0)
      {
         for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++)
         {
            diag_j[nnz] = A_diag_j[j];
            diag_a[nnz++] = A_diag_a[j];
         }
         row++;
         diag_i[row] = nnz;
      }
   }
   diag = hypre_ParCSRMatrixDiag(A21_csr);
   hypre_CSRMatrixI(diag) = diag_i;
   hypre_CSRMatrixJ(diag) = diag_j;
   hypre_CSRMatrixData(diag) = diag_a;

   offd_i = hypre_CTAlloc(HYPRE_Int, nrows + 1, HYPRE_MEMORY_HOST);
   offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd, HYPRE_MEMORY_HOST);
   offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd, HYPRE_MEMORY_HOST);
   nnz = 0;
   row = 0;
   offd_i[0] = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] < 0)
      {
         for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++)
         {
            offd_j[nnz] = A_offd_j[j];
            offd_a[nnz++] = A_diag_a[j];
         }
         row++;
         offd_i[row] = nnz;
      }
   }
   offd = hypre_ParCSRMatrixOffd(A21_csr);
   hypre_CSRMatrixI(offd) = offd_i;
   hypre_CSRMatrixJ(offd) = offd_j;
   hypre_CSRMatrixData(offd) = offd_a;
   hypre_TFree(row_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(col_starts, HYPRE_MEMORY_HOST);

   /* -----------------------------------------------------
    * hand the matrices back to the caller and clean up
    * ----------------------------------------------------- */
   (*submatrices)[0] = A11_csr;
   (*submatrices)[1] = A21_csr;
   hypre_TFree(proc_offsets1, HYPRE_MEMORY_HOST);
   hypre_TFree(proc_offsets2, HYPRE_MEMORY_HOST);
   hypre_TFree(exp_indices, HYPRE_MEMORY_HOST);
}

/* -----------------------------------------------------------------------------
 * return the sum of all local elements of the matrix
 * ----------------------------------------------------------------------------- */

HYPRE_Complex hypre_ParCSRMatrixLocalSumElts( hypre_ParCSRMatrix * A )
{
   hypre_CSRMatrix * A_diag = hypre_ParCSRMatrixDiag( A );
   hypre_CSRMatrix * A_offd = hypre_ParCSRMatrixOffd( A );

   return hypre_CSRMatrixSumElts(A_diag) + hypre_CSRMatrixSumElts(A_offd);
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixAminvDB
 * computes C = A - inv(D)*B, where D is a diagonal matrix
 * Note: Data structure of A is expected to be a subset of data structure of B!
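 *
 * Concretely, for every local row i the routine forms D_tmp[i] = 1/d[i] and
 * computes C(i,:) = A(i,:) - D_tmp[i] * B(i,:), where d holds the diagonal
 * entries of D.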
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixAminvDB( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *B, HYPRE_Complex *d, hypre_ParCSRMatrix **C_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(B); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); hypre_ParCSRMatrix *C = NULL; HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); hypre_ParCSRCommPkg *comm_pkg_B = hypre_ParCSRMatrixCommPkg(B); hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B); hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B); HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); HYPRE_Int num_sends_B, num_recvs_B; HYPRE_Int i, j, cnt; HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(B_diag); HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag); HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag); HYPRE_Complex *B_diag_data = hypre_CSRMatrixData(B_diag); HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd); HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd); HYPRE_Complex *B_offd_data = hypre_CSRMatrixData(B_offd); HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); hypre_CSRMatrix *C_diag = NULL; hypre_CSRMatrix *C_offd = NULL; HYPRE_Int *C_diag_i = NULL; HYPRE_Int *C_diag_j = NULL; HYPRE_Complex *C_diag_data = NULL; HYPRE_Int *C_offd_i = NULL; HYPRE_Int *C_offd_j = NULL; HYPRE_Complex *C_offd_data = NULL; HYPRE_Int num_procs, my_id; HYPRE_Int *recv_procs_B; HYPRE_Int *send_procs_B; HYPRE_Int *recv_vec_starts_B; HYPRE_Int *send_map_starts_B; HYPRE_Int *send_map_elmts_B; hypre_ParCSRCommPkg *comm_pkg_C; HYPRE_Int *recv_procs_C; HYPRE_Int *send_procs_C; HYPRE_Int *recv_vec_starts_C; HYPRE_Int *send_map_starts_C; HYPRE_Int *send_map_elmts_C; HYPRE_Int *map_to_B; /*HYPRE_Int *C_diag_array; HYPRE_Int *C_offd_array;*/ HYPRE_Complex *D_tmp; HYPRE_Int size, rest, num_threads, ii; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); num_threads = hypre_NumThreads(); /*C_diag_array = hypre_CTAlloc(HYPRE_Int, num_threads); C_offd_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);*/ /*--------------------------------------------------------------------- * If there exists no CommPkg for B, a CommPkg is generated *--------------------------------------------------------------------*/ if (!comm_pkg_B) { hypre_MatvecCommPkgCreate(B); comm_pkg_B = hypre_ParCSRMatrixCommPkg(B); } C = hypre_ParCSRMatrixClone(B, 0); /*hypre_ParCSRMatrixInitialize(C);*/ C_diag = hypre_ParCSRMatrixDiag(C); C_diag_i = hypre_CSRMatrixI(C_diag); C_diag_j = hypre_CSRMatrixJ(C_diag); C_diag_data = hypre_CSRMatrixData(C_diag); C_offd = hypre_ParCSRMatrixOffd(C); C_offd_i = hypre_CSRMatrixI(C_offd); C_offd_j = hypre_CSRMatrixJ(C_offd); C_offd_data = hypre_CSRMatrixData(C_offd); size = num_rows / num_threads; rest = num_rows - size * num_threads; D_tmp = hypre_CTAlloc(HYPRE_Complex, num_rows, HYPRE_MEMORY_HOST); if (num_cols_offd_A) { map_to_B = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A, HYPRE_MEMORY_HOST); cnt = 0; for (i = 0; i < num_cols_offd_A; i++) { while (col_map_offd_B[cnt] < col_map_offd_A[i]) { cnt++; } map_to_B[i] = cnt; cnt++; } } #ifdef 
HYPRE_USING_OPENMP #pragma omp parallel for private(ii, i, j) #endif for (ii = 0; ii < num_threads; ii++) { HYPRE_Int *A_marker = NULL; HYPRE_Int ns, ne, A_col, num_cols, nmax; if (ii < rest) { ns = ii * size + ii; ne = (ii + 1) * size + ii + 1; } else { ns = ii * size + rest; ne = (ii + 1) * size + rest; } nmax = hypre_max(num_rows, num_cols_offd_B); A_marker = hypre_CTAlloc(HYPRE_Int, nmax, HYPRE_MEMORY_HOST); for (i = 0; i < num_rows; i++) { A_marker[i] = -1; } for (i = ns; i < ne; i++) { D_tmp[i] = 1.0 / d[i]; } num_cols = C_diag_i[ns]; for (i = ns; i < ne; i++) { for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { A_col = A_diag_j[j]; if (A_marker[A_col] < C_diag_i[i]) { A_marker[A_col] = num_cols; C_diag_j[num_cols] = A_col; C_diag_data[num_cols] = A_diag_data[j]; num_cols++; } else { C_diag_data[A_marker[A_col]] += A_diag_data[j]; } } for (j = B_diag_i[i]; j < B_diag_i[i + 1]; j++) { A_col = B_diag_j[j]; if (A_marker[A_col] < C_diag_i[i]) { A_marker[A_col] = num_cols; C_diag_j[num_cols] = A_col; C_diag_data[num_cols] = -D_tmp[i] * B_diag_data[j]; num_cols++; } else { C_diag_data[A_marker[A_col]] -= D_tmp[i] * B_diag_data[j]; } } } for (i = 0; i < num_cols_offd_B; i++) { A_marker[i] = -1; } num_cols = C_offd_i[ns]; for (i = ns; i < ne; i++) { for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { A_col = map_to_B[A_offd_j[j]]; if (A_marker[A_col] < B_offd_i[i]) { A_marker[A_col] = num_cols; C_offd_j[num_cols] = A_col; C_offd_data[num_cols] = A_offd_data[j]; num_cols++; } else { C_offd_data[A_marker[A_col]] += A_offd_data[j]; } } for (j = B_offd_i[i]; j < B_offd_i[i + 1]; j++) { A_col = B_offd_j[j]; if (A_marker[A_col] < B_offd_i[i]) { A_marker[A_col] = num_cols; C_offd_j[num_cols] = A_col; C_offd_data[num_cols] = -D_tmp[i] * B_offd_data[j]; num_cols++; } else { C_offd_data[A_marker[A_col]] -= D_tmp[i] * B_offd_data[j]; } } } hypre_TFree(A_marker, HYPRE_MEMORY_HOST); } /* end parallel region */ /*for (i=0; i < num_cols_offd_B; i++) col_map_offd_C[i] = col_map_offd_B[i]; */ num_sends_B = hypre_ParCSRCommPkgNumSends(comm_pkg_B); num_recvs_B = hypre_ParCSRCommPkgNumRecvs(comm_pkg_B); recv_procs_B = hypre_ParCSRCommPkgRecvProcs(comm_pkg_B); recv_vec_starts_B = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_B); send_procs_B = hypre_ParCSRCommPkgSendProcs(comm_pkg_B); send_map_starts_B = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_B); send_map_elmts_B = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_B); recv_procs_C = hypre_CTAlloc(HYPRE_Int, num_recvs_B, HYPRE_MEMORY_HOST); recv_vec_starts_C = hypre_CTAlloc(HYPRE_Int, num_recvs_B + 1, HYPRE_MEMORY_HOST); send_procs_C = hypre_CTAlloc(HYPRE_Int, num_sends_B, HYPRE_MEMORY_HOST); send_map_starts_C = hypre_CTAlloc(HYPRE_Int, num_sends_B + 1, HYPRE_MEMORY_HOST); send_map_elmts_C = hypre_CTAlloc(HYPRE_Int, send_map_starts_B[num_sends_B], HYPRE_MEMORY_HOST); for (i = 0; i < num_recvs_B; i++) { recv_procs_C[i] = recv_procs_B[i]; } for (i = 0; i < num_recvs_B + 1; i++) { recv_vec_starts_C[i] = recv_vec_starts_B[i]; } for (i = 0; i < num_sends_B; i++) { send_procs_C[i] = send_procs_B[i]; } for (i = 0; i < num_sends_B + 1; i++) { send_map_starts_C[i] = send_map_starts_B[i]; } for (i = 0; i < send_map_starts_B[num_sends_B]; i++) { send_map_elmts_C[i] = send_map_elmts_B[i]; } comm_pkg_C = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(comm_pkg_C) = comm; hypre_ParCSRCommPkgNumRecvs(comm_pkg_C) = num_recvs_B; hypre_ParCSRCommPkgRecvProcs(comm_pkg_C) = recv_procs_C; hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_C) = recv_vec_starts_C; 
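   /* C reuses B's communication pattern, but with its own copies of the
      send/recv arrays filled above and below, presumably so that C and B
      can be destroyed independently of each other */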
hypre_ParCSRCommPkgNumSends(comm_pkg_C) = num_sends_B; hypre_ParCSRCommPkgSendProcs(comm_pkg_C) = send_procs_C; hypre_ParCSRCommPkgSendMapStarts(comm_pkg_C) = send_map_starts_C; hypre_ParCSRCommPkgSendMapElmts(comm_pkg_C) = send_map_elmts_C; hypre_ParCSRMatrixCommPkg(C) = comm_pkg_C; hypre_TFree(D_tmp, HYPRE_MEMORY_HOST); if (num_cols_offd_A) { hypre_TFree(map_to_B, HYPRE_MEMORY_HOST); } *C_ptr = C; return (hypre_error_flag); } /*-------------------------------------------------------------------------- * hypre_ParTMatmul: * * Multiplies two ParCSRMatrices transpose(A) and B and returns * the product in ParCSRMatrix C * * Note that C does not own the partitionings since its row_starts * is owned by A and col_starts by B. *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix* hypre_ParTMatmul( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *B) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg_A = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *AT_diag = NULL; hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); hypre_CSRMatrix *AT_offd = NULL; HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag); hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B); hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B); HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); HYPRE_BigInt first_col_diag_B = hypre_ParCSRMatrixFirstColDiag(B); HYPRE_BigInt *col_starts_A = hypre_ParCSRMatrixColStarts(A); HYPRE_BigInt *col_starts_B = hypre_ParCSRMatrixColStarts(B); HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag); HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag); HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); hypre_ParCSRMatrix *C; HYPRE_BigInt *col_map_offd_C = NULL; HYPRE_Int *map_B_to_C; hypre_CSRMatrix *C_diag = NULL; hypre_CSRMatrix *C_tmp_diag = NULL; HYPRE_Complex *C_diag_data = NULL; HYPRE_Int *C_diag_i = NULL; HYPRE_Int *C_diag_j = NULL; HYPRE_BigInt first_col_diag_C; HYPRE_BigInt last_col_diag_C; hypre_CSRMatrix *C_offd = NULL; hypre_CSRMatrix *C_tmp_offd = NULL; hypre_CSRMatrix *C_int = NULL; hypre_CSRMatrix *C_ext = NULL; HYPRE_Int *C_ext_i; HYPRE_BigInt *C_ext_j; HYPRE_Complex *C_ext_data; HYPRE_Int *C_ext_diag_i; HYPRE_Int *C_ext_diag_j; HYPRE_Complex *C_ext_diag_data; HYPRE_Int *C_ext_offd_i; HYPRE_Int *C_ext_offd_j; HYPRE_Complex *C_ext_offd_data; HYPRE_Int C_ext_size = 0; HYPRE_Int C_ext_diag_size = 0; HYPRE_Int C_ext_offd_size = 0; HYPRE_Int *C_tmp_diag_i; HYPRE_Int *C_tmp_diag_j; HYPRE_Complex *C_tmp_diag_data; HYPRE_Int *C_tmp_offd_i; HYPRE_Int *C_tmp_offd_j; HYPRE_Complex *C_tmp_offd_data; HYPRE_Complex *C_offd_data = NULL; HYPRE_Int *C_offd_i = NULL; HYPRE_Int *C_offd_j = NULL; HYPRE_BigInt *temp; HYPRE_Int *send_map_starts_A; HYPRE_Int *send_map_elmts_A; HYPRE_Int num_sends_A; HYPRE_Int num_cols_offd_C = 0; HYPRE_Int *P_marker; HYPRE_Int i, j; HYPRE_Int i1, j_indx; HYPRE_BigInt nrows_A, ncols_A; HYPRE_BigInt nrows_B, ncols_B; /*HYPRE_Int allsquare = 0;*/ HYPRE_Int cnt, cnt_offd, cnt_diag; HYPRE_BigInt value; HYPRE_Int num_procs, my_id; HYPRE_Int max_num_threads; HYPRE_Int *C_diag_array = NULL; HYPRE_Int *C_offd_array = NULL; HYPRE_BigInt first_row_index, first_col_diag; HYPRE_Int local_num_rows, local_num_cols; nrows_A = hypre_ParCSRMatrixGlobalNumRows(A); ncols_A = hypre_ParCSRMatrixGlobalNumCols(A); nrows_B = hypre_ParCSRMatrixGlobalNumRows(B); ncols_B = 
hypre_ParCSRMatrixGlobalNumCols(B); hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); max_num_threads = hypre_NumThreads(); if (nrows_A != nrows_B || num_rows_diag_A != num_rows_diag_B) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, " Error! Incompatible matrix dimensions!\n"); return NULL; } HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A); HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B); /* RL: TODO cannot guarantee, maybe should never assert hypre_assert(memory_location_A == memory_location_B); */ /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D, * not sure if this is the right thing to do. * Also, need something like this in other places * TODO */ HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B); /*if (num_cols_diag_A == num_cols_diag_B) allsquare = 1;*/ /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ HYPRE_ANNOTATE_FUNC_BEGIN; if (!comm_pkg_A) { hypre_MatvecCommPkgCreate(A); comm_pkg_A = hypre_ParCSRMatrixCommPkg(A); } hypre_CSRMatrixTranspose(A_diag, &AT_diag, 1); hypre_CSRMatrixTranspose(A_offd, &AT_offd, 1); C_tmp_diag = hypre_CSRMatrixMultiply(AT_diag, B_diag); C_ext_size = 0; if (num_procs > 1) { hypre_CSRMatrix *C_int_diag; hypre_CSRMatrix *C_int_offd; void *request; C_tmp_offd = hypre_CSRMatrixMultiply(AT_diag, B_offd); C_int_diag = hypre_CSRMatrixMultiply(AT_offd, B_diag); C_int_offd = hypre_CSRMatrixMultiply(AT_offd, B_offd); hypre_ParCSRMatrixDiag(B) = C_int_diag; hypre_ParCSRMatrixOffd(B) = C_int_offd; C_int = hypre_MergeDiagAndOffd(B); hypre_ParCSRMatrixDiag(B) = B_diag; hypre_ParCSRMatrixOffd(B) = B_offd; hypre_ExchangeExternalRowsInit(C_int, comm_pkg_A, &request); C_ext = hypre_ExchangeExternalRowsWait(request); C_ext_i = hypre_CSRMatrixI(C_ext); C_ext_j = hypre_CSRMatrixBigJ(C_ext); C_ext_data = hypre_CSRMatrixData(C_ext); C_ext_size = C_ext_i[hypre_CSRMatrixNumRows(C_ext)]; hypre_CSRMatrixDestroy(C_int); hypre_CSRMatrixDestroy(C_int_diag); hypre_CSRMatrixDestroy(C_int_offd); } else { C_tmp_offd = hypre_CSRMatrixCreate(num_cols_diag_A, 0, 0); hypre_CSRMatrixInitialize(C_tmp_offd); hypre_CSRMatrixNumRownnz(C_tmp_offd) = 0; } hypre_CSRMatrixDestroy(AT_diag); hypre_CSRMatrixDestroy(AT_offd); /*----------------------------------------------------------------------- * Add contents of C_ext to C_tmp_diag and C_tmp_offd * to obtain C_diag and C_offd *-----------------------------------------------------------------------*/ /* check for new nonzero columns in C_offd generated through C_ext */ first_col_diag_C = first_col_diag_B; last_col_diag_C = first_col_diag_B + (HYPRE_BigInt)num_cols_diag_B - 1; C_tmp_diag_i = hypre_CSRMatrixI(C_tmp_diag); if (C_ext_size || num_cols_offd_B) { HYPRE_Int C_ext_num_rows; num_sends_A = hypre_ParCSRCommPkgNumSends(comm_pkg_A); send_map_starts_A = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A); send_map_elmts_A = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_A); C_ext_num_rows = send_map_starts_A[num_sends_A]; C_ext_diag_i = hypre_CTAlloc(HYPRE_Int, C_ext_num_rows + 1, HYPRE_MEMORY_HOST); C_ext_offd_i = hypre_CTAlloc(HYPRE_Int, C_ext_num_rows + 1, HYPRE_MEMORY_HOST); temp = hypre_CTAlloc(HYPRE_BigInt, C_ext_size + num_cols_offd_B, HYPRE_MEMORY_HOST); C_ext_diag_size = 0; C_ext_offd_size = 0; for (i = 0; i < C_ext_num_rows; i++) { for (j = 
C_ext_i[i]; j < C_ext_i[i + 1]; j++) { if (C_ext_j[j] < first_col_diag_C || C_ext_j[j] > last_col_diag_C) { temp[C_ext_offd_size++] = C_ext_j[j]; } else { C_ext_diag_size++; } } C_ext_diag_i[i + 1] = C_ext_diag_size; C_ext_offd_i[i + 1] = C_ext_offd_size; } cnt = C_ext_offd_size; for (i = 0; i < num_cols_offd_B; i++) { temp[cnt++] = col_map_offd_B[i]; } if (cnt) { hypre_BigQsort0(temp, 0, cnt - 1); value = temp[0]; num_cols_offd_C = 1; for (i = 1; i < cnt; i++) { if (temp[i] > value) { value = temp[i]; temp[num_cols_offd_C++] = value; } } } if (num_cols_offd_C) { col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST); } for (i = 0; i < num_cols_offd_C; i++) { col_map_offd_C[i] = temp[i]; } hypre_TFree(temp, HYPRE_MEMORY_HOST); if (C_ext_diag_size) { C_ext_diag_j = hypre_CTAlloc(HYPRE_Int, C_ext_diag_size, HYPRE_MEMORY_HOST); C_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, C_ext_diag_size, HYPRE_MEMORY_HOST); } if (C_ext_offd_size) { C_ext_offd_j = hypre_CTAlloc(HYPRE_Int, C_ext_offd_size, HYPRE_MEMORY_HOST); C_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, C_ext_offd_size, HYPRE_MEMORY_HOST); } C_tmp_diag_j = hypre_CSRMatrixJ(C_tmp_diag); C_tmp_diag_data = hypre_CSRMatrixData(C_tmp_diag); C_tmp_offd_i = hypre_CSRMatrixI(C_tmp_offd); C_tmp_offd_j = hypre_CSRMatrixJ(C_tmp_offd); C_tmp_offd_data = hypre_CSRMatrixData(C_tmp_offd); cnt_offd = 0; cnt_diag = 0; for (i = 0; i < C_ext_num_rows; i++) { for (j = C_ext_i[i]; j < C_ext_i[i + 1]; j++) { if (C_ext_j[j] < first_col_diag_C || C_ext_j[j] > last_col_diag_C) { C_ext_offd_j[cnt_offd] = hypre_BigBinarySearch(col_map_offd_C, C_ext_j[j], num_cols_offd_C); C_ext_offd_data[cnt_offd++] = C_ext_data[j]; } else { C_ext_diag_j[cnt_diag] = (HYPRE_Int)(C_ext_j[j] - first_col_diag_C); C_ext_diag_data[cnt_diag++] = C_ext_data[j]; } } } } if (C_ext) { hypre_CSRMatrixDestroy(C_ext); C_ext = NULL; } if (num_cols_offd_B) { map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST); cnt = 0; for (i = 0; i < num_cols_offd_C; i++) { if (col_map_offd_C[i] == col_map_offd_B[cnt]) { map_B_to_C[cnt++] = i; if (cnt == num_cols_offd_B) { break; } } } for (i = 0; i < hypre_CSRMatrixI(C_tmp_offd)[hypre_CSRMatrixNumRows(C_tmp_offd)]; i++) { j_indx = C_tmp_offd_j[i]; C_tmp_offd_j[i] = map_B_to_C[j_indx]; } } /*----------------------------------------------------------------------- * Need to compute: * C_diag = C_tmp_diag + C_ext_diag * C_offd = C_tmp_offd + C_ext_offd * * First generate structure *-----------------------------------------------------------------------*/ if (C_ext_size || num_cols_offd_B) { C_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_diag_A + 1, memory_location_C); C_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_diag_A + 1, memory_location_C); C_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST); C_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int *B_marker = NULL; HYPRE_Int *B_marker_offd = NULL; HYPRE_Int ik, jk, j1, j2, jcol; HYPRE_Int ns, ne, ii, nnz_d, nnz_o; HYPRE_Int rest, size; HYPRE_Int num_threads = hypre_NumActiveThreads(); size = num_cols_diag_A / num_threads; rest = num_cols_diag_A - size * num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii * size + ii; ne = (ii + 1) * size + ii + 1; } else { ns = ii * size + rest; ne = (ii + 1) * size + rest; } B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B, HYPRE_MEMORY_HOST); B_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_C, 
HYPRE_MEMORY_HOST); for (ik = 0; ik < num_cols_diag_B; ik++) { B_marker[ik] = -1; } for (ik = 0; ik < num_cols_offd_C; ik++) { B_marker_offd[ik] = -1; } nnz_d = 0; nnz_o = 0; for (ik = ns; ik < ne; ik++) { for (jk = C_tmp_diag_i[ik]; jk < C_tmp_diag_i[ik + 1]; jk++) { jcol = C_tmp_diag_j[jk]; B_marker[jcol] = ik; nnz_d++; } for (jk = C_tmp_offd_i[ik]; jk < C_tmp_offd_i[ik + 1]; jk++) { jcol = C_tmp_offd_j[jk]; B_marker_offd[jcol] = ik; nnz_o++; } for (jk = 0; jk < num_sends_A; jk++) { for (j1 = send_map_starts_A[jk]; j1 < send_map_starts_A[jk + 1]; j1++) { if (send_map_elmts_A[j1] == ik) { for (j2 = C_ext_diag_i[j1]; j2 < C_ext_diag_i[j1 + 1]; j2++) { jcol = C_ext_diag_j[j2]; if (B_marker[jcol] < ik) { B_marker[jcol] = ik; nnz_d++; } } for (j2 = C_ext_offd_i[j1]; j2 < C_ext_offd_i[j1 + 1]; j2++) { jcol = C_ext_offd_j[j2]; if (B_marker_offd[jcol] < ik) { B_marker_offd[jcol] = ik; nnz_o++; } } break; } } } C_diag_array[ii] = nnz_d; C_offd_array[ii] = nnz_o; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (ii == 0) { nnz_d = 0; nnz_o = 0; for (ik = 0; ik < num_threads - 1; ik++) { C_diag_array[ik + 1] += C_diag_array[ik]; C_offd_array[ik + 1] += C_offd_array[ik]; } nnz_d = C_diag_array[num_threads - 1]; nnz_o = C_offd_array[num_threads - 1]; C_diag_i[num_cols_diag_A] = nnz_d; C_offd_i[num_cols_diag_A] = nnz_o; C_diag = hypre_CSRMatrixCreate(num_cols_diag_A, num_cols_diag_A, nnz_d); C_offd = hypre_CSRMatrixCreate(num_cols_diag_A, num_cols_offd_C, nnz_o); hypre_CSRMatrixI(C_diag) = C_diag_i; hypre_CSRMatrixInitialize_v2(C_diag, 0, memory_location_C); C_diag_j = hypre_CSRMatrixJ(C_diag); C_diag_data = hypre_CSRMatrixData(C_diag); hypre_CSRMatrixI(C_offd) = C_offd_i; hypre_CSRMatrixInitialize_v2(C_offd, 0, memory_location_C); C_offd_j = hypre_CSRMatrixJ(C_offd); C_offd_data = hypre_CSRMatrixData(C_offd); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /*----------------------------------------------------------------------- * Need to compute C_diag = C_tmp_diag + C_ext_diag * and C_offd = C_tmp_offd + C_ext_offd !!!! 
* Now fill in values *-----------------------------------------------------------------------*/ for (ik = 0; ik < num_cols_diag_B; ik++) { B_marker[ik] = -1; } for (ik = 0; ik < num_cols_offd_C; ik++) { B_marker_offd[ik] = -1; } /*----------------------------------------------------------------------- * Populate matrices *-----------------------------------------------------------------------*/ nnz_d = 0; nnz_o = 0; if (ii) { nnz_d = C_diag_array[ii - 1]; nnz_o = C_offd_array[ii - 1]; } for (ik = ns; ik < ne; ik++) { C_diag_i[ik] = nnz_d; C_offd_i[ik] = nnz_o; for (jk = C_tmp_diag_i[ik]; jk < C_tmp_diag_i[ik + 1]; jk++) { jcol = C_tmp_diag_j[jk]; C_diag_j[nnz_d] = jcol; C_diag_data[nnz_d] = C_tmp_diag_data[jk]; B_marker[jcol] = nnz_d; nnz_d++; } for (jk = C_tmp_offd_i[ik]; jk < C_tmp_offd_i[ik + 1]; jk++) { jcol = C_tmp_offd_j[jk]; C_offd_j[nnz_o] = jcol; C_offd_data[nnz_o] = C_tmp_offd_data[jk]; B_marker_offd[jcol] = nnz_o; nnz_o++; } for (jk = 0; jk < num_sends_A; jk++) { for (j1 = send_map_starts_A[jk]; j1 < send_map_starts_A[jk + 1]; j1++) { if (send_map_elmts_A[j1] == ik) { for (j2 = C_ext_diag_i[j1]; j2 < C_ext_diag_i[j1 + 1]; j2++) { jcol = C_ext_diag_j[j2]; if (B_marker[jcol] < C_diag_i[ik]) { C_diag_j[nnz_d] = jcol; C_diag_data[nnz_d] = C_ext_diag_data[j2]; B_marker[jcol] = nnz_d; nnz_d++; } else { C_diag_data[B_marker[jcol]] += C_ext_diag_data[j2]; } } for (j2 = C_ext_offd_i[j1]; j2 < C_ext_offd_i[j1 + 1]; j2++) { jcol = C_ext_offd_j[j2]; if (B_marker_offd[jcol] < C_offd_i[ik]) { C_offd_j[nnz_o] = jcol; C_offd_data[nnz_o] = C_ext_offd_data[j2]; B_marker_offd[jcol] = nnz_o; nnz_o++; } else { C_offd_data[B_marker_offd[jcol]] += C_ext_offd_data[j2]; } } break; } } } } hypre_TFree(B_marker, HYPRE_MEMORY_HOST); hypre_TFree(B_marker_offd, HYPRE_MEMORY_HOST); } /*end parallel region */ hypre_TFree(C_diag_array, HYPRE_MEMORY_HOST); hypre_TFree(C_offd_array, HYPRE_MEMORY_HOST); } /*C = hypre_ParCSRMatrixCreate(comm, ncols_A, ncols_B, col_starts_A, col_starts_B, num_cols_offd_C, nnz_diag, nnz_offd); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(C)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(C)); */ /* row_starts[0] is start of local rows. 
row_starts[1] is start of next processor's rows */ first_row_index = col_starts_A[0]; local_num_rows = (HYPRE_Int)(col_starts_A[1] - first_row_index ); first_col_diag = col_starts_B[0]; local_num_cols = (HYPRE_Int)(col_starts_B[1] - first_col_diag); C = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixComm(C) = comm; hypre_ParCSRMatrixGlobalNumRows(C) = ncols_A; hypre_ParCSRMatrixGlobalNumCols(C) = ncols_B; hypre_ParCSRMatrixFirstRowIndex(C) = first_row_index; hypre_ParCSRMatrixFirstColDiag(C) = first_col_diag; hypre_ParCSRMatrixLastRowIndex(C) = first_row_index + (HYPRE_BigInt)local_num_rows - 1; hypre_ParCSRMatrixLastColDiag(C) = first_col_diag + (HYPRE_BigInt)local_num_cols - 1; hypre_ParCSRMatrixColMapOffd(C) = NULL; hypre_ParCSRMatrixAssumedPartition(C) = NULL; hypre_ParCSRMatrixCommPkg(C) = NULL; hypre_ParCSRMatrixCommPkgT(C) = NULL; /* C row/col starts*/ hypre_ParCSRMatrixRowStarts(C)[0] = col_starts_A[0]; hypre_ParCSRMatrixRowStarts(C)[1] = col_starts_A[1]; hypre_ParCSRMatrixColStarts(C)[0] = col_starts_B[0]; hypre_ParCSRMatrixColStarts(C)[1] = col_starts_B[1]; /* set defaults */ hypre_ParCSRMatrixOwnsData(C) = 1; hypre_ParCSRMatrixRowindices(C) = NULL; hypre_ParCSRMatrixRowvalues(C) = NULL; hypre_ParCSRMatrixGetrowactive(C) = 0; if (C_diag) { hypre_CSRMatrixSetRownnz(C_diag); hypre_ParCSRMatrixDiag(C) = C_diag; } else { hypre_ParCSRMatrixDiag(C) = C_tmp_diag; } if (C_offd) { hypre_CSRMatrixSetRownnz(C_offd); hypre_ParCSRMatrixOffd(C) = C_offd; } else { hypre_ParCSRMatrixOffd(C) = C_tmp_offd; } hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(C)) = memory_location_C; hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(C)) = memory_location_C; if (num_cols_offd_C) { HYPRE_Int jj_count_offd, nnz_offd; HYPRE_BigInt *new_col_map_offd_C = NULL; P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd_C, HYPRE_MEMORY_HOST); for (i = 0; i < num_cols_offd_C; i++) { P_marker[i] = -1; } jj_count_offd = 0; nnz_offd = C_offd_i[num_cols_diag_A]; for (i = 0; i < nnz_offd; i++) { i1 = C_offd_j[i]; if (P_marker[i1]) { P_marker[i1] = 0; jj_count_offd++; } } if (jj_count_offd < num_cols_offd_C) { new_col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, jj_count_offd, HYPRE_MEMORY_HOST); jj_count_offd = 0; for (i = 0; i < num_cols_offd_C; i++) { if (!P_marker[i]) { P_marker[i] = jj_count_offd; new_col_map_offd_C[jj_count_offd++] = col_map_offd_C[i]; } } for (i = 0; i < nnz_offd; i++) { i1 = C_offd_j[i]; C_offd_j[i] = P_marker[i1]; } num_cols_offd_C = jj_count_offd; hypre_TFree(col_map_offd_C, HYPRE_MEMORY_HOST); col_map_offd_C = new_col_map_offd_C; hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(C)) = num_cols_offd_C; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C; /*----------------------------------------------------------------------- * Free various arrays *-----------------------------------------------------------------------*/ if (C_ext_size || num_cols_offd_B) { hypre_TFree(C_ext_diag_i, HYPRE_MEMORY_HOST); hypre_TFree(C_ext_offd_i, HYPRE_MEMORY_HOST); } if (C_ext_diag_size) { hypre_TFree(C_ext_diag_j, HYPRE_MEMORY_HOST); hypre_TFree(C_ext_diag_data, HYPRE_MEMORY_HOST); } if (C_ext_offd_size) { hypre_TFree(C_ext_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(C_ext_offd_data, HYPRE_MEMORY_HOST); } if (num_cols_offd_B) { hypre_TFree(map_B_to_C, HYPRE_MEMORY_HOST); } if (C_diag) { hypre_CSRMatrixDestroy(C_tmp_diag); } if (C_offd) { hypre_CSRMatrixDestroy(C_tmp_offd); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if ( 
hypre_GetExecPolicy2(memory_location_A, memory_location_B) == HYPRE_EXEC_DEVICE ) { hypre_CSRMatrixMoveDiagFirstDevice(hypre_ParCSRMatrixDiag(C)); hypre_SyncComputeStream(hypre_handle()); } #endif HYPRE_ANNOTATE_FUNC_END; return C; } /*-------------------------------------------------------------------------- * hypre_ParvecBdiagInvScal *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParvecBdiagInvScal( hypre_ParVector *b, HYPRE_Int blockSize, hypre_ParVector **bs, hypre_ParCSRMatrix *A) { MPI_Comm comm = hypre_ParCSRMatrixComm(b); HYPRE_Int num_procs, my_id; hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); HYPRE_Int i, j, s, block_start, block_end; HYPRE_BigInt nrow_global = hypre_ParVectorGlobalSize(b); HYPRE_BigInt first_row = hypre_ParVectorFirstIndex(b); HYPRE_BigInt last_row = hypre_ParVectorLastIndex(b); HYPRE_BigInt end_row = last_row + 1; /* one past-the-last */ HYPRE_BigInt first_row_block = first_row / (HYPRE_BigInt)(blockSize) * (HYPRE_BigInt)blockSize; HYPRE_BigInt end_row_block = hypre_min( (last_row / (HYPRE_BigInt)blockSize + 1) * (HYPRE_BigInt)blockSize, nrow_global ); hypre_assert(blockSize == A->bdiag_size); HYPRE_Complex *bdiaginv = A->bdiaginv; hypre_ParCSRCommPkg *comm_pkg = A->bdiaginv_comm_pkg; HYPRE_Complex *dense = bdiaginv; //for (i=first_row_block; i < end_row; i+=blockSize) ; //printf("===[%d %d), [ %d %d ) %d === \n", first_row, end_row, first_row_block, end_row_block, i); /* local vector of b */ hypre_Vector *b_local = hypre_ParVectorLocalVector(b); HYPRE_Complex *b_local_data = hypre_VectorData(b_local); /* number of sends (#procs) */ HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); /* number of rows to send */ HYPRE_Int num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); /* number of recvs (#procs) */ HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); /* number of rows to recv */ HYPRE_Int num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs); hypre_ParCSRCommHandle *comm_handle; hypre_ParVector *bnew = hypre_ParVectorCreate( hypre_ParVectorComm(b), hypre_ParVectorGlobalSize(b), hypre_ParVectorPartitioning(b) ); hypre_ParVectorInitialize(bnew); hypre_Vector *bnew_local = hypre_ParVectorLocalVector(bnew); HYPRE_Complex *bnew_local_data = hypre_VectorData(bnew_local); /* send and recv b */ HYPRE_Complex *send_b = hypre_TAlloc(HYPRE_Complex, num_rows_send, HYPRE_MEMORY_HOST); HYPRE_Complex *recv_b = hypre_TAlloc(HYPRE_Complex, num_rows_recv, HYPRE_MEMORY_HOST); for (i = 0; i < num_rows_send; i++) { j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i); send_b[i] = b_local_data[j]; } comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, send_b, recv_b); /* ... 
*/ hypre_ParCSRCommHandleDestroy(comm_handle); for (block_start = first_row_block; block_start < end_row_block; block_start += blockSize) { HYPRE_BigInt big_i; block_end = hypre_min(block_start + (HYPRE_BigInt)blockSize, nrow_global); s = (HYPRE_Int)(block_end - block_start); for (big_i = block_start; big_i < block_end; big_i++) { if (big_i < first_row || big_i >= end_row) { continue; } HYPRE_Int local_i = (HYPRE_Int)(big_i - first_row); HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start); bnew_local_data[local_i] = 0.0; for (j = 0; j < s; j++) { HYPRE_BigInt global_rid = block_start + (HYPRE_BigInt)j; HYPRE_Complex val = dense[block_i + j * blockSize]; if (val == 0.0) { continue; } if (global_rid >= first_row && global_rid < end_row) { HYPRE_Int rid = (HYPRE_Int)(global_rid - first_row); bnew_local_data[local_i] += val * b_local_data[rid]; } else { HYPRE_Int rid; if (global_rid < first_row) { rid = (HYPRE_Int)(global_rid - first_row_block); } else { rid = (HYPRE_Int)(first_row - first_row_block + global_rid - end_row); } bnew_local_data[local_i] += val * recv_b[rid]; } } } dense += blockSize * blockSize; } hypre_TFree(send_b, HYPRE_MEMORY_HOST); hypre_TFree(recv_b, HYPRE_MEMORY_HOST); *bs = bnew; return hypre_error_flag; } /** * @brief Compute As = B^{-1}*A, where B is the block diagonal of A * @param[in] A : * @param[in] blockSize: block size * @param[out] B : * @return * @warning */ HYPRE_Int hypre_ParcsrBdiagInvScal( hypre_ParCSRMatrix *A, HYPRE_Int blockSize, hypre_ParCSRMatrix **As) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_Int num_procs, my_id; hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); HYPRE_Int i, j, k, s; HYPRE_BigInt block_start, block_end; /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int nrow_local = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_BigInt last_row = hypre_ParCSRMatrixLastRowIndex(A); HYPRE_BigInt end_row = first_row + (HYPRE_BigInt)nrow_local; /* one past-the-last */ HYPRE_Int ncol_local = hypre_CSRMatrixNumCols(A_diag); HYPRE_BigInt first_col = hypre_ParCSRMatrixFirstColDiag(A); /* HYPRE_Int last_col = hypre_ParCSRMatrixLastColDiag(A); */ HYPRE_BigInt end_col = first_col + (HYPRE_BigInt)ncol_local; HYPRE_BigInt nrow_global = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt ncol_global = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); void *request; /* if square globally and locally */ HYPRE_Int square2 = (nrow_global == ncol_global) && (nrow_local == ncol_local) && (first_row == first_col); if (nrow_global != ncol_global) { hypre_printf("hypre_ParcsrBdiagInvScal: only support N_ROW == N_COL\n"); return hypre_error_flag; } /* in block diagonals, row range of the blocks this proc span */ HYPRE_BigInt first_row_block = first_row / (HYPRE_BigInt)blockSize * (HYPRE_BigInt)blockSize; HYPRE_BigInt end_row_block = hypre_min( (last_row / (HYPRE_BigInt)blockSize + 1) * (HYPRE_BigInt)blockSize, nrow_global ); 
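/*-----------------------------------------------------------------------
 * The locally owned rows [first_row, end_row) generally do not align with
 * blockSize boundaries, so the work below covers the blockSize-aligned
 * range [first_row_block, end_row_block); the rows of that range owned by
 * neighboring processes are the "external" rows (num_ext_rows, computed
 * just below) that must be received before the block diagonals can be
 * assembled and inverted. Illustrative numbers (assumed, not taken from
 * any run): blockSize = 4, first_row = 6, last_row = 10 (end_row = 11)
 * and nrow_global >= 12 give first_row_block = 4, end_row_block = 12 and
 * num_ext_rows = 8 - 5 = 3, i.e. global rows 4, 5 and 11 come from
 * neighbors.
 *-----------------------------------------------------------------------*/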
HYPRE_Int num_blocks = (HYPRE_Int)(last_row / (HYPRE_BigInt)blockSize + 1 - first_row / (HYPRE_BigInt)blockSize); //for (i=first_row_block; i < end_row; i+=blockSize) ; //printf("===[%d %d), [ %d %d ) %d === \n", first_row, end_row, first_row_block, end_row_block, i); //return 0; /* number of external rows */ HYPRE_Int num_ext_rows = (HYPRE_Int)(end_row_block - first_row_block - (end_row - first_row)); HYPRE_BigInt *ext_indices; HYPRE_Int A_ext_nnz; hypre_CSRMatrix *A_ext = NULL; HYPRE_Complex *A_ext_a = NULL; HYPRE_Int *A_ext_i = NULL; HYPRE_BigInt *A_ext_j = NULL; HYPRE_Real *dense_all = hypre_CTAlloc(HYPRE_Complex, num_blocks * blockSize * blockSize, HYPRE_MEMORY_HOST); HYPRE_Real *dense = dense_all; HYPRE_Int *IPIV = hypre_TAlloc(HYPRE_Int, blockSize, HYPRE_MEMORY_HOST); HYPRE_Complex *dgetri_work = NULL; HYPRE_Int dgetri_lwork = -1, lapack_info; HYPRE_Int num_cols_A_offd_new; HYPRE_BigInt *col_map_offd_A_new; HYPRE_BigInt big_i; HYPRE_Int *offd2new = NULL; HYPRE_Int *marker_diag, *marker_newoffd; HYPRE_Int nnz_diag = A_diag_i[nrow_local]; HYPRE_Int nnz_offd = A_offd_i[nrow_local]; HYPRE_Int nnz_diag_new = 0, nnz_offd_new = 0; HYPRE_Int *A_diag_i_new, *A_diag_j_new, *A_offd_i_new, *A_offd_j_new; HYPRE_Complex *A_diag_a_new, *A_offd_a_new; /* heuristic */ HYPRE_Int nnz_diag_alloc = 2 * nnz_diag; HYPRE_Int nnz_offd_alloc = 2 * nnz_offd; A_diag_i_new = hypre_CTAlloc(HYPRE_Int, nrow_local + 1, HYPRE_MEMORY_HOST); A_diag_j_new = hypre_CTAlloc(HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST); A_diag_a_new = hypre_CTAlloc(HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST); A_offd_i_new = hypre_CTAlloc(HYPRE_Int, nrow_local + 1, HYPRE_MEMORY_HOST); A_offd_j_new = hypre_CTAlloc(HYPRE_Int, nnz_offd_alloc, HYPRE_MEMORY_HOST); A_offd_a_new = hypre_CTAlloc(HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST); hypre_ParCSRMatrix *Anew; hypre_CSRMatrix *Anew_diag; hypre_CSRMatrix *Anew_offd; HYPRE_Real eps = 2.2e-16; /* Start with extracting the external rows */ HYPRE_BigInt *ext_offd; ext_indices = hypre_CTAlloc(HYPRE_BigInt, num_ext_rows, HYPRE_MEMORY_HOST); j = 0; for (big_i = first_row_block; big_i < first_row; big_i++) { ext_indices[j++] = big_i; } for (big_i = end_row; big_i < end_row_block; big_i++) { ext_indices[j++] = big_i; } hypre_assert(j == num_ext_rows); /* create CommPkg for external rows */ hypre_ParCSRFindExtendCommPkg(comm, nrow_global, first_row, nrow_local, row_starts, hypre_ParCSRMatrixAssumedPartition(A), num_ext_rows, ext_indices, &A->bdiaginv_comm_pkg); hypre_ParcsrGetExternalRowsInit(A, num_ext_rows, ext_indices, A->bdiaginv_comm_pkg, 1, &request); A_ext = hypre_ParcsrGetExternalRowsWait(request); hypre_TFree(ext_indices, HYPRE_MEMORY_HOST); A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixBigJ(A_ext); A_ext_a = hypre_CSRMatrixData(A_ext); A_ext_nnz = A_ext_i[num_ext_rows]; ext_offd = hypre_CTAlloc(HYPRE_BigInt, A_ext_nnz, HYPRE_MEMORY_HOST); /* fint the offd incides in A_ext */ for (i = 0, j = 0; i < A_ext_nnz; i++) { /* global index */ HYPRE_BigInt cid = A_ext_j[i]; /* keep the offd indices */ if (cid < first_col || cid >= end_col) { ext_offd[j++] = cid; } } /* remove duplicates after sorting (TODO better ways?) 
*/ hypre_BigQsort0(ext_offd, 0, j - 1); for (i = 0, k = 0; i < j; i++) { if (i == 0 || ext_offd[i] != ext_offd[i - 1]) { ext_offd[k++] = ext_offd[i]; } } /* uniion these `k' new indices into col_map_offd_A */ col_map_offd_A_new = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd + k, HYPRE_MEMORY_HOST); if (k) { /* map offd to offd_new */ offd2new = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); } hypre_union2(num_cols_A_offd, col_map_offd_A, k, ext_offd, &num_cols_A_offd_new, col_map_offd_A_new, offd2new, NULL); hypre_TFree(ext_offd, HYPRE_MEMORY_HOST); /* * adjust column indices in A_ext */ for (i = 0; i < A_ext_nnz; i++) { HYPRE_BigInt cid = A_ext_j[i]; if (cid < first_col || cid >= end_col) { j = hypre_BigBinarySearch(col_map_offd_A_new, cid, num_cols_A_offd_new); /* searching must succeed */ hypre_assert(j >= 0 && j < num_cols_A_offd_new); /* trick: save ncol_local + j back */ A_ext_j[i] = ncol_local + j; } else { /* save local index: [0, ncol_local-1] */ A_ext_j[i] = cid - first_col; } } /* marker for diag */ marker_diag = hypre_TAlloc(HYPRE_Int, ncol_local, HYPRE_MEMORY_HOST); for (i = 0; i < ncol_local; i++) { marker_diag[i] = -1; } /* marker for newoffd */ marker_newoffd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd_new, HYPRE_MEMORY_HOST); for (i = 0; i < num_cols_A_offd_new; i++) { marker_newoffd[i] = -1; } /* outer most loop for blocks */ for (block_start = first_row_block; block_start < end_row_block; block_start += (HYPRE_BigInt)blockSize) { HYPRE_BigInt big_i; block_end = hypre_min(block_start + (HYPRE_BigInt)blockSize, nrow_global); s = (HYPRE_Int)(block_end - block_start); /* 1. fill the dense block diag matrix */ for (big_i = block_start; big_i < block_end; big_i++) { /* row index in this block */ HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start); /* row index i: it can be local or external */ if (big_i >= first_row && big_i < end_row) { /* is a local row */ j = (HYPRE_Int)(big_i - first_row); for (k = A_diag_i[j]; k < A_diag_i[j + 1]; k++) { HYPRE_BigInt cid = (HYPRE_BigInt)A_diag_j[k] + first_col; if (cid >= block_start && cid < block_end) { dense[block_i + (HYPRE_Int)(cid - block_start)*blockSize] = A_diag_a[k]; } } if (num_cols_A_offd) { for (k = A_offd_i[j]; k < A_offd_i[j + 1]; k++) { HYPRE_BigInt cid = col_map_offd_A[A_offd_j[k]]; if (cid >= block_start && cid < block_end) { dense[block_i + (HYPRE_Int)(cid - block_start)*blockSize] = A_offd_a[k]; } } } } else { /* is an external row */ if (big_i < first_row) { j = (HYPRE_Int)(big_i - first_row_block); } else { j = (HYPRE_Int)(first_row - first_row_block + big_i - end_row); } for (k = A_ext_i[j]; k < A_ext_i[j + 1]; k++) { HYPRE_BigInt cid = A_ext_j[k]; /* recover the global index */ cid = cid < (HYPRE_BigInt)ncol_local ? cid + first_col : col_map_offd_A_new[cid - ncol_local]; if (cid >= block_start && cid < block_end) { dense[block_i + (HYPRE_Int)(cid - block_start)*blockSize] = A_ext_a[k]; } } } } /* 2. 
invert the dense matrix */ hypre_dgetrf(&s, &s, dense, &blockSize, IPIV, &lapack_info); hypre_assert(lapack_info == 0); if (lapack_info == 0) { HYPRE_Int query = -1; HYPRE_Real lwork_opt; /* query the optimal size of work */ hypre_dgetri(&s, dense, &blockSize, IPIV, &lwork_opt, &query, &lapack_info); hypre_assert(lapack_info == 0); if (lwork_opt > dgetri_lwork) { dgetri_lwork = lwork_opt; dgetri_work = hypre_TReAlloc(dgetri_work, HYPRE_Complex, dgetri_lwork, HYPRE_MEMORY_HOST); } hypre_dgetri(&s, dense, &blockSize, IPIV, dgetri_work, &dgetri_lwork, &lapack_info); hypre_assert(lapack_info == 0); } /* filter out *zeros* */ HYPRE_Real Fnorm = 0.0; for (i = 0; i < s; i++) { for (j = 0; j < s; j++) { HYPRE_Complex t = dense[j + i * blockSize]; Fnorm += t * t; } } Fnorm = sqrt(Fnorm); for (i = 0; i < s; i++) { for (j = 0; j < s; j++) { if ( hypre_abs(dense[j + i * blockSize]) < eps * Fnorm ) { dense[j + i * blockSize] = 0.0; } } } /* 3. premultiplication: one-pass dynamic allocation */ for (big_i = block_start; big_i < block_end; big_i++) { /* starting points of this row in j */ HYPRE_Int diag_i_start = nnz_diag_new; HYPRE_Int offd_i_start = nnz_offd_new; /* compute a new row with global index 'i' and local index 'local_i' */ HYPRE_Int local_i = (HYPRE_Int)(big_i - first_row); /* row index in this block */ HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start); if (big_i < first_row || big_i >= end_row) { continue; } /* if square^2: reserve the first space in diag part to the diag entry */ if (square2) { marker_diag[local_i] = nnz_diag_new; if (nnz_diag_new == nnz_diag_alloc) { nnz_diag_alloc = nnz_diag_alloc * 2 + 1; A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST); A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST); } A_diag_j_new[nnz_diag_new] = local_i; A_diag_a_new[nnz_diag_new] = 0.0; nnz_diag_new ++; } /* combine s rows */ for (j = 0; j < s; j++) { /* row to combine: global row id */ HYPRE_BigInt global_rid = block_start + (HYPRE_BigInt)j; /* the multipiler */ HYPRE_Complex val = dense[block_i + j * blockSize]; if (val == 0.0) { continue; } if (global_rid >= first_row && global_rid < end_row) { /* this row is local */ HYPRE_Int rid = (HYPRE_Int)(global_rid - first_row); HYPRE_Int ii; for (ii = A_diag_i[rid]; ii < A_diag_i[rid + 1]; ii++) { HYPRE_Int col = A_diag_j[ii]; HYPRE_Complex vv = A_diag_a[ii]; if (marker_diag[col] < diag_i_start) { /* this col has not been seen before, create new entry */ marker_diag[col] = nnz_diag_new; if (nnz_diag_new == nnz_diag_alloc) { nnz_diag_alloc = nnz_diag_alloc * 2 + 1; A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST); A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST); } A_diag_j_new[nnz_diag_new] = col; A_diag_a_new[nnz_diag_new] = val * vv; nnz_diag_new ++; } else { /* existing entry, update */ HYPRE_Int p = marker_diag[col]; hypre_assert(A_diag_j_new[p] == col); A_diag_a_new[p] += val * vv; } } for (ii = A_offd_i[rid]; ii < A_offd_i[rid + 1]; ii++) { HYPRE_Int col = A_offd_j[ii]; /* use the mapper to map to new offd */ HYPRE_Int col_new = offd2new ? 
offd2new[col] : col; HYPRE_Complex vv = A_offd_a[ii]; if (marker_newoffd[col_new] < offd_i_start) { /* this col has not been seen before, create new entry */ marker_newoffd[col_new] = nnz_offd_new; if (nnz_offd_new == nnz_offd_alloc) { nnz_offd_alloc = nnz_offd_alloc * 2 + 1; A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int, nnz_offd_alloc, HYPRE_MEMORY_HOST); A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST); } A_offd_j_new[nnz_offd_new] = col_new; A_offd_a_new[nnz_offd_new] = val * vv; nnz_offd_new ++; } else { /* existing entry, update */ HYPRE_Int p = marker_newoffd[col_new]; hypre_assert(A_offd_j_new[p] == col_new); A_offd_a_new[p] += val * vv; } } } else { /* this is an external row: go to A_ext */ HYPRE_Int rid, ii; if (global_rid < first_row) { rid = (HYPRE_Int)(global_rid - first_row_block); } else { rid = (HYPRE_Int)(first_row - first_row_block + global_rid - end_row); } for (ii = A_ext_i[rid]; ii < A_ext_i[rid + 1]; ii++) { HYPRE_Int col = (HYPRE_Int)A_ext_j[ii]; HYPRE_Complex vv = A_ext_a[ii]; if (col < ncol_local) { /* in diag part */ if (marker_diag[col] < diag_i_start) { /* this col has not been seen before, create new entry */ marker_diag[col] = nnz_diag_new; if (nnz_diag_new == nnz_diag_alloc) { nnz_diag_alloc = nnz_diag_alloc * 2 + 1; A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST); A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST); } A_diag_j_new[nnz_diag_new] = col; A_diag_a_new[nnz_diag_new] = val * vv; nnz_diag_new ++; } else { /* existing entry, update */ HYPRE_Int p = marker_diag[col]; hypre_assert(A_diag_j_new[p] == col); A_diag_a_new[p] += val * vv; } } else { /* in offd part */ col -= ncol_local; if (marker_newoffd[col] < offd_i_start) { /* this col has not been seen before, create new entry */ marker_newoffd[col] = nnz_offd_new; if (nnz_offd_new == nnz_offd_alloc) { nnz_offd_alloc = nnz_offd_alloc * 2 + 1; A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int, nnz_offd_alloc, HYPRE_MEMORY_HOST); A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST); } A_offd_j_new[nnz_offd_new] = col; A_offd_a_new[nnz_offd_new] = val * vv; nnz_offd_new ++; } else { /* existing entry, update */ HYPRE_Int p = marker_newoffd[col]; hypre_assert(A_offd_j_new[p] == col); A_offd_a_new[p] += val * vv; } } } } } /* done for row local_i */ A_diag_i_new[local_i + 1] = nnz_diag_new; A_offd_i_new[local_i + 1] = nnz_offd_new; } /* for i, each row */ dense += blockSize * blockSize; } /* for each block */ /* done with all rows */ /* resize properly */ A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_new, HYPRE_MEMORY_HOST); A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_new, HYPRE_MEMORY_HOST); A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int, nnz_offd_new, HYPRE_MEMORY_HOST); A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_new, HYPRE_MEMORY_HOST); /* readjust col_map_offd_new */ for (i = 0; i < num_cols_A_offd_new; i++) { marker_newoffd[i] = -1; } for (i = 0; i < nnz_offd_new; i++) { j = A_offd_j_new[i]; if (marker_newoffd[j] == -1) { marker_newoffd[j] = 1; } } for (i = 0, j = 0; i < num_cols_A_offd_new; i++) { if (marker_newoffd[i] == 1) { col_map_offd_A_new[j] = col_map_offd_A_new[i]; marker_newoffd[i] = j++; } } num_cols_A_offd_new = j; for (i = 0; i < nnz_offd_new; i++) { j = marker_newoffd[A_offd_j_new[i]]; hypre_assert(j >= 0 && j < num_cols_A_offd_new); 
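/* store the compacted offd column index back into A_offd_j_new */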
A_offd_j_new[i] = j; } /* Now, we should have everything of Parcsr matrix As */ Anew = hypre_ParCSRMatrixCreate(comm, nrow_global, ncol_global, hypre_ParCSRMatrixRowStarts(A), hypre_ParCSRMatrixColStarts(A), num_cols_A_offd_new, nnz_diag_new, nnz_offd_new); Anew_diag = hypre_ParCSRMatrixDiag(Anew); hypre_CSRMatrixData(Anew_diag) = A_diag_a_new; hypre_CSRMatrixI(Anew_diag) = A_diag_i_new; hypre_CSRMatrixJ(Anew_diag) = A_diag_j_new; Anew_offd = hypre_ParCSRMatrixOffd(Anew); hypre_CSRMatrixData(Anew_offd) = A_offd_a_new; hypre_CSRMatrixI(Anew_offd) = A_offd_i_new; hypre_CSRMatrixJ(Anew_offd) = A_offd_j_new; hypre_ParCSRMatrixColMapOffd(Anew) = col_map_offd_A_new; hypre_ParCSRMatrixSetNumNonzeros(Anew); hypre_ParCSRMatrixDNumNonzeros(Anew) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(Anew); //printf("nnz_diag %d --> %d, nnz_offd %d --> %d\n", nnz_diag, nnz_diag_new, nnz_offd, nnz_offd_new); /* create CommPkg of Anew */ hypre_MatvecCommPkgCreate(Anew); *As = Anew; /* if (bdiaginv) { *bdiaginv = dense_all; } else { hypre_TFree(dense_all, HYPRE_MEMORY_HOST); } */ /* save diagonal blocks in A */ A->bdiag_size = blockSize; A->bdiaginv = dense_all; /* free workspace */ hypre_TFree(IPIV, HYPRE_MEMORY_HOST); hypre_TFree(dgetri_work, HYPRE_MEMORY_HOST); hypre_TFree(marker_diag, HYPRE_MEMORY_HOST); hypre_TFree(marker_newoffd, HYPRE_MEMORY_HOST); hypre_TFree(offd2new, HYPRE_MEMORY_HOST); hypre_CSRMatrixDestroy(A_ext); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParcsrGetExternalRowsInit *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParcsrGetExternalRowsInit( hypre_ParCSRMatrix *A, HYPRE_Int indices_len, HYPRE_BigInt *indices, hypre_ParCSRCommPkg *comm_pkg, HYPRE_Int want_data, void **request_ptr) { HYPRE_Int i, j, k; HYPRE_Int num_sends, num_rows_send, num_nnz_send, *send_i, num_recvs, num_rows_recv, num_nnz_recv, *recv_i, *send_jstarts, *recv_jstarts, *send_i_offset; HYPRE_BigInt *send_j, *recv_j; HYPRE_Complex *send_a = NULL, *recv_a = NULL; hypre_ParCSRCommPkg *comm_pkg_j; hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a; /* HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); */ /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* HYPRE_Int local_num_rows = hypre_CSRMatrixNumRows(A_diag); */ /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /* HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); */ /* HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A); */ HYPRE_BigInt first_col = hypre_ParCSRMatrixFirstColDiag(A); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_Int num_procs; HYPRE_Int my_id; void **vrequest; hypre_CSRMatrix *A_ext; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); /* number of sends (#procs) */ num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); /* number of rows to send */ num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); /* number of recvs (#procs) */ num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); /* number of rows to recv */ num_rows_recv = 
hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs); /* must be true if indices contains proper offd indices */ hypre_assert(indices_len == num_rows_recv); /* send_i/recv_i: * the arrays to send and recv: we first send and recv the row lengths */ send_i = hypre_TAlloc(HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST); recv_i = hypre_CTAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_HOST); /* fill the send array with row lengths */ for (i = 0, num_nnz_send = 0; i < num_rows_send; i++) { /* j: row index to send */ j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i); send_i[i] = A_diag_i[j + 1] - A_diag_i[j] + A_offd_i[j + 1] - A_offd_i[j]; num_nnz_send += send_i[i]; } /* send this array out: note the shift in recv_i by one (async) */ comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_i, recv_i + 1); /* prepare data to send out. overlap with the above commmunication */ send_j = hypre_TAlloc(HYPRE_BigInt, num_nnz_send, HYPRE_MEMORY_HOST); if (want_data) { send_a = hypre_TAlloc(HYPRE_Complex, num_nnz_send, HYPRE_MEMORY_HOST); } send_i_offset = hypre_TAlloc(HYPRE_Int, num_rows_send + 1, HYPRE_MEMORY_HOST); send_i_offset[0] = 0; hypre_TMemcpy(send_i_offset + 1, send_i, HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); /* prefix sum. TODO: OMP parallelization */ for (i = 1; i <= num_rows_send; i++) { send_i_offset[i] += send_i_offset[i - 1]; } hypre_assert(send_i_offset[num_rows_send] == num_nnz_send); /* pointers to each proc in send_j */ send_jstarts = hypre_TAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (i = 0; i <= num_sends; i++) { send_jstarts[i] = send_i_offset[hypre_ParCSRCommPkgSendMapStart(comm_pkg, i)]; } hypre_assert(send_jstarts[num_sends] == num_nnz_send); /* fill the CSR matrix: j and a */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE private(i,j,k) #endif for (i = 0; i < num_rows_send; i++) { HYPRE_Int i1 = send_i_offset[i]; j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i); /* open row j and fill ja and a to send */ for (k = A_diag_i[j]; k < A_diag_i[j + 1]; k++) { send_j[i1] = first_col + A_diag_j[k]; if (want_data) { send_a[i1] = A_diag_a[k]; } i1++; } if (num_procs > 1) { for (k = A_offd_i[j]; k < A_offd_i[j + 1]; k++) { send_j[i1] = col_map_offd_A[A_offd_j[k]]; if (want_data) { send_a[i1] = A_offd_a[k]; } i1++; } } hypre_assert(send_i_offset[i + 1] == i1); } /* finish the above communication: send_i/recv_i */ hypre_ParCSRCommHandleDestroy(comm_handle); /* adjust recv_i to ptrs */ for (i = 1; i <= num_rows_recv; i++) { recv_i[i] += recv_i[i - 1]; } num_nnz_recv = recv_i[num_rows_recv]; recv_j = hypre_CTAlloc(HYPRE_BigInt, num_nnz_recv, HYPRE_MEMORY_HOST); if (want_data) { recv_a = hypre_CTAlloc(HYPRE_Complex, num_nnz_recv, HYPRE_MEMORY_HOST); } recv_jstarts = hypre_CTAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST); for (i = 1; i <= num_recvs; i++) { j = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i); recv_jstarts[i] = recv_i[j]; } /* ready to send and recv: create a communication package for data */ comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm (comm_pkg_j) = comm; hypre_ParCSRCommPkgNumSends (comm_pkg_j) = num_sends; hypre_ParCSRCommPkgSendProcs (comm_pkg_j) = hypre_ParCSRCommPkgSendProcs(comm_pkg); hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = send_jstarts; hypre_ParCSRCommPkgNumRecvs (comm_pkg_j) = num_recvs; hypre_ParCSRCommPkgRecvProcs (comm_pkg_j) = hypre_ParCSRCommPkgRecvProcs(comm_pkg); 
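/* comm_pkg_j reuses the send/recv processor lists of comm_pkg, but its
 * map starts (send_jstarts above, recv_jstarts below) are offsets counted
 * in nonzeros rather than in rows, so entire CSR rows of column indices
 * (and, if requested, values) can be exchanged in a single pass */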
hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = recv_jstarts; /* init communication */ /* ja */ comm_handle_j = hypre_ParCSRCommHandleCreate(21, comm_pkg_j, send_j, recv_j); if (want_data) { /* a */ comm_handle_a = hypre_ParCSRCommHandleCreate(1, comm_pkg_j, send_a, recv_a); } else { comm_handle_a = NULL; } /* create A_ext */ A_ext = hypre_CSRMatrixCreate(num_rows_recv, hypre_ParCSRMatrixGlobalNumCols(A), num_nnz_recv); hypre_CSRMatrixMemoryLocation(A_ext) = HYPRE_MEMORY_HOST; hypre_CSRMatrixI (A_ext) = recv_i; hypre_CSRMatrixBigJ(A_ext) = recv_j; hypre_CSRMatrixData(A_ext) = recv_a; /* output */ vrequest = hypre_TAlloc(void *, 4, HYPRE_MEMORY_HOST); vrequest[0] = (void *) comm_handle_j; vrequest[1] = (void *) comm_handle_a; vrequest[2] = (void *) A_ext; vrequest[3] = (void *) comm_pkg_j; *request_ptr = (void *) vrequest; /* free */ hypre_TFree(send_i, HYPRE_MEMORY_HOST); hypre_TFree(send_i_offset, HYPRE_MEMORY_HOST); return hypre_error_flag; } hypre_CSRMatrix* hypre_ParcsrGetExternalRowsWait(void *vrequest) { void **request = (void **) vrequest; hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0]; hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1]; hypre_CSRMatrix *A_ext = (hypre_CSRMatrix *) request[2]; hypre_ParCSRCommPkg *comm_pkg_j = (hypre_ParCSRCommPkg *) request[3]; HYPRE_BigInt *send_j = (HYPRE_BigInt *) hypre_ParCSRCommHandleSendData( comm_handle_j); if (comm_handle_a) { HYPRE_Complex *send_a = (HYPRE_Complex *) hypre_ParCSRCommHandleSendData(comm_handle_a); hypre_ParCSRCommHandleDestroy(comm_handle_a); hypre_TFree(send_a, HYPRE_MEMORY_HOST); } hypre_ParCSRCommHandleDestroy(comm_handle_j); hypre_TFree(send_j, HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST); hypre_TFree(request, HYPRE_MEMORY_HOST); return A_ext; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixAdd: performs C = alpha*A + beta*B * * A and B are assumed to have the same row and column partitionings *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixAddHost( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, HYPRE_Complex beta, hypre_ParCSRMatrix *B, hypre_ParCSRMatrix **C_ptr ) { /* ParCSRMatrix data */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_BigInt num_rows_A = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols_A = hypre_ParCSRMatrixGlobalNumCols(A); /* HYPRE_BigInt num_rows_B = hypre_ParCSRMatrixGlobalNumRows(B); */ /* HYPRE_BigInt num_cols_B = hypre_ParCSRMatrixGlobalNumCols(B); */ /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *rownnz_diag_A = hypre_CSRMatrixRownnz(A_diag); HYPRE_Int num_rownnz_diag_A = hypre_CSRMatrixNumRownnz(A_diag); HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *rownnz_offd_A = hypre_CSRMatrixRownnz(A_offd); HYPRE_Int num_rownnz_offd_A = hypre_CSRMatrixNumRownnz(A_offd); HYPRE_Int num_rows_offd_A = hypre_CSRMatrixNumRows(A_offd); HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int *A2C_offd; /* diag part of B */ hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B); HYPRE_Int 
*rownnz_diag_B = hypre_CSRMatrixRownnz(B_diag); HYPRE_Int num_rownnz_diag_B = hypre_CSRMatrixNumRownnz(B_diag); HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag); /* HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag); */ /* off-diag part of B */ hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B); HYPRE_Int *rownnz_offd_B = hypre_CSRMatrixRownnz(B_offd); HYPRE_Int num_rownnz_offd_B = hypre_CSRMatrixNumRownnz(B_offd); HYPRE_Int num_rows_offd_B = hypre_CSRMatrixNumRows(B_offd); HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); HYPRE_Int *B2C_offd; /* C data */ hypre_ParCSRMatrix *C; hypre_CSRMatrix *C_diag; hypre_CSRMatrix *C_offd; HYPRE_BigInt *col_map_offd_C; HYPRE_Int *C_diag_i, *C_offd_i; HYPRE_Int *rownnz_diag_C = NULL; HYPRE_Int *rownnz_offd_C = NULL; HYPRE_Int num_rownnz_diag_C; HYPRE_Int num_rownnz_offd_C; HYPRE_Int num_rows_diag_C = num_rows_diag_A; HYPRE_Int num_cols_diag_C = num_cols_diag_A; HYPRE_Int num_rows_offd_C = num_rows_offd_A; HYPRE_Int num_cols_offd_C = num_cols_offd_A + num_cols_offd_B; HYPRE_Int *twspace; HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A); HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B); /* RL: TODO cannot guarantee, maybe should never assert hypre_assert(memory_location_A == memory_location_B); */ /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D, * not sure if this is the right thing to do. * Also, need something like this in other places * TODO */ HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B); HYPRE_ANNOTATE_FUNC_BEGIN; /* Allocate memory */ twspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads(), HYPRE_MEMORY_HOST); C_diag_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A + 1, memory_location_C); C_offd_i = hypre_CTAlloc(HYPRE_Int, num_rows_offd_A + 1, memory_location_C); col_map_offd_C = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST); /* Compute num_cols_offd_C, A2C_offd, and B2C_offd*/ A2C_offd = hypre_TAlloc(HYPRE_Int, num_cols_offd_A, HYPRE_MEMORY_HOST); B2C_offd = hypre_TAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST); hypre_union2(num_cols_offd_A, col_map_offd_A, num_cols_offd_B, col_map_offd_B, &num_cols_offd_C, col_map_offd_C, A2C_offd, B2C_offd); /* Set nonzero rows data of diag_C */ num_rownnz_diag_C = num_rows_diag_A; if ((num_rownnz_diag_A < num_rows_diag_A) && (num_rownnz_diag_B < num_rows_diag_B)) { hypre_MergeOrderedArrays( num_rownnz_diag_A, rownnz_diag_A, num_rownnz_diag_B, rownnz_diag_B, &num_rownnz_diag_C, &rownnz_diag_C); } /* Set nonzero rows data of offd_C */ num_rownnz_offd_C = num_rows_offd_A; if ((num_rownnz_offd_A < num_rows_offd_A) && (num_rownnz_offd_B < num_rows_offd_B)) { hypre_MergeOrderedArrays( num_rownnz_offd_A, rownnz_offd_A, num_rownnz_offd_B, rownnz_offd_B, &num_rownnz_offd_C, &rownnz_offd_C); } /* Set diag_C */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int ii, num_threads; HYPRE_Int size, rest, ns, ne; HYPRE_Int *marker_diag; HYPRE_Int *marker_offd; ii = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); /*----------------------------------------------------------------------- * Compute C_diag = alpha*A_diag + beta*B_diag *-----------------------------------------------------------------------*/ size = num_rownnz_diag_C / num_threads; rest = num_rownnz_diag_C - size * num_threads; if (ii < rest) { ns = ii * size + ii; ne = (ii + 1) * size + ii + 1; } else { ns = ii * size + 
rest; ne = (ii + 1) * size + rest; } marker_diag = hypre_TAlloc(HYPRE_Int, num_cols_diag_A, HYPRE_MEMORY_HOST); hypre_CSRMatrixAddFirstPass(ns, ne, twspace, marker_diag, NULL, NULL, A_diag, B_diag, num_rows_diag_C, num_rownnz_diag_C, num_cols_diag_C, rownnz_diag_C, memory_location_C, C_diag_i, &C_diag); hypre_CSRMatrixAddSecondPass(ns, ne, twspace, marker_diag, NULL, NULL, rownnz_diag_C, alpha, beta, A_diag, B_diag, C_diag); hypre_TFree(marker_diag, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Compute C_offd = alpha*A_offd + beta*B_offd *-----------------------------------------------------------------------*/ size = num_rownnz_offd_C / num_threads; rest = num_rownnz_offd_C - size * num_threads; if (ii < rest) { ns = ii * size + ii; ne = (ii + 1) * size + ii + 1; } else { ns = ii * size + rest; ne = (ii + 1) * size + rest; } marker_offd = hypre_TAlloc(HYPRE_Int, num_cols_offd_C, HYPRE_MEMORY_HOST); hypre_CSRMatrixAddFirstPass(ns, ne, twspace, marker_offd, A2C_offd, B2C_offd, A_offd, B_offd, num_rows_offd_C, num_rownnz_offd_C, num_cols_offd_C, rownnz_offd_C, memory_location_C, C_offd_i, &C_offd); hypre_CSRMatrixAddSecondPass(ns, ne, twspace, marker_offd, A2C_offd, B2C_offd, rownnz_offd_C, alpha, beta, A_offd, B_offd, C_offd); hypre_TFree(marker_offd, HYPRE_MEMORY_HOST); } /* end of omp parallel region */ /* Free memory */ hypre_TFree(twspace, HYPRE_MEMORY_HOST); hypre_TFree(A2C_offd, HYPRE_MEMORY_HOST); hypre_TFree(B2C_offd, HYPRE_MEMORY_HOST); /* Create ParCSRMatrix C */ C = hypre_ParCSRMatrixCreate(comm, num_rows_A, num_cols_A, hypre_ParCSRMatrixRowStarts(A), hypre_ParCSRMatrixColStarts(A), num_cols_offd_C, hypre_CSRMatrixNumNonzeros(C_diag), hypre_CSRMatrixNumNonzeros(C_offd)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(C)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(C)); hypre_ParCSRMatrixDiag(C) = C_diag; hypre_ParCSRMatrixOffd(C) = C_offd; hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C; hypre_ParCSRMatrixSetNumNonzeros(C); hypre_ParCSRMatrixDNumNonzeros(C) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(C); /* create CommPkg of C */ hypre_MatvecCommPkgCreate(C); *C_ptr = C; HYPRE_ANNOTATE_FUNC_END; return hypre_error_flag; } HYPRE_Int hypre_ParCSRMatrixAdd( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, HYPRE_Complex beta, hypre_ParCSRMatrix *B, hypre_ParCSRMatrix **C_ptr ) { hypre_assert(hypre_ParCSRMatrixGlobalNumRows(A) == hypre_ParCSRMatrixGlobalNumRows(B)); hypre_assert(hypre_ParCSRMatrixGlobalNumCols(A) == hypre_ParCSRMatrixGlobalNumCols(B)); hypre_assert(hypre_ParCSRMatrixNumRows(A) == hypre_ParCSRMatrixNumRows(B)); hypre_assert(hypre_ParCSRMatrixNumCols(A) == hypre_ParCSRMatrixNumCols(B)); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if ( hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(A), hypre_ParCSRMatrixMemoryLocation(B) ) == HYPRE_EXEC_DEVICE ) { hypre_ParCSRMatrixAddDevice(alpha, A, beta, B, C_ptr); } else #endif { hypre_ParCSRMatrixAddHost(alpha, A, beta, B, C_ptr); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixFnorm *--------------------------------------------------------------------------*/ HYPRE_Real hypre_ParCSRMatrixFnorm( hypre_ParCSRMatrix *A ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_Real f_diag, f_offd, local_result, result; f_diag = hypre_CSRMatrixFnorm(hypre_ParCSRMatrixDiag(A)); f_offd = hypre_CSRMatrixFnorm(hypre_ParCSRMatrixOffd(A)); local_result = f_diag * f_diag + f_offd * f_offd; 
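/* local_result holds ||A_diag||_F^2 + ||A_offd||_F^2 for this rank; the
 * global Frobenius norm is the square root of the sum of these
 * contributions over all ranks, i.e.
 * ||A||_F = sqrt( sum_p ( ||A_diag^(p)||_F^2 + ||A_offd^(p)||_F^2 ) ) */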
hypre_MPI_Allreduce(&local_result, &result, 1, HYPRE_MPI_REAL, hypre_MPI_SUM, comm); return sqrt(result); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixInfNorm * * Computes the infinity norm of A: * * norm = max_{i} sum_{j} |A_{ij}| *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixInfNorm( hypre_ParCSRMatrix *A, HYPRE_Real *norm ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); /* Local variables */ HYPRE_Int i, j; HYPRE_Real maxsum = 0.0; HYPRE_Real rowsum; #ifdef _MSC_VER #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,j,rowsum) #endif { HYPRE_Real maxsum_local; maxsum_local = 0.0; #ifdef HYPRE_USING_OPENMP #pragma omp for HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rows_diag_A; i++) { rowsum = 0.0; for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { rowsum += hypre_cabs(A_diag_a[j]); } for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { rowsum += hypre_cabs(A_offd_a[j]); } maxsum_local = hypre_max(maxsum_local, rowsum); } #ifdef HYPRE_USING_OPENMP #pragma omp critical #endif { maxsum = hypre_max(maxsum, maxsum_local); } } #else #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,rowsum) reduction(max:maxsum) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rows_diag_A; i++) { rowsum = 0.0; for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { rowsum += hypre_cabs(A_diag_a[j]); } for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { rowsum += hypre_cabs(A_offd_a[j]); } maxsum = hypre_max(maxsum, rowsum); } #endif hypre_MPI_Allreduce(&maxsum, norm, 1, HYPRE_MPI_REAL, hypre_MPI_MAX, comm); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ExchangeExternalRowsInit *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ExchangeExternalRowsInit( hypre_CSRMatrix *B_ext, hypre_ParCSRCommPkg *comm_pkg_A, void **request_ptr) { MPI_Comm comm = hypre_ParCSRCommPkgComm(comm_pkg_A); HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A); HYPRE_Int *recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A); HYPRE_Int *recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A); HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg_A); HYPRE_Int *send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg_A); HYPRE_Int *send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A); HYPRE_Int num_elmts_send = send_map_starts[num_sends]; HYPRE_Int num_elmts_recv = recv_vec_starts[num_recvs]; HYPRE_Int *B_ext_i = B_ext ? hypre_CSRMatrixI(B_ext) : NULL; HYPRE_BigInt *B_ext_j = B_ext ? hypre_CSRMatrixBigJ(B_ext) : NULL; HYPRE_Complex *B_ext_data = B_ext ? hypre_CSRMatrixData(B_ext) : NULL; HYPRE_Int B_ext_ncols = B_ext ? hypre_CSRMatrixNumCols(B_ext) : 0; HYPRE_Int B_ext_nrows = B_ext ? 
hypre_CSRMatrixNumRows(B_ext) : 0; HYPRE_Int *B_ext_rownnz = hypre_CTAlloc(HYPRE_Int, B_ext_nrows, HYPRE_MEMORY_HOST); hypre_assert(num_elmts_recv == B_ext_nrows); /* output matrix */ hypre_CSRMatrix *B_int; HYPRE_Int B_int_nrows = num_elmts_send; HYPRE_Int B_int_ncols = B_ext_ncols; HYPRE_Int *B_int_i = hypre_TAlloc(HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_HOST); HYPRE_BigInt *B_int_j = NULL; HYPRE_Complex *B_int_data = NULL; HYPRE_Int B_int_nnz; hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a; hypre_ParCSRCommPkg *comm_pkg_j; HYPRE_Int *jdata_recv_vec_starts; HYPRE_Int *jdata_send_map_starts; HYPRE_Int i; HYPRE_Int num_procs; void **vrequest; hypre_MPI_Comm_size(comm, &num_procs); jdata_send_map_starts = hypre_TAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST); /*-------------------------------------------------------------------------- * B_ext_rownnz contains the number of elements of row j * (to be determined through send_map_elmnts on the receiving end) *--------------------------------------------------------------------------*/ for (i = 0; i < B_ext_nrows; i++) { B_ext_rownnz[i] = B_ext_i[i + 1] - B_ext_i[i]; } /*-------------------------------------------------------------------------- * initialize communication: send/recv the row nnz * (note the use of comm_pkg_A, mode 12, as in transpose matvec *--------------------------------------------------------------------------*/ comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg_A, B_ext_rownnz, B_int_i + 1); jdata_recv_vec_starts = hypre_TAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST); jdata_recv_vec_starts[0] = 0; for (i = 1; i <= num_recvs; i++) { jdata_recv_vec_starts[i] = B_ext_i[recv_vec_starts[i]]; } comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(comm_pkg_j) = comm; hypre_ParCSRCommPkgNumSends(comm_pkg_j) = num_recvs; hypre_ParCSRCommPkgNumRecvs(comm_pkg_j) = num_sends; hypre_ParCSRCommPkgSendProcs(comm_pkg_j) = recv_procs; hypre_ParCSRCommPkgRecvProcs(comm_pkg_j) = send_procs; hypre_ParCSRCommHandleDestroy(comm_handle); /*-------------------------------------------------------------------------- * compute B_int: row nnz to row ptrs *--------------------------------------------------------------------------*/ B_int_i[0] = 0; for (i = 1; i <= B_int_nrows; i++) { B_int_i[i] += B_int_i[i - 1]; } B_int_nnz = B_int_i[B_int_nrows]; B_int_j = hypre_TAlloc(HYPRE_BigInt, B_int_nnz, HYPRE_MEMORY_HOST); B_int_data = hypre_TAlloc(HYPRE_Complex, B_int_nnz, HYPRE_MEMORY_HOST); for (i = 0; i <= num_sends; i++) { jdata_send_map_starts[i] = B_int_i[send_map_starts[i]]; } /* note the order of send/recv is reversed */ hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = jdata_send_map_starts; hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = jdata_recv_vec_starts; /* send/recv CSR rows */ comm_handle_a = hypre_ParCSRCommHandleCreate( 1, comm_pkg_j, B_ext_data, B_int_data); comm_handle_j = hypre_ParCSRCommHandleCreate(21, comm_pkg_j, B_ext_j, B_int_j); /* create CSR */ B_int = hypre_CSRMatrixCreate(B_int_nrows, B_int_ncols, B_int_nnz); hypre_CSRMatrixMemoryLocation(B_int) = HYPRE_MEMORY_HOST; hypre_CSRMatrixI(B_int) = B_int_i; hypre_CSRMatrixBigJ(B_int) = B_int_j; hypre_CSRMatrixData(B_int) = B_int_data; /* output */ vrequest = hypre_TAlloc(void *, 4, HYPRE_MEMORY_HOST); vrequest[0] = (void *) comm_handle_j; vrequest[1] = (void *) comm_handle_a; vrequest[2] = (void *) B_int; vrequest[3] = (void *) comm_pkg_j; *request_ptr = (void *) vrequest; hypre_TFree(B_ext_rownnz, 
HYPRE_MEMORY_HOST); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ExchangeExternalRowsWait *--------------------------------------------------------------------------*/ hypre_CSRMatrix* hypre_ExchangeExternalRowsWait(void *vrequest) { void **request = (void **) vrequest; hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0]; hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1]; hypre_CSRMatrix *B_int = (hypre_CSRMatrix *) request[2]; hypre_ParCSRCommPkg *comm_pkg_j = (hypre_ParCSRCommPkg *) request[3]; /* communication done */ hypre_ParCSRCommHandleDestroy(comm_handle_a); hypre_ParCSRCommHandleDestroy(comm_handle_j); hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST); hypre_TFree(request, HYPRE_MEMORY_HOST); return B_int; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixExtractSubmatrixFC * * extract submatrix A_{FF}, A_{FC}, A_{CF} or A_{CC} * char job[2] = "FF", "FC", "CF" or "CC" *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixExtractSubmatrixFC( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *cpts_starts, const char *job, hypre_ParCSRMatrix **B_ptr, HYPRE_Real strength_thresh) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); //HYPRE_Int *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); hypre_ParCSRMatrix *B; hypre_CSRMatrix *B_diag, *B_offd; HYPRE_Real *B_maxel_row; HYPRE_Int *B_diag_i, *B_diag_j, *B_offd_i, *B_offd_j; HYPRE_Complex *B_diag_a, *B_offd_a; HYPRE_Int num_cols_B_offd; HYPRE_BigInt *col_map_offd_B; HYPRE_Int i, j, k, k1, k2; HYPRE_BigInt B_nrow_global, B_ncol_global; HYPRE_Int A_nlocal, B_nrow_local, B_ncol_local, B_nnz_diag, B_nnz_offd; HYPRE_BigInt total_global_fpts, total_global_cpts, fpts_starts[2]; HYPRE_Int nf_local, nc_local; HYPRE_BigInt big_nf_local; HYPRE_Int row_set, col_set; HYPRE_BigInt *B_row_starts, *B_col_starts, B_first_col; HYPRE_Int my_id, num_procs; HYPRE_Int *sub_idx_diag; HYPRE_BigInt *sub_idx_offd; HYPRE_Int num_sends; HYPRE_BigInt *send_buf_data; /* MPI size and rank*/ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); row_set = job[0] == 'F' ? -1 : 1; col_set = job[1] == 'F' ? 
-1 : 1; A_nlocal = hypre_CSRMatrixNumRows(A_diag); /*-------------- global number of C points and local C points * assuming cpts_starts is given */ if (row_set == 1 || col_set == 1) { if (my_id == (num_procs - 1)) { total_global_cpts = cpts_starts[1]; } hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm); nc_local = (HYPRE_Int)(cpts_starts[1] - cpts_starts[0]); } /*-------------- global number of F points, local F points, and F starts */ if (row_set == -1 || col_set == -1) { nf_local = 0; for (i = 0; i < A_nlocal; i++) { if (CF_marker[i] < 0) { nf_local++; } } big_nf_local = (HYPRE_BigInt) nf_local; hypre_MPI_Scan(&big_nf_local, fpts_starts + 1, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); fpts_starts[0] = fpts_starts[1] - nf_local; if (my_id == num_procs - 1) { total_global_fpts = fpts_starts[1]; } hypre_MPI_Bcast(&total_global_fpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm); } if (row_set == -1 && col_set == -1) { /* FF */ B_nrow_local = nf_local; B_ncol_local = nf_local; B_nrow_global = total_global_fpts; B_ncol_global = total_global_fpts; B_row_starts = B_col_starts = fpts_starts; } else if (row_set == -1 && col_set == 1) { /* FC */ B_nrow_local = nf_local; B_ncol_local = nc_local; B_nrow_global = total_global_fpts; B_ncol_global = total_global_cpts; B_row_starts = fpts_starts; B_col_starts = cpts_starts; } else if (row_set == 1 && col_set == -1) { /* CF */ B_nrow_local = nc_local; B_ncol_local = nf_local; B_nrow_global = total_global_cpts; B_ncol_global = total_global_fpts; B_row_starts = cpts_starts; B_col_starts = fpts_starts; } else { /* CC */ B_nrow_local = nc_local; B_ncol_local = nc_local; B_nrow_global = total_global_cpts; B_ncol_global = total_global_cpts; B_row_starts = B_col_starts = cpts_starts; } /* global index of my first col */ B_first_col = B_col_starts[0]; /* sub_idx_diag: [local] mapping from F+C to F/C, if not selected, be -1 */ sub_idx_diag = hypre_TAlloc(HYPRE_Int, A_nlocal, HYPRE_MEMORY_HOST); for (i = 0, k = 0; i < A_nlocal; i++) { HYPRE_Int CF_i = CF_marker[i] > 0 ? 1 : -1; if (CF_i == col_set) { sub_idx_diag[i] = k++; } else { sub_idx_diag[i] = -1; } } hypre_assert(k == B_ncol_local); num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); send_buf_data = hypre_TAlloc(HYPRE_BigInt, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); k = 0; for (i = 0; i < num_sends; i++) { /* start pos of elements sent to send_proc[i] */ HYPRE_Int si = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); HYPRE_Int ei = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); /* loop through all elems to send_proc[i] */ for (j = si; j < ei; j++) { /* j1: local idx */ HYPRE_BigInt j1 = sub_idx_diag[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; if (j1 != -1) { /* adjust j1 to B global idx */ j1 += B_first_col; } send_buf_data[k++] = j1; } } hypre_assert(k == hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); /* recv buffer */ sub_idx_offd = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST); /* create a handle to start communication. 
11: for integer */ comm_handle = hypre_ParCSRCommHandleCreate(21, comm_pkg, send_buf_data, sub_idx_offd); /* destroy the handle to finish communication */ hypre_ParCSRCommHandleDestroy(comm_handle); for (i = 0, num_cols_B_offd = 0; i < num_cols_A_offd; i++) { if (sub_idx_offd[i] != -1) { num_cols_B_offd ++; } } col_map_offd_B = hypre_TAlloc(HYPRE_BigInt, num_cols_B_offd, HYPRE_MEMORY_HOST); for (i = 0, k = 0; i < num_cols_A_offd; i++) { if (sub_idx_offd[i] != -1) { col_map_offd_B[k] = sub_idx_offd[i]; sub_idx_offd[i] = k++; } } hypre_assert(k == num_cols_B_offd); /* count nnz and set ia */ B_nnz_diag = B_nnz_offd = 0; B_maxel_row = hypre_TAlloc(HYPRE_Real, B_nrow_local, HYPRE_MEMORY_HOST); B_diag_i = hypre_TAlloc(HYPRE_Int, B_nrow_local + 1, HYPRE_MEMORY_HOST); B_offd_i = hypre_TAlloc(HYPRE_Int, B_nrow_local + 1, HYPRE_MEMORY_HOST); B_diag_i[0] = B_offd_i[0] = 0; for (i = 0, k = 0; i < A_nlocal; i++) { HYPRE_Int CF_i = CF_marker[i] > 0 ? 1 : -1; if (CF_i != row_set) { continue; } k++; // Get max abs-value element of this row HYPRE_Real temp_max = 0; if (strength_thresh > 0) { for (j = A_diag_i[i] + 1; j < A_diag_i[i + 1]; j++) { if (hypre_cabs(A_diag_a[j]) > temp_max) { temp_max = hypre_cabs(A_diag_a[j]); } } for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { if (hypre_cabs(A_offd_a[j]) > temp_max) { temp_max = hypre_cabs(A_offd_a[j]); } } } B_maxel_row[k - 1] = temp_max; // add one for diagonal element j = A_diag_i[i]; if (sub_idx_diag[A_diag_j[j]] != -1) { B_nnz_diag++; } // Count nnzs larger than tolerance times max row element for (j = A_diag_i[i] + 1; j < A_diag_i[i + 1]; j++) { if ( (sub_idx_diag[A_diag_j[j]] != -1) && (hypre_cabs(A_diag_a[j]) > (strength_thresh * temp_max)) ) { B_nnz_diag++; } } for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { if ( (sub_idx_offd[A_offd_j[j]] != -1) && (hypre_cabs(A_offd_a[j]) > (strength_thresh * temp_max)) ) { B_nnz_offd++; } } B_diag_i[k] = B_nnz_diag; B_offd_i[k] = B_nnz_offd; } hypre_assert(k == B_nrow_local); B_diag_j = hypre_TAlloc(HYPRE_Int, B_nnz_diag, HYPRE_MEMORY_HOST); B_diag_a = hypre_TAlloc(HYPRE_Complex, B_nnz_diag, HYPRE_MEMORY_HOST); B_offd_j = hypre_TAlloc(HYPRE_Int, B_nnz_offd, HYPRE_MEMORY_HOST); B_offd_a = hypre_TAlloc(HYPRE_Complex, B_nnz_offd, HYPRE_MEMORY_HOST); for (i = 0, k = 0, k1 = 0, k2 = 0; i < A_nlocal; i++) { HYPRE_Int CF_i = CF_marker[i] > 0 ? 
1 : -1; if (CF_i != row_set) { continue; } HYPRE_Real maxel = B_maxel_row[k]; k++; for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { HYPRE_Int j1 = sub_idx_diag[A_diag_j[j]]; if ( (j1 != -1) && ( (hypre_cabs(A_diag_a[j]) > (strength_thresh * maxel)) || j == A_diag_i[i] ) ) { B_diag_j[k1] = j1; B_diag_a[k1] = A_diag_a[j]; k1++; } } for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { HYPRE_Int j1 = sub_idx_offd[A_offd_j[j]]; if ((j1 != -1) && (hypre_cabs(A_offd_a[j]) > (strength_thresh * maxel))) { hypre_assert(j1 >= 0 && j1 < num_cols_B_offd); B_offd_j[k2] = j1; B_offd_a[k2] = A_offd_a[j]; k2++; } } } hypre_assert(k1 == B_nnz_diag && k2 == B_nnz_offd); /* ready to create B = A(rowset, colset) */ B = hypre_ParCSRMatrixCreate(comm, B_nrow_global, B_ncol_global, B_row_starts, B_col_starts, num_cols_B_offd, B_nnz_diag, B_nnz_offd); B_diag = hypre_ParCSRMatrixDiag(B); hypre_CSRMatrixMemoryLocation(B_diag) = HYPRE_MEMORY_HOST; hypre_CSRMatrixData(B_diag) = B_diag_a; hypre_CSRMatrixI(B_diag) = B_diag_i; hypre_CSRMatrixJ(B_diag) = B_diag_j; B_offd = hypre_ParCSRMatrixOffd(B); hypre_CSRMatrixMemoryLocation(B_offd) = HYPRE_MEMORY_HOST; hypre_CSRMatrixData(B_offd) = B_offd_a; hypre_CSRMatrixI(B_offd) = B_offd_i; hypre_CSRMatrixJ(B_offd) = B_offd_j; hypre_ParCSRMatrixColMapOffd(B) = col_map_offd_B; hypre_ParCSRMatrixSetNumNonzeros(B); hypre_ParCSRMatrixDNumNonzeros(B) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(B); hypre_MatvecCommPkgCreate(B); *B_ptr = B; hypre_TFree(B_maxel_row, HYPRE_MEMORY_HOST); hypre_TFree(send_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(sub_idx_diag, HYPRE_MEMORY_HOST); hypre_TFree(sub_idx_offd, HYPRE_MEMORY_HOST); return hypre_error_flag; } /* drop the entries that are not on the diagonal and smaller than: * type 0: tol (TODO) * type 1: tol*(1-norm of row) * type 2: tol*(2-norm of row) * type -1: tol*(infinity norm of row) */ HYPRE_Int hypre_ParCSRMatrixDropSmallEntriesHost( hypre_ParCSRMatrix *A, HYPRE_Real tol, HYPRE_Int type) { HYPRE_Int i, j, k, nnz_diag, nnz_offd, A_diag_i_i, A_offd_i_i; MPI_Comm comm = hypre_ParCSRMatrixComm(A); /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int *marker_offd = NULL; HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int nrow_local = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int my_id, num_procs; /* MPI size and rank*/ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); nnz_diag = nnz_offd = A_diag_i_i = A_offd_i_i = 0; for (i = 0; i < nrow_local; i++) { /* compute row norm */ HYPRE_Real row_nrm = 0.0; for (j = A_diag_i_i; j < A_diag_i[i + 1]; j++) { HYPRE_Complex v = A_diag_a[j]; if (type == 1) { row_nrm += fabs(v); } else if (type == 2) { row_nrm += v * v; } else { row_nrm = hypre_max(row_nrm, fabs(v)); } } if (num_procs > 1) { for (j = A_offd_i_i; j < A_offd_i[i + 1]; j++) { HYPRE_Complex v = A_offd_a[j]; if (type == 1) { row_nrm += fabs(v); } else if (type == 2) { row_nrm += v * v; } else { row_nrm = 
hypre_max(row_nrm, fabs(v)); } } } if (type == 2) { row_nrm = sqrt(row_nrm); } /* drop small entries based on tol and row norm */ for (j = A_diag_i_i; j < A_diag_i[i + 1]; j++) { HYPRE_Int col = A_diag_j[j]; HYPRE_Complex val = A_diag_a[j]; if (i == col || fabs(val) >= tol * row_nrm) { A_diag_j[nnz_diag] = col; A_diag_a[nnz_diag] = val; nnz_diag ++; } } if (num_procs > 1) { for (j = A_offd_i_i; j < A_offd_i[i + 1]; j++) { HYPRE_Int col = A_offd_j[j]; HYPRE_Complex val = A_offd_a[j]; /* in normal cases: diagonal entry should not * appear in A_offd (but this can still be possible) */ if (i + first_row == col_map_offd_A[col] || fabs(val) >= tol * row_nrm) { if (0 == marker_offd[col]) { marker_offd[col] = 1; } A_offd_j[nnz_offd] = col; A_offd_a[nnz_offd] = val; nnz_offd ++; } } } A_diag_i_i = A_diag_i[i + 1]; A_offd_i_i = A_offd_i[i + 1]; A_diag_i[i + 1] = nnz_diag; A_offd_i[i + 1] = nnz_offd; } hypre_CSRMatrixNumNonzeros(A_diag) = nnz_diag; hypre_CSRMatrixNumNonzeros(A_offd) = nnz_offd; hypre_ParCSRMatrixSetNumNonzeros(A); hypre_ParCSRMatrixDNumNonzeros(A) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(A); for (i = 0, k = 0; i < num_cols_A_offd; i++) { if (marker_offd[i]) { col_map_offd_A[k] = col_map_offd_A[i]; marker_offd[i] = k++; } } /* num_cols_A_offd = k; */ hypre_CSRMatrixNumCols(A_offd) = k; for (i = 0; i < nnz_offd; i++) { A_offd_j[i] = marker_offd[A_offd_j[i]]; } if ( hypre_ParCSRMatrixCommPkg(A) ) { hypre_MatvecCommPkgDestroy( hypre_ParCSRMatrixCommPkg(A) ); } hypre_MatvecCommPkgCreate(A); hypre_TFree(marker_offd, HYPRE_MEMORY_HOST); return hypre_error_flag; } /* drop the entries that are not on the diagonal and smaller than * type 0: tol * type 1: tol*(1-norm of row) * type 2: tol*(2-norm of row) * type -1: tol*(infinity norm of row) * NOTE: some type options above unavailable on either host or device */ HYPRE_Int hypre_ParCSRMatrixDropSmallEntries( hypre_ParCSRMatrix *A, HYPRE_Real tol, HYPRE_Int type) { if (tol <= 0.0) { return hypre_error_flag; } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_GpuProfilingPushRange("ParCSRMatrixDropSmallEntries"); #endif HYPRE_Int ierr = 0; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) ); if (exec == HYPRE_EXEC_DEVICE) { ierr = hypre_ParCSRMatrixDropSmallEntriesDevice(A, tol, type); } else #endif { ierr = hypre_ParCSRMatrixDropSmallEntriesHost(A, tol, type); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_GpuProfilingPopRange(); #endif return ierr; } /* Scale ParCSR matrix A = scalar * A * A: the target CSR matrix * scalar: real number */ HYPRE_Int hypre_ParCSRMatrixScale(hypre_ParCSRMatrix *A, HYPRE_Complex scalar) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); hypre_CSRMatrixScale(A_diag, scalar); hypre_CSRMatrixScale(A_offd, scalar); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixReorder: * * Reorders the column and data arrays of a the diagonal component of a square * ParCSR matrix, such that the first entry in each row is the diagonal one. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixReorder(hypre_ParCSRMatrix *A) { HYPRE_BigInt nrows_A = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt ncols_A = hypre_ParCSRMatrixGlobalNumCols(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); if (nrows_A != ncols_A) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, " Error! Matrix should be square!\n"); return hypre_error_flag; } hypre_CSRMatrixReorder(A_diag); return hypre_error_flag; }
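/*--------------------------------------------------------------------------
 * Standalone sketch (not part of hypre; all names below are illustrative) of
 * the dropping rule implemented in hypre_ParCSRMatrixDropSmallEntriesHost
 * above: keep the diagonal entry of every row and any entry a_ij with
 * |a_ij| >= tol * ||row i||, where the norm is the 1-norm (type 1), the
 * 2-norm (type 2) or the infinity norm (any other type). The CSR arrays are
 * compacted in place, mirroring the nnz_diag/nnz_offd counters above.
 *--------------------------------------------------------------------------*/
#include <math.h>

void csr_drop_small(int nrows, int *Ai, int *Aj, double *Ax,
                    double tol, int type)
{
   int nnz = 0, row_start = 0;          /* row_start tracks the old Ai[i] */
   for (int i = 0; i < nrows; i++)
   {
      /* row norm over the original entries of row i */
      double nrm = 0.0;
      for (int jj = row_start; jj < Ai[i + 1]; jj++)
      {
         double v = fabs(Ax[jj]);
         if      (type == 1) { nrm += v; }
         else if (type == 2) { nrm += v * v; }
         else                { nrm = nrm > v ? nrm : v; }
      }
      if (type == 2) { nrm = sqrt(nrm); }

      /* keep the diagonal and every entry above the row threshold */
      for (int jj = row_start; jj < Ai[i + 1]; jj++)
      {
         if (Aj[jj] == i || fabs(Ax[jj]) >= tol * nrm)
         {
            Aj[nnz] = Aj[jj];
            Ax[nnz] = Ax[jj];
            nnz++;
         }
      }
      row_start = Ai[i + 1];            /* old row end, before overwriting */
      Ai[i + 1] = nnz;                  /* new row pointer */
   }
}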
GB_unop__identity_fp64_fc32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__identity_fp64_fc32 // op(A') function: GB_unop_tran__identity_fp64_fc32 // C type: double // A type: GxB_FC32_t // cast: double cij = (double) crealf (aij) // unaryop: cij = aij #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ double z = (double) crealf (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = (double) crealf (aij) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__identity_fp64_fc32 ( double *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; double z = (double) crealf (aij) ; Cx [p] = z ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__identity_fp64_fc32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
pr59917-1.c
/* PR middle-end/59917 */ /* { dg-do compile } */ /* { dg-options "-O2 -fopenmp" } */ struct J { long buf[8]; }; extern int setjmp (struct J[1]); extern struct J j[1]; void foo (int); void bar (void) { if (setjmp (j) == 0) { int k; foo (-1); #pragma omp parallel for (k = 0; k < 10; ++k) foo (k); foo (-2); } }
8855.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "covariance.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < M; i++) for (j = 0; j < N; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_covariance(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m)) { int i, j, j1, j2; #pragma scop /* Determine mean of column vectors of input data matrix */ { for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Center the column vectors. */ for (i = 0; i < _PB_N; i++) { #pragma omp parallel for schedule(dynamic, 1) num_threads(1) for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; } } /* Calculate the m * m covariance matrix. */ for (j1 = 0; j1 < _PB_M; j1++) { #pragma omp parallel for schedule(dynamic, 1) num_threads(1) for (j2 = j1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += data[i][j1] * data[i][j2]; symmat[j2][j1] = symmat[j1][j2]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_covariance (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); return 0; }
csr_matvec.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Matvec functions for hypre_CSRMatrix class. * *****************************************************************************/ #include "seq_mv.h" /*-------------------------------------------------------------------------- * hypre_CSRMatrixMatvec *--------------------------------------------------------------------------*/ /* y[offset:end] = alpha*A[offset:end,:]*x + beta*b[offset:end] */ HYPRE_Int hypre_CSRMatrixMatvecOutOfPlaceHost( HYPRE_Complex alpha, hypre_CSRMatrix *A, hypre_Vector *x, HYPRE_Complex beta, hypre_Vector *b, hypre_Vector *y, HYPRE_Int offset ) { HYPRE_Complex *A_data = hypre_CSRMatrixData(A); HYPRE_Int *A_i = hypre_CSRMatrixI(A) + offset; HYPRE_Int *A_j = hypre_CSRMatrixJ(A); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A) - offset; HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A); /*HYPRE_Int num_nnz = hypre_CSRMatrixNumNonzeros(A);*/ HYPRE_Int *A_rownnz = hypre_CSRMatrixRownnz(A); HYPRE_Int num_rownnz = hypre_CSRMatrixNumRownnz(A); HYPRE_Complex *x_data = hypre_VectorData(x); HYPRE_Complex *b_data = hypre_VectorData(b) + offset; HYPRE_Complex *y_data = hypre_VectorData(y) + offset; HYPRE_Int x_size = hypre_VectorSize(x); HYPRE_Int b_size = hypre_VectorSize(b) - offset; HYPRE_Int y_size = hypre_VectorSize(y) - offset; HYPRE_Int num_vectors = hypre_VectorNumVectors(x); HYPRE_Int idxstride_y = hypre_VectorIndexStride(y); HYPRE_Int vecstride_y = hypre_VectorVectorStride(y); /*HYPRE_Int idxstride_b = hypre_VectorIndexStride(b); HYPRE_Int vecstride_b = hypre_VectorVectorStride(b);*/ HYPRE_Int idxstride_x = hypre_VectorIndexStride(x); HYPRE_Int vecstride_x = hypre_VectorVectorStride(x); HYPRE_Complex temp, tempx; HYPRE_Int i, j, jj, m, ierr=0; HYPRE_Real xpar=0.7; hypre_Vector *x_tmp = NULL; /*--------------------------------------------------------------------- * Check for size compatibility. Matvec returns ierr = 1 if * length of X doesn't equal the number of columns of A, * ierr = 2 if the length of Y doesn't equal the number of rows * of A, and ierr = 3 if both are true. * * Because temporary vectors are often used in Matvec, none of * these conditions terminates processing, and the ierr flag * is informational only. 
*--------------------------------------------------------------------*/ hypre_assert( num_vectors == hypre_VectorNumVectors(y) ); hypre_assert( num_vectors == hypre_VectorNumVectors(b) ); if (num_cols != x_size) ierr = 1; if (num_rows != y_size || num_rows != b_size) ierr = 2; if (num_cols != x_size && (num_rows != y_size || num_rows != b_size)) ierr = 3; /*----------------------------------------------------------------------- * Do (alpha == 0.0) computation - RDF: USE MACHINE EPS *-----------------------------------------------------------------------*/ if (alpha == 0.0) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rows*num_vectors; i++) y_data[i] = beta*b_data[i]; #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MATVEC] += hypre_MPI_Wtime() - time_begin; #endif return ierr; } if (x == y) { x_tmp = hypre_SeqVectorCloneDeep(x); x_data = hypre_VectorData(x_tmp); } /*----------------------------------------------------------------------- * y = (beta/alpha)*y *-----------------------------------------------------------------------*/ temp = beta / alpha; /* use rownnz pointer to do the A*x multiplication when num_rownnz is smaller than num_rows */ if (num_rownnz < xpar*(num_rows) || num_vectors > 1) { /*----------------------------------------------------------------------- * y = (beta/alpha)*y *-----------------------------------------------------------------------*/ if (temp != 1.0) { if (temp == 0.0) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rows*num_vectors; i++) y_data[i] = 0.0; } else { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rows*num_vectors; i++) y_data[i] = b_data[i]*temp; } } else { for (i = 0; i < num_rows*num_vectors; i++) y_data[i] = b_data[i]; } /*----------------------------------------------------------------- * y += A*x *-----------------------------------------------------------------*/ if (num_rownnz < xpar*(num_rows)) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jj,m,tempx) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rownnz; i++) { m = A_rownnz[i]; /* * for (jj = A_i[m]; jj < A_i[m+1]; jj++) * { * j = A_j[jj]; * y_data[m] += A_data[jj] * x_data[j]; * } */ if ( num_vectors==1 ) { tempx = 0; for (jj = A_i[m]; jj < A_i[m+1]; jj++) tempx += A_data[jj] * x_data[A_j[jj]]; y_data[m] += tempx; } else for ( j=0; j<num_vectors; ++j ) { tempx = 0; for (jj = A_i[m]; jj < A_i[m+1]; jj++) tempx += A_data[jj] * x_data[ j*vecstride_x + A_j[jj]*idxstride_x ]; y_data[ j*vecstride_y + m*idxstride_y] += tempx; } } } else // num_vectors > 1 { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jj,tempx) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rows; i++) { for (j = 0; j < num_vectors; ++j) { tempx = 0; for (jj = A_i[i]; jj < A_i[i+1]; jj++) { tempx += A_data[jj] * x_data[ j*vecstride_x + A_j[jj]*idxstride_x ]; } y_data[ j*vecstride_y + i*idxstride_y ] += tempx; } } } /*----------------------------------------------------------------- * y = alpha*y *-----------------------------------------------------------------*/ if (alpha != 1.0) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rows*num_vectors; i++) y_data[i] *= alpha; } } else { // JSP: this is currently the only path optimized #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,jj,tempx) #endif { HYPRE_Int iBegin = 
hypre_CSRMatrixGetLoadBalancedPartitionBegin(A); HYPRE_Int iEnd = hypre_CSRMatrixGetLoadBalancedPartitionEnd(A); hypre_assert(iBegin <= iEnd); hypre_assert(iBegin >= 0 && iBegin <= num_rows); hypre_assert(iEnd >= 0 && iEnd <= num_rows); if (0 == temp) { if (1 == alpha) // JSP: a common path { for (i = iBegin; i < iEnd; i++) { tempx = 0.0; for (jj = A_i[i]; jj < A_i[i+1]; jj++) { tempx += A_data[jj] * x_data[A_j[jj]]; } y_data[i] = tempx; } } // y = A*x else if (-1 == alpha) { for (i = iBegin; i < iEnd; i++) { tempx = 0.0; for (jj = A_i[i]; jj < A_i[i+1]; jj++) { tempx -= A_data[jj] * x_data[A_j[jj]]; } y_data[i] = tempx; } } // y = -A*x else { for (i = iBegin; i < iEnd; i++) { tempx = 0.0; for (jj = A_i[i]; jj < A_i[i+1]; jj++) { tempx += A_data[jj] * x_data[A_j[jj]]; } y_data[i] = alpha*tempx; } } // y = alpha*A*x } // temp == 0 else if (-1 == temp) // beta == -alpha { if (1 == alpha) // JSP: a common path { for (i = iBegin; i < iEnd; i++) { tempx = -b_data[i]; for (jj = A_i[i]; jj < A_i[i+1]; jj++) { tempx += A_data[jj] * x_data[A_j[jj]]; } y_data[i] = tempx; } } // y = A*x - y else if (-1 == alpha) // JSP: a common path { for (i = iBegin; i < iEnd; i++) { tempx = b_data[i]; for (jj = A_i[i]; jj < A_i[i+1]; jj++) { tempx -= A_data[jj] * x_data[A_j[jj]]; } y_data[i] = tempx; } } // y = -A*x + y else { for (i = iBegin; i < iEnd; i++) { tempx = -b_data[i]; for (jj = A_i[i]; jj < A_i[i+1]; jj++) { tempx += A_data[jj] * x_data[A_j[jj]]; } y_data[i] = alpha*tempx; } } // y = alpha*(A*x - y) } // temp == -1 else if (1 == temp) { if (1 == alpha) // JSP: a common path { for (i = iBegin; i < iEnd; i++) { tempx = b_data[i]; for (jj = A_i[i]; jj < A_i[i+1]; jj++) { tempx += A_data[jj] * x_data[A_j[jj]]; } y_data[i] = tempx; } } // y = A*x + y else if (-1 == alpha) { for (i = iBegin; i < iEnd; i++) { tempx = -b_data[i]; for (jj = A_i[i]; jj < A_i[i+1]; jj++) { tempx -= A_data[jj] * x_data[A_j[jj]]; } y_data[i] = tempx; } } // y = -A*x - y else { for (i = iBegin; i < iEnd; i++) { tempx = b_data[i]; for (jj = A_i[i]; jj < A_i[i+1]; jj++) { tempx += A_data[jj] * x_data[A_j[jj]]; } y_data[i] = alpha*tempx; } } // y = alpha*(A*x + y) } else { if (1 == alpha) // JSP: a common path { for (i = iBegin; i < iEnd; i++) { tempx = b_data[i]*temp; for (jj = A_i[i]; jj < A_i[i+1]; jj++) { tempx += A_data[jj] * x_data[A_j[jj]]; } y_data[i] = tempx; } } // y = A*x + temp*y else if (-1 == alpha) { for (i = iBegin; i < iEnd; i++) { tempx = -b_data[i]*temp; for (jj = A_i[i]; jj < A_i[i+1]; jj++) { tempx -= A_data[jj] * x_data[A_j[jj]]; } y_data[i] = tempx; } } // y = -A*x - temp*y else { for (i = iBegin; i < iEnd; i++) { tempx = b_data[i]*temp; for (jj = A_i[i]; jj < A_i[i+1]; jj++) { tempx += A_data[jj] * x_data[A_j[jj]]; } y_data[i] = alpha*tempx; } } // y = alpha*(A*x + temp*y) } // temp != 0 && temp != -1 && temp != 1 } // omp parallel } if (x == y) { hypre_SeqVectorDestroy(x_tmp); } return ierr; } HYPRE_Int hypre_CSRMatrixMatvecOutOfPlace( HYPRE_Complex alpha, hypre_CSRMatrix *A, hypre_Vector *x, HYPRE_Complex beta, hypre_Vector *b, hypre_Vector *y, HYPRE_Int offset ) { #ifdef HYPRE_PROFILE HYPRE_Real time_begin = hypre_MPI_Wtime(); #endif HYPRE_Int ierr = 0; #if defined(HYPRE_USING_GPU) //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) ); //RL: TODO back to hypre_GetExecPolicy1 later HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE; if (exec == HYPRE_EXEC_DEVICE) { ierr = hypre_CSRMatrixMatvecDevice(0, alpha, A, x, beta, b, y, offset); } else #endif { ierr = 
hypre_CSRMatrixMatvecOutOfPlaceHost(alpha, A, x, beta, b, y, offset); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MATVEC] += hypre_MPI_Wtime() - time_begin; #endif return ierr; } HYPRE_Int hypre_CSRMatrixMatvec( HYPRE_Complex alpha, hypre_CSRMatrix *A, hypre_Vector *x, HYPRE_Complex beta, hypre_Vector *y ) { return hypre_CSRMatrixMatvecOutOfPlace(alpha, A, x, beta, y, y, 0); } /*-------------------------------------------------------------------------- * hypre_CSRMatrixMatvecT * * This version is using a different (more efficient) threading scheme * Performs y <- alpha * A^T * x + beta * y * * From Van Henson's modification of hypre_CSRMatrixMatvec. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_CSRMatrixMatvecTHost( HYPRE_Complex alpha, hypre_CSRMatrix *A, hypre_Vector *x, HYPRE_Complex beta, hypre_Vector *y ) { HYPRE_Complex *A_data = hypre_CSRMatrixData(A); HYPRE_Int *A_i = hypre_CSRMatrixI(A); HYPRE_Int *A_j = hypre_CSRMatrixJ(A); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A); HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A); HYPRE_Complex *x_data = hypre_VectorData(x); HYPRE_Complex *y_data = hypre_VectorData(y); HYPRE_Int x_size = hypre_VectorSize(x); HYPRE_Int y_size = hypre_VectorSize(y); HYPRE_Int num_vectors = hypre_VectorNumVectors(x); HYPRE_Int idxstride_y = hypre_VectorIndexStride(y); HYPRE_Int vecstride_y = hypre_VectorVectorStride(y); HYPRE_Int idxstride_x = hypre_VectorIndexStride(x); HYPRE_Int vecstride_x = hypre_VectorVectorStride(x); HYPRE_Complex temp; HYPRE_Complex *y_data_expand; HYPRE_Int my_thread_num = 0, offset = 0; HYPRE_Int i, j, jv, jj; HYPRE_Int num_threads; HYPRE_Int ierr = 0; hypre_Vector *x_tmp = NULL; /*--------------------------------------------------------------------- * Check for size compatibility. MatvecT returns ierr = 1 if * length of X doesn't equal the number of rows of A, * ierr = 2 if the length of Y doesn't equal the number of * columns of A, and ierr = 3 if both are true. * * Because temporary vectors are often used in MatvecT, none of * these conditions terminates processing, and the ierr flag * is informational only. 
*--------------------------------------------------------------------*/ hypre_assert( num_vectors == hypre_VectorNumVectors(y) ); if (num_rows != x_size) ierr = 1; if (num_cols != y_size) ierr = 2; if (num_rows != x_size && num_cols != y_size) ierr = 3; /*----------------------------------------------------------------------- * Do (alpha == 0.0) computation - RDF: USE MACHINE EPS *-----------------------------------------------------------------------*/ if (alpha == 0.0) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_cols*num_vectors; i++) y_data[i] *= beta; return ierr; } if (x == y) { x_tmp = hypre_SeqVectorCloneDeep(x); x_data = hypre_VectorData(x_tmp); } /*----------------------------------------------------------------------- * y = (beta/alpha)*y *-----------------------------------------------------------------------*/ temp = beta / alpha; if (temp != 1.0) { if (temp == 0.0) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_cols*num_vectors; i++) y_data[i] = 0.0; } else { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_cols*num_vectors; i++) y_data[i] *= temp; } } /*----------------------------------------------------------------- * y += A^T*x *-----------------------------------------------------------------*/ num_threads = hypre_NumThreads(); if (num_threads > 1) { y_data_expand = hypre_CTAlloc(HYPRE_Complex, num_threads*y_size, HYPRE_MEMORY_HOST); if ( num_vectors==1 ) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,jj,j,my_thread_num,offset) #endif { my_thread_num = hypre_GetThreadNum(); offset = y_size*my_thread_num; #ifdef HYPRE_USING_OPENMP #pragma omp for HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rows; i++) { for (jj = A_i[i]; jj < A_i[i+1]; jj++) { j = A_j[jj]; y_data_expand[offset + j] += A_data[jj] * x_data[i]; } } /* implied barrier (for threads)*/ #ifdef HYPRE_USING_OPENMP #pragma omp for HYPRE_SMP_SCHEDULE #endif for (i = 0; i < y_size; i++) { for (j = 0; j < num_threads; j++) { y_data[i] += y_data_expand[j*y_size + i]; } } } /* end parallel threaded region */ } else { /* multiple vector case is not threaded */ for (i = 0; i < num_rows; i++) { for ( jv=0; jv<num_vectors; ++jv ) { for (jj = A_i[i]; jj < A_i[i+1]; jj++) { j = A_j[jj]; y_data[ j*idxstride_y + jv*vecstride_y ] += A_data[jj] * x_data[ i*idxstride_x + jv*vecstride_x]; } } } } hypre_TFree(y_data_expand, HYPRE_MEMORY_HOST); } else { for (i = 0; i < num_rows; i++) { if ( num_vectors==1 ) { for (jj = A_i[i]; jj < A_i[i+1]; jj++) { j = A_j[jj]; y_data[j] += A_data[jj] * x_data[i]; } } else { for ( jv=0; jv<num_vectors; ++jv ) { for (jj = A_i[i]; jj < A_i[i+1]; jj++) { j = A_j[jj]; y_data[ j*idxstride_y + jv*vecstride_y ] += A_data[jj] * x_data[ i*idxstride_x + jv*vecstride_x ]; } } } } } /*----------------------------------------------------------------- * y = alpha*y *-----------------------------------------------------------------*/ if (alpha != 1.0) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_cols*num_vectors; i++) { y_data[i] *= alpha; } } if (x == y) { hypre_SeqVectorDestroy(x_tmp); } return ierr; } HYPRE_Int hypre_CSRMatrixMatvecT( HYPRE_Complex alpha, hypre_CSRMatrix *A, hypre_Vector *x, HYPRE_Complex beta, hypre_Vector *y ) { #ifdef HYPRE_PROFILE HYPRE_Real time_begin = hypre_MPI_Wtime(); #endif HYPRE_Int ierr = 0; #if defined(HYPRE_USING_GPU) 
//HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) ); //RL: TODO back to hypre_GetExecPolicy1 later HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE; if (exec == HYPRE_EXEC_DEVICE) { ierr = hypre_CSRMatrixMatvecDevice(1, alpha, A, x, beta, y, y, 0 ); } else #endif { ierr = hypre_CSRMatrixMatvecTHost(alpha, A, x, beta, y); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MATVEC] += hypre_MPI_Wtime() - time_begin; #endif return ierr; } /*-------------------------------------------------------------------------- * hypre_CSRMatrixMatvec_FF *--------------------------------------------------------------------------*/ HYPRE_Int hypre_CSRMatrixMatvec_FF( HYPRE_Complex alpha, hypre_CSRMatrix *A, hypre_Vector *x, HYPRE_Complex beta, hypre_Vector *y, HYPRE_Int *CF_marker_x, HYPRE_Int *CF_marker_y, HYPRE_Int fpt ) { HYPRE_Complex *A_data = hypre_CSRMatrixData(A); HYPRE_Int *A_i = hypre_CSRMatrixI(A); HYPRE_Int *A_j = hypre_CSRMatrixJ(A); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A); HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A); HYPRE_Complex *x_data = hypre_VectorData(x); HYPRE_Complex *y_data = hypre_VectorData(y); HYPRE_Int x_size = hypre_VectorSize(x); HYPRE_Int y_size = hypre_VectorSize(y); HYPRE_Complex temp; HYPRE_Int i, jj; HYPRE_Int ierr = 0; /*--------------------------------------------------------------------- * Check for size compatibility. Matvec returns ierr = 1 if * length of X doesn't equal the number of columns of A, * ierr = 2 if the length of Y doesn't equal the number of rows * of A, and ierr = 3 if both are true. * * Because temporary vectors are often used in Matvec, none of * these conditions terminates processing, and the ierr flag * is informational only. *--------------------------------------------------------------------*/ if (num_cols != x_size) ierr = 1; if (num_rows != y_size) ierr = 2; if (num_cols != x_size && num_rows != y_size) ierr = 3; /*----------------------------------------------------------------------- * Do (alpha == 0.0) computation - RDF: USE MACHINE EPS *-----------------------------------------------------------------------*/ if (alpha == 0.0) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rows; i++) if (CF_marker_x[i] == fpt) y_data[i] *= beta; return ierr; } /*----------------------------------------------------------------------- * y = (beta/alpha)*y *-----------------------------------------------------------------------*/ temp = beta / alpha; if (temp != 1.0) { if (temp == 0.0) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rows; i++) if (CF_marker_x[i] == fpt) y_data[i] = 0.0; } else { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rows; i++) if (CF_marker_x[i] == fpt) y_data[i] *= temp; } } /*----------------------------------------------------------------- * y += A*x *-----------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,jj) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rows; i++) { if (CF_marker_x[i] == fpt) { temp = y_data[i]; for (jj = A_i[i]; jj < A_i[i+1]; jj++) if (CF_marker_y[A_j[jj]] == fpt) temp += A_data[jj] * x_data[A_j[jj]]; y_data[i] = temp; } } /*----------------------------------------------------------------- * y = alpha*y *-----------------------------------------------------------------*/ if (alpha != 1.0) { #ifdef 
HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_rows; i++) if (CF_marker_x[i] == fpt) y_data[i] *= alpha; } return ierr; }
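/*--------------------------------------------------------------------------
 * Standalone sketch (not the hypre API; the names below are illustrative) of
 * the threading scheme used by hypre_CSRMatrixMatvecTHost above for
 * y += A^T*x: each thread scatters into a private copy of y, then the
 * private copies are reduced, so no atomics are needed on the columns.
 *--------------------------------------------------------------------------*/
#include <stdlib.h>
#include <omp.h>

void csr_matvecT_private_y(int nrows, int ncols,
                           const int *Ai, const int *Aj, const double *Ax,
                           const double *x, double *y)
{
   int nthreads = omp_get_max_threads();
   double *y_expand = calloc((size_t) nthreads * ncols, sizeof(double));

   #pragma omp parallel
   {
      double *y_local = y_expand + (size_t) omp_get_thread_num() * ncols;

      /* scatter phase: rows are divided among threads; column collisions
         can only happen inside the thread-private buffer */
      #pragma omp for
      for (int i = 0; i < nrows; i++)
         for (int jj = Ai[i]; jj < Ai[i + 1]; jj++)
            y_local[Aj[jj]] += Ax[jj] * x[i];
      /* implicit barrier here, as in the hypre routine */

      /* reduce phase: each thread sums a disjoint range of columns */
      #pragma omp for
      for (int j = 0; j < ncols; j++)
         for (int t = 0; t < nthreads; t++)
            y[j] += y_expand[(size_t) t * ncols + j];
   }

   free(y_expand);
}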
9.norace4.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s #include <omp.h> #define N 100 int main() { int sum = 0; #pragma omp ordered for (int i = 0; i < N; i++) { sum += i; } return 0; } // We do not support inter SCoP data races for now // CHECK: Region is Data Race Free. // END
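// Sketch (illustrative, not part of the benchmark above): a conforming use of
// the ordered construct binds it to a loop that carries the ordered clause;
// the region then runs one iteration at a time, in iteration order, so the
// shared accumulation below is race free.
int ordered_sum(int n) {
  int sum = 0;
#pragma omp parallel for ordered
  for (int i = 0; i < n; i++) {
#pragma omp ordered
    sum += i; // serialized in iteration order
  }
  return sum;
}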
sum-solution.c
/* * Sum of a*X[N] */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <sys/timeb.h> /* read timer in second */ double read_timer() { struct timeb tm; ftime(&tm); return (double) tm.time + (double) tm.millitm / 1000.0; } /* read timer in ms */ double read_timer_ms() { struct timeb tm; ftime(&tm); return (double) tm.time * 1000.0 + (double) tm.millitm; } #define REAL float #define VECTOR_LENGTH 102400 /* initialize a vector with random floating point numbers */ void init(REAL A[], int N) { int i; for (i = 0; i < N; i++) { A[i] = (double) drand48(); } } REAL sum(int N, REAL X[], REAL a); REAL sumaxpy(int N, REAL X[], REAL Y[], REAL a); int main(int argc, char *argv[]) { int N = VECTOR_LENGTH; double elapsed; /* for timing */ if (argc < 2) { fprintf(stderr, "Usage: sum <n> (default %d)\n", N); exit(1); } N = atoi(argv[1]); REAL X[N]; REAL Y[N]; srand48((1 << 12)); init(X, N); init(Y, N); REAL a = 0.1234; /* example run */ elapsed = read_timer(); REAL result = sum(N, X, a); elapsed = (read_timer() - elapsed); int num_ths = omp_get_num_threads(); #pragma omp parallel { #pragma omp master num_ths = omp_get_num_threads(); } double elapsed_2 = read_timer(); result = sumaxpy(N, X, Y, a); elapsed_2 = (read_timer() - elapsed_2); /* you should add the call to each function and time the execution */ printf("======================================================================================================\n"); printf("\tSum %d numbers\n", N); printf("------------------------------------------------------------------------------------------------------\n"); printf("Performance:\t\tRuntime (ms)\t MFLOPS \n"); printf("------------------------------------------------------------------------------------------------------\n"); printf("Sum:\t\t\t%4f\t%4f\n", elapsed * 1.0e3, 2*N / (1.0e6 * elapsed)); printf("SumAXPY:\t\t\t%4f\t%4f\n", elapsed_2 * 1.0e3, 3*N / (1.0e6 * elapsed_2)); return 0; } REAL sum(int N, REAL X[], REAL a) { int i; REAL result = 0.0; #pragma omp parallel for reduction(+:result) for (i = 0; i < N; ++i) result += a * X[i]; return result; } REAL sum_reduce(int N, REAL X[], REAL a) { int i; REAL * results; int num_threads; #pragma omp parallel { #pragma omp master { num_threads = omp_get_num_threads(); results = malloc(sizeof(REAL)*num_threads); } #pragma omp barrier int id = omp_get_thread_num(); REAL tmp = 0.0; #pragma omp for for (i = 0; i < N; ++i) tmp += a * X[i]; results[id] = tmp; } REAL tmp = 0; for (i=0; i<num_threads; i++) tmp += results[i]; return tmp; } /* * sum: a*X[]+Y[] */ REAL sumaxpy(int N, REAL X[], REAL Y[], REAL a) { int i; REAL result = 0.0; for (i = 0; i < N; ++i) result += a * X[i] + Y[i]; return result; }
Example_doacross.3.c
/* * @@name: doacross.3c * @@type: C * @@compilable: no * @@linkable: no * @@expect: failure * @@version: omp_4.5 */ #define N 100 void work_wrong(double p[][N][N]) { int i, j, k; #pragma omp parallel for ordered(2) private(i,j,k) for (i=1; i<N-1; i++) { for (j=1; j<N-1; j++) { #pragma omp ordered depend(sink: i-1,j) depend(sink: i+1,j) \ depend(sink: i,j-1) depend(sink: i,j+1) for (k=1; k<N-1; k++) { double tmp1 = p[i-1][j][k] + p[i+1][j][k]; double tmp2 = p[i][j-1][k] + p[i][j+1][k]; double tmp3 = p[i][j][k-1] + p[i][j][k+1]; p[i][j][k] = (tmp1 + tmp2 + tmp3) / 6.0; } /* missing #pragma omp ordered depend(source) */ } } }
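/*
 * Sketch (not part of the official example above; it reuses the N defined
 * there): a conforming doacross version of the same loop nest. Every sink
 * names a lexicographically earlier iteration, so unlike work_wrong() no
 * iteration waits on a later one such as (i+1,j), and the depend(source)
 * that work_wrong() omits signals completion of each (i,j) iteration.
 */
void work_legal(double p[][N][N])
{
   int i, j, k;
#pragma omp parallel for ordered(2) private(i,j,k)
   for (i=1; i<N-1; i++)
   {
      for (j=1; j<N-1; j++)
      {
#pragma omp ordered depend(sink: i-1,j) depend(sink: i,j-1)
         for (k=1; k<N-1; k++)
         {
            /* reads only values from iterations the sinks wait for,
               or from earlier k of this same (i,j) iteration */
            p[i][j][k] = (p[i-1][j][k] + p[i][j-1][k] +
                          p[i][j][k-1] + p[i][j][k]) / 4.0;
         }
#pragma omp ordered depend(source)
      }
   }
}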
logreg.c
#include "../include/logreg.h" #include <stdlib.h> #include <stdio.h> #include <math.h> m_element_t _array_mean(matrix_t *arr){ m_element_t v = 0; size_t i, len = arr->rows; for(i=0;i<len;i++) v += arr->data[i]; return v/len; } int LOGREG_iteration(LOGREG_model_t *m, float lr, float lambda, matrix_t *X, matrix_t *Y){ matrix_t *P = LOGREG_inference(m,X), *P_less_y = M_sub(P,Y); m_element_t grad_b = _array_mean(P_less_y); //grad_w = (X.T @ (P - Y)) / m matrix_t *m_prod = M_dot_T(X,P_less_y),//X.T@(P-Y) *grad_w_tmp = M_mul_scalar(m_prod, 1./((m_element_t)X->rows)),// /m *t1 = M_mul_scalar(m->weights, 2*lambda),//L2 regularization *grad_w = M_sum(grad_w_tmp,t1); m->bias -= lr*grad_b; // this is more coerent but a lot slower i think, with inplace results would be better // t1 = M_mul_scalar(grad_w,lr) // t2 = M_sub(m->weights,t1) // M_free(t1); M_free(m->weights); // m->weights = t2 for(size_t i=0;i<m->m;i++) m->weights->data[i] -= grad_w->data[i]*lr; M_free(P_less_y); M_free(m_prod); M_free(P); M_free(grad_w_tmp); M_free(grad_w); M_free(t1); return 0; } matrix_t *LOGREG_inference(LOGREG_model_t *m, matrix_t *X){ matrix_t *Z1 = M_dot(X, m->weights), *Z = M_sum_scalar(Z1, m->bias); M_free(Z1); size_t len = Z->cols*Z->rows; //sigmoid #pragma omp parallel for for(size_t i=0;i<len;i++) Z->data[i] = 1.0/(1.0 + exp(-Z->data[i])); return Z; } #define T 100. int LOGREG_train(LOGREG_model_t *model, float lambda, matrix_t *X, matrix_t *y, int max_it, double loss_tol, train_cb* cb){ m_element_t lr = 1., loss=0, last_loss=0; int it = 0; matrix_t *P; do{ for(int i=0;i<T/2;i++) LOGREG_iteration(model,lr,lambda,X,y); it += T/2; //very stupid test with real dataset anyway. Not so bad but T must be tuned lr = lr>loss_tol*10 ? lr/(.1 + (it/T)) : lr*10; last_loss = loss; P = LOGREG_inference(model, X); loss = cross_entropy(P,y); M_free(P); if(cb != NULL && cb(it,loss,lr)) break; }while(it < max_it && ( fabs(last_loss-loss) > loss_tol )); return it; } LOGREG_model_t *LOGREG_new(size_t features){ LOGREG_model_t *m = malloc(sizeof(LOGREG_model_t)); m->m = features; m->weights = M_zeros(features,1); return m; } m_element_t cross_entropy(matrix_t *P, matrix_t *Y){ m_element_t res = 0.0; #pragma omp parallel for reduction(+:res) for(size_t i=0;i<P->rows;i++){ m_element_t p = M_get(P,i,0); if(p< EPS) p = EPS; else if(p> 1-EPS) p = 1 - EPS; res += M_get(Y,i,0)*log(p) + (1-M_get(Y,i,0))*log(1-p); } return -res/P->rows; } m_element_t accurancy(LOGREG_model_t *model, matrix_t *X, matrix_t *y){ matrix_t *P = LOGREG_inference(model,X); m_element_t mean = 0; size_t len = P->rows; for(size_t i=0;i<len;i++){ mean += (M_get(P,i,1)>0.5) == (M_get(y,i,1)>0.5); } M_free(P); return mean/len; }
lock.c
#include <omp.h> #include <stdio.h> static int lk; static int lock_counter; typedef int rhomp_lock_t; //typedef int bool; int lock(int op, rhomp_lock_t L) { int result = 0; if (op == 0) { // request lock if ((lk & (1<<L)) == 0) { lk |= (1<<L); result = 1; } } else { // request unlock lk &= ~(1<<L); } return result; } int rhomp_test_lock(rhomp_lock_t L) { return lock(0, L); } void rhomp_set_lock(rhomp_lock_t L) { while (lock(0, L) != 1) {} return; } void rhomp_unset_lock(rhomp_lock_t L) { lock(1, L); } rhomp_lock_t rhomp_init_lock() { rhomp_lock_t result = lock_counter; lock_counter++; return result; } void rhomp_begin_named_critical(rhomp_lock_t L) { rhomp_lock_t GW; if (L) { rhomp_set_lock(L); return; } else { rhomp_set_lock(3); if (L == 0) { rhomp_lock_t temp = rhomp_init_lock(); GW = temp; } rhomp_unset_lock(3); rhomp_set_lock(GW); return; } } #if 0 int foo() { int counter = 0; int i; int junk = rhomp_init_lock(); junk = rhomp_init_lock(); for (i = 0; i < 150; i++) { junk = rhomp_init_lock(); printf("initialized lock and value is %d\n", junk); } if (0) { junk = rhomp_test_lock(0); } #pragma omp parallel num_threads(10) { rhomp_set_lock(1); printf("thread %d got a lock\n", omp_get_thread_num()); counter++; rhomp_unset_lock(1); printf("thread %d unset the lock\n", omp_get_thread_num()); } return counter; } int hello() { printf("result is %d\n", foo()); return 0; } #endif
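/* Sketch (an illustration, not part of lock.c): the read-modify-write of the
 * shared word `lk` in lock() above is not atomic, so two threads can observe
 * a bit as clear and both "acquire" it. With the GCC/Clang __atomic builtins
 * the same bit-mask scheme can be made race free. */
static int lk_atomic;

int rhomp_test_lock_atomic(int L)
{
  int bit = 1 << L;
  /* atomically set the bit; the previous value tells us whether we got it */
  int prev = __atomic_fetch_or(&lk_atomic, bit, __ATOMIC_ACQUIRE);
  return (prev & bit) == 0;   /* 1 on success, 0 if already held */
}

void rhomp_set_lock_atomic(int L)
{
  while (!rhomp_test_lock_atomic(L)) { /* spin */ }
}

void rhomp_unset_lock_atomic(int L)
{
  __atomic_fetch_and(&lk_atomic, ~(1 << L), __ATOMIC_RELEASE);
}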
louvain_hill.h
//===------------------------------------------------------------*- C++ -*-===// // // Ripples: A C++ Library for Influence Maximization // Marco Minutoli <marco.minutoli@pnnl.gov> // Pacific Northwest National Laboratory // //===----------------------------------------------------------------------===// // // Copyright (c) 2019, Battelle Memorial Institute // // Battelle Memorial Institute (hereinafter Battelle) hereby grants permission // to any person or entity lawfully obtaining a copy of this software and // associated documentation files (hereinafter “the Software”) to redistribute // and use the Software in source and binary forms, with or without // modification. Such person or entity may use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and may permit // others to do so, subject to the following conditions: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimers. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Other than as used herein, neither the name Battelle Memorial Institute or // Battelle may be used in any form whatsoever without the express written // consent of Battelle. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL BATTELLE OR CONTRIBUTORS BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// //===----------------------------------------------------------------------===// #ifndef RIPPLES_LOUVAIN_HILL_H #define RIPPLES_LOUVAIN_HILL_H #include <queue> #include <string> #include <type_traits> #include <vector> #include "ripples/hill_climbing_engine.h" #include "ripples/hill_climbing.h" #include "spdlog/fmt/ostr.h" #include "spdlog/sinks/stdout_color_sinks.h" #include "spdlog/spdlog.h" #ifdef RIPPLES_ENABLE_CUDA #include "cuda_runtime.h" #include "ripples/cuda/cuda_utils.h" #endif namespace ripples { struct LouvainHillConfiguration : public HillClimbingConfiguration { std::string communityList; size_t num_threads_d1{4}; void addCmdOptions(CLI::App &app) { AlgorithmConfiguration::addCmdOptions(app); app.add_option("--community-map", communityList, "The filename of the community map.") ->required() ->group("Algorithm Options"); app.add_option( "--streaming-gpu-workers", streaming_gpu_workers, "The number of GPU workers for the CPU+GPU streaming engine.") ->group("Streaming-Engine Options"); app.add_option( "--concurrent-partitions", num_threads_d1, "The number of partitions to be processed parallely.") ->group("Streaming-Engine Options"); } }; // struct LouvainIMMExecutionRecord : public IMMExecutionRecord {}; namespace { template <typename vertex_type> struct Compare { bool operator()(std::pair<vertex_type, size_t> &a, std::pair<vertex_type, size_t> &b) const { return a.second < b.second; } }; } // namespace template <typename GraphTy, typename RecordTy, typename ConfTy, typename execution_tag> std::vector<typename GraphTy::vertex_type> FindMostInfluentialSeedSet(std::vector<GraphTy> & communities, std::vector<std::vector<Bitmask<int>>> &sampled_graphs, std::vector<RecordTy> &R, ConfTy &CFG, execution_tag &&ex_tag) { spdlog::get("console")->info("SeedSelect start"); using vertex_type = typename GraphTy::vertex_type; Compare<vertex_type> cmp; omp_set_nested(1); int num_threads_d1 = CFG.num_threads_d1, num_threads_d2; num_threads_d2 = std::floor(omp_get_max_threads() / num_threads_d1); size_t total_gpu = 0; #if RIPPLES_ENABLE_CUDA total_gpu = int(cuda_num_devices() / num_threads_d1) * num_threads_d1; CFG.streaming_gpu_workers = total_gpu / num_threads_d1; #endif spdlog::get("console")->flush(); CFG.streaming_workers = num_threads_d2; CFG.streaming_workers -= CFG.streaming_gpu_workers; // Init on heap per community using vertex_contribution_pair = std::pair<vertex_type, size_t>; std::vector<vertex_contribution_pair> global_heap( CFG.k + 1, vertex_contribution_pair{-1, 0}); std::vector<uint64_t> active_communities(communities.size(), 1); auto heap_cmp = [](const vertex_contribution_pair &a, const vertex_contribution_pair &b) -> bool { return a.second > b.second; }; std::make_heap(global_heap.begin(), global_heap.end(), heap_cmp); std::mutex global_heap_mutex; using GraphFwd = ripples::Graph<uint32_t, ripples::WeightedDestination<uint32_t, float>, ripples::ForwardDirection<uint32_t>>; std::vector<SeedSelectionEngine<GraphFwd, std::vector<Bitmask<int>>::iterator>*> SEV; SEV.reserve(communities.size()); for (size_t i = 0; i < communities.size(); ++i) { auto S = new SeedSelectionEngine<GraphFwd, std::vector<Bitmask<int>>::iterator>(communities[i], CFG.streaming_workers, CFG.streaming_gpu_workers, "SeedSelectionEngine" + std::to_string(i), CFG.streaming_gpu_workers, (i % num_threads_d1) * CFG.streaming_gpu_workers); SEV[i] = S; } while (!std::all_of(active_communities.begin(), active_communities.end(), [](const uint64_t &v) -> bool { return v == 0; })) { #pragma omp parallel for 
schedule(static) num_threads(num_threads_d1) for (size_t i = 0; i < communities.size(); ++i) { if (active_communities[i] == 0) continue; vertex_contribution_pair vcp = SEV[i]->get_next_seed(sampled_graphs[i].begin(), sampled_graphs[i].end(), R[i].SeedSelectionTasks); vcp.first = communities[i].convertID(vcp.first); // Handle the global index insertion std::lock_guard<std::mutex> _(global_heap_mutex); std::pop_heap(global_heap.begin(), global_heap.end(), heap_cmp); global_heap.back() = vcp; std::push_heap(global_heap.begin(), global_heap.end(), heap_cmp); if (global_heap.front() == vcp) active_communities[i] = 0; } } std::pop_heap(global_heap.begin(), global_heap.end(), heap_cmp); global_heap.pop_back(); double coverage = 0; std::vector<typename GraphTy::vertex_type> seeds; seeds.reserve(CFG.k); std::sort_heap(global_heap.begin(), global_heap.end(), heap_cmp); for (auto e : global_heap) { seeds.push_back(e.first); coverage += e.second; } return seeds; } // template <typename GraphTy, typename ConfTy, typename GeneratorTy, // typename RecordTy, typename diff_model_tag> // auto LouvainHill(const std::vector<GraphTy> &communities, ConfTy &CFG, // GeneratorTy &gen, diff_model_tag &&model_tag, // sequential_tag &&ex_tag) { // //TODO:: Implement // } //! Influence Maximization using Community Structure. //! //! The algorithm uses the Louvain method for community detection and then //! IMM to select seeds frome the communities. //! //! \tparam GraphTy The type of the input graph. //! \tparam PRNG The type of the parallel random number generator. //! \tparam diff_model_tag Type-Tag to selecte the diffusion model. //! \tparam execution_tag Type-Tag to select the execution policy. //! //! \param communities The input graphs. The graphs are transoposed. //! \param k The size of the seed set. //! \param epsilon The parameter controlling the approximation guarantee. //! \param l Parameter usually set to 1. //! \param gen The parallel random number generator. //! \param model_tag The diffusion model tag. //! \param ex_tag The execution policy tag. template <typename GraphTy, typename ConfTy, typename RecordTy, typename GeneratorTy, typename diff_model_tag> auto LouvainHill(std::vector<GraphTy> &communities, ConfTy &CFG, std::vector<RecordTy> &R, GeneratorTy gen, diff_model_tag &&model_tag, omp_parallel_tag &&ex_tag) { using vertex_type = typename GraphTy::vertex_type; size_t k = CFG.k; std::vector<decltype(gen)> comm_gen(communities.size()); for (size_t i = 0; i < communities.size(); ++i) { auto local_gen = gen; local_gen.split(communities.size(), i); comm_gen[i] = local_gen; } std::vector<std::vector<Bitmask<int>>> sampled_graphs(communities.size()); // For each community do Sampling // #pragma omp parallel for schedule(dynamic) for (size_t i = 0; i < communities.size(); ++i) { sampled_graphs[i] = SampleFrom(communities[i], CFG, comm_gen[i], R[i], std::forward<diff_model_tag>(model_tag), i); } // Global seed selection using the heap auto S = FindMostInfluentialSeedSet(communities, sampled_graphs, R, CFG, std::forward<omp_parallel_tag>(ex_tag)); return std::make_pair(S, R); } } // namespace ripples #endif /* RIPPLES_LOUVAIN_HILL_H */
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 4; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=2*Nt-2;t1++) { lbp=ceild(t1+2,2); ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1+2,2),ceild(4*t2-Nz+9,4));t3<=min(min(floord(4*Nt+Ny-9,4),floord(2*t1+Ny-3,4)),floord(4*t2+Ny-9,4));t3++) { for (t4=max(max(ceild(t1-28,32),ceild(4*t2-Nz-51,64)),ceild(4*t3-Ny-51,64));t4<=min(min(min(floord(4*Nt+Nx-9,64),floord(2*t1+Nx-3,64)),floord(4*t2+Nx-9,64)),floord(4*t3+Nx-9,64));t4++) { for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(64*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) { for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) { lbv=max(64*t4,4*t5+4); ubv=min(64*t4+63,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] 
+ A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
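/*
 * Editor's sketch: the CLooG-generated loop nest above is a time-tiled version
 * of a 25-point, radius-4, axis-symmetric variable-coefficient stencil. The
 * untiled reference below performs the same update without tiling so the
 * access pattern is easier to read. Sizes, time steps, and initial values are
 * illustrative assumptions, not the benchmark's; only the coefficient layout
 * (coef[0] centre, then one coefficient per axis and radius) follows the code
 * above.
 */
#include <stdio.h>

#define N 24   /* assumed cube edge, including 4-point halos on each side */
#define T 4    /* assumed number of time steps */
#define R 4    /* stencil radius: 25 points = centre + 8 per axis */

static double A[2][N][N][N];
static double coef[13][N][N][N];

int main(void) {
  /* Deterministic initialisation instead of rand(), for reproducibility. */
  for (int i = 0; i < N; i++)
    for (int j = 0; j < N; j++)
      for (int k = 0; k < N; k++) {
        A[0][i][j][k] = (double)((i + j + k) % 7);
        for (int m = 0; m < 13; m++) coef[m][i][j][k] = 0.01 * (m + 1);
      }

  for (int t = 0; t < T; t++) {
    #pragma omp parallel for collapse(2)
    for (int i = R; i < N - R; i++)
      for (int j = R; j < N - R; j++)
        for (int k = R; k < N - R; k++) {
          double v = coef[0][i][j][k] * A[t % 2][i][j][k];
          /* coef[1..3] hold the radius-1 weights, coef[4..6] radius-2, ... */
          for (int r = 1; r <= R; r++) {
            v += coef[3 * r - 2][i][j][k] * (A[t % 2][i - r][j][k] + A[t % 2][i + r][j][k]);
            v += coef[3 * r - 1][i][j][k] * (A[t % 2][i][j - r][k] + A[t % 2][i][j + r][k]);
            v += coef[3 * r][i][j][k]     * (A[t % 2][i][j][k - r] + A[t % 2][i][j][k + r]);
          }
          A[(t + 1) % 2][i][j][k] = v;
        }
  }

  printf("A[%d][%d][%d][%d] = %f\n", T % 2, N / 2, N / 2, N / 2,
         A[T % 2][N / 2][N / 2][N / 2]);
  return 0;
}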
deconvolution_3x3.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void deconv3x3s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outch = top_blob.c; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); for (int q=0; q<inch; q++) { const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch*9 + q*9; const float* r0 = img0; const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; #if __ARM_NEON float32x4_t _k0 = vld1q_f32(k0); float32x4_t _k1 = vld1q_f32(k1); float32x4_t _k2 = vld1q_f32(k2); #endif // __ARM_NEON for (int i = 0; i < h; i++) { float* outptr = out.row(i); float* outptr0 = outptr; float* outptr1 = outptr + outw; float* outptr2 = outptr + outw*2; int j = 0; #if __ARM_NEON for (; j+3 < w; j+=4) { float32x4_t _v = vld1q_f32(r0); #if 0 // bad compiler generate slow instructions :( // 0 float32x4_t _out00 = vld1q_f32(outptr0 + 0); _out00 = vmlaq_lane_f32(_out00, _v, vget_low_f32(_k0), 0); float32x4_t _out01 = vmulq_lane_f32(_v, vget_low_f32(_k0), 1); // ext float32x4_t _zero_out01 = vdupq_n_f32(0.f); _zero_out01 = vextq_f32(_zero_out01, _out01, 3); _out00 = vaddq_f32(_out00, _zero_out01); // float32x2_t _out00low = vget_low_f32(_out00); float32x2_t _out00high = vget_high_f32(_out00); _out00high = vmla_lane_f32(_out00high, vget_low_f32(_v), vget_high_f32(_k0), 0); _out00 = vcombine_f32(_out00low, _out00high); vst1q_f32(outptr0 + 0, _out00); // float32x2_t _out02high = vld1_f32(outptr0 + 4); float32x2_t _out01_zero = vext_f32(vget_high_f32(_out01), vget_low_f32(_zero_out01), 1); _out02high = vadd_f32(_out02high, _out01_zero); _out02high = vmla_lane_f32(_out02high, vget_high_f32(_v), vget_high_f32(_k0), 0); vst1_f32(outptr0 + 4, _out02high); // 1 float32x4_t _out10 = vld1q_f32(outptr1 + 0); _out10 = vmlaq_lane_f32(_out10, _v, vget_low_f32(_k1), 0); float32x4_t _out11 = vmulq_lane_f32(_v, vget_low_f32(_k1), 1); // ext float32x4_t _zero_out11 = vdupq_n_f32(0.f); _zero_out11 = vextq_f32(_zero_out11, _out11, 3); _out10 = vaddq_f32(_out10, _zero_out11); // float32x2_t _out10low = vget_low_f32(_out10); float32x2_t _out10high = vget_high_f32(_out10); _out10high = vmla_lane_f32(_out10high, vget_low_f32(_v), vget_high_f32(_k1), 0); _out10 = vcombine_f32(_out10low, _out10high); vst1q_f32(outptr1 + 0, _out10); // float32x2_t _out12high = vld1_f32(outptr1 + 4); float32x2_t _out11_zero = vext_f32(vget_high_f32(_out11), vget_low_f32(_zero_out11), 1); _out12high = vadd_f32(_out12high, _out11_zero); _out12high = vmla_lane_f32(_out12high, vget_high_f32(_v), vget_high_f32(_k1), 0); 
vst1_f32(outptr1 + 4, _out12high); // 2 float32x4_t _out20 = vld1q_f32(outptr2 + 0); _out20 = vmlaq_lane_f32(_out20, _v, vget_low_f32(_k2), 0); float32x4_t _out21 = vmulq_lane_f32(_v, vget_low_f32(_k2), 1); // ext float32x4_t _zero_out21 = vdupq_n_f32(0.f); _zero_out21 = vextq_f32(_zero_out21, _out21, 3); _out20 = vaddq_f32(_out20, _zero_out21); // float32x2_t _out20low = vget_low_f32(_out20); float32x2_t _out20high = vget_high_f32(_out20); _out20high = vmla_lane_f32(_out20high, vget_low_f32(_v), vget_high_f32(_k2), 0); _out20 = vcombine_f32(_out20low, _out20high); vst1q_f32(outptr2 + 0, _out20); // float32x2_t _out22high = vld1_f32(outptr2 + 4); float32x2_t _out21_zero = vext_f32(vget_high_f32(_out21), vget_low_f32(_zero_out21), 1); _out22high = vadd_f32(_out22high, _out21_zero); _out22high = vmla_lane_f32(_out22high, vget_high_f32(_v), vget_high_f32(_k2), 0); vst1_f32(outptr2 + 4, _out22high); #else // float32x4_t _out00 = vld1q_f32(outptr0 + 0); _out00 = vmlaq_lane_f32(_out00, _v, vget_low_f32(_k0), 0); vst1q_f32(outptr0 + 0, _out00); float32x4_t _out01 = vld1q_f32(outptr0 + 1); _out01 = vmlaq_lane_f32(_out01, _v, vget_low_f32(_k0), 1); vst1q_f32(outptr0 + 1, _out01); float32x4_t _out02 = vld1q_f32(outptr0 + 2); _out02 = vmlaq_lane_f32(_out02, _v, vget_high_f32(_k0), 0); vst1q_f32(outptr0 + 2, _out02); // float32x4_t _out10 = vld1q_f32(outptr1 + 0); _out10 = vmlaq_lane_f32(_out10, _v, vget_low_f32(_k1), 0); vst1q_f32(outptr1 + 0, _out10); float32x4_t _out11 = vld1q_f32(outptr1 + 1); _out11 = vmlaq_lane_f32(_out11, _v, vget_low_f32(_k1), 1); vst1q_f32(outptr1 + 1, _out11); float32x4_t _out12 = vld1q_f32(outptr1 + 2); _out12 = vmlaq_lane_f32(_out12, _v, vget_high_f32(_k1), 0); vst1q_f32(outptr1 + 2, _out12); // float32x4_t _out20 = vld1q_f32(outptr2 + 0); _out20 = vmlaq_lane_f32(_out20, _v, vget_low_f32(_k2), 0); vst1q_f32(outptr2 + 0, _out20); float32x4_t _out21 = vld1q_f32(outptr2 + 1); _out21 = vmlaq_lane_f32(_out21, _v, vget_low_f32(_k2), 1); vst1q_f32(outptr2 + 1, _out21); float32x4_t _out22 = vld1q_f32(outptr2 + 2); _out22 = vmlaq_lane_f32(_out22, _v, vget_high_f32(_k2), 0); vst1q_f32(outptr2 + 2, _out22); #endif r0 += 4; outptr0 += 4; outptr1 += 4; outptr2 += 4; } #endif // __ARM_NEON for (; j < w; j++) { float val = r0[0]; outptr0[0] += val * k0[0]; outptr0[1] += val * k0[1]; outptr0[2] += val * k0[2]; outptr1[0] += val * k1[0]; outptr1[1] += val * k1[1]; outptr1[2] += val * k1[2]; outptr2[0] += val * k2[0]; outptr2[1] += val * k2[1]; outptr2[2] += val * k2[2]; r0++; outptr0++; outptr1++; outptr2++; } } } } } static void deconv3x3s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outch = top_blob.c; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; out.fill(bias0); for (int q=0; q<inch; q++) { const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch*9 + q*9; const float* r0 = img0; const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; #if __ARM_NEON float32x4_t _k0 = vld1q_f32(k0); float32x4_t _k1 = vld1q_f32(k1); float32x4_t _k2 = vld1q_f32(k2); #endif // __ARM_NEON for (int i = 0; i < h; i++) { float* outptr = out.row(i*2); float* outptr0 = outptr; float* outptr1 = outptr0 + outw; float* outptr2 = outptr1 + outw; int j = 0; #if __ARM_NEON for (; j+3 < w; j+=4) { float32x4_t _v = vld1q_f32(r0); // out row 0 float32x4_t _out00 = vmulq_lane_f32(_v, vget_low_f32(_k0), 0); // 0,2,4,6 float32x4_t _out01 = vmulq_lane_f32(_v, vget_low_f32(_k0), 1); // 1,3,5,7 float32x4_t _out02 = vmulq_lane_f32(_v, vget_high_f32(_k0), 0); // 2,4,6,8 float32x4x2_t _out0 = vld2q_f32(outptr0); _out0.val[0] = vaddq_f32(_out0.val[0], _out00); // 0,2,4,6 _out0.val[1] = vaddq_f32(_out0.val[1], _out01); // 1,3,5,7 vst2q_f32(outptr0, _out0); _out0 = vld2q_f32(outptr0 + 2); _out0.val[0] = vaddq_f32(_out0.val[0], _out02); // 2,4,6,8 vst2q_f32(outptr0 + 2, _out0); // out row 1 float32x4_t _out10 = vmulq_lane_f32(_v, vget_low_f32(_k1), 0); // 0,2,4,6 float32x4_t _out11 = vmulq_lane_f32(_v, vget_low_f32(_k1), 1); // 1,3,5,7 float32x4_t _out12 = vmulq_lane_f32(_v, vget_high_f32(_k1), 0); // 2,4,6,8 float32x4x2_t _out1 = vld2q_f32(outptr1); _out1.val[0] = vaddq_f32(_out1.val[0], _out10); // 0,2,4,6 _out1.val[1] = vaddq_f32(_out1.val[1], _out11); // 1,3,5,7 vst2q_f32(outptr1, _out1); _out1 = vld2q_f32(outptr1 + 2); _out1.val[0] = vaddq_f32(_out1.val[0], _out12); // 2,4,6,8 vst2q_f32(outptr1 + 2, _out1); // out row 2 float32x4_t _out20 = vmulq_lane_f32(_v, vget_low_f32(_k2), 0); // 0,2,4,6 float32x4_t _out21 = vmulq_lane_f32(_v, vget_low_f32(_k2), 1); // 1,3,5,7 float32x4_t _out22 = vmulq_lane_f32(_v, vget_high_f32(_k2), 0); // 2,4,6,8 float32x4x2_t _out2 = vld2q_f32(outptr2); _out2.val[0] = vaddq_f32(_out2.val[0], _out20); // 0,2,4,6 _out2.val[1] = vaddq_f32(_out2.val[1], _out21); // 1,3,5,7 vst2q_f32(outptr2, _out2); _out2 = vld2q_f32(outptr2 + 2); _out2.val[0] = vaddq_f32(_out2.val[0], _out22); // 2,4,6,8 vst2q_f32(outptr2 + 2, _out2); r0 += 4; outptr0 += 8; outptr1 += 8; outptr2 += 8; } #endif // __ARM_NEON for (; j < w; j++) { float val = r0[0]; outptr0[0] += val * k0[0]; outptr0[1] += val * k0[1]; outptr0[2] += val * k0[2]; outptr1[0] += val * k1[0]; outptr1[1] += val * k1[1]; outptr1[2] += val * k1[2]; outptr2[0] += val * k2[0]; outptr2[1] += val * k2[1]; outptr2[2] += val * k2[2]; r0++; outptr0 += 2; outptr1 += 2; outptr2 += 2; } } } } }
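/*
 * Editor's sketch: both NEON kernels above implement transposed convolution as
 * a scatter, where each input pixel accumulates a full 3x3 kernel patch into
 * the output (stride 1 writes at (i+di, j+dj), stride 2 at (2*i+di, 2*j+dj)).
 * The single-channel scalar reference below makes that pattern explicit; the
 * sizes and values are invented for the example and the ncnn Mat API is not
 * used.
 */
#include <stdio.h>

static void deconv3x3_ref(const float *in, int w, int h,
                          const float *k, float bias, int stride, float *out) {
  int outw = (w - 1) * stride + 3;
  int outh = (h - 1) * stride + 3;
  for (int i = 0; i < outw * outh; i++) out[i] = bias;

  /* Scatter: every input pixel adds a weighted 3x3 patch into the output. */
  for (int i = 0; i < h; i++)
    for (int j = 0; j < w; j++) {
      float v = in[i * w + j];
      for (int di = 0; di < 3; di++)
        for (int dj = 0; dj < 3; dj++)
          out[(i * stride + di) * outw + (j * stride + dj)] += v * k[di * 3 + dj];
    }
}

int main(void) {
  float in[2 * 2] = { 1.f, 2.f, 3.f, 4.f };
  float k[9] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 };  /* simple diagonal kernel */
  float out[4 * 4];                            /* stride 1: (2-1)*1 + 3 = 4 */
  deconv3x3_ref(in, 2, 2, k, 0.f, 1, out);
  for (int i = 0; i < 4; i++) {
    for (int j = 0; j < 4; j++) printf("%5.1f", out[i * 4 + j]);
    printf("\n");
  }
  return 0;
}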
cryptsha512_fmt_plug.c
/* * This file is part of John the Ripper password cracker, * based on rawSHA256_fmt.c code and Drepper's spec at * http://www.akkadia.org/drepper/SHA-crypt.txt * * This software is Copyright (c) 2012 magnum, and it is hereby released to the * general public under the following terms: Redistribution and use in source * and binary forms, with or without modification, are permitted. * */ #if FMT_EXTERNS_H extern struct fmt_main fmt_cryptsha512; #elif FMT_REGISTERS_H john_register_one(&fmt_cryptsha512); #else #define _GNU_SOURCE 1 #include <string.h> #ifdef _OPENMP #define OMP_SCALE 16 #include <omp.h> #endif #include "arch.h" #include "sha2.h" #include "params.h" #include "common.h" #include "formats.h" // these MUST be defined prior to loading cryptsha512_valid.h #define BINARY_SIZE 64 #define SALT_LENGTH 16 #define CIPHERTEXT_LENGTH 86 #include "cryptsha512_common.h" #include "memdbg.h" #define FORMAT_LABEL "sha512crypt" #if ARCH_BITS >= 64 #define ALGORITHM_NAME "64/" ARCH_BITS_STR " " SHA2_LIB #else #define ALGORITHM_NAME "32/" ARCH_BITS_STR " " SHA2_LIB #endif #define PLAINTEXT_LENGTH 125 #define BINARY_ALIGN 4 #define SALT_SIZE sizeof(struct saltstruct) #define SALT_ALIGN 4 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static struct fmt_tests tests[] = { {"$6$LKO/Ute40T3FNF95$6S/6T2YuOIHY0N3XpLKABJ3soYcXD9mB7uVbtEZDj/LNscVhZoZ9DEH.sBciDrMsHOWOoASbNLTypH/5X26gN0", "U*U*U*U*"}, {"$6$LKO/Ute40T3FNF95$wK80cNqkiAUzFuVGxW6eFe8J.fSVI65MD5yEm8EjYMaJuDrhwe5XXpHDJpwF/kY.afsUs1LlgQAaOapVNbggZ1", "U*U***U"}, {"$6$LKO/Ute40T3FNF95$YS81pp1uhOHTgKLhSMtQCr2cDiUiN03Ud3gyD4ameviK1Zqz.w3oXsMgO6LrqmIEcG3hiqaUqHi/WEE2zrZqa/", "U*U***U*"}, {"$6$OmBOuxFYBZCYAadG$WCckkSZok9xhp4U1shIZEV7CCVwQUwMVea7L3A77th6SaE9jOPupEMJB.z0vIWCDiN9WLh2m9Oszrj5G.gt330", "*U*U*U*U"}, {"$6$ojWH1AiTee9x1peC$QVEnTvRVlPRhcLQCk/HnHaZmlGAAjCfrAN0FtOsOnUk5K5Bn/9eLHHiRzrTzaIKjW9NTLNIBUCtNVOowWS2mN.", ""}, {NULL} }; static int (*saved_key_length); static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)]; static struct saltstruct { unsigned int len; unsigned int rounds; unsigned char salt[SALT_LENGTH]; } *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt = omp_t * MIN_KEYS_PER_CRYPT; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt = omp_t * MAX_KEYS_PER_CRYPT; #endif saved_key_length = mem_calloc_tiny(sizeof(*saved_key_length) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); } static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; } static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; } static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; } static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; } static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; } static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; } static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; } static void set_key(char *key, int index) { int len = strlen(key); saved_key_length[index] = len; if (len > PLAINTEXT_LENGTH) len = saved_key_length[index] = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, len); } static char *get_key(int index) { saved_key[index][saved_key_length[index]] = 0; 
return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { // portably align temp_result char * pointer machine word size. union xx { unsigned char c[BINARY_SIZE]; ARCH_WORD a[BINARY_SIZE/sizeof(ARCH_WORD)]; } u; unsigned char *temp_result = u.c; SHA512_CTX ctx; SHA512_CTX alt_ctx; size_t cnt; char *cp; char p_bytes[PLAINTEXT_LENGTH+1]; char s_bytes[PLAINTEXT_LENGTH+1]; /* Prepare for the real work. */ SHA512_Init(&ctx); /* Add the key string. */ SHA512_Update(&ctx, (unsigned char*)saved_key[index], saved_key_length[index]); /* The last part is the salt string. This must be at most 16 characters and it ends at the first `$' character (for compatibility with existing implementations). */ SHA512_Update(&ctx, cur_salt->salt, cur_salt->len); /* Compute alternate SHA512 sum with input KEY, SALT, and KEY. The final result will be added to the first context. */ SHA512_Init(&alt_ctx); /* Add key. */ SHA512_Update(&alt_ctx, (unsigned char*)saved_key[index], saved_key_length[index]); /* Add salt. */ SHA512_Update(&alt_ctx, cur_salt->salt, cur_salt->len); /* Add key again. */ SHA512_Update(&alt_ctx, (unsigned char*)saved_key[index], saved_key_length[index]); /* Now get result of this (64 bytes) and add it to the other context. */ SHA512_Final((unsigned char*)crypt_out[index], &alt_ctx); /* Add for any character in the key one byte of the alternate sum. */ for (cnt = saved_key_length[index]; cnt > BINARY_SIZE; cnt -= BINARY_SIZE) SHA512_Update(&ctx, (unsigned char*)crypt_out[index], BINARY_SIZE); SHA512_Update(&ctx, (unsigned char*)crypt_out[index], cnt); /* Take the binary representation of the length of the key and for every 1 add the alternate sum, for every 0 the key. */ for (cnt = saved_key_length[index]; cnt > 0; cnt >>= 1) if ((cnt & 1) != 0) SHA512_Update(&ctx, (unsigned char*)crypt_out[index], BINARY_SIZE); else SHA512_Update(&ctx, (unsigned char*)saved_key[index], saved_key_length[index]); /* Create intermediate result. */ SHA512_Final((unsigned char*)crypt_out[index], &ctx); /* Start computation of P byte sequence. */ SHA512_Init(&alt_ctx); /* For every character in the password add the entire password. */ for (cnt = 0; cnt < saved_key_length[index]; ++cnt) SHA512_Update(&alt_ctx, (unsigned char*)saved_key[index], saved_key_length[index]); /* Finish the digest. */ SHA512_Final(temp_result, &alt_ctx); /* Create byte sequence P. */ cp = p_bytes; for (cnt = saved_key_length[index]; cnt >= BINARY_SIZE; cnt -= BINARY_SIZE) cp = (char *) memcpy (cp, temp_result, BINARY_SIZE) + BINARY_SIZE; memcpy (cp, temp_result, cnt); /* Start computation of S byte sequence. */ SHA512_Init(&alt_ctx); /* For every character in the password add the entire password. */ for (cnt = 0; cnt < 16 + ((unsigned char*)crypt_out[index])[0]; ++cnt) SHA512_Update(&alt_ctx, cur_salt->salt, cur_salt->len); /* Finish the digest. */ SHA512_Final(temp_result, &alt_ctx); /* Create byte sequence S. */ cp = s_bytes; for (cnt = cur_salt->len; cnt >= BINARY_SIZE; cnt -= BINARY_SIZE) cp = (char *) memcpy (cp, temp_result, BINARY_SIZE) + BINARY_SIZE; memcpy (cp, temp_result, cnt); /* Repeatedly run the collected hash value through SHA512 to burn CPU cycles. */ for (cnt = 0; cnt < cur_salt->rounds; ++cnt) { /* New context. */ SHA512_Init(&ctx); /* Add key or last result. 
*/ if ((cnt & 1) != 0) SHA512_Update(&ctx, p_bytes, saved_key_length[index]); else SHA512_Update(&ctx, (unsigned char*)crypt_out[index], BINARY_SIZE); /* Add salt for numbers not divisible by 3. */ if (cnt % 3 != 0) SHA512_Update(&ctx, s_bytes, cur_salt->len); /* Add key for numbers not divisible by 7. */ if (cnt % 7 != 0) SHA512_Update(&ctx, p_bytes, saved_key_length[index]); /* Add key or last result. */ if ((cnt & 1) != 0) SHA512_Update(&ctx, (unsigned char*)crypt_out[index], BINARY_SIZE); else SHA512_Update(&ctx, p_bytes, saved_key_length[index]); /* Create intermediate [SIC] result. */ SHA512_Final((unsigned char*)crypt_out[index], &ctx); } } return count; } static void set_salt(void *salt) { cur_salt = salt; } static void *get_salt(char *ciphertext) { static struct saltstruct out; int len; out.rounds = ROUNDS_DEFAULT; ciphertext += 3; if (!strncmp(ciphertext, ROUNDS_PREFIX, sizeof(ROUNDS_PREFIX) - 1)) { const char *num = ciphertext + sizeof(ROUNDS_PREFIX) - 1; char *endp; unsigned long int srounds = strtoul(num, &endp, 10); if (*endp == '$') { ciphertext = endp + 1; srounds = srounds < ROUNDS_MIN ? ROUNDS_MIN : srounds; out.rounds = srounds > ROUNDS_MAX ? ROUNDS_MAX : srounds; } } for (len = 0; ciphertext[len] != '$'; len++); memcpy(out.salt, ciphertext, len); out.len = len; return &out; } static int cmp_all(void *binary, int count) { int index = 0; #ifdef _OPENMP for (; index < count; index++) #endif if (!memcmp(binary, crypt_out[index], BINARY_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } // Public domain hash function by DJ Bernstein // We are hashing the entire struct static int salt_hash(void *salt) { unsigned char *s = salt; unsigned int hash = 5381; unsigned int i; for (i = 0; i < SALT_SIZE; i++) hash = ((hash << 5) + hash) ^ s[i]; return hash & (SALT_HASH_SIZE - 1); } #if FMT_MAIN_VERSION > 11 /* iteration count as tunable cost parameter */ static unsigned int sha512crypt_iterations(void *salt) { struct saltstruct *sha512crypt_salt; sha512crypt_salt = salt; return (unsigned int)sha512crypt_salt->rounds; } #endif struct fmt_main fmt_cryptsha512 = { { FORMAT_LABEL, FORMAT_NAME, "SHA512 " ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, #if FMT_MAIN_VERSION > 11 { "iteration count", }, #endif tests }, { init, fmt_default_done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, #if FMT_MAIN_VERSION > 11 { sha512crypt_iterations, }, #endif fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
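/*
 * Editor's sketch: crypt_all() above is embarrassingly parallel because every
 * candidate key uses its own stack-local SHA512_CTX and writes only to its own
 * crypt_out[index] slot. The stand-alone program below shows that pattern with
 * an OpenSSL-style SHA512_* interface (the same call shape as the sha2.h used
 * above); the key list and output table are invented for the example, and it
 * links against libcrypto rather than John's sha2.h.
 */
#include <stdio.h>
#include <string.h>
#include <openssl/sha.h>

#define NKEYS 4
#define DIGEST 64

int main(void) {
  const char *keys[NKEYS] = { "U*U*U*U*", "U*U***U", "password", "" };
  unsigned char out[NKEYS][DIGEST];

  /* One private context per iteration: no shared mutable state, no race. */
  #pragma omp parallel for
  for (int i = 0; i < NKEYS; i++) {
    SHA512_CTX ctx;
    SHA512_Init(&ctx);
    SHA512_Update(&ctx, keys[i], strlen(keys[i]));
    SHA512_Final(out[i], &ctx);
  }

  for (int i = 0; i < NKEYS; i++)
    printf("key %d -> %02x%02x%02x%02x...\n",
           i, out[i][0], out[i][1], out[i][2], out[i][3]);
  return 0;
}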
DRB036-truedepscalar-var-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Loop carried true dep between tmp =.. and ..= tmp. Data race pair: tmp@66:12 vs. tmp@67:5 */ #include "omprace.h" #include <omp.h> #include <stdlib.h> int main(int argc, char* argv[]) { omprace_init(); int i; int tmp; tmp = 10; int len=100; if (argc>1) len = atoi(argv[1]); int a[len]; #pragma omp parallel for for (i=0;i<len;i++) { a[i] = tmp; tmp =a[i]+i; } omprace_fini(); return 0; }
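/*
 * Editor's sketch: the loop above carries a true dependence on tmp (a[i] reads
 * the tmp produced by iteration i-1), so the "parallel for" is a data race by
 * design. One possible race-free rewrite, shown below, removes the dependence
 * by computing the sequential result in closed form: after iteration i-1,
 * tmp = 10 + (1 + 2 + ... + (i-1)) = 10 + i*(i-1)/2, and that is the value
 * a[i] receives. This is an illustrative alternative, not part of
 * DataRaceBench, and it drops the omprace instrumentation calls.
 */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[]) {
  int len = 100;
  if (argc > 1) len = atoi(argv[1]);
  int *a = (int *)malloc(sizeof(int) * (size_t)len);
  if (a == NULL) return 1;

  /* Each iteration now depends only on i, so the loop is safely parallel. */
  #pragma omp parallel for
  for (int i = 0; i < len; i++)
    a[i] = 10 + i * (i - 1) / 2;

  printf("a[%d] = %d\n", len - 1, a[len - 1]);
  free(a);
  return 0;
}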