source
stringlengths
3
92
c
stringlengths
26
2.25M
scheduled-clauseModificado3.c
#include <stdio.h> #include <stdlib.h> #ifdef _OPENMP #include <omp.h> #else #define omp_get_thread_num() 0 #endif char* printSched (omp_sched_t type) { char * ret; if (type == omp_sched_static) ret = "Static"; else if (type == omp_sched_dynamic) ret = "Dynamic"; else if (type == omp_sched_guided) ret = "Guided"; else if (type == omp_sched_auto) ret = "Auto"; return ret; } int main(int argc, char **argv) { int i, n = 16,chunk, a[n],suma=0; omp_sched_t kind; int modifier; if(argc < 2) { fprintf(stderr,"\nFalta chunk \n"); exit(-1); } chunk = atoi(argv[1]); for (i=0; i<n; i++) a[i] = i; omp_get_schedule(&kind,&modifier); printf("\nAntes de la modificación:\n"); printf("dyn-var: %d\n", omp_get_dynamic()); printf("nthreads-var: %d\n", omp_get_max_threads()); printf("run-sched-var: %s, %d\n", printSched(kind), modifier); omp_set_dynamic(1); omp_set_num_threads(2); omp_set_schedule(omp_sched_guided,chunk+2); omp_get_schedule(&kind,&modifier); printf("\n Después de la modificación:\n"); printf("dyn-var: %d\n", omp_get_dynamic()); printf("nthreads-var: %d\n", omp_get_max_threads()); printf("run-sched-var: %s, %d\n", printSched(kind), modifier); #pragma omp parallel for firstprivate(suma) lastprivate(suma) schedule(dynamic,chunk) for (i=0; i<n; i++) { suma = suma + a[i]; //printf(" thread %d suma a[%d] suma=%d \n",omp_get_thread_num(),i,suma); } //printf("Fuera de 'parallel for' suma=%d\n",suma); }
TimeCluster.h
/****************************************************************************** ** Copyright (c) 2015, Intel Corporation ** ** All rights reserved. ** ** ** ** Redistribution and use in source and binary forms, with or without ** ** modification, are permitted provided that the following conditions ** ** are met: ** ** 1. Redistributions of source code must retain the above copyright ** ** notice, this list of conditions and the following disclaimer. ** ** 2. Redistributions in binary form must reproduce the above copyright ** ** notice, this list of conditions and the following disclaimer in the ** ** documentation and/or other materials provided with the distribution. ** ** 3. Neither the name of the copyright holder nor the names of its ** ** contributors may be used to endorse or promote products derived ** ** from this software without specific prior written permission. ** ** ** ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ** ******************************************************************************/ /* Alexander Heinecke (Intel Corp.) ******************************************************************************/ /** * @file * This file is part of SeisSol. 
* * @author Alex Breuer (breuer AT mytum.de, http://www5.in.tum.de/wiki/index.php/Dipl.-Math._Alexander_Breuer) * * @section LICENSE * Copyright (c) 2013-2015, SeisSol Group * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * @section DESCRIPTION * LTS cluster in SeisSol. 
**/ #ifndef TIMECLUSTER_H_ #define TIMECLUSTER_H_ #ifdef USE_MPI #include <mpi.h> #include <list> #endif #include <Initializer/typedefs.hpp> #include <SourceTerm/typedefs.hpp> #include <utils/logger.h> #include <Initializer/LTS.h> #include <Initializer/tree/LTSTree.hpp> #include <Kernels/Time.h> #include <Kernels/Local.h> #include <Kernels/Neighbor.h> #include <Kernels/DynamicRupture.h> #include <Kernels/Plasticity.h> #include <Solver/FreeSurfaceIntegrator.h> #include <Monitoring/LoopStatistics.h> #include <Kernels/TimeCommon.h> #ifdef ACL_DEVICE #include <device.h> #include <Solver/Pipeline/DrPipeline.h> #endif namespace seissol { namespace time_stepping { class TimeCluster; } namespace kernels { class ReceiverCluster; } } /** * Time cluster, which represents a collection of elements having the same time step width. **/ class seissol::time_stepping::TimeCluster { public: //! cluster id on this rank const unsigned int m_clusterId; //! global cluster cluster id const unsigned int m_globalClusterId; private: bool usePlasticity; //! number of time steps unsigned long m_numberOfTimeSteps; /* * integrators */ //! time kernel kernels::Time m_timeKernel; //! local kernel kernels::Local m_localKernel; //! neighbor kernel kernels::Neighbor m_neighborKernel; kernels::DynamicRupture m_dynamicRuptureKernel; /* * mesh structure */ struct MeshStructure *m_meshStructure; /* * global data */ //! global data structures GlobalData *m_globalDataOnHost{nullptr}; GlobalData *m_globalDataOnDevice{nullptr}; #ifdef ACL_DEVICE device::DeviceInstance& device = device::DeviceInstance::getInstance(); dr::pipeline::DrPipeline drPipeline; #endif /* * element data and mpi queues */ #ifdef USE_MPI //! pending copy region sends std::list< MPI_Request* > m_sendQueue; //! 
pending ghost region receives std::list< MPI_Request* > m_receiveQueue; #endif seissol::initializers::TimeCluster* m_clusterData; seissol::initializers::TimeCluster* m_dynRupClusterData; seissol::initializers::LTS* m_lts; seissol::initializers::DynamicRupture* m_dynRup; //! time step width of the performed time step. double m_timeStepWidth; //! Mapping of cells to point sources sourceterm::CellToPointSourcesMapping const* m_cellToPointSources; //! Number of mapping of cells to point sources unsigned m_numberOfCellToPointSourcesMappings; //! Point sources sourceterm::PointSources const* m_pointSources; //! true if dynamic rupture faces are present bool m_dynamicRuptureFaces; enum ComputePart { LocalInterior = 0, NeighborInterior, DRNeighborInterior, #ifdef USE_MPI LocalCopy, NeighborCopy, DRNeighborCopy, #endif DRFrictionLawCopy, DRFrictionLawInterior, PlasticityCheck, PlasticityYield, NUM_COMPUTE_PARTS }; long long m_flops_nonZero[NUM_COMPUTE_PARTS]; long long m_flops_hardware[NUM_COMPUTE_PARTS]; //! Tv parameter for plasticity double m_tv; //! Relax time for plasticity double m_oneMinusIntegratingFactor; //! Stopwatch of TimeManager LoopStatistics* m_loopStatistics; unsigned m_regionComputeLocalIntegration; unsigned m_regionComputeNeighboringIntegration; unsigned m_regionComputeDynamicRupture; kernels::ReceiverCluster* m_receiverCluster; #ifdef USE_MPI /** * Receives the copy layer data from relevant neighboring MPI clusters. 
**/ void receiveGhostLayer(); /** * Sends the associated regions of the copy layer to relevant neighboring MPI clusters **/ void sendCopyLayer(); #if defined(_OPENMP) && defined(USE_COMM_THREAD) /** * Inits Receives the copy layer data from relevant neighboring MPI clusters, active when using communication thread **/ void initReceiveGhostLayer(); /** * Inits Sends the associated regions of the copy layer to relevant neighboring MPI clusters, active when using communication thread **/ void initSendCopyLayer(); /** * Waits until the initialization of the communication is finished. **/ void waitForInits(); #endif /** * Tests for pending ghost layer communication. **/ bool testForGhostLayerReceives(); /** * Tests for pending copy layer communication. **/ bool testForCopyLayerSends(); #endif /** * Writes the receiver output if applicable (receivers present, receivers have to be written). **/ void writeReceivers(); /** * Computes the source terms if applicable. **/ void computeSources(); /** * Computes dynamic rupture. **/ void computeDynamicRupture( seissol::initializers::Layer& layerData ); /** * Computes all cell local integration. * * This are: * * time integration * * volume integration * * local boundary integration * * Remark: After this step the DOFs are only updated half with the boundary contribution * of the neighborings cells missing. * * @param i_numberOfCells number of cells. * @param i_cellInformation cell local information. * @param i_cellData cell data. * @param io_buffers time integration buffers. * @param io_derivatives time derivatives. * @param io_dofs degrees of freedom. **/ void computeLocalIntegration( seissol::initializers::Layer& i_layerData ); /** * Computes the contribution of the neighboring cells to the boundary integral. * * Remark: After this step (in combination with the local integration) the DOFs are at the next time step. * TODO: This excludes dynamic rupture contribution. * * @param i_numberOfCells number of cells. 
* @param i_cellInformation cell local information. * @param i_cellData cell data. * @param i_faceNeighbors pointers to neighboring time buffers or derivatives. * @param io_dofs degrees of freedom. **/ void computeNeighboringIntegration( seissol::initializers::Layer& i_layerData ); #ifndef ACL_DEVICE template<bool usePlasticity> std::pair<long, long> computeNeighboringIntegrationImplementation(seissol::initializers::Layer& i_layerData) { SCOREP_USER_REGION( "computeNeighboringIntegration", SCOREP_USER_REGION_TYPE_FUNCTION ) m_loopStatistics->begin(m_regionComputeNeighboringIntegration); real* (*faceNeighbors)[4] = i_layerData.var(m_lts->faceNeighbors); CellDRMapping (*drMapping)[4] = i_layerData.var(m_lts->drMapping); CellLocalInformation* cellInformation = i_layerData.var(m_lts->cellInformation); PlasticityData* plasticity = i_layerData.var(m_lts->plasticity); real (*pstrain)[7] = i_layerData.var(m_lts->pstrain); unsigned numberOTetsWithPlasticYielding = 0; kernels::NeighborData::Loader loader; loader.load(*m_lts, i_layerData); real *l_timeIntegrated[4]; real *l_faceNeighbors_prefetch[4]; #ifdef _OPENMP #pragma omp parallel for schedule(static) default(none) private(l_timeIntegrated, l_faceNeighbors_prefetch) shared(cellInformation, loader, faceNeighbors, pstrain, i_layerData, plasticity, drMapping) reduction(+:numberOTetsWithPlasticYielding) #endif for( unsigned int l_cell = 0; l_cell < i_layerData.getNumberOfCells(); l_cell++ ) { auto data = loader.entry(l_cell); seissol::kernels::TimeCommon::computeIntegrals(m_timeKernel, data.cellInformation.ltsSetup, data.cellInformation.faceTypes, m_subTimeStart, m_timeStepWidth, faceNeighbors[l_cell], #ifdef _OPENMP *reinterpret_cast<real (*)[4][tensor::I::size()]>(&(m_globalDataOnHost->integrationBufferLTS[omp_get_thread_num()*4*tensor::I::size()])), #else *reinterpret_cast<real (*)[4][tensor::I::size()]>(m_globalData->integrationBufferLTS), #endif l_timeIntegrated); #ifdef ENABLE_MATRIX_PREFETCH #pragma message("the 
current prefetch structure (flux matrices and tDOFs is tuned for higher order and shouldn't be harmful for lower orders") l_faceNeighbors_prefetch[0] = (cellInformation[l_cell].faceTypes[1] != FaceType::dynamicRupture) ? faceNeighbors[l_cell][1] : drMapping[l_cell][1].godunov; l_faceNeighbors_prefetch[1] = (cellInformation[l_cell].faceTypes[2] != FaceType::dynamicRupture) ? faceNeighbors[l_cell][2] : drMapping[l_cell][2].godunov; l_faceNeighbors_prefetch[2] = (cellInformation[l_cell].faceTypes[3] != FaceType::dynamicRupture) ? faceNeighbors[l_cell][3] : drMapping[l_cell][3].godunov; // fourth face's prefetches if (l_cell < (i_layerData.getNumberOfCells()-1) ) { l_faceNeighbors_prefetch[3] = (cellInformation[l_cell+1].faceTypes[0] != FaceType::dynamicRupture) ? faceNeighbors[l_cell+1][0] : drMapping[l_cell+1][0].godunov; } else { l_faceNeighbors_prefetch[3] = faceNeighbors[l_cell][3]; } #endif m_neighborKernel.computeNeighborsIntegral( data, drMapping[l_cell], #ifdef ENABLE_MATRIX_PREFETCH l_timeIntegrated, l_faceNeighbors_prefetch #else l_timeIntegrated #endif ); if constexpr (usePlasticity) { numberOTetsWithPlasticYielding += seissol::kernels::Plasticity::computePlasticity( m_oneMinusIntegratingFactor, m_timeStepWidth, m_tv, m_globalDataOnHost, &plasticity[l_cell], data.dofs, pstrain[l_cell] ); } #ifdef INTEGRATE_QUANTITIES seissol::SeisSol::main.postProcessor().integrateQuantities( m_timeStepWidth, i_layerData, l_cell, dofs[l_cell] ); #endif // INTEGRATE_QUANTITIES } const long long nonZeroFlopsPlasticity = i_layerData.getNumberOfCells() * m_flops_nonZero[PlasticityCheck] + numberOTetsWithPlasticYielding * m_flops_nonZero[PlasticityYield]; const long long hardwareFlopsPlasticity = i_layerData.getNumberOfCells() * m_flops_hardware[PlasticityCheck] + numberOTetsWithPlasticYielding * m_flops_hardware[PlasticityYield]; m_loopStatistics->end(m_regionComputeNeighboringIntegration, i_layerData.getNumberOfCells()); return {nonZeroFlopsPlasticity, 
hardwareFlopsPlasticity}; } #endif // ACL_DEVICE void computeLocalIntegrationFlops( unsigned numberOfCells, CellLocalInformation const* cellInformation, long long& nonZeroFlops, long long& hardwareFlops ); void computeNeighborIntegrationFlops( unsigned numberOfCells, CellLocalInformation const* cellInformation, CellDRMapping const (*drMapping)[4], long long& nonZeroFlops, long long& hardwareFlops, long long& drNonZeroFlops, long long& drHardwareFlops ); void computeDynamicRuptureFlops( seissol::initializers::Layer& layerData, long long& nonZeroFlops, long long& hardwareFlops ); void computeFlops(); //! Update relax time for plasticity void updateRelaxTime() { m_oneMinusIntegratingFactor = (m_tv > 0.0) ? 1.0 - exp(-m_timeStepWidth / m_tv) : 1.0; } public: //! flags identifiying if the respective part is allowed to be updated struct { bool localCopy; bool neighboringCopy; bool localInterior; bool neighboringInterior; } m_updatable; #ifdef USE_MPI //! send true LTS buffers volatile bool m_sendLtsBuffers; #endif //! reset lts buffers before performing time predictions volatile bool m_resetLtsBuffers; /* Sub start time of width respect to the next cluster; use 0 if not relevant, for example in GTS. * LTS requires to evaluate a partial time integration of the derivatives. The point zero in time refers to the derivation of the surrounding time derivatives, which * coincides with the last completed time step of the next cluster. The start/end of the time step is the start/end of this clusters time step relative to the zero point. * Example: * <verb> * 5 dt * |-----------------------------------------------------------------------------------------| <<< Time stepping of the next cluster (Cn) (5x larger than the current). * | | | | | | * |*****************|*****************|+++++++++++++++++| | | <<< Status of the current cluster. 
* | | | | | | * |-----------------|-----------------|-----------------|-----------------|-----------------| <<< Time stepping of the current cluster (Cc). * 0 dt 2dt 3dt 4dt 5dt * </verb> * * In the example above two clusters are illustrated: Cc and Cn. Cc is the current cluster under consideration and Cn the next cluster with respect to LTS terminology. * Cn is currently at time 0 and provided Cc with derivatives valid until 5dt. Cc updated already twice and did its last full update to reach 2dt (== subTimeStart). Next * computeNeighboringCopy is called to accomplish the next full update to reach 3dt (+++). Besides working on the buffers of own buffers and those of previous clusters, * Cc needs to evaluate the time prediction of Cn in the interval [2dt, 3dt]. */ double m_subTimeStart; //! number of full updates the cluster has performed since the last synchronization unsigned int m_numberOfFullUpdates; //! simulation time of the last full update (this is a complete volume and boundary integration) double m_fullUpdateTime; //! final time of the prediction (derivatives and time integrated DOFs). double m_predictionTime; //! time of the next receiver output double m_receiverTime; /** * Constructs a new LTS cluster. * * @param i_clusterId id of this cluster with respect to the current rank. * @param i_globalClusterId global id of this cluster. * @param usePlasticity true if using plasticity * @param i_timeKernel time integration kernel. * @param i_volumeKernel volume integration kernel. * @param i_boundaryKernel boundary integration kernel. * @param i_meshStructure mesh structure of this cluster. * @param i_copyCellInformation cell information in the copy layer. * @param i_interiorCellInformation cell information in the interior. * @param i_globalData global data. * @param i_copyCellData cell data in the copy layer. * @param i_interiorCellData cell data in the interior. * @param i_cells degrees of freedom, time buffers, time derivatives. 
**/ TimeCluster(unsigned int i_clusterId, unsigned int i_globalClusterId, bool usePlasticity, MeshStructure *i_meshStructure, CompoundGlobalData i_globalData, seissol::initializers::TimeCluster* i_clusterData, seissol::initializers::TimeCluster* i_dynRupClusterData, seissol::initializers::LTS* i_lts, seissol::initializers::DynamicRupture* i_dynRup, LoopStatistics* i_loopStatistics); /** * Destructor of a LTS cluster. * TODO: Currently prints only statistics in debug mode. **/ ~TimeCluster(); double timeStepWidth() const { return m_timeStepWidth; } void setTimeStepWidth(double timestep) { m_timeStepWidth = timestep; updateRelaxTime(); m_dynamicRuptureKernel.setTimeStepWidth(timestep); } /** * Adds a source to the cluster. * * @param i_meshId mesh id of the point of interest. **/ void addSource( unsigned int i_meshId ); /** * Sets the pointer to the cluster's point sources * * @param i_cellToPointSources Contains mappings of 1 cell offset to m point sources * @param i_numberOfCellToPointSourcesMappings Size of i_cellToPointSources * @param i_pointSources pointer to all point sources used on this cluster */ void setPointSources( sourceterm::CellToPointSourcesMapping const* i_cellToPointSources, unsigned i_numberOfCellToPointSourcesMappings, sourceterm::PointSources const* i_pointSources ); void setReceiverCluster( kernels::ReceiverCluster* receiverCluster) { m_receiverCluster = receiverCluster; } /** * Set Tv constant for plasticity. */ void setTv(double tv) { m_tv = tv; updateRelaxTime(); } #ifdef USE_MPI /** * Computes cell local integration of all cells in the copy layer and initiates the corresponding communication. * LTS buffers (updated more than once in general) are reset to zero up on request; GTS-Buffers are reset independently of the request. * * Cell local integration is: * * time integration * * volume integration * * local boundary integration * * @return true if the update (incl. 
communication requests), false if the update failed due to unfinshed sends of copy data to MPI neighbors. **/ bool computeLocalCopy(); #endif /** * Computes cell local integration of all cells in the interior. * LTS buffers (updated more than once in general) are reset to zero up on request; GTS-Buffers are reset independently of the request. * * Cell local integration is: * * time integration * * volume integration * * local boundary integration **/ void computeLocalInterior(); #ifdef USE_MPI /** * Computes the neighboring contribution to the boundary integral for all cells in the copy layer. * * @return true if the update (incl. communication requests), false if the update failed due to missing data from neighboring ranks. **/ bool computeNeighboringCopy(); #endif /** * Computes the neighboring contribution to the boundary integral for all cells in the interior. **/ void computeNeighboringInterior(); #if defined(_OPENMP) && defined(USE_MPI) && defined(USE_COMM_THREAD) /** * Tests for pending ghost layer communication, active when using communication thread **/ void pollForGhostLayerReceives(); /** * Polls for pending copy layer communication, active when using communication thread **/ void pollForCopyLayerSends(); /** * Start Receives the copy layer data from relevant neighboring MPI clusters, active when using communication thread **/ void startReceiveGhostLayer(); /** * start Sends the associated regions of the copy layer to relevant neighboring MPI clusters, active when using communication thread **/ void startSendCopyLayer(); #endif }; #endif
omptarget.h
//===---- omptarget.h - OpenMP GPU initialization ---------------- CUDA -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file contains the declarations of all library macros, types, // and functions. // //===----------------------------------------------------------------------===// #ifndef OMPTARGET_H #define OMPTARGET_H #include "common/allocator.h" #include "common/debug.h" // debug #include "common/state-queue.h" #include "common/support.h" #include "interface.h" // interfaces with omp, compiler, and user #include "target_impl.h" #define OMPTARGET_NVPTX_VERSION 1.1 // used by the library for the interface with the app #define DISPATCH_FINISHED 0 #define DISPATCH_NOTFINISHED 1 // used by dynamic scheduling #define FINISHED 0 #define NOT_FINISHED 1 #define LAST_CHUNK 2 #define BARRIER_COUNTER 0 #define ORDERED_COUNTER 1 // arguments needed for L0 parallelism only. class omptarget_nvptx_SharedArgs { public: // All these methods must be called by the master thread only. INLINE void Init() { args = buffer; nArgs = MAX_SHARED_ARGS; } INLINE void DeInit() { // Free any memory allocated for outlined parallel function with a large // number of arguments. if (nArgs > MAX_SHARED_ARGS) { SafeFree(args, "new extended args"); Init(); } } INLINE void EnsureSize(size_t size) { if (size > nArgs) { if (nArgs > MAX_SHARED_ARGS) { SafeFree(args, "new extended args"); } args = (void **)SafeMalloc(size * sizeof(void *), "new extended args"); nArgs = size; } } // Called by all threads. INLINE void **GetArgs() const { return args; }; private: // buffer of pre-allocated arguments. void *buffer[MAX_SHARED_ARGS]; // pointer to arguments buffer. // starts off as a pointer to 'buffer' but can be dynamically allocated. 
void **args; // starts off as MAX_SHARED_ARGS but can increase in size. uint32_t nArgs; }; extern DEVICE omptarget_nvptx_SharedArgs EXTERN_SHARED(omptarget_nvptx_globalArgs); // Worker slot type which is initialized with the default worker slot // size of 4*32 bytes. struct __kmpc_data_sharing_slot { __kmpc_data_sharing_slot *Next; __kmpc_data_sharing_slot *Prev; void *PrevSlotStackPtr; void *DataEnd; char Data[DS_Worker_Warp_Slot_Size]; }; // Data structure to keep in shared memory that traces the current slot, stack, // and frame pointer as well as the active threads that didn't exit the current // environment. struct DataSharingStateTy { __kmpc_data_sharing_slot *SlotPtr[DS_Max_Warp_Number]; void *StackPtr[DS_Max_Warp_Number]; void * volatile FramePtr[DS_Max_Warp_Number]; __kmpc_impl_lanemask_t ActiveThreads[DS_Max_Warp_Number]; }; extern DEVICE DataSharingStateTy EXTERN_SHARED(DataSharingState); //////////////////////////////////////////////////////////////////////////////// // task ICV and (implicit & explicit) task state class omptarget_nvptx_TaskDescr { public: // methods for flags INLINE omp_sched_t GetRuntimeSched() const; INLINE void SetRuntimeSched(omp_sched_t sched); INLINE int InParallelRegion() const { return items.flags & TaskDescr_InPar; } INLINE int InL2OrHigherParallelRegion() const { return items.flags & TaskDescr_InParL2P; } INLINE int IsParallelConstruct() const { return items.flags & TaskDescr_IsParConstr; } INLINE int IsTaskConstruct() const { return !IsParallelConstruct(); } // methods for other fields INLINE uint16_t &ThreadId() { return items.threadId; } INLINE uint64_t &RuntimeChunkSize() { return items.runtimeChunkSize; } INLINE omptarget_nvptx_TaskDescr *GetPrevTaskDescr() const { return prev; } INLINE void SetPrevTaskDescr(omptarget_nvptx_TaskDescr *taskDescr) { prev = taskDescr; } // init & copy INLINE void InitLevelZeroTaskDescr(); INLINE void InitLevelOneTaskDescr(omptarget_nvptx_TaskDescr *parentTaskDescr); INLINE void 
Copy(omptarget_nvptx_TaskDescr *sourceTaskDescr); INLINE void CopyData(omptarget_nvptx_TaskDescr *sourceTaskDescr); INLINE void CopyParent(omptarget_nvptx_TaskDescr *parentTaskDescr); INLINE void CopyForExplicitTask(omptarget_nvptx_TaskDescr *parentTaskDescr); INLINE void CopyToWorkDescr(omptarget_nvptx_TaskDescr *masterTaskDescr); INLINE void CopyFromWorkDescr(omptarget_nvptx_TaskDescr *workTaskDescr); INLINE void CopyConvergentParent(omptarget_nvptx_TaskDescr *parentTaskDescr, uint16_t tid, uint16_t tnum); INLINE void SaveLoopData(); INLINE void RestoreLoopData() const; private: // bits for flags: (6 used, 2 free) // 3 bits (SchedMask) for runtime schedule // 1 bit (InPar) if this thread has encountered one or more parallel region // 1 bit (IsParConstr) if ICV for a parallel region (false = explicit task) // 1 bit (InParL2+) if this thread has encountered L2 or higher parallel // region static const uint8_t TaskDescr_SchedMask = (0x1 | 0x2 | 0x4); static const uint8_t TaskDescr_InPar = 0x10; static const uint8_t TaskDescr_IsParConstr = 0x20; static const uint8_t TaskDescr_InParL2P = 0x40; struct SavedLoopDescr_items { int64_t loopUpperBound; int64_t nextLowerBound; int64_t chunk; int64_t stride; kmp_sched_t schedule; } loopData; struct TaskDescr_items { uint8_t flags; // 6 bit used (see flag above) uint8_t unused; uint16_t threadId; // thread id uint64_t runtimeChunkSize; // runtime chunk size } items; omptarget_nvptx_TaskDescr *prev; }; // build on kmp typedef struct omptarget_nvptx_ExplicitTaskDescr { omptarget_nvptx_TaskDescr taskDescr; // omptarget_nvptx task description (must be first) kmp_TaskDescr kmpTaskDescr; // kmp task description (must be last) } omptarget_nvptx_ExplicitTaskDescr; //////////////////////////////////////////////////////////////////////////////// // Descriptor of a parallel region (worksharing in general) class omptarget_nvptx_WorkDescr { public: // access to data INLINE omptarget_nvptx_TaskDescr *WorkTaskDescr() { return &masterTaskICV; 
} private: omptarget_nvptx_TaskDescr masterTaskICV; }; //////////////////////////////////////////////////////////////////////////////// class omptarget_nvptx_TeamDescr { public: // access to data INLINE omptarget_nvptx_TaskDescr *LevelZeroTaskDescr() { return &levelZeroTaskDescr; } INLINE omptarget_nvptx_WorkDescr &WorkDescr() { return workDescrForActiveParallel; } // init INLINE void InitTeamDescr(); INLINE __kmpc_data_sharing_slot *GetPreallocatedSlotAddr(int wid) { worker_rootS[wid].DataEnd = &worker_rootS[wid].Data[0] + DS_Worker_Warp_Slot_Size; // We currently do not have a next slot. worker_rootS[wid].Next = 0; worker_rootS[wid].Prev = 0; worker_rootS[wid].PrevSlotStackPtr = 0; return (__kmpc_data_sharing_slot *)&worker_rootS[wid]; } private: omptarget_nvptx_TaskDescr levelZeroTaskDescr; // icv for team master initial thread omptarget_nvptx_WorkDescr workDescrForActiveParallel; // one, ONLY for the active par ALIGN(16) __kmpc_data_sharing_slot worker_rootS[DS_Max_Warp_Number]; }; //////////////////////////////////////////////////////////////////////////////// // thread private data (struct of arrays for better coalescing) // tid refers here to the global thread id // do not support multiple concurrent kernel a this time class omptarget_nvptx_ThreadPrivateContext { public: // task INLINE omptarget_nvptx_TaskDescr *Level1TaskDescr(int tid) { return &levelOneTaskDescr[tid]; } INLINE void SetTopLevelTaskDescr(int tid, omptarget_nvptx_TaskDescr *taskICV) { topTaskDescr[tid] = taskICV; } INLINE omptarget_nvptx_TaskDescr *GetTopLevelTaskDescr(int tid) const; // parallel INLINE uint16_t &NumThreadsForNextParallel(int tid) { return nextRegion.tnum[tid]; } // schedule (for dispatch) INLINE kmp_sched_t &ScheduleType(int tid) { return schedule[tid]; } INLINE int64_t &Chunk(int tid) { return chunk[tid]; } INLINE int64_t &LoopUpperBound(int tid) { return loopUpperBound[tid]; } INLINE int64_t &NextLowerBound(int tid) { return nextLowerBound[tid]; } INLINE int64_t 
&Stride(int tid) { return stride[tid]; } INLINE omptarget_nvptx_TeamDescr &TeamContext() { return teamContext; } INLINE void InitThreadPrivateContext(int tid); INLINE uint64_t &Cnt() { return cnt; } private: // team context for this team omptarget_nvptx_TeamDescr teamContext; // task ICV for implicit threads in the only parallel region omptarget_nvptx_TaskDescr levelOneTaskDescr[MAX_THREADS_PER_TEAM]; // pointer where to find the current task ICV (top of the stack) omptarget_nvptx_TaskDescr *topTaskDescr[MAX_THREADS_PER_TEAM]; union { // Only one of the two is live at the same time. // parallel uint16_t tnum[MAX_THREADS_PER_TEAM]; } nextRegion; // schedule (for dispatch) kmp_sched_t schedule[MAX_THREADS_PER_TEAM]; // remember schedule type for #for int64_t chunk[MAX_THREADS_PER_TEAM]; int64_t loopUpperBound[MAX_THREADS_PER_TEAM]; // state for dispatch with dyn/guided OR static (never use both at a time) int64_t nextLowerBound[MAX_THREADS_PER_TEAM]; int64_t stride[MAX_THREADS_PER_TEAM]; uint64_t cnt; }; /// Memory manager for statically allocated memory. 
class omptarget_nvptx_SimpleMemoryManager {
private:
  // One key slot per OpenMP state object, replicated per SM.
  struct MemDataTy {
    volatile unsigned keys[OMP_STATE_COUNT];
  } MemData[MAX_SM] ALIGN(128);

  // Mask-based hash; assumes OMP_STATE_COUNT is a power of two -- verify.
  INLINE static uint32_t hash(unsigned key) {
    return key & (OMP_STATE_COUNT - 1);
  }

public:
  // Definitions live elsewhere in the device runtime sources.
  INLINE void Release();
  INLINE const void *Acquire(const void *buf, size_t size);
};

////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// global data tables
////////////////////////////////////////////////////////////////////////////////

extern DEVICE omptarget_nvptx_SimpleMemoryManager
    omptarget_nvptx_simpleMemoryManager;
extern DEVICE uint32_t EXTERN_SHARED(usedMemIdx);
extern DEVICE uint32_t EXTERN_SHARED(usedSlotIdx);
// Per-warp parallel nesting level.  When compiled as OpenMP itself the array
// is placed via `omp allocate` with the pteam allocator; otherwise the
// EXTERN_SHARED mechanism (see common/allocator.h) is used -- presumably both
// land in team-shared storage, confirm against the allocator definitions.
#if _OPENMP
extern DEVICE uint8_t parallelLevel[MAX_THREADS_PER_TEAM / WARPSIZE];
#pragma omp allocate(parallelLevel) allocator(omp_pteam_mem_alloc)
#else
extern DEVICE uint8_t
    EXTERN_SHARED(parallelLevel)[MAX_THREADS_PER_TEAM / WARPSIZE];
#endif
extern DEVICE uint16_t EXTERN_SHARED(threadLimit);
extern DEVICE uint16_t EXTERN_SHARED(threadsInTeam);
extern DEVICE uint16_t EXTERN_SHARED(nThreads);
extern DEVICE omptarget_nvptx_ThreadPrivateContext *
    EXTERN_SHARED(omptarget_nvptx_threadPrivateContext);
extern DEVICE uint32_t EXTERN_SHARED(execution_param);
extern DEVICE void *EXTERN_SHARED(ReductionScratchpadPtr);

////////////////////////////////////////////////////////////////////////////////
// work function (outlined parallel/simd functions) and arguments.
// needed for L1 parallelism only.
//////////////////////////////////////////////////////////////////////////////// typedef void *omptarget_nvptx_WorkFn; extern volatile DEVICE omptarget_nvptx_WorkFn EXTERN_SHARED(omptarget_nvptx_workFn); //////////////////////////////////////////////////////////////////////////////// // get private data structures //////////////////////////////////////////////////////////////////////////////// INLINE omptarget_nvptx_TeamDescr &getMyTeamDescriptor(); INLINE omptarget_nvptx_WorkDescr &getMyWorkDescriptor(); INLINE omptarget_nvptx_TaskDescr * getMyTopTaskDescriptor(bool isSPMDExecutionMode); INLINE omptarget_nvptx_TaskDescr *getMyTopTaskDescriptor(int globalThreadId); //////////////////////////////////////////////////////////////////////////////// // inlined implementation //////////////////////////////////////////////////////////////////////////////// #include "common/omptargeti.h" #endif
sigmoid_kernel_arm.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2020, OPEN AI LAB * Author: haitao@openailab.com */ #include <math.h> #include <arm_neon.h> #include "sigmoid_kernel_arm.h" #define SIGMOID_MAX(a, b) ((a) > (b) ? (a) : (b)) #define SIGMOID_MIN(a, b) ((a) < (b) ? 
(a) : (b)) static inline float fast_exp(float x) { union { uint32_t i; float f; } v; v.i = (1 << 23) * (1.4426950409 * x + 126.93490512f); return v.f; } static float fast_exp1(float x) { volatile union { float f; unsigned int i; } cvt; /* exp(x) = 2^i * 2^f; i = floor (log2(e) * x), 0 <= f <= 1 */ float t = x * 1.442695041f; float fi = floorf(t); float f = t - fi; int i = ( int )fi; cvt.f = (0.3371894346f * f + 0.657636276f) * f + 1.00172476f; /* compute 2^f */ cvt.i += (i << 23); /* scale by 2^i */ return cvt.f; } static float acl_exp(float x) { volatile union { float f; unsigned int i; } cvt; /* exp(x) = = 2^k * exp(x-k ln2); k = round(x/ln2)*/ float t = x * 1.4426950408f; float f = x - (( int )t) * 0.6931471805f; int i = ( int )t; /// cvt.f = (0.3371894346f * f + 0.657636276f) * f + 1.00172476f; /* compute 2^f */ cvt.f = 1 + f * 1.00000011921f + (0.0416598916054f + f * 0.00833693705499f) * f * f + ((0.500000596046f + f * 0.166665703058f) + (0.0014122662833f + f * 0.000195780929062f) * f * f) * f * f * f * f; cvt.i += (i << 23); /* scale by 2^i */ return cvt.f; } static float exp10_f32(float x) { x = 1.0 + x * 0.0009765625f; x *= x; x *= x; x *= x; x *= x; x *= x; x *= x; x *= x; x *= x; x *= x; x *= x; return x; } static struct tab exp_tab; static void init_tab(void) { exp_tab.a0 = vdupq_n_f32(1.f); exp_tab.a1 = vdupq_n_f32(0.0416598916054f); exp_tab.a2 = vdupq_n_f32(0.500000596046f); exp_tab.a3 = vdupq_n_f32(0.0014122662833f); exp_tab.a4 = vdupq_n_f32(1.00000011921f); exp_tab.a5 = vdupq_n_f32(0.00833693705499f); exp_tab.a6 = vdupq_n_f32(0.166665703058f); exp_tab.a7 = vdupq_n_f32(0.000195780929062f); } static inline float32x4_t vtaylor_polyq_f32(float32x4_t x, struct tab* coeffs) { float32x4_t A = vmlaq_f32(coeffs->a0, coeffs->a4, x); float32x4_t B = vmlaq_f32(coeffs->a2, coeffs->a6, x); float32x4_t C = vmlaq_f32(coeffs->a1, coeffs->a5, x); float32x4_t D = vmlaq_f32(coeffs->a3, coeffs->a7, x); float32x4_t x2 = vmulq_f32(x, x); float32x4_t x4 = vmulq_f32(x2, x2); 
float32x4_t res = vmlaq_f32(vmlaq_f32(A, B, x2), vmlaq_f32(C, D, x2), x4); return res; } /* ACL exp function impelement */ static inline float32x4_t vexpq_f32(float32x4_t x) { const float32x4_t CONST_LN2 = vdupq_n_f32(0.6931471805f); // ln(2) const float32x4_t CONST_INV_LN2 = vdupq_n_f32(1.4426950408f); // 1/ln(2) const float32x4_t CONST_0 = vdupq_n_f32(0.f); const int32x4_t CONST_NEGATIVE_126 = vdupq_n_s32(-126); // Perform range reduction [-log(2),log(2)] int32x4_t m = vcvtq_s32_f32(vmulq_f32(x, CONST_INV_LN2)); float32x4_t val = vmlsq_f32(x, vcvtq_f32_s32(m), CONST_LN2); // Polynomial Approximation float32x4_t poly = vtaylor_polyq_f32(val, &exp_tab); // Reconstruct poly = vreinterpretq_f32_s32(vqaddq_s32(vreinterpretq_s32_f32(poly), vqshlq_n_s32(m, 23))); poly = vbslq_f32(vcltq_s32(m, CONST_NEGATIVE_126), CONST_0, poly); return poly; } /* exp(x) = lim(1+x/n)^n // n=10 */ static inline float32x4_t vexpq10_f32(float32x4_t x) { x = vmlaq_n_f32(vdupq_n_f32(1.0f), x, 0.0009765625f); // n = 10 x = vmulq_f32(x, x); x = vmulq_f32(x, x); x = vmulq_f32(x, x); x = vmulq_f32(x, x); x = vmulq_f32(x, x); x = vmulq_f32(x, x); x = vmulq_f32(x, x); x = vmulq_f32(x, x); x = vmulq_f32(x, x); x = vmulq_f32(x, x); return x; } int sigmoid_run(struct ir_tensor* output_tensor, struct ir_tensor* input_tensor, int num_thread) { init_tab(); float* input = ( float* )input_tensor->data; float* output = ( float* )output_tensor->data; float32x4_t min = vdupq_n_f32(-30.0f); float32x4_t max = vdupq_n_f32(30.0f); float32x4_t tmp_vec = vdupq_n_f32(1); int chan_num = input_tensor->dims[0] * input_tensor->dims[1]; int chan_size = input_tensor->dims[2] * input_tensor->dims[3]; #pragma omp parallel for num_threads(num_thread) for (int j = 0; j < chan_num; j++) { float* pinput = input + j * chan_size; float* poutput = output + j * chan_size; for (int i = 0; i < (chan_size & -4); i += 4) { float32x4_t _input = vld1q_f32(pinput + i); _input = vmaxq_f32(_input, min); _input = vminq_f32(_input, max); 
float32x4_t tmp_exp = vaddq_f32(tmp_vec, vexpq10_f32(vmulq_n_f32(_input, -1.0f))); float32x4_t out = vrecpeq_f32(tmp_exp); out = vmulq_f32(vrecpsq_f32(tmp_exp, out), out); out = vmulq_f32(vrecpsq_f32(tmp_exp, out), out); vst1q_f32(poutput, out); poutput += 4; } for (int i = chan_size & ~3; i < chan_size; i++) { pinput[i] = SIGMOID_MIN(pinput[i], 30.0f); pinput[i] = SIGMOID_MAX(pinput[i], -30.0f); float tmp_exp = exp10_f32(-pinput[i]); *poutput++ = 1 / (1 + tmp_exp); } } return 0; }
matProduct1_orphan.c
/******************************************************************************
* FILE: omp_mm.c
* DESCRIPTION:
*   OpenMp Example - Matrix Multiply - C Version
*   Demonstrates a matrix multiply using OpenMP. Threads share row iterations
*   according to a predefined chunk size.
* AUTHOR: Blaise Barney
* LAST REVISED: 06/28/05
******************************************************************************/
/**
 * This program performs the multiplication of two matrix's.
 * Online source:
 * https://computing.llnl.gov/tutorials/openMP/samples/C/omp_mm.c
 **/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

#ifdef _CIVL
$input int NRA=5;                 /* number of rows in matrix A */
$input int NCA=5;                 /* number of columns in matrix A */
$input int NCB=5;                 /* number of columns in matrix B */
#else
#define NRA 8                 /* number of rows in matrix A */
#define NCA 8                 /* number of columns in matrix A */
#define NCB 8                 /* number of columns in matrix B */
#endif

double	a[NRA][NCA],           /* matrix A to be multiplied */
	b[NCA][NCB],           /* matrix B to be multiplied */
	c[NRA][NCB];           /* result matrix C */
int chunk;                     /* shared chunk size for all worksharing loops */

/* Multiply a and b into c. The `omp for` here is an ORPHANED worksharing
 * directive: it binds to the parallel region active in main() when this
 * function is called from inside that region. `chunk` is read from the
 * shared global set up before the region starts. */
void matProd(int tid){
  int i,j,k;

#pragma omp for schedule (static, chunk)
  for (i=0; i<NRA; i++)
    {
    printf("Thread=%d did row=%d\n",tid,i);
    for(j=0; j<NCB; j++)
      for (k=0; k<NCA; k++)
        c[i][j] += a[i][k] * b[k][j];
    }
}

/* Extra call level to demonstrate that orphaned worksharing directives bind
 * through nested function calls. */
void foo(int tid){
  matProd(tid);
}

int main (int argc, char *argv[])
{
int	tid, nthreads, i, j, k;

chunk = 10;                    /* set loop iteration chunk size */

/*** Spawn a parallel region explicitly scoping all variables ***/
#pragma omp parallel shared(a,b,c,nthreads,chunk) private(tid,i,j,k)
  {
  tid = omp_get_thread_num();
  /* only the master thread reports the team size */
  if (tid == 0)
    {
    nthreads = omp_get_num_threads();
    printf("Starting matrix multiple example with %d threads\n",nthreads);
    printf("Initializing matrices...\n");
    }
  /*** Initialize matrices ***/
#pragma omp for schedule (static, chunk)
  for (i=0; i<NRA; i++)
    for (j=0; j<NCA; j++)
      a[i][j]= i+j;
#pragma omp for schedule (static, chunk)
  for (i=0; i<NCA; i++)
    for (j=0; j<NCB; j++)
      b[i][j]= i*j;
#pragma omp for schedule (static, chunk)
  for (i=0; i<NRA; i++)
    for (j=0; j<NCB; j++)
      c[i][j]= 0;

/*** Do matrix multiply sharing iterations on outer loop ***/
/*** Display who does which iterations for demonstration purposes ***/
  printf("Thread %d starting matrix multiply...\n",tid);
  foo(tid);
  }   /*** End of parallel region ***/

/*** Print results ***/
printf("******************************************************\n");
printf("Result Matrix:\n");
for (i=0; i<NRA; i++)
  {
  for (j=0; j<NCB; j++)
    printf("%6.2f   ", c[i][j]);
  printf("\n");
  }
printf("******************************************************\n");
printf ("Done.\n");

return 0;   /* explicit success status (was missing) */
}
GB_binop__bclr_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bclr_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__bclr_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__bclr_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__bclr_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bclr_uint64) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bclr_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__bclr_uint64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bclr_uint64) // C=scalar+B GB (_bind1st__bclr_uint64) // C=scalar+B' GB (_bind1st_tran__bclr_uint64) // C=A+scalar GB (_bind2nd__bclr_uint64) // C=A'+scalar GB (_bind2nd_tran__bclr_uint64) // C type: uint64_t // A type: uint64_t // A pattern? 0 // B type: uint64_t // B pattern? 
0 // BinaryOp: cij = GB_BITCLR (aij, bij, uint64_t, 64) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_BITCLR (x, y, uint64_t, 64) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BCLR || GxB_NO_UINT64 || GxB_NO_BCLR_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bclr_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bclr_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bclr_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t 
*restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bclr_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint64_t alpha_scalar ; uint64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ; beta_scalar = (*((uint64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bclr_uint64) ( GrB_Matrix C, const int 
C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bclr_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bclr_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bclr_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bclr_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_BITCLR (x, bij, uint64_t, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bclr_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_BITCLR (aij, y, uint64_t, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITCLR (x, aij, uint64_t, 64) ; \ } GrB_Info GB (_bind1st_tran__bclr_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITCLR (aij, y, uint64_t, 64) ; \ } GrB_Info GB (_bind2nd_tran__bclr_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
DenseSegment.h
/****************************************************************************** * ** Copyright (c) 2016, Intel Corporation ** * ** All rights reserved. ** * ** ** * ** Redistribution and use in source and binary forms, with or without ** * ** modification, are permitted provided that the following conditions ** * ** are met: ** * ** 1. Redistributions of source code must retain the above copyright ** * ** notice, this list of conditions and the following disclaimer. ** * ** 2. Redistributions in binary form must reproduce the above copyright ** * ** notice, this list of conditions and the following disclaimer in the ** * ** documentation and/or other materials provided with the distribution. ** * ** 3. Neither the name of the copyright holder nor the names of its ** * ** contributors may be used to endorse or promote products derived ** * ** from this software without specific prior written permission. ** * ** ** * ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** * ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** * ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** * ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** * ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** * ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** * ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** * ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** * ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** * ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** * ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * ******************************************************************************/ /* Michael Anderson (Intel Corp.) 
* * ******************************************************************************/ #ifndef SRC_DENSESEGMENT_H_ #define SRC_DENSESEGMENT_H_ #include "GMDP/utils/edgelist.h" #include "GMDP/utils/bitvector.h" #include "GMDP/singlenode/unionreduce.h" #include <string> #include <vector> #include <sstream> #include <cstdio> inline double get_compression_threshold(); enum compression_decision { NONE, COMPRESSED, SERIALIZED }; struct send_metadata { int nnz; size_t serialized_nbytes; size_t serialized_npartitions; friend class boost::serialization::access; template<class Archive> void serialize(Archive & ar, const unsigned int version) { ar & nnz; ar & serialized_nbytes; ar & serialized_npartitions; } }; template <typename T> class buffer { public: bool uninitialized; int nnz; int capacity; int num_ints; size_t serialized_nbytes; size_t serialized_npartitions; T * value; int * bit_vector; T * compressed_data; int * compressed_indices; char * serialized_data; size_t * serialized_partition_nbytes_scan; size_t * serialized_partition_nnz_scan; // Serialize friend boost::serialization::access; template<class Archive> void save(Archive& ar, const unsigned int version) const { ar & uninitialized; ar & nnz; ar & capacity; ar & num_ints; ar & serialized_nbytes; ar & serialized_npartitions; for(int i = 0 ; i < capacity; i++) { ar & value[i]; } for(int i = 0 ; i < num_ints; i++) { ar & bit_vector[i]; } for(int i = 0 ; i < capacity ; i++) { ar & compressed_data[i]; } for(int i = 0 ; i < capacity ; i++) { ar & compressed_indices[i]; } for(int i = 0 ; i < serialized_nbytes; i++) { ar & serialized_data[i]; } for(int i = 0 ; i < serialized_npartitions + 1; i++) { ar & serialized_partition_nbytes_scan[i]; } for(int i = 0 ; i < serialized_npartitions + 1; i++) { ar & serialized_partition_nnz_scan[i]; } } template<class Archive> void load(Archive& ar, const unsigned int version) { ar & uninitialized; ar & nnz; ar & capacity; ar & num_ints; ar & serialized_nbytes; ar & 
serialized_npartitions; delete [] value; delete [] bit_vector; delete [] compressed_data; delete [] compressed_indices; delete [] serialized_data; delete [] serialized_partition_nbytes_scan; delete [] serialized_partition_nnz_scan; value = new T[capacity]; bit_vector = new int[num_ints]; compressed_data = new T[capacity]; compressed_indices = new int[capacity]; serialized_data = new char[serialized_nbytes]; serialized_partition_nbytes_scan = new size_t[serialized_npartitions+1]; serialized_partition_nnz_scan = new size_t[serialized_npartitions+1]; for(int i = 0 ; i < capacity; i++) { ar & value[i]; } for(int i = 0 ; i < num_ints; i++) { ar & bit_vector[i]; } for(int i = 0 ; i < capacity ; i++) { ar & compressed_data[i]; } for(int i = 0 ; i < capacity ; i++) { ar & compressed_indices[i]; } for(int i = 0 ; i < serialized_nbytes; i++) { ar & serialized_data[i]; } for(int i = 0 ; i < serialized_npartitions + 1; i++) { ar & serialized_partition_nbytes_scan[i]; } for(int i = 0 ; i < serialized_npartitions + 1; i++) { ar & serialized_partition_nnz_scan[i]; } } BOOST_SERIALIZATION_SPLIT_MEMBER() buffer(int _capacity, int _num_ints) { capacity = _capacity; num_ints = _num_ints; value = new T[capacity]; bit_vector = new int[num_ints]; //compressed_data = reinterpret_cast<T*>(_mm_malloc(capacity * sizeof(T) + capacity*sizeof(int), 64)); compressed_data = new T[capacity]; compressed_indices = new int[capacity]; uninitialized = true; serialized_data = new char[0]; serialized_nbytes = 0; serialized_npartitions = omp_get_max_threads() * 16; serialized_partition_nbytes_scan = new size_t[serialized_npartitions+1]; serialized_partition_nnz_scan = new size_t[serialized_npartitions+1]; } buffer() : buffer(0,0) {} void alloc_serialized(size_t sz) { delete [] serialized_data; serialized_data = new char[sz]; serialized_nbytes = sz; } int compute_nnz() const { int len = 0; #pragma omp parallel for reduction(+:len) for (int ii = 0 ; ii < num_ints ; ii++) { int p = 
_popcnt32(bit_vector[ii]); len += p; } return len; } int compute_nnz(int start, int finish) const { int len = 0; #pragma omp parallel for reduction(+:len) for (int ii = start ; ii < finish ; ii++) { int p = _popcnt32(bit_vector[ii]); len += p; } return len; } template<bool EXTENDS_SERIALIZABLE = std::is_base_of<Serializable,T>::value, typename std::enable_if<EXTENDS_SERIALIZABLE>::type* = nullptr> void decompress() { memset(bit_vector, 0, num_ints* sizeof(int)); std::stringstream * sss = new std::stringstream[serialized_npartitions]; #pragma omp parallel for for(int p = 0 ; p < serialized_npartitions ; p++) { int i_per_partition = (num_ints + serialized_npartitions - 1) / serialized_npartitions; int start_i = i_per_partition * p; int end_i = i_per_partition * (p+1); if(end_i > num_ints) end_i = num_ints; sss[p].write(serialized_data + serialized_partition_nbytes_scan[p], (serialized_partition_nbytes_scan[p+1]-serialized_partition_nbytes_scan[p])); boost::archive::binary_iarchive ia(sss[p]); for(unsigned long int i = 0 ; i < (serialized_partition_nnz_scan[p+1] - serialized_partition_nnz_scan[p]) ; i++) { int idx; ia >> idx; ia >> value[idx]; set_bitvector(idx, bit_vector); } } delete [] sss; } template<bool EXTENDS_SERIALIZABLE = std::is_base_of<Serializable,T>::value, typename std::enable_if<!EXTENDS_SERIALIZABLE>::type* = nullptr> void decompress() { memset(bit_vector, 0, num_ints* sizeof(int)); //compressed_indices = reinterpret_cast<int*>(compressed_data + nnz); int npartitions = omp_get_max_threads(); int * start_nnzs = new int[npartitions]; int * end_nnzs = new int[npartitions]; int mystart = 0; int my_nz_per = (nnz + npartitions - 1) / npartitions; my_nz_per = ((my_nz_per + 31) / 32) * 32; for(int p = 0 ; p < npartitions ; p++) { start_nnzs[p] = mystart; mystart += my_nz_per; if(mystart > nnz) mystart = nnz; if(mystart < nnz) { int start32 = compressed_indices[mystart] / 32; while((mystart < nnz) && compressed_indices[mystart] / 32 == start32) mystart++; } 
end_nnzs[p] = mystart; } #pragma omp parallel for for(int p = 0 ; p < npartitions ; p++) { int start_nnz = start_nnzs[p]; int end_nnz = end_nnzs[p]; for(int i = start_nnz ; i < end_nnz ; i++) { int idx = compressed_indices[i]; set_bitvector(idx, bit_vector); value[idx] = compressed_data[i]; } } delete [] start_nnzs; delete [] end_nnzs; } template<bool EXTENDS_SERIALIZABLE = std::is_base_of<Serializable,T>::value, typename std::enable_if<EXTENDS_SERIALIZABLE>::type* = nullptr> void compress() { size_t * serialized_partition_nbytes = new size_t[serialized_npartitions]; size_t * serialized_partition_nnz = new size_t[serialized_npartitions]; std::stringstream * sss = new std::stringstream[serialized_npartitions]; #pragma omp parallel for for(int p = 0 ; p < serialized_npartitions ; p++) { int i_per_partition = (num_ints + serialized_npartitions - 1) / serialized_npartitions; int start_i = i_per_partition * p; int end_i = i_per_partition * (p+1); if(end_i > num_ints) end_i = num_ints; serialized_partition_nnz[p] = 0; boost::archive::binary_oarchive oa(sss[p]); for(int ii = start_i ; ii < end_i ; ii++) { if(_popcnt32(bit_vector[ii]) == 0) continue; for(int i = ii*32 ; i < (ii+1)*32 ; i++) { if(get_bitvector(i, bit_vector)) { oa << i; oa << value[i]; serialized_partition_nnz[p]++; } } } sss[p].seekg(0, sss[p].end); size_t sz = sss[p].tellg(); sss[p].seekg(0, sss[p].beg); serialized_partition_nbytes[p] = sz; } serialized_partition_nnz_scan[0] = 0; serialized_partition_nbytes_scan[0] = 0; for(int p = 0 ; p < serialized_npartitions ; p++) { serialized_partition_nnz_scan[p+1] = serialized_partition_nnz_scan[p] + serialized_partition_nnz[p]; serialized_partition_nbytes_scan[p+1] = serialized_partition_nbytes_scan[p] + serialized_partition_nbytes[p]; } size_t sz = serialized_partition_nbytes_scan[serialized_npartitions]; alloc_serialized(sz); #pragma omp parallel for for(int p = 0 ; p < serialized_npartitions ; p++) { sss[p].read(serialized_data + 
serialized_partition_nbytes_scan[p], serialized_partition_nbytes[p]); } delete [] serialized_partition_nnz; delete [] serialized_partition_nbytes; delete [] sss; } template<bool EXTENDS_SERIALIZABLE = std::is_base_of<Serializable,T>::value, typename std::enable_if<!EXTENDS_SERIALIZABLE>::type* = nullptr> void compress() { int npartitions = omp_get_max_threads() * 16; int * partition_nnz = new int[npartitions]; int * partition_nnz_scan = new int[npartitions+1]; #pragma omp parallel for for(int p = 0 ; p < npartitions ; p++) { int i_per_partition = (num_ints + npartitions - 1) / npartitions; int start_i = i_per_partition * p; int end_i = i_per_partition * (p+1); if(end_i > num_ints) end_i = num_ints; partition_nnz[p] = compute_nnz(start_i, end_i); } partition_nnz_scan[0] = 0; nnz = 0; for(int p = 0 ; p < npartitions ; p++) { partition_nnz_scan[p+1] = partition_nnz_scan[p] + partition_nnz[p]; nnz += partition_nnz[p]; } #pragma omp parallel for for(int p = 0 ; p < npartitions ; p++) { int i_per_partition = (num_ints + npartitions - 1) / npartitions; int start_i = i_per_partition * p; int end_i = i_per_partition * (p+1); if(end_i > num_ints) end_i = num_ints; int nzcnt = partition_nnz_scan[p]; for(int ii = start_i ; ii < end_i ; ii++) { if(_popcnt32(bit_vector[ii]) == 0) continue; for(int i = ii*32 ; i < (ii+1)*32 ; i++) { if(get_bitvector(i, bit_vector)) { compressed_data[nzcnt] = value[i]; compressed_indices[nzcnt] = i; nzcnt++; } } } } delete [] partition_nnz; delete [] partition_nnz_scan; } ~buffer() { delete [] value; delete [] bit_vector; delete [] compressed_data; delete [] compressed_indices; delete [] serialized_partition_nbytes_scan; delete [] serialized_partition_nnz_scan; delete [] serialized_data; } }; template <typename T> class DenseSegment { public: std::string name; int capacity; int num_ints; buffer<T> *properties; send_metadata received_md; std::vector<send_metadata> queued_md; std::vector<buffer<T> * > received; std::vector<buffer<T> * > 
uninitialized; friend boost::serialization::access; template<class Archive> void save(Archive& ar, const unsigned int version) const { bool properties_is_null = (properties == NULL); ar & properties_is_null; ar & name; ar & capacity; ar & num_ints; if(properties != NULL) { ar & properties; } ar & received_md; ar & queued_md; ar & received; ar & uninitialized; } template<class Archive> void load(Archive& ar, const unsigned int version) { bool properties_null; ar & properties_null; ar & name; ar & capacity; ar & num_ints; if(!properties_null) { ar & properties; } else { properties = NULL; } ar & received_md; ar & queued_md; ar & received; ar & uninitialized; } BOOST_SERIALIZATION_SPLIT_MEMBER() DenseSegment(int n) { capacity = n; num_ints = (n + sizeof(int) * 8 - 1) / (sizeof(int) * 8); properties = NULL; } DenseSegment() : DenseSegment(0) {} void ingestEdges(edge_t<T>* edges, int _m, int _nnz, int row_start) { alloc(); initialize(); for (uint64_t i = 0; i < (uint64_t)_nnz; i++) { int src = edges[i].src - row_start - 1; set_bitvector(src, properties->bit_vector); properties->value[src] = edges[i].val; } properties->nnz = _nnz; properties->uninitialized = false; } ~DenseSegment() { if(properties != NULL) { delete properties; } for(auto it = received.begin() ; it != received.end() ; it++) { delete *it; } received.clear(); for(auto it = uninitialized.begin() ; it != uninitialized.end() ; it++) { delete *it; } uninitialized.clear(); } int compute_nnz() const { if(properties == NULL) return 0; if(properties->uninitialized) return 0; return properties->compute_nnz(); } int compute_nnz(int start, int finish) const { if(properties == NULL) return 0; if(properties->uninitialized) return 0; return properties->compute_nnz(start, finish); } compression_decision should_compress(int test_nnz) { if(std::is_base_of<Serializable,T>::value) return SERIALIZED; if(test_nnz > get_compression_threshold() * capacity) return NONE; return COMPRESSED; } void compress() { alloc(); 
initialize(); if(should_compress(properties->nnz) == COMPRESSED || should_compress(properties->nnz) == SERIALIZED) { properties->compress(); } } void decompress() { assert(properties); if(should_compress(properties->nnz) == COMPRESSED || should_compress(properties->nnz) == SERIALIZED) { properties->decompress(); } } void set_uninitialized_received() { for(auto it = received.begin() ; it != received.end() ; it++) { (*it)->uninitialized = true; uninitialized.push_back(*it); } received.clear(); } void set_uninitialized() { set_uninitialized_received(); if(properties != NULL) { properties->uninitialized = true; properties->nnz = 0; } } void alloc() { if(properties == NULL) { properties = new buffer<T>(capacity, num_ints); } } void initialize() { if(properties->uninitialized) { memset(properties->bit_vector, 0, num_ints* sizeof(int)); properties->nnz = 0; } properties->uninitialized = false; } int getNNZ() { return properties->nnz; } void set(int idx, T val) { alloc(); initialize(); if(!get_bitvector(idx-1, properties->bit_vector)) properties->nnz++; properties->value[idx - 1] = val; set_bitvector(idx-1, properties->bit_vector); properties->uninitialized = false; } void setAll(T val) { alloc(); //initialize(); properties->uninitialized=false; if(num_ints == 0) return; properties->bit_vector[num_ints-1] = 0; #pragma omp parallel for for(int i = 0 ; i < num_ints-1 ; i++) { properties->bit_vector[i] = 0xFFFFFFFF; } for(int idx = std::max(0, capacity-32) ; idx < capacity ; idx++) { set_bitvector(idx, properties->bit_vector); } properties->nnz = capacity; #pragma omp parallel for for(int i = 0 ; i < capacity ; i++) { properties->value[i] = val; } } T get(const int idx) const { assert(properties); assert(!properties->uninitialized); return properties->value[idx - 1]; } void send_nnz(int myrank, int dst_rank, std::vector<MPI_Request>* requests) { send_metadata md = {properties->nnz, properties->serialized_nbytes, properties->serialized_npartitions}; MPI_Send(&md, sizeof(md), 
MPI_BYTE, dst_rank, 0, MPI_COMM_WORLD); } void recv_nnz_queue(int myrank, int src_rank, std::vector<MPI_Request>* requests) { send_metadata md; MPI_Recv(&md, sizeof(md), MPI_BYTE, src_rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); queued_md.insert(queued_md.begin(), md); } void recv_nnz(int myrank, int src_rank, std::vector<MPI_Request>* requests) { alloc(); MPI_Recv(&received_md, sizeof(send_metadata), MPI_BYTE, src_rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); } void send_segment(int myrank, int dst_rank, std::vector<MPI_Request>* requests) { if(should_compress(properties->nnz) == COMPRESSED) { MPI_Request r1; MPI_Request r2; MPI_Isend(properties->compressed_data, properties->nnz * sizeof(T), MPI_BYTE, dst_rank, 0, MPI_COMM_WORLD, &r1); MPI_Isend(properties->compressed_indices, properties->nnz * sizeof(int), MPI_BYTE, dst_rank, 0, MPI_COMM_WORLD, &r2); requests->push_back(r1); requests->push_back(r2); } else if(should_compress(properties->nnz) == SERIALIZED) { MPI_Request r1; MPI_Request r2; MPI_Request r3; MPI_Isend(properties->serialized_data, properties->serialized_nbytes, MPI_BYTE, dst_rank, 0, MPI_COMM_WORLD, &r1); MPI_Isend(properties->serialized_partition_nnz_scan, (properties->serialized_npartitions+1) * sizeof(size_t), MPI_BYTE, dst_rank, 0, MPI_COMM_WORLD, &r2); MPI_Isend(properties->serialized_partition_nbytes_scan, (properties->serialized_npartitions+1) * sizeof(size_t), MPI_BYTE, dst_rank, 0, MPI_COMM_WORLD, &r3); requests->push_back(r1); requests->push_back(r2); requests->push_back(r3); } else { MPI_Request r1; MPI_Request r2; MPI_Isend(properties->value, capacity * sizeof(T), MPI_BYTE, dst_rank, 0, MPI_COMM_WORLD, &r1); MPI_Isend(properties->bit_vector, num_ints * sizeof(int), MPI_BYTE, dst_rank, 0, MPI_COMM_WORLD, &r2); requests->push_back(r1); requests->push_back(r2); } } void recv_buffer(send_metadata md, buffer<T> * p, int myrank, int src_rank, std::vector<MPI_Request>* requests) { p->nnz = md.nnz; p->serialized_nbytes = md.serialized_nbytes; 
p->serialized_npartitions = md.serialized_npartitions; if(should_compress(p->nnz) == COMPRESSED) { MPI_Request r1; MPI_Request r2; MPI_Irecv(p->compressed_data, p->nnz * sizeof(T), MPI_BYTE, src_rank, 0, MPI_COMM_WORLD, &r1); MPI_Irecv(p->compressed_indices, p->nnz * sizeof(int), MPI_BYTE, src_rank, 0, MPI_COMM_WORLD, &r2); requests->push_back(r1); requests->push_back(r2); } else if(should_compress(p->nnz) == SERIALIZED) { MPI_Request r1; MPI_Request r2; MPI_Request r3; p->alloc_serialized(p->serialized_nbytes); MPI_Irecv(p->serialized_data, p->serialized_nbytes, MPI_BYTE, src_rank, 0, MPI_COMM_WORLD, &r1); MPI_Irecv(p->serialized_partition_nnz_scan, (p->serialized_npartitions+1) * sizeof(size_t), MPI_BYTE, src_rank, 0, MPI_COMM_WORLD, &r2); MPI_Irecv(p->serialized_partition_nbytes_scan, (p->serialized_npartitions+1) * sizeof(size_t), MPI_BYTE, src_rank, 0, MPI_COMM_WORLD, &r3); requests->push_back(r1); requests->push_back(r2); requests->push_back(r3); } else { MPI_Request r1; MPI_Request r2; MPI_Irecv(p->value, capacity * sizeof(T), MPI_BYTE, src_rank, 0, MPI_COMM_WORLD, &r1); MPI_Irecv(p->bit_vector, num_ints* sizeof(int), MPI_BYTE, src_rank, 0, MPI_COMM_WORLD, &r2); requests->push_back(r1); requests->push_back(r2); } p->uninitialized = false; } void recv_segment_queue(int myrank, int src_rank, std::vector<MPI_Request>* requests) { buffer<T> * new_properties; if(uninitialized.size() > 0) { new_properties = uninitialized.back(); uninitialized.pop_back(); } else { new_properties = new buffer<T>(capacity, num_ints); } send_metadata md = queued_md.back(); queued_md.pop_back(); recv_buffer(md, new_properties, myrank, src_rank, requests); received.push_back(new_properties); } void recv_segment(int myrank, int src_rank, std::vector<MPI_Request>* requests) { recv_buffer(received_md, properties, myrank, src_rank, requests); } void save(std::string fname, int start_id, int _m, bool includeHeader) { int nnz = compute_nnz(); std::ofstream fout; fout.open(fname); 
if(includeHeader) { fout << _m << " " << nnz << std::endl; } for(int i = 0 ; i < capacity ; i++) { if(get_bitvector(i, properties->bit_vector)) { fout << i + start_id << " " << properties->value[i] << std::endl; } } fout.close(); } void get_edges(edge_t<T> * edges, unsigned int start_nz) const { unsigned int mycnt = 0; for(int i = 0 ; i < capacity ; i++) { if(get_bitvector(i, properties->bit_vector)) { edges[mycnt].src = start_nz + i + 1; edges[mycnt].dst = 1; edges[mycnt].val = properties->value[i]; mycnt++; } } } template <typename Ta, typename Tb, typename Tc> void union_received(void (*op_fp)(Ta, Tb, Tc*, void*), void* vsp) { alloc(); initialize(); for(auto it = received.begin() ; it != received.end() ; it++) { if(should_compress((*it)->nnz) == COMPRESSED) { union_compressed((*it)->compressed_data, (*it)->compressed_indices, (*it)->nnz, capacity, num_ints, properties->value, properties->bit_vector, op_fp, vsp); } else if(should_compress((*it)->nnz) == SERIALIZED) { (*it)->decompress(); union_dense((*it)->value, (*it)->bit_vector, capacity, num_ints, properties->value, properties->bit_vector, properties->value, properties->bit_vector, op_fp, vsp); } else { union_dense((*it)->value, (*it)->bit_vector, capacity, num_ints, properties->value, properties->bit_vector, properties->value, properties->bit_vector, op_fp, vsp); } } } }; #endif // SRC_DENSESEGMENT_H_
gather_nd_op_cpu_impl.h
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef TENSORFLOW_CORE_KERNELS_GATHER_ND_OP_CPU_IMPL_H_ #define TENSORFLOW_CORE_KERNELS_GATHER_ND_OP_CPU_IMPL_H_ // Specialization of GatherNdSlice to CPU #define EIGEN_USE_THREADS #include <atomic> #include "tensorflow/core/framework/bounds_check.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/kernels/gather_nd_op.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/mem.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/util/util.h" namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; namespace generator { template <typename T, typename Index, int IXDIM> class GatherNdSliceGenerator { public: EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE GatherNdSliceGenerator( const Index slice_size, typename TTypes<Index>::ConstMatrix Tindices, typename TTypes<T, IXDIM + 1>::ConstTensor Tparams, typename TTypes<T>::Matrix Tout, std::atomic<Index>* error_loc) : slice_size_(slice_size), Tindices_(Tindices), Tparams_(Tparams), Tout_(Tout), error_loc_(error_loc) {} EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool GenerateIndices( const Index loc, Eigen::array<Eigen::DenseIndex, IXDIM + 1>* ix) const { (*ix)[IXDIM] = 0; bool out_of_bounds = false; for 
(int i = 0; i < IXDIM; ++i) { const Index ix_i = internal::SubtleMustCopy(Tindices_(loc, i)); (*ix)[i] = ix_i; out_of_bounds |= !FastBoundsCheck(ix_i, Tparams_.dimension(i)); } return out_of_bounds; } EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE int32 operator()(const Eigen::array<Eigen::DenseIndex, 1>& loc_array) const { const Index loc = loc_array[0]; Eigen::array<Eigen::DenseIndex, IXDIM + 1> ix; Eigen::array<Eigen::DenseIndex, 2> ix_out; ix_out[0] = loc; ix_out[1] = 0; const bool out_of_bounds = GenerateIndices(loc, &ix); if (TF_PREDICT_FALSE(out_of_bounds)) { error_loc_->store(loc); std::fill_n(&Tout_(ix_out), slice_size_, T()); } else { std::copy_n(&Tparams_(ix), slice_size_, &Tout_(ix_out)); } return static_cast<int32>(0); // Return something... } private: const Index slice_size_; const typename TTypes<Index>::ConstMatrix Tindices_; const typename TTypes<T, IXDIM + 1>::ConstTensor Tparams_; mutable typename TTypes<T>::Matrix Tout_; std::atomic<Index>* error_loc_; }; } // namespace generator namespace functor { template <typename T, typename Index, int IXDIM> struct GatherNdSlice<CPUDevice, T, Index, IXDIM> { Index operator()(const CPUDevice& d, const Index slice_size, typename TTypes<int32>::Scalar Tscratch, typename TTypes<T, IXDIM + 1>::ConstTensor Tparams, typename TTypes<Index>::ConstMatrix Tindices, typename TTypes<T>::Matrix Tout) { std::atomic<Index> error_loc(-1); const Eigen::DenseIndex batch_size = Tindices.dimension(0); #if !defined(EIGEN_HAS_INDEX_LIST) Eigen::Tensor<Eigen::DenseIndex, 1>::Dimensions reshape_dims{{ 1 }}; Eigen::array<Eigen::DenseIndex, 1> broadcast_dims{{ batch_size }}; #else Eigen::IndexList<Eigen::type2index<1> > reshape_dims; Eigen::IndexList<Eigen::DenseIndex> broadcast_dims; broadcast_dims.set(0, batch_size); #endif generator::GatherNdSliceGenerator<T, Index, IXDIM> gather_nd_generator( slice_size, Tindices, Tparams, Tout, &error_loc); #if defined(INTEL_MKL) && defined(ENABLE_MKL) // Eigen implementation below is not highly 
performant. gather_nd_generator // does not seem to be called in parallel, leading to very poor performance. // Additionally, since it uses scalar (Tscratch) to invoke 'generate', it // needs to go through redundant operations like 'reshape', 'broadcast' and // 'sum'. OpenMP loop below essentially does same thing as Eigen code, but // is considerably more efficient. #pragma omp parallel for for (Eigen::DenseIndex i = 0; i < batch_size; i++) { const Eigen::array<Eigen::DenseIndex, 1> loc{i}; gather_nd_generator(loc); } #else // INTEL_MKL && ENABLE_MKL Tscratch.device(d) = Tscratch.reshape(reshape_dims) .broadcast(broadcast_dims) .generate(gather_nd_generator) .sum(); #endif // INTEL_MKL && ENABLE_MKL // error_loc() returns -1 if there's no out-of-bounds index, // otherwise it returns the location of an OOB index in Tindices. return error_loc.load(); } }; #define REGISTER_GATHER_ND_FULL(T, Index) \ template Index GatherNdSlice<CPUDevice, T, Index, CPU_PROVIDED_IXDIM>:: \ operator()(const CPUDevice& d, const Index slice_size, \ typename TTypes<int32>::Scalar Tscratch, \ typename TTypes<T, CPU_PROVIDED_IXDIM + 1>::ConstTensor Tparams, \ typename TTypes<Index>::ConstMatrix Tindices, \ typename TTypes<T>::Matrix Tout); #define REGISTER_GATHER_ND_CPU(type) \ REGISTER_GATHER_ND_FULL(type, int32); \ REGISTER_GATHER_ND_FULL(type, int64) TF_CALL_ALL_TYPES(REGISTER_GATHER_ND_CPU); TF_CALL_QUANTIZED_TYPES(REGISTER_GATHER_ND_CPU); } // namespace functor } // namespace tensorflow #endif // TENSORFLOW_CORE_KERNELS_GATHER_ND_OP_CPU_IMPL_H_
deeplearn_pooling.c
/*
 libdeep - a library for deep learning
 Copyright (C) 2015-2016 Bob Mottram <bob@robotics.uk.to>

 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions
 are met:
 1. Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.
 3. Neither the name of the University nor the names of its contributors
    may be used to endorse or promote products derived from this software
    without specific prior written permission.
 .
 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE HOLDERS OR
 CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include "deeplearn_pooling.h"

/**
 * @brief Max-pools the first layer into the second.
 *
 * Every cell of layer0 is mapped to a cell of layer1 by integer scaling
 * of its (x, y) coordinates; layer1 keeps the per-channel maximum of all
 * layer0 cells that map onto it.  Both layers are laid out row-major with
 * `depth` interleaved channels per cell.
 *
 * NOTE(review): layer1 is zero-initialized before pooling, so strictly
 * negative layer0 values can never win the comparison and the pooled cell
 * stays 0.  This is only correct if activations are non-negative
 * (e.g. post-ReLU/sigmoid) — TODO confirm with callers.
 *
 * @param depth Depth (channels) of the two layers
 * @param layer0_across Number of units across the first layer
 * @param layer0_down Number of units down the first layer
 * @param layer0 Array containing the first layer values
 * @param layer1_across Number of units across the second layer
 * @param layer1_down Number of units down the second layer
 * @param layer1 Array receiving the pooled values
 * @returns zero on success, -1 if layer1 is larger than layer0
 */
int pooling_from_flt_to_flt(int depth,
                            int layer0_across,
                            int layer0_down,
                            float layer0[],
                            int layer1_across,
                            int layer1_down,
                            float layer1[])
{
    /* second layer must be smaller than the first */
    if (layer1_across*layer1_down > layer0_across*layer0_down) {
        return -1;
    }

    /* if layers are the same size then copy the array */
    if (layer1_across*layer1_down == layer0_across*layer0_down) {
        memcpy((void*)layer1,(void*)layer0,
               layer1_across*layer1_down*depth*sizeof(float));
        return 0;
    }

    /* clear the destination so max comparisons start from zero */
    memset((void*)layer1,'\0',layer1_across*layer1_down*depth*sizeof(float));

    /* NOTE: the pragma below is deliberately disabled — several source
       rows y0 map onto the same destination row y1, so a parallel loop
       over y0 would race on layer1 writes. */
    /*#pragma omp parallel for*/
    for (int y0 = 0; y0 < layer0_down; y0++) {
        /* destination row for this source row (integer downscale) */
        int y1 = y0 * layer1_down / layer0_down;
        for (int x0 = 0; x0 < layer0_across; x0++) {
            /* destination column for this source column */
            int x1 = x0 * layer1_across / layer0_across;
            /* flat, depth-interleaved offsets of the two cells */
            int n0 = (y0*layer0_across + x0)*depth;
            int n1 = (y1*layer1_across + x1)*depth;
            for (int d = 0; d < depth; d++) {
                /* per-channel maximum */
                if (layer0[n0+d] > layer1[n1+d]) {
                    layer1[n1+d] = layer0[n0+d];
                }
            }
        }
    }
    return 0;
}

/**
 * @brief Unpools a pooled layer back to the original resolution
 *        (inverse of max pooling).
 *
 * Each cell of the original-resolution layer is filled with the value of
 * the pooled cell it maps onto, so a pooled value is replicated across
 * the whole region it summarized.
 *
 * @param depth Depth (channels) of the two layers
 * @param pooled_layer_across Number of units across the pooled layer
 * @param pooled_layer_down Number of units down the pooled layer
 * @param pooled_layer Array containing the pooled layer values
 * @param original_layer_across Number of units across the original layer
 * @param original_layer_down Number of units down the original layer
 * @param original_layer Array receiving the upsampled values
 * @returns zero on success, -1 if the original layer is larger than
 *          the pooled layer claims to allow
 */
int unpooling_from_flt_to_flt(int depth,
                              int pooled_layer_across,
                              int pooled_layer_down,
                              float pooled_layer[],
                              int original_layer_across,
                              int original_layer_down,
                              float original_layer[])
{
    /* second layer must be smaller than the first */
    if (original_layer_across*original_layer_down >
        pooled_layer_across*pooled_layer_down) {
        return -1;
    }

    /* if layers are the same size then copy the array */
    if (original_layer_across*original_layer_down ==
        pooled_layer_across*pooled_layer_down) {
        memcpy((void*)original_layer,(void*)pooled_layer,
               pooled_layer_across*pooled_layer_down*depth*sizeof(float));
        return 0;
    }

    /* NOTE: pragma disabled as in pooling_from_flt_to_flt; here each
       destination cell is written exactly once, so parallelizing over
       y_original would actually be safe — left untouched for symmetry. */
    /*#pragma omp parallel for*/
    for (int y_original = 0; y_original < original_layer_down; y_original++) {
        /* pooled row this output row reads from */
        int y_pooled = y_original * pooled_layer_down / original_layer_down;
        for (int x_original = 0; x_original < original_layer_across;
             x_original++) {
            /* pooled column this output column reads from */
            int x_pooled =
                x_original * pooled_layer_across / original_layer_across;
            int n_pooled = (y_pooled*pooled_layer_across + x_pooled)*depth;
            int n_original =
                (y_original*original_layer_across + x_original)*depth;
            /* replicate the pooled value across all channels */
            for (int d = 0; d < depth; d++) {
                original_layer[n_original+d] = pooled_layer[n_pooled+d];
            }
        }
    }
    return 0;
}
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 32; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < 
Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
openmp_scan.h
#include <omp.h> template<typename T, typename R, typename C, typename S> void openmp_scan( size_t n, T initial, size_t tilesize, R reduce, C combine, S scan ) { if (n > 0) { // Set t to the number of tiles that might be used, at most one tile // per thread with no tile smaller than the requested tilesize. size_t t = std::min( size_t(omp_get_max_threads()), (n-1)/tilesize+1 ); // Allocate space to hold the reduction value of each tile. temp_space<T> r(t); // Request one thread per tile. #pragma omp parallel num_threads(t) { // Find out how threads were actually delivered, which may be // fewer than the requested number. size_t p = omp_get_num_threads(); // Recompute tilesize so there is one tile per actual thread. tilesize = (n+p-1)/p; // Set m to index of last tile size_t m = p-1; #pragma omp for // Set r[i] to reduction of the ith tile for ( size_t i = 0; i <= m; ++i ) r[i] = reduce(i*tilesize, i==m ? n-m*tilesize : tilesize); #pragma omp single // Use single thread to do in-place exclusive scan on r. for ( size_t i = 0; i <= m; ++i ) { T tmp = r[i]; r[i] = initial; initial = combine(initial,tmp); } #pragma omp for // Do scan over each tile, using r[i] as initial value. for ( size_t i = 0; i <= m; ++i ) scan(i*tilesize, i==m ? n-m*tilesize : tilesize, r[i]); } } }
gi_regular_grid_bilinear_function.h
/* * * Copyright (C) 2018 Attila Gyulassy <jediati@sci.utah.edu> * All rights reserved. * * This software may be modified and distributed under the terms * of the BSD license. See the LICENSE file for details. */ #ifndef REGULAR_GRID_BILINEAR_FUNCTION #define REGULAR_GRID_BILINEAR_FUNCTION #include <algorithm> #include <cmath> #include "gi_basic_types.h" #include "gi_vectors.h" #include "base/gi_regular_grid_2d.h" namespace GInt { class RegularGridBilinearFunction { protected: RegularGrid2D * m_grid; Vec2d* m_grad; FLOATTYPE* m_image; FLOATTYPE m_min_value; FLOATTYPE m_max_value; bool m_i_made_gradient; bool m_i_made_image; void fill_extents() { FLOATTYPE t_max_val = m_max_value = m_image[0]; FLOATTYPE t_min_val = m_min_value = m_image[0]; INDEX_TYPE num_elements = m_grid->NumElements(); INDEX_TYPE ii; #pragma omp parallel shared(num_elements) private(ii) firstprivate(t_max_val,t_min_val) { #pragma omp for nowait for (ii = 0; ii<num_elements; ++ii) { if (m_image[ii] > t_max_val) { t_max_val = m_image[ii]; } if (m_image[ii] < t_min_val) { t_min_val = m_image[ii]; } } #pragma omp critical { if (t_max_val > m_max_value) m_max_value = t_max_val; if (t_min_val < m_min_value) m_min_value = t_min_val; } } } public: FLOATTYPE GetMinValue() const { return m_min_value; } FLOATTYPE GetMaxValue() const { return m_max_value; } RegularGridBilinearFunction(RegularGrid2D* grid, FLOATTYPE *image = 0) : m_grid(grid) { m_i_made_image = false; m_i_made_gradient = false; m_image = NULL; m_grad = NULL; // use the function if it is passed, otherwise simply allocate memory if (image != 0) { m_image = image; } //m_grad = new Vec2d[m_grid->NumElements()]; } ~RegularGridBilinearFunction() { if (m_i_made_gradient) delete[] m_grad; if (m_i_made_image) delete[] m_image; } // return pointer to underlying mesh and function const RegularGrid2D* GetGrid() const { return m_grid; } FLOATTYPE* GetImage() const { return m_image; } // sample the image at integral location FLOATTYPE SampleImage(const 
Vec2l& p) const { return m_image[m_grid->Index2d(p)]; } // sample the image at integral location FLOATTYPE SampleImage(const INDEX_TYPE id) const { return m_image[id]; } // sample the gradient at integral location const Vec2d& SampleGrad(const Vec2l& p) const { return m_grad[m_grid->Index2d(p)]; } FLOATTYPE BiLinInterpValue(const Vec2d& s) const { Vec2l n[4]; // for 8 vertices around s - some may be repeated based on boundary cond. m_grid->GatherSurrounding(s, n); Vec2d b = n[0]; //s.print_vf(); //b.print_vf(); Vec2d factors = s - b; FLOATTYPE x0 = (1 - factors[0]) * SampleImage(n[0]) + SampleImage(n[1]) * factors[0]; FLOATTYPE x1 = (1 - factors[0]) * SampleImage(n[2]) + SampleImage(n[3]) * factors[0]; return (1 - factors[1]) *x0 + x1 * factors[1]; } // return trilinearly interpolated value Vec2d BiLinInterpGrad(const Vec2d& s) const { Vec2l n[4]; // for 8 vertices around s - some may be repeated based on boundary cond. m_grid->GatherSurrounding(s, n); Vec2d b = n[0]; //s.print_vf(); //b.print_vf(); Vec2d factors = s - b; Vec2d x0 = Vec2d::Lerp(SampleGrad(n[0]), SampleGrad(n[1]), factors[0]); Vec2d x1 = Vec2d::Lerp(SampleGrad(n[2]), SampleGrad(n[3]), factors[0]); return Vec2d::Lerp(x0, x1, factors[1]); } void SetGradExplicit(INDEX_TYPE id, Vec2d vec) { this->m_grad[id] = vec; } // fill in vals with the 8 values of hte gradient around sample poitn void GetGradSurrounding(const Vec2d& s, Vec2d* vals) const { Vec2l n[4]; // for 8 vertices around s - some may be repeated based on boundary cond. m_grid->GatherSurrounding(s, n); for (int i = 0; i < 4; i++) vals[i] = SampleGrad(n[i]); } void GetGradSurrounding(const Vec2l& s, Vec2d* vals) const { Vec2l n[4]; // for 8 vertices around s - some may be repeated based on boundary cond. 
m_grid->GatherSurrounding(s, n); for (int i = 0; i < 4; i++) vals[i] = SampleGrad(n[i]); } // use with extreme care - no boundary checks, only do on really interior poitns void GetGradSurroundingNoBoundaryCheck(const Vec2d& s, Vec2d* vals) const { Vec2l n[4]; // for 8 vertices around s - some may be repeated based on boundary cond. m_grid->GatherSurroundingNoBoundaryCheck(s, n); for (int i = 0; i < 4; i++) vals[i] = SampleGrad(n[i]); } FLOATTYPE InterpolatedValue(const Vec2d& s) const { return BiLinInterpValue(s); } Vec2d InterpolatedGrad(const Vec2d& s) const { return BiLinInterpGrad(s); } // allow reuse of sampled gradient - the assumption that vals has the gradient arrows around s Vec2d BiLinInterpGrad(const Vec2d& s, const Vec2l& int_base, Vec2d* vals) const { //if (!(s.IntFloor() == int_base)) { // printf("s="); s.PrintFloat(); printf("d="); int_base.PrintFloat(); //} // //Vec2d d = int_base.IntFloor(); Vec2d factors = s - int_base; Vec2d x0 = Vec2d::Lerp(vals[0], vals[1], factors[0]); Vec2d x1 = Vec2d::Lerp(vals[2], vals[3], factors[0]); return Vec2d::Lerp(x0, x1, factors[1]); } void LoadImageFromFile(const char* fname) { size_t image_size = m_grid->NumElements(); // fill in image m_image = new FLOATTYPE[image_size]; m_i_made_image = true; FILE* fin = fopen(fname, "rb"); fread(m_image, sizeof(FLOATTYPE), image_size, fin); fclose(fin); fill_extents(); printf("min = %e, max = %e\n", this->m_min_value, this->m_max_value); } void ShallowCopyImage(FLOATTYPE *image) { m_image = image; INDEX_TYPE image_size = m_grid->NumElements(); fill_extents(); printf("min = %e, max = %e\n", this->m_min_value, this->m_max_value); } void DeepCopyImage(const FLOATTYPE *image) { m_image = new FLOATTYPE[m_grid->NumElements()]; m_i_made_image = true; INDEX_TYPE image_size = m_grid->NumElements(); memcpy(m_image, image, image_size*sizeof(FLOATTYPE)); fill_extents(); printf("min = %e, max = %e\n", this->m_min_value, this->m_max_value); } static const FLOATTYPE kRKCoefficients[5][9]; 
Vec2d GradientFromImage(const Vec2l& p, int rklevel) { Vec2l negs[9]; // don't support more than 4th order - cmon. would be ridiculous double res_x = 0.0; int rklevel_x = m_grid->Gather1DNeighborhood(p, 0, rklevel, negs); int nume_x = rklevel_x * 2 + 1; // number of entries to average for (int i = 0; i < nume_x; i++) { res_x += kRKCoefficients[rklevel_x][i] * SampleImage(negs[i]); } double res_y = 0.0; int rklevel_y = m_grid->Gather1DNeighborhood(p, 1, rklevel, negs); int nume_y = rklevel_y * 2 + 1; // number of entries to average for (int i = 0; i < nume_y; i++) { res_y += kRKCoefficients[rklevel_y][i] * SampleImage(negs[i]); } return Vec2d(res_x, res_y); } inline bool IsGreater(INDEX_TYPE a, INDEX_TYPE b) const { if (m_image[a] > m_image[b]) return true; if (m_image[b] > m_image[a]) return false; //if (a == b) printf("WHOA THERE NELLY\n"); return a > b; } void ComputeGradFromImage(int rklevel) { m_grad = new Vec2d[m_grid->NumElements()]; m_i_made_gradient = true; #pragma omp parallel for for (int i = 0; i < m_grid->XY()[0]; i++) { for (int j = 0; j < m_grid->XY()[1]; j++) { Vec2l p(i, j); m_grad[m_grid->Index2d(p)] = GradientFromImage(p, rklevel); } } } void Negate() { if (m_grad != NULL) { #pragma omp parallel for schedule(static) for (INDEX_TYPE i = 0; i < m_grid->NumElements(); i++) { this->m_image[i] *= -1; this->m_grad[i] *= -1.0; } } else { #pragma omp parallel for schedule(static) for (INDEX_TYPE i = 0; i < m_grid->NumElements(); i++) { this->m_image[i] *= -1; } } } }; }; #endif
graph_io.h
// Copyright 2016, National University of Defense Technology // Authors: Xuhao Chen <cxh@illinois.edu> and Pingfan Li <lipingfan@163.com> #include <set> #include <vector> #include <iostream> #include <fstream> #include <sstream> #include <string.h> #include <algorithm> #include "common.h" #include "timer.h" struct WeightedEdge { IndexT src; IndexT dst; WeightT wt; int eid; //WeightedEdge() : src(0), dst(0), wt(0), eid(0) {} //std::string to_string() const; }; bool compare_id(WeightedEdge a, WeightedEdge b) { return (a.dst < b.dst); } void fill_data(int m, int &nnz, IndexT *&row_offsets, IndexT *&column_indices, WeightT *&weight, vector<vector<WeightedEdge> > vertices, bool symmetrize, bool sorted, bool remove_selfloops, bool remove_redundents) { //sort the neighbor list if(sorted) { printf("Sorting the neighbor lists..."); for(int i = 0; i < m; i++) { std::sort(vertices[i].begin(), vertices[i].end(), compare_id); } printf(" Done\n"); } //remove self loops int num_selfloops = 0; if(remove_selfloops) { printf("Removing self loops..."); for(int i = 0; i < m; i++) { for(unsigned j = 0; j < vertices[i].size(); j ++) { if(i == vertices[i][j].dst) { vertices[i].erase(vertices[i].begin()+j); num_selfloops ++; j --; } } } printf(" %d selfloops are removed\n", num_selfloops); } // remove redundent int num_redundents = 0; if(remove_redundents) { printf("Removing redundent edges..."); for (int i = 0; i < m; i++) { for (unsigned j = 1; j < vertices[i].size(); j ++) { if (vertices[i][j].dst == vertices[i][j-1].dst) { vertices[i].erase(vertices[i].begin()+j); num_redundents ++; j --; } } } printf(" %d redundent edges are removed\n", num_redundents); } /* // print some neighbor lists for (int i = 0; i < 3; i++) { cout << "src " << i << ": "; for (int j = 0; j < vertices[i].size(); j ++) cout << vertices[i][j].dst << " "; cout << endl; } */ #ifdef SIM row_offsets = (IndexT *)aligned_alloc(PAGE_SIZE, (m + 1) * sizeof(IndexT)); #else row_offsets = (IndexT *)malloc((m + 1) * 
sizeof(IndexT)); #endif int count = 0; for (int i = 0; i < m; i++) { row_offsets[i] = count; count += vertices[i].size(); } row_offsets[m] = count; if (symmetrize) { if(count != nnz) { nnz = count; } } else { if (count + num_selfloops + num_redundents != nnz) printf("Error reading graph, number of edges in edge list %d != %d\n", count, nnz); nnz = count; } printf("num_vertices %d num_edges %d\n", m, nnz); /* double avgdeg; double variance = 0.0; int maxdeg = 0; int mindeg = m; avgdeg = (double)nnz / m; for (int i = 0; i < m; i++) { int deg_i = row_offsets[i + 1] - row_offsets[i]; if (deg_i > maxdeg) maxdeg = deg_i; if (deg_i < mindeg) mindeg = deg_i; variance += (deg_i - avgdeg) * (deg_i - avgdeg) / m; } printf("min_degree %d max_degree %d avg_degree %.2f variance %.2f\n", mindeg, maxdeg, avgdeg, variance); */ #ifdef SIM column_indices = (IndexT *)aligned_alloc(PAGE_SIZE, count * sizeof(IndexT)); weight = (WeightT *)aligned_alloc(PAGE_SIZE, count * sizeof(WeightT)); #else column_indices = (IndexT *)malloc(count * sizeof(IndexT)); weight = (WeightT *)malloc(count * sizeof(WeightT)); #endif vector<WeightedEdge>::iterator neighbor_list; for (int i = 0, index = 0; i < m; i++) { neighbor_list = vertices[i].begin(); while (neighbor_list != vertices[i].end()) { column_indices[index] = (*neighbor_list).dst; weight[index] = (*neighbor_list).wt; index ++; neighbor_list ++; } } /* // print some neighbor lists for (int i = 0; i < 6; i++) { int row_begin = row_offsets[i]; int row_end = row_offsets[i + 1]; cout << "src " << i << ": "; for (int j = row_begin; j < row_end; j ++) cout << column_indices[j] << " "; cout << endl; } //*/ //for (int i = 0; i < 10; i++) cout << weight[i] << ", "; //cout << endl; } // transfer gr graph to CSR format void gr2csr(char *gr, int &m, int &nnz, IndexT *&row_offsets, IndexT *&column_indices, WeightT *&weight, bool symmetrize, bool transpose, bool sorted, bool remove_selfloops, bool remove_redundents) { printf("Reading 9th DIMACS (.gr) input file 
%s\n", gr); std::ifstream cfile; cfile.open(gr); std::string str; getline(cfile, str); char c; sscanf(str.c_str(), "%c", &c); while (c == 'c') { getline(cfile, str); sscanf(str.c_str(), "%c", &c); } char sp[3]; sscanf(str.c_str(), "%c %s %d %d", &c, sp, &m, &nnz); printf("Before cleaning, the original num_vertices %d num_edges %d\n", m, nnz); getline(cfile, str); sscanf(str.c_str(), "%c", &c); while (c == 'c') { getline(cfile, str); sscanf(str.c_str(), "%c", &c); } vector<vector<WeightedEdge> > vertices; vector<WeightedEdge> neighbors; for (int i = 0; i < m; i++) vertices.push_back(neighbors); IndexT src, dst; for (int i = 0; i < nnz; i++) { #ifdef LONG_TYPES sscanf(str.c_str(), "%c %ld %ld", &c, &src, &dst); #else sscanf(str.c_str(), "%c %d %d", &c, &src, &dst); #endif if (c != 'a') printf("line %d\n", __LINE__); src--; dst--; WeightedEdge e1, e2; if(symmetrize) { e2.dst = src; e2.wt = 1; vertices[dst].push_back(e2); transpose = false; } if(!transpose) { e1.dst = dst; e1.wt = 1; vertices[src].push_back(e1); } else { e1.dst = src; e1.wt = 1; vertices[dst].push_back(e1); } if(i != nnz-1) getline(cfile, str); } fill_data(m, nnz, row_offsets, column_indices, weight, vertices, symmetrize, sorted, remove_selfloops, remove_redundents); } // transfer edgelist graph to CSR format void el2csr(char *el, int &m, int &nnz, IndexT *&row_offsets, IndexT *&column_indices, WeightT *&weight, bool symmetrize, bool transpose, bool sorted, bool remove_selfloops, bool remove_redundents) { printf("Reading edgelist (.el) input file %s\n", el); std::ifstream cfile; cfile.open(el); std::string str; getline(cfile, str); sscanf(str.c_str(), "%d %d", &m, &nnz); printf("Before cleaning, the original num_vertices %d num_edges %d\n", m, nnz); vector<vector<WeightedEdge> > vertices; vector<WeightedEdge> neighbors; for (int i = 0; i < m; i++) vertices.push_back(neighbors); IndexT dst, src; WeightT wt = 1; for (int i = 0; i < nnz; i ++) { //while (!cfile.eof()) { getline(cfile, str); #ifdef 
LONG_TYPES int num = sscanf(str.c_str(), "%ld %ld %ld", &src, &dst, &wt); #else int num = sscanf(str.c_str(), "%d %d %d", &src, &dst, &wt); #endif if (num == 2) wt = 1; if (wt < 0) wt = -wt; // non-negtive weight src--; dst--; WeightedEdge e1, e2; if(symmetrize && src != dst) { e2.dst = src; e2.wt = wt; vertices[dst].push_back(e2); transpose = false; } if(!transpose) { e1.dst = dst; e1.wt = wt; vertices[src].push_back(e1); } else { e1.dst = src; e1.wt = wt; vertices[dst].push_back(e1); } } cfile.close(); fill_data(m, nnz, row_offsets, column_indices, weight, vertices, symmetrize, sorted, remove_selfloops, remove_redundents); } // transfer *.graph file to CSR format void graph2csr(char *graph, int &m, int &nnz, IndexT *&row_offsets, IndexT *&column_indices, WeightT *&weight, bool symmetrize, bool transpose, bool sorted, bool remove_selfloops, bool remove_redundents) { printf("Reading .graph input file %s\n", graph); std::ifstream cfile; cfile.open(graph); std::string str; getline(cfile, str); sscanf(str.c_str(), "%d %d", &m, &nnz); printf("Before cleaning, the original num_vertices %d num_edges %d\n", m, nnz); vector<vector<WeightedEdge> > vertices; vector<WeightedEdge> neighbors; for (int i = 0; i < m; i++) vertices.push_back(neighbors); IndexT dst; for (int src = 0; src < m; src ++) { getline(cfile, str); istringstream istr; istr.str(str); while(istr>>dst) { dst --; WeightedEdge e1;//, e2; if(symmetrize && src != dst) { // for .graph format, the input file already contains edges in both directions //e2.dst = src; e2.wt = 1; //vertices[dst].push_back(e2); transpose = false; } if(!transpose) { e1.dst = dst; e1.wt = 1; vertices[src].push_back(e1); } else { e1.dst = src; e1.wt = 1; vertices[dst].push_back(e1); } } istr.clear(); } cfile.close(); fill_data(m, nnz, row_offsets, column_indices, weight, vertices, symmetrize, sorted, remove_selfloops, remove_redundents); } // transfer mtx graph to CSR format void mtx2csr(char *mtx, int &m, int &n, int &nnz, IndexT 
*&row_offsets, IndexT *&column_indices, WeightT *&weight, bool symmetrize, bool transpose, bool sorted, bool remove_selfloops, bool remove_redundents) { printf("Reading (.mtx) input file %s\n", mtx); std::ifstream cfile; cfile.open(mtx); std::string str; getline(cfile, str); char c; sscanf(str.c_str(), "%c", &c); while (c == '%') { getline(cfile, str); sscanf(str.c_str(), "%c", &c); } sscanf(str.c_str(), "%d %d %d", &m, &n, &nnz); if (m != n) { printf("Warning, m(%d) != n(%d)\n", m, n); } printf("Before cleaning, the original num_vertices %d num_edges %d\n", m, nnz); vector<vector<WeightedEdge> > vertices; vector<WeightedEdge> neighbors; for (int i = 0; i < m; i ++) vertices.push_back(neighbors); IndexT dst, src; WeightT wt = 1; for (int i = 0; i < nnz; i ++) { getline(cfile, str); #ifdef LONG_TYPES int num = sscanf(str.c_str(), "%ld %ld %ld", &src, &dst, &wt); #else int num = sscanf(str.c_str(), "%d %d %d", &src, &dst, &wt); #endif if (num == 2) wt = 1; if (wt < 0) wt = -wt; // non-negtive weight src--; dst--; WeightedEdge e1, e2; if(symmetrize && src != dst) { e2.dst = src; e2.wt = wt; vertices[dst].push_back(e2); transpose = false; } if(!transpose) { e1.dst = dst; e1.wt = wt; vertices[src].push_back(e1); } else { e1.dst = src; e1.wt = wt; vertices[dst].push_back(e1); } } cfile.close(); fill_data(m, nnz, row_offsets, column_indices, weight, vertices, symmetrize, sorted, remove_selfloops, remove_redundents); } /* void sort_neighbors(int m, int *row_offsets, int *&column_indices) { vector<int> neighbors; #pragma omp parallel for for(int i = 0; i < m; i++) { int row_begin = row_offsets[i]; int row_end = row_offsets[i + 1]; for (int offset = row_begin; offset < row_end; ++ offset) { neighbors.push_back(column_indices[offset]); } std::sort(neighbors.begin(), neighbors.end()); int k = 0; for (int offset = row_begin; offset < row_end; ++ offset) { column_indices[offset] = neighbors[k++]; } } } */ void read_graph(int argc, char *argv[], int &m, int &n, int &nnz, IndexT 
*&row_offsets, IndexT *&column_indices, int *&degree, WeightT *&weight, bool is_symmetrize=false, bool is_transpose=false, bool sorted=true, bool remove_selfloops=true, bool remove_redundents=true) { Timer t; t.Start(); //if(is_symmetrize) printf("Requiring symmetric graphs for this algorithm\n"); if (strstr(argv[1], ".mtx")) mtx2csr(argv[1], m, n, nnz, row_offsets, column_indices, weight, is_symmetrize, is_transpose, sorted, remove_selfloops, remove_redundents); else if (strstr(argv[1], ".graph")) graph2csr(argv[1], m, nnz, row_offsets, column_indices, weight, is_symmetrize, is_transpose, sorted, remove_selfloops, remove_redundents); else if (strstr(argv[1], ".gr")) gr2csr(argv[1], m, nnz, row_offsets, column_indices, weight, is_symmetrize, is_transpose, sorted, remove_selfloops, remove_redundents); else { printf("Unrecognizable input file format\n"); exit(0); } t.Stop(); printf("\truntime [%s] = %f ms.\n", "read_graph", t.Millisecs()); printf("Calculating degree..."); degree = (int *)malloc(m * sizeof(int)); for (int i = 0; i < m; i++) { degree[i] = row_offsets[i + 1] - row_offsets[i]; } printf(" Done\n"); } void print_degree(int m, int *in_degree, int *out_degree) { if(in_degree != NULL) { FILE *fp = fopen("in_degree.txt", "w"); fprintf(fp,"%d\n", m); for(int i = 0; i < m; i ++) fprintf(fp,"%d ", in_degree[i]); fclose(fp); } if(out_degree != NULL) { FILE *fp = fopen("out_degree.txt", "w"); fprintf(fp,"%d\n", m); for(int i = 0; i < m; i ++) fprintf(fp,"%d ", out_degree[i]); fclose(fp); } }
knn.c
#if (defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER)) #pragma GCC push_options #pragma GCC optimize ("unroll-loops") #endif #include <R.h> #include <Rinternals.h> #include "safeomp.h" #include "types.h" #define MIN(a,b) ((a)<(b)?(a):(b)) #define FREE(ptr) if(ptr!=NULL) free(ptr) typedef struct { int k; double *restrict dists; int *restrict labels; } voters_t; static inline void classify_get_dists(cint m, cint n, cdbl_r x, cdbl_r test_obs, dbl_r dists) { memset(dists, 0, m*sizeof(*dists)); #pragma omp parallel for if(m*n>OMP_MIN_SIZE) for (int j=0; j<n; j++) { SAFE_SIMD for (int i=0; i<m; i++) { const double tmp = x[i + m*j] - test_obs[j]; dists[i] += tmp*tmp; } } } static inline int classify_single1(cint m, cint n, cdbl_r x, cint_r y, cdbl_r test_obs, dbl_r dists) { classify_get_dists(m, n, x, test_obs, dists); double min = dists[0]; int group = y[0]; for (int i=1; i<m; i++) { if (dists[i] < min) { min = dists[i]; group = y[i]; } } return group; } // assume x[1]:x[len-1] are sorted largest to smallest static inline void max2min_sort(const int len, double *const restrict x, int *const restrict y) { for (int i=0; i<=len; i++) { if (i == len || x[i] < x[0]) { for (int j=1; j<i; j++) { double xtmp = x[j]; x[j] = x[j-1]; x[j-1] = xtmp; int ytmp = y[j]; y[j] = y[j-1]; y[j-1] = ytmp; } return; } } } // TODO for now, ties are resolved by the smallest group number static inline int vote(voters_t *const restrict voters) { const int k = voters->k; double *const restrict tally = voters->dists; int *const restrict votes = voters->labels; int group; memset(tally, 0, k*sizeof(*tally)); SAFE_FOR_SIMD for (int i=0; i<k; i++) tally[votes[i]-1] += 1.0; group = 0; for (int i=1; i<k; i++) { if (tally[i] > tally[group]) group = i; } return group+1; } static inline int classify_single(voters_t *const restrict voters, cint m, cint n, cdbl_r x, cint_r y, cdbl_r test_obs, dbl_r dists) { const int k = voters->k; classify_get_dists(m, n, x, test_obs, dists); // get 
voters and vote SAFE_FOR_SIMD for (int i=0; i<k; i++) { voters->dists[i] = dists[i]; voters->labels[i] = y[i]; } for (int i=k; i<m; i++) { if (dists[i] < voters->dists[0]) { voters->dists[0] = dists[i]; voters->labels[0] = y[i]; max2min_sort(k, voters->dists, voters->labels); } } return vote(voters); } SEXP R_knn(SEXP x_, SEXP y_, SEXP test_, SEXP k_) { SEXP ret; cdbl_r x = REAL(x_); cint_r y = INTEGER(y_); cdbl_r test = REAL(test_); cint m = nrows(x_); cint n = ncols(x_); cint mtest = nrows(test_); cint k = INTEGER(k_)[0]; PROTECT(ret = allocVector(INTSXP, mtest)); int_r ret_pt = INTEGER(ret); double *dists = malloc(m * sizeof(*dists)); double *test_obs = malloc(n * sizeof(*test_obs)); if (dists == NULL || test_obs == NULL) { FREE(dists); FREE(test_obs); error("OOM"); } if (k == 1) { for (int b=0; b<mtest; b++) { for (int j=0; j<n; j++) test_obs[j] = (test+b)[j*mtest]; ret_pt[b] = classify_single1(m, n, x, y, test_obs, dists); } } else { voters_t voters; double *mindists = malloc(k * sizeof(*mindists)); int *mindists_labels = malloc(k * sizeof(*mindists_labels)); if (mindists == NULL || mindists_labels == NULL) { FREE(mindists); FREE(mindists_labels); FREE(dists); FREE(test_obs); error("OOM"); } voters.k = k; voters.dists = mindists; voters.labels = mindists_labels; for (int b=0; b<mtest; b++) { for (int j=0; j<n; j++) test_obs[j] = (test+b)[j*mtest]; ret_pt[b] = classify_single(&voters, m, n, x, y, test_obs, dists); } free(mindists); free(mindists_labels); } free(dists); free(test_obs); UNPROTECT(1); return ret; }
target-link-1.c
/* OpenMP `declare target link` test: `a`, `c` and `d` get device copies that
 * are only associated with the host data when explicitly mapped, while `b`
 * (listed under `to`) gets a device copy initialized at image load time. */
struct S { int s, t; };
int a = 1, b = 1;
double c[27];
struct S d = { 8888, 8888 };
#pragma omp declare target link (a) to (b) link (c, d)

/* Increments both globals; called inside target regions, so which copy it
 * mutates depends on whether host and device share memory. */
int foo (void)
{
  return a++ + b++;
}

/* Touches the linked scalar, array and struct, partly through pointers. */
int bar (int n)
{
  int *p1 = &a;
  int *p2 = &b;
  c[n] += 2.0;
  d.s -= 2;
  d.t -= 2;
  return *p1 + *p2 + d.s + d.t;
}

#pragma omp declare target (foo, bar)

int main ()
{
  a = b = 2;
  d.s = 17;
  d.t = 18;
  int res, n = 10;
  #pragma omp target map (to: a, b, c, d) map (from: res)
  {
    res = foo () + foo ();
    c[n] = 3.0;
    res += bar (n);
  }
  /* Shared-memory probe: with `map (alloc:)` the device's write is not
   * copied back, so shared_mem stays 0 unless host and device actually
   * share memory (in which case the write lands in the host variable). */
  int shared_mem = 0;
  #pragma omp target map (alloc: shared_mem)
  shared_mem = 1;
  /* Shared memory: both foo() calls see each other's increments and the
   * host-side assignments (a=b=2, d={17,18}); separate memory: b's device
   * copy still holds its load-time value and `to` mappings reset a/c/d. */
  if ((shared_mem && res != (2 + 2) + (3 + 3) + (4 + 4 + 15 + 16))
      || (!shared_mem && res != (2 + 1) + (3 + 2) + (4 + 3 + 15 + 16)))
    __builtin_abort ();
  /* `enter data map (to: c)` then `update from (c)` round-trips the linked
   * array; on separate memory the update overwrites c[n] with the device
   * value (0), on shared memory c[n] keeps the 5.0 written above. */
  #pragma omp target enter data map (to: c)
  #pragma omp target update from (c)
  res = (int) (c[n] + 0.5);
  if ((shared_mem && res != 5) || (!shared_mem && res != 0))
    __builtin_abort ();
  #pragma omp target map (to: a, b) map (from: res)
  res = foo ();
  if ((shared_mem && res != 4 + 4) || (!shared_mem && res != 2 + 3))
    __builtin_abort ();
  return 0;
}
ex04.c
#include <stdio.h>
#include <omp.h>

#define ATOMIC 1

static long num_steps = 1000000;
double step;

/* Estimate pi by midpoint integration of 4/(1+x^2) over [0,1]: the steps are
 * split across OpenMP threads by hand and each thread's partial sum is
 * folded into total_sum with an atomic (or critical) update. */
int main(void)
{
  int num_threads;
  double pi, total_sum = 0.0;
  step = 1.0 / (double) num_steps;
  int num_procs = omp_get_num_procs();
  int steps_per_thread;
  double startTime = omp_get_wtime();

  #pragma omp parallel
  {
    #pragma omp single
    {
      num_threads = omp_get_num_threads();
      steps_per_thread = num_steps / num_threads;
      printf ("Found %d CPUs. Using %d threads and computing %d steps per thread.\n", num_procs, num_threads, steps_per_thread);
      // Implicit barrier at the end makes num_threads/steps_per_thread
      // visible to every thread before they are read below.
    }

    int i, id = omp_get_thread_num();
    printf("Executing thread %d out of %d\n", id, num_threads);

    /* BUGFIX: sum was read uninitialized (undefined behavior) */
    double x, sum = 0.0;

    /* BUGFIX: when num_steps is not divisible by num_threads, the last
     * thread also takes the leftover steps instead of silently dropping
     * them (num_steps = 1e6 fits comfortably in int). */
    const int start = id * steps_per_thread;
    const int end = (id == num_threads - 1) ? (int) num_steps
                                            : start + steps_per_thread;

    for (i = start; i < end; i++)
    {
      x = (i + 0.5) * step;
      sum += 4.0 / (1.0 + x * x);
    }

#ifdef ATOMIC
    #pragma omp atomic
    total_sum += sum;
#else
    #pragma omp critical
    total_sum += sum;
#endif
  }

  pi = step * total_sum;
  double endTime = omp_get_wtime();

  printf ("Computed integral: %f\n", pi);
  printf ("Time elapsed: %f secs\n", (endTime - startTime));
  return 0;
}
DRACC_OMP_004_Counter_no_lock_Intra_yes.c
/* 
Concurrent access on a counter with no lock. 
Atomicity Violation. 
Data Race in line 15. 
Intra Region. 
*/
/* NOTE(review): the unsynchronized increment below is INTENTIONAL -- this
 * file is a data-race benchmark (DRACC suite, "_yes" = race present) used to
 * evaluate race detectors. Do not "fix" it with an atomic or reduction. */
#include <stdio.h>

#define N 100000

int countervar = 0;

/* Offloads N increments of the global counter to device 0 and runs them in a
 * distribute-parallel-for; countervar++ is an unprotected read-modify-write
 * across threads -- the defect under test. */
int count(){
	#pragma omp target map(tofrom:countervar) device(0)
	#pragma omp teams num_teams(1)
	#pragma omp distribute parallel for
	for (int i=0; i<N; i++){
		countervar++; /* data race: concurrent unsynchronized increment */
	}
	return 0;
}

int main(){
	count();
	printf("counter: %i expected: 100000\n ",countervar);
	return 0;
}
GB_unaryop__lnot_uint64_uint64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_uint64_uint64 // op(A') function: GB_tran__lnot_uint64_uint64 // C type: uint64_t // A type: uint64_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ uint64_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ uint64_t z = (uint64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_uint64_uint64 ( uint64_t *restrict Cx, const uint64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP 
(p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_uint64_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
pr49897-2.c
/* PR middle-end/49897 */
/* { dg-do run } */

extern void abort (void);

/* Regression test for nested parallel-for reductions combined with
 * firstprivate/lastprivate on the same variables: sum must accumulate
 * across both nests (10 outer iterations x sum(0..9) = 450), and
 * lastprivate must hand back the values from the sequentially last
 * iterations (x from i == 9, y from j == 9 of the last outer iteration). */
int
main ()
{
  int i, j, x = 0, y, sum = 0;
  #pragma omp parallel for reduction(+:sum) firstprivate(x) lastprivate(x, y)
  for (i = 0; i < 10; i++)
    {
      x = i;
      y = 0;
      /* inner region reduces into the enclosing thread's private sum,
         which the outer reduction then combines */
      #pragma omp parallel for reduction(+:sum) firstprivate(y) lastprivate(y)
      for (j = 0; j < 10; j++)
        {
          y = j;
          sum += y;
        }
    }
  if (x != 9 || y != 9 || sum != 450)
    abort ();
  return 0;
}
GB_binop__bget_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bget_int16) // A.*B function (eWiseMult): GB (_AemultB_08__bget_int16) // A.*B function (eWiseMult): GB (_AemultB_02__bget_int16) // A.*B function (eWiseMult): GB (_AemultB_04__bget_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bget_int16) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bget_int16) // C+=b function (dense accum): GB (_Cdense_accumb__bget_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bget_int16) // C=scalar+B GB (_bind1st__bget_int16) // C=scalar+B' GB (_bind1st_tran__bget_int16) // C=A+scalar GB (_bind2nd__bget_int16) // C=A'+scalar GB (_bind2nd_tran__bget_int16) // C type: int16_t // A type: int16_t // A pattern? 0 // B type: int16_t // B pattern? 
0 // BinaryOp: cij = GB_BITGET (aij, bij, int16_t, 16) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_BITGET (x, y, int16_t, 16) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BGET || GxB_NO_INT16 || GxB_NO_BGET_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bget_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bget_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bget_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict 
Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bget_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int16_t alpha_scalar ; int16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int16_t *) alpha_scalar_in)) ; beta_scalar = (*((int16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bget_int16) ( GrB_Matrix C, const int C_sparsity, const 
int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bget_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
// (tail of a generated eWiseMult kernel whose opening signature lies above this
// excerpt; reproduced unchanged)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

// Generated kernel: the loop body is supplied by the included template file,
// specialized via macros defined earlier in this (generated) source.
GrB_Info GB (_AemultB_04__bget_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

// Generated kernel: body supplied by the included bitmap template.
GrB_Info GB (_AemultB_bitmap__bget_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Applies z = bget(x, Bx[p]) for every entry present in the bitmap Bb.
GrB_Info GB (_bind1st__bget_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_BITGET (x, bij, int16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Applies z = bget(Ax[p], y) for every entry present in the bitmap Ab.
GrB_Info GB (_bind2nd__bget_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_BITGET (aij, y, int16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = GB_BITGET (x, aij, int16_t, 16) ; \
}

GrB_Info GB (_bind1st_tran__bget_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (generated code; redefinition is intentional)
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = GB_BITGET (aij, y, int16_t, 16) ; \
}

GrB_Info GB (_bind2nd_tran__bget_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
mcgsm.h
#ifndef CMT_MCGSM_H #define CMT_MCGSM_H #include <vector> #include <utility> #include "Eigen/Core" #include "trainable.h" #include "exception.h" #include "regularizer.h" namespace CMT { using std::vector; using std::pair; using Eigen::Dynamic; using Eigen::Array; using Eigen::ArrayXXd; using Eigen::MatrixXd; class MCGSM : public Trainable { public: struct Parameters : public Trainable::Parameters { public: bool trainPriors; bool trainScales; bool trainWeights; bool trainFeatures; bool trainCholeskyFactors; bool trainPredictors; bool trainLinearFeatures; bool trainMeans; Regularizer regularizeFeatures; Regularizer regularizePredictors; Regularizer regularizeWeights; Regularizer regularizeLinearFeatures; Regularizer regularizeMeans; Regularizer regularizer; Parameters(); Parameters(const Parameters& params); virtual Parameters& operator=(const Parameters& params); }; using Trainable::logLikelihood; using Trainable::initialize; using Trainable::train; MCGSM( int dimIn, int dimOut = 1, int numComponents = 8, int numScales = 6, int numFeatures = -1); MCGSM(int dimIn, const MCGSM& mcgsm); MCGSM(int dimIn, int dimOut, const MCGSM& mcgsm); virtual ~MCGSM(); inline int dimIn() const; inline int dimOut() const; inline int numComponents() const; inline int numScales() const; inline int numFeatures() const; inline ArrayXXd priors() const; inline void setPriors(const ArrayXXd& priors); inline ArrayXXd scales() const; inline void setScales(const ArrayXXd& scales); inline ArrayXXd weights() const; inline void setWeights(const ArrayXXd& weights); inline MatrixXd features() const; inline void setFeatures(const MatrixXd& features); inline vector<MatrixXd> choleskyFactors() const; inline void setCholeskyFactors(const vector<MatrixXd>& choleskyFactors); inline vector<MatrixXd> predictors() const; inline void setPredictors(const vector<MatrixXd>& predictors); inline MatrixXd linearFeatures() const; inline void setLinearFeatures(const MatrixXd& linearFeatures); inline MatrixXd means() 
const; inline void setMeans(const MatrixXd& means); virtual void initialize(const MatrixXd& input, const MatrixXd& output); virtual MatrixXd sample(const MatrixXd& input) const; virtual MatrixXd sample( const MatrixXd& input, const Array<int, 1, Dynamic>& labels) const; virtual MatrixXd reconstruct(const MatrixXd& input, const MatrixXd& output) const; virtual Array<int, 1, Dynamic> samplePrior(const MatrixXd& input) const; virtual Array<int, 1, Dynamic> samplePosterior( const MatrixXd& input, const MatrixXd& output) const; virtual ArrayXXd prior(const MatrixXd& input) const; virtual ArrayXXd posterior(const MatrixXd& input, const MatrixXd& output) const; virtual Array<double, 1, Dynamic> logLikelihood( const MatrixXd& input, const MatrixXd& output) const; virtual Array<double, 1, Dynamic> logLikelihood( const MatrixXd& input, const MatrixXd& output, const Array<int, 1, Dynamic>& labels) const; virtual pair<pair<ArrayXXd, ArrayXXd>, Array<double, 1, Dynamic> > computeDataGradient( const MatrixXd& input, const MatrixXd& output) const; virtual int numParameters(const Trainable::Parameters& params = Parameters()) const; virtual lbfgsfloatval_t* parameters(const Trainable::Parameters& params = Parameters()) const; virtual void setParameters(const lbfgsfloatval_t* x, const Trainable::Parameters& params = Parameters()); virtual double parameterGradient( const MatrixXd& input, const MatrixXd& output, const lbfgsfloatval_t* x, lbfgsfloatval_t* g, const Trainable::Parameters& params = Parameters()) const; protected: // hyperparameters int mDimIn; int mDimOut; int mNumComponents; int mNumScales; int mNumFeatures; // parameters ArrayXXd mPriors; ArrayXXd mScales; ArrayXXd mWeights; MatrixXd mFeatures; vector<MatrixXd> mCholeskyFactors; vector<MatrixXd> mPredictors; MatrixXd mLinearFeatures; MatrixXd mMeans; virtual bool train( const MatrixXd& input, const MatrixXd& output, const MatrixXd* inputVal = 0, const MatrixXd* outputVal = 0, const Trainable::Parameters& params = 
Trainable::Parameters()); }; } inline int CMT::MCGSM::dimIn() const { return mDimIn; } inline int CMT::MCGSM::dimOut() const { return mDimOut; } inline int CMT::MCGSM::numComponents() const { return mNumComponents; } inline int CMT::MCGSM::numScales() const { return mNumScales; } inline int CMT::MCGSM::numFeatures() const { return mNumFeatures; } inline Eigen::ArrayXXd CMT::MCGSM::scales() const { return mScales; } inline void CMT::MCGSM::setScales(const ArrayXXd& scales) { if(scales.rows() != mNumComponents || scales.cols() != mNumScales) throw Exception("Wrong number of scales."); mScales = scales; } inline Eigen::ArrayXXd CMT::MCGSM::weights() const { return mWeights; } inline void CMT::MCGSM::setWeights(const ArrayXXd& weights) { if(dimIn() == 0) return; if(weights.rows() != mNumComponents || weights.cols() != mNumFeatures) throw Exception("Wrong number of weights."); mWeights = weights; } inline Eigen::ArrayXXd CMT::MCGSM::priors() const { return mPriors; } inline void CMT::MCGSM::setPriors(const ArrayXXd& priors) { if(priors.rows() != mNumComponents || priors.cols() != mNumScales) throw Exception("Wrong number of prior weights."); mPriors = priors; } inline Eigen::MatrixXd CMT::MCGSM::features() const { return mFeatures; } inline void CMT::MCGSM::setFeatures(const MatrixXd& features) { if(dimIn() == 0) return; if(features.rows() != mDimIn) throw Exception("Features have wrong dimensionality."); if(features.cols() != mNumFeatures) throw Exception("Wrong number of features."); mFeatures = features; } inline std::vector<Eigen::MatrixXd> CMT::MCGSM::choleskyFactors() const { return mCholeskyFactors; } inline void CMT::MCGSM::setCholeskyFactors(const vector<MatrixXd>& choleskyFactors) { if(choleskyFactors.size() != mNumComponents) throw Exception("Wrong number of Cholesky factors."); for(int i = 0; i < mNumComponents; ++i) if(choleskyFactors[i].rows() != mDimOut || choleskyFactors[i].cols() != mDimOut) throw Exception("Cholesky factor has wrong dimensionality."); 
mCholeskyFactors = choleskyFactors; #pragma omp parallel for for(int i = 0; i < mNumComponents; ++i) { double prec = mCholeskyFactors[i](0, 0); // normalize representation mCholeskyFactors[i] /= prec; mScales.row(i) += 2. * log(prec); mWeights.row(i) /= prec; } } inline std::vector<Eigen::MatrixXd> CMT::MCGSM::predictors() const { return mPredictors; } inline void CMT::MCGSM::setPredictors(const vector<MatrixXd>& predictors) { if(dimIn() == 0) return; if(predictors.size() != mNumComponents) throw Exception("Wrong number of predictors."); for(int i = 0; i < predictors.size(); ++i) if(predictors[i].rows() != mDimOut || predictors[i].cols() != mDimIn) throw Exception("Predictor has wrong dimensionality."); mPredictors = predictors; } inline Eigen::MatrixXd CMT::MCGSM::linearFeatures() const { return mLinearFeatures; } inline void CMT::MCGSM::setLinearFeatures(const MatrixXd& linearFeatures) { if(linearFeatures.rows() != mNumComponents || linearFeatures.cols() != mDimIn) throw Exception("Linear features have wrong dimensionality."); mLinearFeatures = linearFeatures; } inline Eigen::MatrixXd CMT::MCGSM::means() const { return mMeans; } inline void CMT::MCGSM::setMeans(const MatrixXd& means) { if(means.cols() != mNumComponents || means.rows() != mDimOut) throw Exception("Means have wrong dimensionality."); mMeans = means; } #endif
wpapsk.h
/*
 * This software is Copyright (c) 2012 Lukas Odzioba <lukas dot odzioba at gmail dot com>
 * and Copyright (c) 2012-2014 magnum
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without modification, are permitted.
 *
 * hccap format was introduced by oclHashcat-plus, and it is described here: http://hashcat.net/wiki/hccap
 * Code is based on Aircrack-ng source
 */
#ifndef _WPAPSK_H
#define _WPAPSK_H

#include "arch.h"
#include "params.h"
#include "common.h"
#include "johnswap.h"
#include "stdint.h"

#include <assert.h>
#include <openssl/hmac.h>

#define HCCAP_SIZE		sizeof(hccap_t)
#define BINARY_SIZE		sizeof(mic_t)
#define BINARY_ALIGN		4
#define PLAINTEXT_LENGTH	63 /* We can do 64 but spec. says 63 */
/* salt is everything in hccap_t except the trailing keymic */
#define SALT_SIZE		(sizeof(hccap_t) - sizeof(mic_t))
#define SALT_ALIGN		MEM_ALIGN_NONE
#define BENCHMARK_COMMENT	""
#define BENCHMARK_LENGTH	-1

/* NOTE: if you change hccap_t, the same structure is also defined in hccap2john.c */
typedef struct
{
	char          essid[36];        // network name (NUL-terminated)
	unsigned char mac1[6];          // AP MAC address
	unsigned char mac2[6];          // station MAC address
	unsigned char nonce1[32];       // ANonce
	unsigned char nonce2[32];       // SNonce
	unsigned char eapol[256];       // EAPOL frame used for the MIC
	int           eapol_size;
	int           keyver;           // 1 = WPA (MD5), otherwise WPA2 (SHA1)
	unsigned char keymic[16];       // the MIC we attack
} hccap_t;

/* binary (the MIC) */
typedef struct
{
	unsigned char keymic[16];
} mic_t;

/* candidate password as passed to the PBKDF2 kernel/loop */
typedef struct {
	uint32_t length;
	uint8_t  v[PLAINTEXT_LENGTH + 1];
} wpapsk_password;

/* PMK produced by PBKDF2 (256 bits) */
typedef struct {
	uint32_t v[8];
} wpapsk_hash;

typedef struct {
	uint32_t length;
#ifdef JOHN_OCL_WPAPSK
	uint8_t  eapol[256 + 64];
	uint32_t eapol_size; // blocks
	uint8_t  data[64 + 12];
#endif
	uint8_t  salt[36]; // essid
} wpapsk_salt;

#ifndef _WPAPSK_CUDA_KERNEL
static struct fmt_tests tests[] = {
/* WPA2 testcase from http://wiki.wireshark.org/SampleCaptures */
{"$WPAPSK$Coherer#..l/Uf7J..qHUXMunTE3nfbMWSwxv27Ua0XutIOrfRSuv9gOCIugIVGlosMyXdNxfBZUAYmgKqeb6GBPxLiIZr56NtWTGR/Cp5ldAk61.5I0.Ec.2...........nTE3nfbMWSwxv27Ua0XutIOrfRSuv9gOCIugIVGlosM.................................................................3X.I.E..1uk0.E..1uk2.E..1uk0....................................................................................................................................................................................../t.....U...8FWdk8OpPckhewBwt4MXYI", "Induction"}, {"$WPAPSK$Harkonen#./FgTY0../B4zX6AKFO9kuLT4BQSyqEXwo.6XOiS4u8vlMNNs5grN91SVL.WK3GkF2rXfkPFGGi38MHkHDMbH.sm49Vc3pO4HPSUJE21.5I0.Ec.2........../KFO9kuLT4BQSyqEXwo.6XOiS4u8vlMNNs5grN91SVL..................................................................3X.I.E..1uk2.E..1uk2.E..1uk0.E..................................................................................................................................................................................../t.....U...BIpIs8sePU4r8yNnOxKHfM", "12345678"}, /* WPA, from aircrack-ng tests */ {"$WPAPSK$test#..qHuv0A..ZPYJBRzZwAKpEXUJwpza/b69itFaq4.OWoGHfonpc13zCAUsRIfQN2Zar6EXp2BYcRuSkWEJIWjEJJvb4DWZCspbZ51.21.3zy.EY.6........../zZwAKpEXUJwpza/b69itFaq4.OWoGHfonpc13zCAUsQ..................................................................BoK.31m.E2..31m.U2..31m.U2..31m.U................................................................................................................................................................................/X.....E...AkkDQmDg9837LBHG.dGlKA", "biscotte"}, /* Maximum length, 63 characters */ {"$WPAPSK$Greased 
Lighting#kA5.CDNB.07cofsOMXEEUwFTkO/RX2sQUaW9eteI8ynpFMwRgFZC6kk7bGqgvfcXnuF1f7L5fgn4fQMLmDrKjdBNjb6LClRmfLiTYk21.5I0.Ec............7MXEEUwFTkO/RX2sQUaW9eteI8ynpFMwRgFZC6kk7bGo.................................................................3X.I.E..1uk2.E..1uk2.E..1uk00...................................................................................................................................................................................../t.....U...D06LUdWVfGPaP1Oa3AV9Hg", "W*A5z&1?op2_L&Hla-OA$#5i_Lu@F+6d?je?u5!6+6766eluu7-l+jOEkIwLe90"}, {NULL} }; #endif /** Below are common variables used by wpapsk_fmt.c cuda_wpapsk_fmt.c and opencl_wpapsk_fmt.c **/ static hccap_t hccap; ///structure with hccap data static wpapsk_salt currentsalt; ///structure for essid static mic_t *mic; ///table for MIC keys #ifndef JOHN_OCL_WPAPSK static wpapsk_password *inbuffer; ///table for candidate passwords static wpapsk_hash *outbuffer; ///table for PMK calculated by GPU #endif static const char wpapsk_prefix[] = "$WPAPSK$"; static int new_keys = 1; static char last_ssid[sizeof(hccap.essid)]; /** Below are common functions used by wpapsk_fmt.c cuda_wpapsk_fmt.c and opencl_wpapsk_fmt.c **/ static hccap_t *decode_hccap(char *ciphertext) { static hccap_t hccap; char *essid = ciphertext + strlen(wpapsk_prefix); char *hash = strrchr(ciphertext, '#'); char *d = hccap.essid; char *cap = hash + 1; unsigned char tbuf[sizeof(hccap_t)]; unsigned char *dst = tbuf; int i; memset(&hccap, 0, sizeof(hccap)); if (hash == NULL) return &hccap; while (essid != hash) { ///copy essid to hccap *d++ = *essid++; } *d = '\0'; assert(*essid == '#'); for (i = 0; i < 118; i++) { dst[0] = (atoi64[ARCH_INDEX(cap[0])] << 2) | (atoi64[ARCH_INDEX(cap[1])] >> 4); dst[1] = (atoi64[ARCH_INDEX(cap[1])] << 4) | (atoi64[ARCH_INDEX(cap[2])] >> 2); dst[2] = (atoi64[ARCH_INDEX(cap[2])] << 6) | (atoi64[ARCH_INDEX(cap[3])]); dst += 3; cap += 4; } dst[0] = (atoi64[ARCH_INDEX(cap[0])] << 2) | (atoi64[ARCH_INDEX(cap[1])] >> 
4); dst[1] = (atoi64[ARCH_INDEX(cap[1])] << 4) | (atoi64[ARCH_INDEX(cap[2])] >> 2); /* This emits warnings on some compilers */ //memcpy(&hccap.mac1,tbuf,sizeof(hccap_t)-36); memcpy(((char*)&hccap) + 36, tbuf, sizeof(hccap_t) - 36); #if !ARCH_LITTLE_ENDIAN hccap.eapol_size = JOHNSWAP(hccap.eapol_size); hccap.keyver = JOHNSWAP(hccap.keyver); #endif return &hccap; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; ARCH_WORD_32 dummy; } binary; hccap_t *hccap = decode_hccap(ciphertext); memcpy(binary.c, hccap->keymic, BINARY_SIZE); return binary.c; } static void *get_salt(char *ciphertext) { static hccap_t s; memcpy(&s, decode_hccap(ciphertext), SALT_SIZE); return &s; } static int valid(char *ciphertext, struct fmt_main *self) { char *hash; int hashlength = 0; hccap_t *hccap; if (strncmp(ciphertext, wpapsk_prefix, strlen(wpapsk_prefix)) != 0) return 0; hash = strrchr(ciphertext, '#'); if (hash == NULL || hash - (ciphertext + strlen(wpapsk_prefix)) > 32) return 0; hash++; while (hash < ciphertext + strlen(ciphertext)) { if (atoi64[ARCH_INDEX(*hash++)] == 0x7f) return 0; hashlength++; } if (hashlength != 475) return 0; hccap = decode_hccap(ciphertext); if (strlen(hccap->essid) > 32) /* real life limit */ return 0; if(hccap->eapol_size > 256) return 0; if(hccap->eapol_size < 0) return 0; return 1; } #ifndef JOHN_OCL_WPAPSK static MAYBE_INLINE void prf_512(uint32_t * key, uint8_t * data, uint32_t * ret) { HMAC_CTX ctx; char *text = (char*)"Pairwise key expansion"; unsigned char buff[100]; memcpy(buff, text, 22); memcpy(buff + 23, data, 76); buff[22] = 0; buff[76 + 23] = 0; HMAC_Init(&ctx, key, 32, EVP_sha1()); HMAC_Update(&ctx, buff, 100); HMAC_Final(&ctx, (unsigned char *) ret, NULL); HMAC_CTX_cleanup(&ctx); } #endif static void insert_mac(uint8_t * data) { int k = memcmp(hccap.mac1, hccap.mac2, 6); if (k > 0) { memcpy(data, hccap.mac2, 6); memcpy(data + 6, hccap.mac1, 6); } else { memcpy(data, hccap.mac1, 6); memcpy(data + 6, 
hccap.mac2, 6); } } static void insert_nonce(uint8_t * data) { int k = memcmp(hccap.nonce1, hccap.nonce2, 32); if (k > 0) { memcpy(data, hccap.nonce2, 32); memcpy(data + 32, hccap.nonce1, 32); } else { memcpy(data, hccap.nonce1, 32); memcpy(data + 32, hccap.nonce2, 32); } } #ifdef WPAPSK_DEBUG static char *tomac(unsigned char *p) { static char buf[48]; sprintf(buf, "%02X:%02X:%02X:%02X:%02X:%02X", p[0], p[1], p[2], p[3], p[4], p[5]); return buf; } static char *hex(unsigned char *p, int len) { static char buf[1024]; char *op=buf; int i; if (len > 32) { do { for (i = 0; i < 32; ++i) { op += sprintf (op, "%02X", p[i]); if (i<31&&i%4==3) op += sprintf (op, " "); if (i==15) op += sprintf (op, ": "); } len -= 32; p += 32; op += sprintf (op, "\n "); } while (len > 32); } for (i = 0; i < len; ++i) { op += sprintf (op, "%02X", p[i]); if (i<31&&i%4==3) op += sprintf (op, " "); if (i==15) op += sprintf (op, ": "); } return buf; } static void Debug_hccap() { printf("essid: %s\n", hccap.essid); printf("mac1: %s\n", tomac(hccap.mac1)); printf("mac2: %s\n", tomac(hccap.mac2)); printf("nonce1: %s\n", hex(hccap.nonce1, 32)); printf("nonce2: %s\n", hex(hccap.nonce2, 32)); printf("eapol: %s\n", hex(hccap.eapol, 256)); printf("epol_sz: %d (0x%02X)\n", hccap.eapol_size, hccap.eapol_size); printf("keyver: %d\n", hccap.keyver); printf("keymic: %s\n", hex(hccap.keymic, 16)); } #endif static void set_salt(void *salt) { memcpy(&hccap, salt, SALT_SIZE); strncpy((char*)currentsalt.salt, hccap.essid, sizeof(currentsalt.salt)); currentsalt.length = strlen(hccap.essid); #ifdef JOHN_OCL_WPAPSK currentsalt.eapol_size = 1 + (hccap.eapol_size + 8) / 64; memcpy(currentsalt.eapol, hccap.eapol, hccap.eapol_size); memset(currentsalt.eapol + hccap.eapol_size, 0x80, 1); memset(currentsalt.eapol + hccap.eapol_size + 1, 0, 256 + 64 - hccap.eapol_size - 1); if (hccap.keyver != 1) alter_endianity(currentsalt.eapol, 256+56); ((unsigned int*)currentsalt.eapol)[16 * ((hccap.eapol_size + 8) / 64) + ((hccap.keyver 
== 1) ? 14 : 15)] = (64 + hccap.eapol_size) << 3; insert_mac(currentsalt.data); insert_nonce(currentsalt.data + 12); alter_endianity(currentsalt.data, 64 + 12); HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_salt, CL_FALSE, 0, sizeof(wpapsk_salt), &currentsalt, 0, NULL, NULL), "Copy setting to gpu"); #endif //Debug_hccap(); } #ifndef JOHN_OCL_WPAPSK static void clear_keys(void) { new_keys = 1; } #undef set_key static void set_key(char *key, int index) { uint8_t length = strlen(key); if (length > PLAINTEXT_LENGTH) length = PLAINTEXT_LENGTH; inbuffer[index].length = length; memcpy(inbuffer[index].v, key, length); } static char *get_key(int index) { static char ret[PLAINTEXT_LENGTH + 1]; uint8_t length = inbuffer[index].length; memcpy(ret, inbuffer[index].v, length); ret[length] = '\0'; return ret; } static void wpapsk_postprocess(int keys) { int i; uint8_t data[64 + 12]; insert_mac(data); insert_nonce(data + 12); if (hccap.keyver == 1) { #ifdef _OPENMP #pragma omp parallel for default(none) private(i) shared(keys, outbuffer, data, hccap, mic) #endif for (i = 0; i < keys; i++) { uint32_t prf[20/4]; prf_512(outbuffer[i].v, data, prf); HMAC(EVP_md5(), prf, 16, hccap.eapol, hccap.eapol_size, mic[i].keymic, NULL); } } else { #ifdef _OPENMP #pragma omp parallel for default(none) private(i) shared(keys, outbuffer, data, hccap, mic) #endif for (i = 0; i < keys; i++) { uint32_t prf[20/4]; unsigned char keymic[20]; prf_512(outbuffer[i].v, data, prf); HMAC(EVP_sha1(), prf, 16, hccap.eapol, hccap.eapol_size, keymic, NULL); memcpy(mic[i].keymic, keymic, 16); } } } #endif static int binary_hash_0(void *binary) { #ifdef WPAPSK_DEBUG puts("binary"); uint32_t i, *b = binary; for (i = 0; i < 4; i++) printf("%08x ", b[i]); puts(""); #endif return ((uint32_t *) binary)[0] & PH_MASK_0; } static int get_hash_0(int index) { #ifdef WPAPSK_DEBUG int i; puts("get_hash"); uint32_t *b = (uint32_t *)mic[index].keymic; for (i = 0; i < 4; i++) printf("%08x ", b[i]); puts(""); #endif 
uint32_t *h = (uint32_t *) mic[index].keymic; return h[0] & PH_MASK_0; } static int get_hash_1(int index) { uint32_t *h = (uint32_t *) mic[index].keymic; return h[0] & PH_MASK_1; } static int get_hash_2(int index) { uint32_t *h = (uint32_t *) mic[index].keymic; return h[0] & PH_MASK_2; } static int get_hash_3(int index) { uint32_t *h = (uint32_t *) mic[index].keymic; return h[0] & PH_MASK_3; } static int get_hash_4(int index) { uint32_t *h = (uint32_t *) mic[index].keymic; return h[0] & PH_MASK_4; } static int get_hash_5(int index) { uint32_t *h = (uint32_t *) mic[index].keymic; return h[0] & PH_MASK_5; } static int get_hash_6(int index) { uint32_t *h = (uint32_t *) mic[index].keymic; return h[0] & PH_MASK_6; } static int cmp_all(void *binary, int count) { uint32_t i, b = ((uint32_t *) binary)[0]; for (i = 0; i < count; i++) { uint32_t *m = (uint32_t*) mic[i].keymic; if (b == m[0]) return 1; } return 0; } static int cmp_one(void *binary, int index) { uint8_t i; uint32_t *b = (uint32_t*) binary; uint32_t *m = (uint32_t*) mic[index].keymic; for (i = 0; i < BINARY_SIZE / 4; i++) if (b[i] != m[i]) return 0; return 1; } static int cmp_exact(char *source, int index) { return 1; } static int salt_compare(const void *x, const void *y) { int c = strncmp((const char*)x, (const char*)y, 36); if (c) return c; return memcmp((const char*)x, (const char*)y, SALT_SIZE); } #endif
ASTMatchers.h
//===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements matchers to be used together with the MatchFinder to // match AST nodes. // // Matchers are created by generator functions, which can be combined in // a functional in-language DSL to express queries over the C++ AST. // // For example, to match a class with a certain name, one would call: // cxxRecordDecl(hasName("MyClass")) // which returns a matcher that can be used to find all AST nodes that declare // a class named 'MyClass'. // // For more complicated match expressions we're often interested in accessing // multiple parts of the matched AST nodes once a match is found. In that case, // call `.bind("name")` on match expressions that match the nodes you want to // access. // // For example, when we're interested in child classes of a certain class, we // would write: // cxxRecordDecl(hasName("MyClass"), has(recordDecl().bind("child"))) // When the match is found via the MatchFinder, a user provided callback will // be called with a BoundNodes instance that contains a mapping from the // strings that we provided for the `.bind()` calls to the nodes that were // matched. // In the given example, each time our matcher finds a match we get a callback // where "child" is bound to the RecordDecl node of the matching child // class declaration. // // See ASTMatchersInternal.h for a more in-depth explanation of the // implementation details of the matcher framework. // // See ASTMatchFinder.h for how to use the generated matchers to run over // an AST. 
// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #include "clang/AST/ASTContext.h" #include "clang/AST/ASTTypeTraits.h" #include "clang/AST/Attr.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclFriend.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/LambdaCapture.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/OperationKinds.h" #include "clang/AST/ParentMapContext.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TemplateBase.h" #include "clang/AST/TemplateName.h" #include "clang/AST/Type.h" #include "clang/AST/TypeLoc.h" #include "clang/ASTMatchers/ASTMatchersInternal.h" #include "clang/ASTMatchers/ASTMatchersMacros.h" #include "clang/Basic/AttrKinds.h" #include "clang/Basic/ExceptionSpecificationType.h" #include "clang/Basic/FileManager.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TypeTraits.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Regex.h" #include <cassert> #include <cstddef> #include <iterator> #include <limits> #include <string> #include <utility> #include <vector> namespace clang { namespace ast_matchers { /// Maps string IDs to AST nodes matched by parts of a matcher. 
/// /// The bound nodes are generated by calling \c bind("id") on the node matchers /// of the nodes we want to access later. /// /// The instances of BoundNodes are created by \c MatchFinder when the user's /// callbacks are executed every time a match is found. class BoundNodes { public: /// Returns the AST node bound to \c ID. /// /// Returns NULL if there was no node bound to \c ID or if there is a node but /// it cannot be converted to the specified type. template <typename T> const T *getNodeAs(StringRef ID) const { return MyBoundNodes.getNodeAs<T>(ID); } /// Type of mapping from binding identifiers to bound nodes. This type /// is an associative container with a key type of \c std::string and a value /// type of \c clang::DynTypedNode using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap; /// Retrieve mapping from binding identifiers to bound nodes. const IDToNodeMap &getMap() const { return MyBoundNodes.getMap(); } private: friend class internal::BoundNodesTreeBuilder; /// Create BoundNodes from a pre-filled map of bindings. BoundNodes(internal::BoundNodesMap &MyBoundNodes) : MyBoundNodes(MyBoundNodes) {} internal::BoundNodesMap MyBoundNodes; }; /// Types of matchers for the top-level classes in the AST class /// hierarchy. /// @{ using DeclarationMatcher = internal::Matcher<Decl>; using StatementMatcher = internal::Matcher<Stmt>; using TypeMatcher = internal::Matcher<QualType>; using TypeLocMatcher = internal::Matcher<TypeLoc>; using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>; using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>; using CXXBaseSpecifierMatcher = internal::Matcher<CXXBaseSpecifier>; using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>; using TemplateArgumentMatcher = internal::Matcher<TemplateArgument>; using TemplateArgumentLocMatcher = internal::Matcher<TemplateArgumentLoc>; /// @} /// Matches any node. 
/// /// Useful when another matcher requires a child matcher, but there's no /// additional constraint. This will often be used with an explicit conversion /// to an \c internal::Matcher<> type such as \c TypeMatcher. /// /// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g., /// \code /// "int* p" and "void f()" in /// int* p; /// void f(); /// \endcode /// /// Usable as: Any Matcher inline internal::TrueMatcher anything() { return internal::TrueMatcher(); } /// Matches the top declaration context. /// /// Given /// \code /// int X; /// namespace NS { /// int Y; /// } // namespace NS /// \endcode /// decl(hasDeclContext(translationUnitDecl())) /// matches "int X", but not "int Y". extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl> translationUnitDecl; /// Matches typedef declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefDecl() /// matches "typedef int X", but not "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl> typedefDecl; /// Matches typedef name declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefNameDecl() /// matches "typedef int X" and "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl> typedefNameDecl; /// Matches type alias declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typeAliasDecl() /// matches "using Y = int", but not "typedef int X" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl> typeAliasDecl; /// Matches type alias template declarations. /// /// typeAliasTemplateDecl() matches /// \code /// template <typename T> /// using Y = X<T>; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl> typeAliasTemplateDecl; /// Matches AST nodes that were expanded within the main-file. 
/// /// Example matches X but not Y /// (matcher = cxxRecordDecl(isExpansionInMainFile()) /// \code /// #include <Y.h> /// class X {}; /// \endcode /// Y.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInMainFile, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); return SourceManager.isInMainFile( SourceManager.getExpansionLoc(Node.getBeginLoc())); } /// Matches AST nodes that were expanded within system-header-files. /// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInSystemHeader()) /// \code /// #include <SystemHeader.h> /// class X {}; /// \endcode /// SystemHeader.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } return SourceManager.isInSystemHeader(ExpansionLoc); } /// Matches AST nodes that were expanded within files whose name is /// partially matching a given regex. 
/// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*")) /// \code /// #include "ASTMatcher.h" /// class X {}; /// \endcode /// ASTMatcher.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER_REGEX(isExpansionInFileMatching, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc), RegExp) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } auto FileEntry = SourceManager.getFileEntryForID(SourceManager.getFileID(ExpansionLoc)); if (!FileEntry) { return false; } auto Filename = FileEntry->getName(); return RegExp->match(Filename); } /// Matches statements that are (transitively) expanded from the named macro. /// Does not match if only part of the statement is expanded from that macro or /// if different parts of the the statement are expanded from different /// appearances of the macro. AST_POLYMORPHIC_MATCHER_P(isExpandedFromMacro, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc), std::string, MacroName) { // Verifies that the statement' beginning and ending are both expanded from // the same instance of the given macro. auto& Context = Finder->getASTContext(); llvm::Optional<SourceLocation> B = internal::getExpansionLocOfMacro(MacroName, Node.getBeginLoc(), Context); if (!B) return false; llvm::Optional<SourceLocation> E = internal::getExpansionLocOfMacro(MacroName, Node.getEndLoc(), Context); if (!E) return false; return *B == *E; } /// Matches declarations. /// /// Examples matches \c X, \c C, and the friend declaration inside \c C; /// \code /// void X(); /// class C { /// friend X; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<Decl> decl; /// Matches decomposition-declarations. /// /// Examples matches the declaration node with \c foo and \c bar, but not /// \c number. 
/// (matcher = declStmt(has(decompositionDecl())))
///
/// \code
///   int number = 42;
///   auto [foo, bar] = std::make_pair(42, 42);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, DecompositionDecl>
    decompositionDecl;

/// Matches binding declarations
/// Example matches \c foo and \c bar
/// (matcher = bindingDecl())
///
/// \code
///   auto [foo, bar] = std::make_pair(42, 42);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, BindingDecl>
    bindingDecl;

/// Matches a declaration of a linkage specification.
///
/// Given
/// \code
///   extern "C" {}
/// \endcode
/// linkageSpecDecl()
///   matches "extern "C" {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl>
    linkageSpecDecl;

/// Matches a declaration of anything that could have a name.
///
/// Example matches \c X, \c S, the anonymous union type, \c i, and \c U;
/// \code
///   typedef int X;
///   struct S {
///     union {
///       int i;
///     } U;
///   };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl;

/// Matches a declaration of label.
///
/// Given
/// \code
///   goto FOO;
///   FOO: bar();
/// \endcode
/// labelDecl()
///   matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl;

/// Matches a declaration of a namespace.
///
/// Given
/// \code
///   namespace {}
///   namespace test {}
/// \endcode
/// namespaceDecl()
///   matches "namespace {}" and "namespace test {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl>
    namespaceDecl;

/// Matches a declaration of a namespace alias.
///
/// Given
/// \code
///   namespace test {}
///   namespace alias = ::test;
/// \endcode
/// namespaceAliasDecl()
///   matches "namespace alias" but not "namespace test"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl>
    namespaceAliasDecl;

/// Matches class, struct, and union declarations.
/// /// Example matches \c X, \c Z, \c U, and \c S /// \code /// class X; /// template<class T> class Z {}; /// struct S {}; /// union U {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl; /// Matches C++ class declarations. /// /// Example matches \c X, \c Z /// \code /// class X; /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl> cxxRecordDecl; /// Matches C++ class template declarations. /// /// Example matches \c Z /// \code /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl> classTemplateDecl; /// Matches C++ class template specializations. /// /// Given /// \code /// template<typename T> class A {}; /// template<> class A<double> {}; /// A<int> a; /// \endcode /// classTemplateSpecializationDecl() /// matches the specializations \c A<int> and \c A<double> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplateSpecializationDecl> classTemplateSpecializationDecl; /// Matches C++ class template partial specializations. /// /// Given /// \code /// template<class T1, class T2, int I> /// class A {}; /// /// template<class T, int I> /// class A<T, T*, I> {}; /// /// template<> /// class A<int, int, 1> {}; /// \endcode /// classTemplatePartialSpecializationDecl() /// matches the specialization \c A<T,T*,I> but not \c A<int,int,1> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplatePartialSpecializationDecl> classTemplatePartialSpecializationDecl; /// Matches declarator declarations (field, variable, function /// and non-type template parameter declarations). /// /// Given /// \code /// class X { int y; }; /// \endcode /// declaratorDecl() /// matches \c int y. extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl> declaratorDecl; /// Matches parameter variable declarations. 
/// /// Given /// \code /// void f(int x); /// \endcode /// parmVarDecl() /// matches \c int x. extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl> parmVarDecl; /// Matches C++ access specifier declarations. /// /// Given /// \code /// class C { /// public: /// int a; /// }; /// \endcode /// accessSpecDecl() /// matches 'public:' extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl> accessSpecDecl; /// Matches class bases. /// /// Examples matches \c public virtual B. /// \code /// class B {}; /// class C : public virtual B {}; /// \endcode extern const internal::VariadicAllOfMatcher<CXXBaseSpecifier> cxxBaseSpecifier; /// Matches constructor initializers. /// /// Examples matches \c i(42). /// \code /// class C { /// C() : i(42) {} /// int i; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<CXXCtorInitializer> cxxCtorInitializer; /// Matches template arguments. /// /// Given /// \code /// template <typename T> struct C {}; /// C<int> c; /// \endcode /// templateArgument() /// matches 'int' in C<int>. extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument; /// Matches template arguments (with location info). /// /// Given /// \code /// template <typename T> struct C {}; /// C<int> c; /// \endcode /// templateArgumentLoc() /// matches 'int' in C<int>. extern const internal::VariadicAllOfMatcher<TemplateArgumentLoc> templateArgumentLoc; /// Matches template name. /// /// Given /// \code /// template <typename T> class X { }; /// X<int> xi; /// \endcode /// templateName() /// matches 'X' in X<int>. extern const internal::VariadicAllOfMatcher<TemplateName> templateName; /// Matches non-type template parameter declarations. /// /// Given /// \code /// template <typename T, int N> struct C {}; /// \endcode /// nonTypeTemplateParmDecl() /// matches 'N', but not 'T'. 
extern const internal::VariadicDynCastAllOfMatcher<Decl,
                                                   NonTypeTemplateParmDecl>
    nonTypeTemplateParmDecl;

/// Matches template type parameter declarations.
///
/// Given
/// \code
///   template <typename T, int N> struct C {};
/// \endcode
/// templateTypeParmDecl()
///   matches 'T', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl>
    templateTypeParmDecl;

/// Matches template template parameter declarations.
///
/// Given
/// \code
///   template <template <typename> class Z, int N> struct C {};
/// \endcode
/// templateTemplateParmDecl()
///   matches 'Z', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
                                                   TemplateTemplateParmDecl>
    templateTemplateParmDecl;

/// Matches public C++ declarations and C++ base specifiers that specify public
/// inheritance.
///
/// Examples:
/// \code
///   class C {
///   public:    int a; // fieldDecl(isPublic()) matches 'a'
///   protected: int b;
///   private:   int c;
///   };
/// \endcode
///
/// \code
///   class Base {};
///   class Derived1 : public Base {}; // matches 'Base'
///   struct Derived2 : Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isPublic,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
                                                        CXXBaseSpecifier)) {
  return getAccessSpecifier(Node) == AS_public;
}

/// Matches protected C++ declarations and C++ base specifiers that specify
/// protected inheritance.
///
/// Examples:
/// \code
///   class C {
///   public:    int a;
///   protected: int b; // fieldDecl(isProtected()) matches 'b'
///   private:   int c;
///   };
/// \endcode
///
/// \code
///   class Base {};
///   class Derived : protected Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isProtected,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
                                                        CXXBaseSpecifier)) {
  return getAccessSpecifier(Node) == AS_protected;
}

/// Matches private C++ declarations and C++ base specifiers that specify
/// private inheritance.
/// /// Examples: /// \code /// class C { /// public: int a; /// protected: int b; /// private: int c; // fieldDecl(isPrivate()) matches 'c' /// }; /// \endcode /// /// \code /// struct Base {}; /// struct Derived1 : private Base {}; // matches 'Base' /// class Derived2 : Base {}; // matches 'Base' /// \endcode AST_POLYMORPHIC_MATCHER(isPrivate, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, CXXBaseSpecifier)) { return getAccessSpecifier(Node) == AS_private; } /// Matches non-static data members that are bit-fields. /// /// Given /// \code /// class C { /// int a : 2; /// int b; /// }; /// \endcode /// fieldDecl(isBitField()) /// matches 'int a;' but not 'int b;'. AST_MATCHER(FieldDecl, isBitField) { return Node.isBitField(); } /// Matches non-static data members that are bit-fields of the specified /// bit width. /// /// Given /// \code /// class C { /// int a : 2; /// int b : 4; /// int c : 2; /// }; /// \endcode /// fieldDecl(hasBitWidth(2)) /// matches 'int a;' and 'int c;' but not 'int b;'. AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) { return Node.isBitField() && Node.getBitWidthValue(Finder->getASTContext()) == Width; } /// Matches non-static data members that have an in-class initializer. /// /// Given /// \code /// class C { /// int a = 2; /// int b = 3; /// int c; /// }; /// \endcode /// fieldDecl(hasInClassInitializer(integerLiteral(equals(2)))) /// matches 'int a;' but not 'int b;'. /// fieldDecl(hasInClassInitializer(anything())) /// matches 'int a;' and 'int b;' but not 'int c;'. AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr *Initializer = Node.getInClassInitializer(); return (Initializer != nullptr && InnerMatcher.matches(*Initializer, Finder, Builder)); } /// Determines whether the function is "main", which is the entry point /// into an executable program. AST_MATCHER(FunctionDecl, isMain) { return Node.isMain(); } /// Matches the specialized template of a specialization declaration. 
/// /// Given /// \code /// template<typename T> class A {}; #1 /// template<> class A<int> {}; #2 /// \endcode /// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl())) /// matches '#2' with classTemplateDecl() matching the class template /// declaration of 'A' at #1. AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate, internal::Matcher<ClassTemplateDecl>, InnerMatcher) { const ClassTemplateDecl* Decl = Node.getSpecializedTemplate(); return (Decl != nullptr && InnerMatcher.matches(*Decl, Finder, Builder)); } /// Matches a declaration that has been implicitly added /// by the compiler (eg. implicit default/copy constructors). AST_MATCHER(Decl, isImplicit) { return Node.isImplicit(); } /// Matches classTemplateSpecializations, templateSpecializationType and /// functionDecl that have at least one TemplateArgument matching the given /// InnerMatcher. /// /// Given /// \code /// template<typename T> class A {}; /// template<> class A<double> {}; /// A<int> a; /// /// template<typename T> f() {}; /// void func() { f<int>(); }; /// \endcode /// /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToType(asString("int")))) /// matches the specialization \c A<int> /// /// functionDecl(hasAnyTemplateArgument(refersToType(asString("int")))) /// matches the specialization \c f<int> AST_POLYMORPHIC_MATCHER_P( hasAnyTemplateArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType, FunctionDecl), internal::Matcher<TemplateArgument>, InnerMatcher) { ArrayRef<TemplateArgument> List = internal::getTemplateSpecializationArgs(Node); return matchesFirstInRange(InnerMatcher, List.begin(), List.end(), Finder, Builder) != List.end(); } /// Causes all nested matchers to be matched with the specified traversal kind. 
/// /// Given /// \code /// void foo() /// { /// int i = 3.0; /// } /// \endcode /// The matcher /// \code /// traverse(TK_IgnoreUnlessSpelledInSource, /// varDecl(hasInitializer(floatLiteral().bind("init"))) /// ) /// \endcode /// matches the variable declaration with "init" bound to the "3.0". template <typename T> internal::Matcher<T> traverse(TraversalKind TK, const internal::Matcher<T> &InnerMatcher) { return internal::DynTypedMatcher::constructRestrictedWrapper( new internal::TraversalMatcher<T>(TK, InnerMatcher), InnerMatcher.getID().first) .template unconditionalConvertTo<T>(); } template <typename T> internal::BindableMatcher<T> traverse(TraversalKind TK, const internal::BindableMatcher<T> &InnerMatcher) { return internal::BindableMatcher<T>( internal::DynTypedMatcher::constructRestrictedWrapper( new internal::TraversalMatcher<T>(TK, InnerMatcher), InnerMatcher.getID().first) .template unconditionalConvertTo<T>()); } template <typename... T> internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>> traverse(TraversalKind TK, const internal::VariadicOperatorMatcher<T...> &InnerMatcher) { return internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>( TK, InnerMatcher); } template <template <typename ToArg, typename FromArg> class ArgumentAdapterT, typename T, typename ToTypes> internal::TraversalWrapper< internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>> traverse(TraversalKind TK, const internal::ArgumentAdaptingMatcherFuncAdaptor< ArgumentAdapterT, T, ToTypes> &InnerMatcher) { return internal::TraversalWrapper< internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>>(TK, InnerMatcher); } template <template <typename T, typename... P> class MatcherT, typename... 
P, typename ReturnTypesF> internal::TraversalWrapper< internal::PolymorphicMatcher<MatcherT, ReturnTypesF, P...>> traverse(TraversalKind TK, const internal::PolymorphicMatcher<MatcherT, ReturnTypesF, P...> &InnerMatcher) { return internal::TraversalWrapper< internal::PolymorphicMatcher<MatcherT, ReturnTypesF, P...>>(TK, InnerMatcher); } template <typename... T> internal::Matcher<typename internal::GetClade<T...>::Type> traverse(TraversalKind TK, const internal::MapAnyOfHelper<T...> &InnerMatcher) { return traverse(TK, InnerMatcher.with()); } /// Matches expressions that match InnerMatcher after any implicit AST /// nodes are stripped off. /// /// Parentheses and explicit casts are not discarded. /// Given /// \code /// class C {}; /// C a = C(); /// C b; /// C c = b; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr()))) /// \endcode /// would match the declarations for a, b, and c. /// While /// \code /// varDecl(hasInitializer(cxxConstructExpr())) /// \endcode /// only match the declarations for b and c. AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImplicit(), Finder, Builder); } /// Matches expressions that match InnerMatcher after any implicit casts /// are stripped off. /// /// Parentheses and explicit casts are not discarded. /// Given /// \code /// int arr[5]; /// int a = 0; /// char b = 0; /// const int c = a; /// int *d = arr; /// long e = (long) 0l; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringImpCasts(declRefExpr()))) /// \endcode /// would match the declarations for a, b, c, and d, but not e. /// While /// \code /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// \endcode /// only match the declarations for b, c, and d. 
AST_MATCHER_P(Expr, ignoringImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImpCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after parentheses and /// casts are stripped off. /// /// Implicit and non-C Style casts are also discarded. /// Given /// \code /// int a = 0; /// char b = (0); /// void* c = reinterpret_cast<char*>(0); /// char d = char(0); /// \endcode /// The matcher /// varDecl(hasInitializer(ignoringParenCasts(integerLiteral()))) /// would match the declarations for a, b, c, and d. /// while /// varDecl(hasInitializer(integerLiteral())) /// only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after implicit casts and /// parentheses are stripped off. /// /// Explicit casts are not discarded. /// Given /// \code /// int arr[5]; /// int a = 0; /// char b = (0); /// const int c = a; /// int *d = (arr); /// long e = ((long) 0l); /// \endcode /// The matchers /// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr()))) /// would match the declarations for a, b, c, and d, but not e. /// while /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// would only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenImpCasts(), Finder, Builder); } /// Matches types that match InnerMatcher after any parens are stripped. /// /// Given /// \code /// void (*fp)(void); /// \endcode /// The matcher /// \code /// varDecl(hasType(pointerType(pointee(ignoringParens(functionType()))))) /// \endcode /// would match the declaration for fp. 
AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>,
                       InnerMatcher, 0) {
  return InnerMatcher.matches(Node.IgnoreParens(), Finder, Builder);
}

/// Overload \c ignoringParens for \c Expr.
///
/// Given
/// \code
///   const char* str = ("my-string");
/// \endcode
/// The matcher
/// \code
///   implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral())))
/// \endcode
/// would match the implicit cast resulting from the assignment.
AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>,
                       InnerMatcher, 1) {
  const Expr *E = Node.IgnoreParens();
  return InnerMatcher.matches(*E, Finder, Builder);
}

/// Matches expressions that are instantiation-dependent even if they are
/// neither type- nor value-dependent.
///
/// In the following example, the expression sizeof(sizeof(T() + T()))
/// is instantiation-dependent (since it involves a template parameter T),
/// but is neither type- nor value-dependent, since the type of the inner
/// sizeof is known (std::size_t) and therefore the size of the outer
/// sizeof is known.
/// \code
///   template<typename T>
///   void f(T x, T y) { sizeof(sizeof(T() + T())); }
/// \endcode
/// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()))
AST_MATCHER(Expr, isInstantiationDependent) {
  return Node.isInstantiationDependent();
}

/// Matches expressions that are type-dependent because the template type
/// is not yet instantiated.
///
/// For example, the expressions "x" and "x + y" are type-dependent in
/// the following code, but "y" is not type-dependent:
/// \code
///   template<typename T>
///   void add(T x, int y) {
///     x + y;
///   }
/// \endcode
/// expr(isTypeDependent()) matches x + y
AST_MATCHER(Expr, isTypeDependent) { return Node.isTypeDependent(); }

/// Matches expressions that are value-dependent because they contain a
/// non-type template parameter.
///
/// For example, the expression "Size" in the following example is
/// value-dependent.
/// \code /// template<int Size> int f() { return Size; } /// \endcode /// expr(isValueDependent()) matches return Size AST_MATCHER(Expr, isValueDependent) { return Node.isValueDependent(); } /// Matches classTemplateSpecializations, templateSpecializationType and /// functionDecl where the n'th TemplateArgument matches the given InnerMatcher. /// /// Given /// \code /// template<typename T, typename U> class A {}; /// A<bool, int> b; /// A<int, bool> c; /// /// template<typename T> void f() {} /// void func() { f<int>(); }; /// \endcode /// classTemplateSpecializationDecl(hasTemplateArgument( /// 1, refersToType(asString("int")))) /// matches the specialization \c A<bool, int> /// /// functionDecl(hasTemplateArgument(0, refersToType(asString("int")))) /// matches the specialization \c f<int> AST_POLYMORPHIC_MATCHER_P2( hasTemplateArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType, FunctionDecl), unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) { ArrayRef<TemplateArgument> List = internal::getTemplateSpecializationArgs(Node); if (List.size() <= N) return false; return InnerMatcher.matches(List[N], Finder, Builder); } /// Matches if the number of template arguments equals \p N. /// /// Given /// \code /// template<typename T> struct C {}; /// C<int> c; /// \endcode /// classTemplateSpecializationDecl(templateArgumentCountIs(1)) /// matches C<int>. AST_POLYMORPHIC_MATCHER_P( templateArgumentCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType), unsigned, N) { return internal::getTemplateSpecializationArgs(Node).size() == N; } /// Matches a TemplateArgument that refers to a certain type. 
/// /// Given /// \code /// struct X {}; /// template<typename T> struct A {}; /// A<X> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToType(class(hasName("X"))))) /// matches the specialization \c A<X> AST_MATCHER_P(TemplateArgument, refersToType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Type) return false; return InnerMatcher.matches(Node.getAsType(), Finder, Builder); } /// Matches a TemplateArgument that refers to a certain template. /// /// Given /// \code /// template<template <typename> class S> class X {}; /// template<typename T> class Y {}; /// X<Y> xi; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToTemplate(templateName()))) /// matches the specialization \c X<Y> AST_MATCHER_P(TemplateArgument, refersToTemplate, internal::Matcher<TemplateName>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Template) return false; return InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder); } /// Matches a canonical TemplateArgument that refers to a certain /// declaration. /// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToDeclaration(fieldDecl(hasName("next"))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, refersToDeclaration, internal::Matcher<Decl>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Declaration) return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder); return false; } /// Matches a sugar TemplateArgument that refers to a certain expression. 
/// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// templateSpecializationType(hasAnyTemplateArgument( /// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next")))))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Expression) return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder); return false; } /// Matches a TemplateArgument that is an integral value. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(isIntegral())) /// matches the implicit instantiation of C in C<42> /// with isIntegral() matching 42. AST_MATCHER(TemplateArgument, isIntegral) { return Node.getKind() == TemplateArgument::Integral; } /// Matches a TemplateArgument that refers to an integral type. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(refersToIntegralType(asString("int")))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, refersToIntegralType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Integral) return false; return InnerMatcher.matches(Node.getIntegralType(), Finder, Builder); } /// Matches a TemplateArgument of integral type with a given value. /// /// Note that 'Value' is a string as the template argument's value is /// an arbitrary precision integer. 'Value' must be euqal to the canonical /// representation of that integral value in base 10. 
/// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(equalsIntegralValue("42"))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, equalsIntegralValue, std::string, Value) { if (Node.getKind() != TemplateArgument::Integral) return false; return toString(Node.getAsIntegral(), 10) == Value; } /// Matches an Objective-C autorelease pool statement. /// /// Given /// \code /// @autoreleasepool { /// int x = 0; /// } /// \endcode /// autoreleasePoolStmt(stmt()) matches the declaration of "x" /// inside the autorelease pool. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAutoreleasePoolStmt> autoreleasePoolStmt; /// Matches any value declaration. /// /// Example matches A, B, C and F /// \code /// enum X { A, B, C }; /// void F(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl; /// Matches C++ constructor declarations. /// /// Example matches Foo::Foo() and Foo::Foo(int) /// \code /// class Foo { /// public: /// Foo(); /// Foo(int); /// int DoSomething(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl> cxxConstructorDecl; /// Matches explicit C++ destructor declarations. /// /// Example matches Foo::~Foo() /// \code /// class Foo { /// public: /// virtual ~Foo(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl> cxxDestructorDecl; /// Matches enum declarations. /// /// Example matches X /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl; /// Matches enum constants. /// /// Example matches A, B, C /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl> enumConstantDecl; /// Matches tag declarations. 
///
/// Example matches X, Z, U, S, E
/// \code
///   class X;
///   template<class T> class Z {};
///   struct S {};
///   union U {};
///   enum E {
///     A, B, C
///   };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl;

/// Matches method declarations.
///
/// Example matches y
/// \code
///   class X { void y(); };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl>
    cxxMethodDecl;

/// Matches conversion operator declarations.
///
/// Example matches the operator.
/// \code
///   class X { operator int() const; };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl>
    cxxConversionDecl;

/// Matches user-defined and implicitly generated deduction guides.
///
/// Example matches the deduction guide.
/// \code
///   template<typename T>
///   class X { X(int); };
///   X(int) -> X<int>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl>
    cxxDeductionGuideDecl;

/// Matches variable declarations.
///
/// Note: this does not match declarations of member variables, which are
/// "field" declarations in Clang parlance.
///
/// Example matches a
/// \code
///   int a;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl;

/// Matches field declarations.
///
/// Given
/// \code
///   class X { int m; };
/// \endcode
/// fieldDecl()
///   matches 'm'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl;

/// Matches indirect field declarations.
///
/// Given
/// \code
///   struct X { struct { int a; }; };
/// \endcode
/// indirectFieldDecl()
///   matches 'a'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl>
    indirectFieldDecl;

/// Matches function declarations.
///
/// Example matches f
/// \code
///   void f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl>
    functionDecl;

/// Matches C++ function template declarations.
/// /// Example matches f /// \code /// template<class T> void f(T t) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl> functionTemplateDecl; /// Matches friend declarations. /// /// Given /// \code /// class X { friend void foo(); }; /// \endcode /// friendDecl() /// matches 'friend void foo()'. extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl; /// Matches statements. /// /// Given /// \code /// { ++a; } /// \endcode /// stmt() /// matches both the compound statement '{ ++a; }' and '++a'. extern const internal::VariadicAllOfMatcher<Stmt> stmt; /// Matches declaration statements. /// /// Given /// \code /// int a; /// \endcode /// declStmt() /// matches 'int a'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt; /// Matches member expressions. /// /// Given /// \code /// class Y { /// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; } /// int a; static int b; /// }; /// \endcode /// memberExpr() /// matches this->x, x, y.x, a, this->b extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr; /// Matches unresolved member expressions. /// /// Given /// \code /// struct X { /// template <class T> void f(); /// void g(); /// }; /// template <class T> void h() { X x; x.f<T>(); x.g(); } /// \endcode /// unresolvedMemberExpr() /// matches x.f<T> extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr> unresolvedMemberExpr; /// Matches member expressions where the actual member referenced could not be /// resolved because the base expression or the member name was dependent. /// /// Given /// \code /// template <class T> void f() { T t; t.g(); } /// \endcode /// cxxDependentScopeMemberExpr() /// matches t.g extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDependentScopeMemberExpr> cxxDependentScopeMemberExpr; /// Matches call expressions. 
/// /// Example matches x.y() and y() /// \code /// X x; /// x.y(); /// y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr; /// Matches call expressions which were resolved using ADL. /// /// Example matches y(x) but not y(42) or NS::y(x). /// \code /// namespace NS { /// struct X {}; /// void y(X); /// } /// /// void y(...); /// /// void test() { /// NS::X x; /// y(x); // Matches /// NS::y(x); // Doesn't match /// y(42); // Doesn't match /// using NS::y; /// y(x); // Found by both unqualified lookup and ADL, doesn't match // } /// \endcode AST_MATCHER(CallExpr, usesADL) { return Node.usesADL(); } /// Matches lambda expressions. /// /// Example matches [&](){return 5;} /// \code /// [&](){return 5;} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr; /// Matches member call expressions. /// /// Example matches x.y() /// \code /// X x; /// x.y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr> cxxMemberCallExpr; /// Matches ObjectiveC Message invocation expressions. /// /// The innermost message send invokes the "alloc" class method on the /// NSString class, while the outermost message send invokes the /// "initWithString" instance method on the object returned from /// NSString's "alloc". This matcher should match both message sends. /// \code /// [[NSString alloc] initWithString:@"Hello"] /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr> objcMessageExpr; /// Matches Objective-C interface declarations. /// /// Example matches Foo /// \code /// @interface Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl> objcInterfaceDecl; /// Matches Objective-C implementation declarations. 
/// /// Example matches Foo /// \code /// @implementation Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl> objcImplementationDecl; /// Matches Objective-C protocol declarations. /// /// Example matches FooDelegate /// \code /// @protocol FooDelegate /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl> objcProtocolDecl; /// Matches Objective-C category declarations. /// /// Example matches Foo (Additions) /// \code /// @interface Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl> objcCategoryDecl; /// Matches Objective-C category definitions. /// /// Example matches Foo (Additions) /// \code /// @implementation Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl> objcCategoryImplDecl; /// Matches Objective-C method declarations. /// /// Example matches both declaration and definition of -[Foo method] /// \code /// @interface Foo /// - (void)method; /// @end /// /// @implementation Foo /// - (void)method {} /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl> objcMethodDecl; /// Matches block declarations. /// /// Example matches the declaration of the nameless block printing an input /// integer. /// /// \code /// myFunc(^(int p) { /// printf("%d", p); /// }) /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl> blockDecl; /// Matches Objective-C instance variable declarations. /// /// Example matches _enabled /// \code /// @implementation Foo { /// BOOL _enabled; /// } /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl> objcIvarDecl; /// Matches Objective-C property declarations. 
/// /// Example matches enabled /// \code /// @interface Foo /// @property BOOL enabled; /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl> objcPropertyDecl; /// Matches Objective-C \@throw statements. /// /// Example matches \@throw /// \code /// @throw obj; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt> objcThrowStmt; /// Matches Objective-C @try statements. /// /// Example matches @try /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt> objcTryStmt; /// Matches Objective-C @catch statements. /// /// Example matches @catch /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt> objcCatchStmt; /// Matches Objective-C @finally statements. /// /// Example matches @finally /// \code /// @try {} /// @finally {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt> objcFinallyStmt; /// Matches expressions that introduce cleanups to be run at the end /// of the sub-expression's evaluation. /// /// Example matches std::string() /// \code /// const std::string str = std::string(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups> exprWithCleanups; /// Matches init list expressions. /// /// Given /// \code /// int a[] = { 1, 2 }; /// struct B { int x, y; }; /// B b = { 5, 6 }; /// \endcode /// initListExpr() /// matches "{ 1, 2 }" and "{ 5, 6 }" extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr> initListExpr; /// Matches the syntactic form of init list expressions /// (if expression have it). AST_MATCHER_P(InitListExpr, hasSyntacticForm, internal::Matcher<Expr>, InnerMatcher) { const Expr *SyntForm = Node.getSyntacticForm(); return (SyntForm != nullptr && InnerMatcher.matches(*SyntForm, Finder, Builder)); } /// Matches C++ initializer list expressions. 
/// /// Given /// \code /// std::vector<int> a({ 1, 2, 3 }); /// std::vector<int> b = { 4, 5 }; /// int c[] = { 6, 7 }; /// std::pair<int, int> d = { 8, 9 }; /// \endcode /// cxxStdInitializerListExpr() /// matches "{ 1, 2, 3 }" and "{ 4, 5 }" extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStdInitializerListExpr> cxxStdInitializerListExpr; /// Matches implicit initializers of init list expressions. /// /// Given /// \code /// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 }; /// \endcode /// implicitValueInitExpr() /// matches "[0].y" (implicitly) extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr> implicitValueInitExpr; /// Matches paren list expressions. /// ParenListExprs don't have a predefined type and are used for late parsing. /// In the final AST, they can be met in template declarations. /// /// Given /// \code /// template<typename T> class X { /// void f() { /// X x(*this); /// int a = 0, b = 1; int i = (a, b); /// } /// }; /// \endcode /// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b) /// has a predefined type and is a ParenExpr, not a ParenListExpr. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr> parenListExpr; /// Matches substitutions of non-type template parameters. /// /// Given /// \code /// template <int N> /// struct A { static const int n = N; }; /// struct B : public A<42> {}; /// \endcode /// substNonTypeTemplateParmExpr() /// matches "N" in the right-hand side of "static const int n = N;" extern const internal::VariadicDynCastAllOfMatcher<Stmt, SubstNonTypeTemplateParmExpr> substNonTypeTemplateParmExpr; /// Matches using declarations. /// /// Given /// \code /// namespace X { int x; } /// using X::x; /// \endcode /// usingDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl; /// Matches using-enum declarations. 
/// /// Given /// \code /// namespace X { enum x {...}; } /// using enum X::x; /// \endcode /// usingEnumDecl() /// matches \code using enum X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingEnumDecl> usingEnumDecl; /// Matches using namespace declarations. /// /// Given /// \code /// namespace X { int x; } /// using namespace X; /// \endcode /// usingDirectiveDecl() /// matches \code using namespace X \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl> usingDirectiveDecl; /// Matches reference to a name that can be looked up during parsing /// but could not be resolved to a specific declaration. /// /// Given /// \code /// template<typename T> /// T foo() { T a; return a; } /// template<typename T> /// void bar() { /// foo<T>(); /// } /// \endcode /// unresolvedLookupExpr() /// matches \code foo<T>() \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr> unresolvedLookupExpr; /// Matches unresolved using value declarations. /// /// Given /// \code /// template<typename X> /// class C : private X { /// using X::x; /// }; /// \endcode /// unresolvedUsingValueDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingValueDecl> unresolvedUsingValueDecl; /// Matches unresolved using value declarations that involve the /// typename. /// /// Given /// \code /// template <typename T> /// struct Base { typedef T Foo; }; /// /// template<typename T> /// struct S : private Base<T> { /// using typename Base<T>::Foo; /// }; /// \endcode /// unresolvedUsingTypenameDecl() /// matches \code using Base<T>::Foo \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingTypenameDecl> unresolvedUsingTypenameDecl; /// Matches a constant expression wrapper. 
/// /// Example matches the constant in the case statement: /// (matcher = constantExpr()) /// \code /// switch (a) { /// case 37: break; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr> constantExpr; /// Matches parentheses used in expressions. /// /// Example matches (foo() + 1) /// \code /// int foo() { return 1; } /// int a = (foo() + 1); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr; /// Matches constructor call expressions (including implicit ones). /// /// Example matches string(ptr, n) and ptr within arguments of f /// (matcher = cxxConstructExpr()) /// \code /// void f(const string &a, const string &b); /// char *ptr; /// int n; /// f(string(ptr, n), ptr); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr> cxxConstructExpr; /// Matches unresolved constructor call expressions. /// /// Example matches T(t) in return statement of f /// (matcher = cxxUnresolvedConstructExpr()) /// \code /// template <typename T> /// void f(const T& t) { return T(t); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXUnresolvedConstructExpr> cxxUnresolvedConstructExpr; /// Matches implicit and explicit this expressions. /// /// Example matches the implicit this expression in "return i". /// (matcher = cxxThisExpr()) /// \code /// struct foo { /// int i; /// int f() { return i; } /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr> cxxThisExpr; /// Matches nodes where temporaries are created. /// /// Example matches FunctionTakesString(GetStringByValue()) /// (matcher = cxxBindTemporaryExpr()) /// \code /// FunctionTakesString(GetStringByValue()); /// FunctionTakesStringByPointer(GetStringPointer()); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr> cxxBindTemporaryExpr; /// Matches nodes where temporaries are materialized. 
/// /// Example: Given /// \code /// struct T {void func();}; /// T f(); /// void g(T); /// \endcode /// materializeTemporaryExpr() matches 'f()' in these statements /// \code /// T u(f()); /// g(f()); /// f().func(); /// \endcode /// but does not match /// \code /// f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, MaterializeTemporaryExpr> materializeTemporaryExpr; /// Matches new expressions. /// /// Given /// \code /// new X; /// \endcode /// cxxNewExpr() /// matches 'new X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr; /// Matches delete expressions. /// /// Given /// \code /// delete X; /// \endcode /// cxxDeleteExpr() /// matches 'delete X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr> cxxDeleteExpr; /// Matches noexcept expressions. /// /// Given /// \code /// bool a() noexcept; /// bool b() noexcept(true); /// bool c() noexcept(false); /// bool d() noexcept(noexcept(a())); /// bool e = noexcept(b()) || noexcept(c()); /// \endcode /// cxxNoexceptExpr() /// matches `noexcept(a())`, `noexcept(b())` and `noexcept(c())`. /// doesn't match the noexcept specifier in the declarations a, b, c or d. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr> cxxNoexceptExpr; /// Matches array subscript expressions. /// /// Given /// \code /// int i = a[1]; /// \endcode /// arraySubscriptExpr() /// matches "a[1]" extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr> arraySubscriptExpr; /// Matches the value of a default argument at the call site. /// /// Example matches the CXXDefaultArgExpr placeholder inserted for the /// default value of the second parameter in the call expression f(42) /// (matcher = cxxDefaultArgExpr()) /// \code /// void f(int x, int y = 0); /// f(42); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr> cxxDefaultArgExpr; /// Matches overloaded operator calls. 
/// /// Note that if an operator isn't overloaded, it won't match. Instead, use /// binaryOperator matcher. /// Currently it does not match operators such as new delete. /// FIXME: figure out why these do not match? /// /// Example matches both operator<<((o << b), c) and operator<<(o, b) /// (matcher = cxxOperatorCallExpr()) /// \code /// ostream &operator<< (ostream &out, int i) { }; /// ostream &o; int b = 1, c = 1; /// o << b << c; /// \endcode /// See also the binaryOperation() matcher for more-general matching of binary /// uses of this AST node. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr> cxxOperatorCallExpr; /// Matches rewritten binary operators /// /// Example matches use of "<": /// \code /// #include <compare> /// struct HasSpaceshipMem { /// int a; /// constexpr auto operator<=>(const HasSpaceshipMem&) const = default; /// }; /// void compare() { /// HasSpaceshipMem hs1, hs2; /// if (hs1 < hs2) /// return; /// } /// \endcode /// See also the binaryOperation() matcher for more-general matching /// of this AST node. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXRewrittenBinaryOperator> cxxRewrittenBinaryOperator; /// Matches expressions. /// /// Example matches x() /// \code /// void f() { x(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr; /// Matches expressions that refer to declarations. /// /// Example matches x in if (x) /// \code /// bool x; /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr> declRefExpr; /// Matches a reference to an ObjCIvar. /// /// Example: matches "a" in "init" method: /// \code /// @implementation A { /// NSString *a; /// } /// - (void) init { /// a = @"hello"; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr> objcIvarRefExpr; /// Matches a reference to a block. 
/// /// Example: matches "^{}": /// \code /// void f() { ^{}(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr; /// Matches if statements. /// /// Example matches 'if (x) {}' /// \code /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt; /// Matches for statements. /// /// Example matches 'for (;;) {}' /// \code /// for (;;) {} /// int i[] = {1, 2, 3}; for (auto a : i); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt; /// Matches the increment statement of a for loop. /// /// Example: /// forStmt(hasIncrement(unaryOperator(hasOperatorName("++")))) /// matches '++x' in /// \code /// for (x; x < N; ++x) { } /// \endcode AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Increment = Node.getInc(); return (Increment != nullptr && InnerMatcher.matches(*Increment, Finder, Builder)); } /// Matches the initialization statement of a for loop. /// /// Example: /// forStmt(hasLoopInit(declStmt())) /// matches 'int x = 0' in /// \code /// for (int x = 0; x < N; ++x) { } /// \endcode AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Init = Node.getInit(); return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder)); } /// Matches range-based for statements. /// /// cxxForRangeStmt() matches 'for (auto a : i)' /// \code /// int i[] = {1, 2, 3}; for (auto a : i); /// for(int j = 0; j < 5; ++j); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt> cxxForRangeStmt; /// Matches the initialization statement of a for loop. 
/// /// Example: /// forStmt(hasLoopVariable(anything())) /// matches 'int x' in /// \code /// for (int x : a) { } /// \endcode AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>, InnerMatcher) { const VarDecl *const Var = Node.getLoopVariable(); return (Var != nullptr && InnerMatcher.matches(*Var, Finder, Builder)); } /// Matches the range initialization statement of a for loop. /// /// Example: /// forStmt(hasRangeInit(anything())) /// matches 'a' in /// \code /// for (int x : a) { } /// \endcode AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>, InnerMatcher) { const Expr *const Init = Node.getRangeInit(); return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder)); } /// Matches while statements. /// /// Given /// \code /// while (true) {} /// \endcode /// whileStmt() /// matches 'while (true) {}'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt; /// Matches do statements. /// /// Given /// \code /// do {} while (true); /// \endcode /// doStmt() /// matches 'do {} while(true)' extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt; /// Matches break statements. /// /// Given /// \code /// while (true) { break; } /// \endcode /// breakStmt() /// matches 'break' extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt; /// Matches continue statements. /// /// Given /// \code /// while (true) { continue; } /// \endcode /// continueStmt() /// matches 'continue' extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt> continueStmt; /// Matches co_return statements. /// /// Given /// \code /// while (true) { co_return; } /// \endcode /// coreturnStmt() /// matches 'co_return' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CoreturnStmt> coreturnStmt; /// Matches return statements. 
/// /// Given /// \code /// return 1; /// \endcode /// returnStmt() /// matches 'return 1' extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt; /// Matches goto statements. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// gotoStmt() /// matches 'goto FOO' extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt; /// Matches label statements. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelStmt() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt; /// Matches address of label statements (GNU extension). /// /// Given /// \code /// FOO: bar(); /// void *ptr = &&FOO; /// goto *bar; /// \endcode /// addrLabelExpr() /// matches '&&FOO' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr> addrLabelExpr; /// Matches switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchStmt() /// matches 'switch(a)'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt; /// Matches case and default statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchCase() /// matches 'case 42:' and 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase; /// Matches case statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// caseStmt() /// matches 'case 42:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt; /// Matches default statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// defaultStmt() /// matches 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt> defaultStmt; /// Matches compound statements. 
/// /// Example matches '{}' and '{{}}' in 'for (;;) {{}}' /// \code /// for (;;) {{}} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt> compoundStmt; /// Matches catch statements. /// /// \code /// try {} catch(int i) {} /// \endcode /// cxxCatchStmt() /// matches 'catch(int i)' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt> cxxCatchStmt; /// Matches try statements. /// /// \code /// try {} catch(int i) {} /// \endcode /// cxxTryStmt() /// matches 'try {}' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt; /// Matches throw expressions. /// /// \code /// try { throw 5; } catch(int i) {} /// \endcode /// cxxThrowExpr() /// matches 'throw 5' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr> cxxThrowExpr; /// Matches null statements. /// /// \code /// foo();; /// \endcode /// nullStmt() /// matches the second ';' extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt; /// Matches asm statements. /// /// \code /// int i = 100; /// __asm("mov al, 2"); /// \endcode /// asmStmt() /// matches '__asm("mov al, 2")' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt; /// Matches bool literals. /// /// Example matches true /// \code /// true /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr> cxxBoolLiteral; /// Matches string literals (also matches wide string literals). /// /// Example matches "abcd", L"abcd" /// \code /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral> stringLiteral; /// Matches character literals (also matches wchar_t). /// /// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral), /// though. 
/// /// Example matches 'a', L'a' /// \code /// char ch = 'a'; /// wchar_t chw = L'a'; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral> characterLiteral; /// Matches integer literals of all sizes / encodings, e.g. /// 1, 1L, 0x1 and 1U. /// /// Does not match character-encoded integers such as L'a'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral> integerLiteral; /// Matches float literals of all sizes / encodings, e.g. /// 1.0, 1.0f, 1.0L and 1e10. /// /// Does not match implicit conversions such as /// \code /// float a = 10; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral> floatLiteral; /// Matches imaginary literals, which are based on integer and floating /// point literals e.g.: 1i, 1.0i extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral> imaginaryLiteral; /// Matches fixed point literals extern const internal::VariadicDynCastAllOfMatcher<Stmt, FixedPointLiteral> fixedPointLiteral; /// Matches user defined literal operator call. /// /// Example match: "foo"_suffix extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral> userDefinedLiteral; /// Matches compound (i.e. non-scalar) literals /// /// Example match: {1}, (1, 2) /// \code /// int array[4] = {1}; /// vector int myvec = (vector int)(1, 2); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr> compoundLiteralExpr; /// Matches co_await expressions. /// /// Given /// \code /// co_await 1; /// \endcode /// coawaitExpr() /// matches 'co_await 1' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CoawaitExpr> coawaitExpr; /// Matches co_await expressions where the type of the promise is dependent extern const internal::VariadicDynCastAllOfMatcher<Stmt, DependentCoawaitExpr> dependentCoawaitExpr; /// Matches co_yield expressions. 
/// /// Given /// \code /// co_yield 1; /// \endcode /// coyieldExpr() /// matches 'co_yield 1' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CoyieldExpr> coyieldExpr; /// Matches nullptr literal. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr> cxxNullPtrLiteralExpr; /// Matches GNU __builtin_choose_expr. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr> chooseExpr; /// Matches GNU __null expression. extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr> gnuNullExpr; /// Matches C11 _Generic expression. extern const internal::VariadicDynCastAllOfMatcher<Stmt, GenericSelectionExpr> genericSelectionExpr; /// Matches atomic builtins. /// Example matches __atomic_load_n(ptr, 1) /// \code /// void foo() { int *ptr; __atomic_load_n(ptr, 1); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr; /// Matches statement expression (GNU extension). /// /// Example match: ({ int X = 4; X; }) /// \code /// int C = ({ int X = 4; X; }); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr; /// Matches binary operator expressions. /// /// Example matches a || b /// \code /// !(a || b) /// \endcode /// See also the binaryOperation() matcher for more-general matching. extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator> binaryOperator; /// Matches unary operator expressions. /// /// Example matches !a /// \code /// !a || b /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator> unaryOperator; /// Matches conditional operator expressions. /// /// Example matches a ? b : c /// \code /// (a ? b : c) + 42 /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator> conditionalOperator; /// Matches binary conditional operator expressions (GNU extension). 
/// /// Example matches a ?: b /// \code /// (a ?: b) + 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryConditionalOperator> binaryConditionalOperator; /// Matches opaque value expressions. They are used as helpers /// to reference another expressions and can be met /// in BinaryConditionalOperators, for example. /// /// Example matches 'a' /// \code /// (a ?: c) + 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr> opaqueValueExpr; /// Matches a C++ static_assert declaration. /// /// Example: /// staticAssertExpr() /// matches /// static_assert(sizeof(S) == sizeof(int)) /// in /// \code /// struct S { /// int x; /// }; /// static_assert(sizeof(S) == sizeof(int)); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl> staticAssertDecl; /// Matches a reinterpret_cast expression. /// /// Either the source expression or the destination type can be matched /// using has(), but hasDestinationType() is more specific and can be /// more readable. /// /// Example matches reinterpret_cast<char*>(&p) in /// \code /// void* p = reinterpret_cast<char*>(&p); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr> cxxReinterpretCastExpr; /// Matches a C++ static_cast expression. /// /// \see hasDestinationType /// \see reinterpretCast /// /// Example: /// cxxStaticCastExpr() /// matches /// static_cast<long>(8) /// in /// \code /// long eight(static_cast<long>(8)); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr> cxxStaticCastExpr; /// Matches a dynamic_cast expression. /// /// Example: /// cxxDynamicCastExpr() /// matches /// dynamic_cast<D*>(&b); /// in /// \code /// struct B { virtual ~B() {} }; struct D : B {}; /// B b; /// D* p = dynamic_cast<D*>(&b); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr> cxxDynamicCastExpr; /// Matches a const_cast expression. 
/// /// Example: Matches const_cast<int*>(&r) in /// \code /// int n = 42; /// const int &r(n); /// int* p = const_cast<int*>(&r); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr> cxxConstCastExpr; /// Matches a C-style cast expression. /// /// Example: Matches (int) 2.2f in /// \code /// int i = (int) 2.2f; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr> cStyleCastExpr; /// Matches explicit cast expressions. /// /// Matches any cast expression written in user code, whether it be a /// C-style cast, a functional-style cast, or a keyword cast. /// /// Does not match implicit conversions. /// /// Note: the name "explicitCast" is chosen to match Clang's terminology, as /// Clang uses the term "cast" to apply to implicit conversions as well as to /// actual cast expressions. /// /// \see hasDestinationType. /// /// Example: matches all five of the casts in /// \code /// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42))))) /// \endcode /// but does not match the implicit conversion in /// \code /// long ell = 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr> explicitCastExpr; /// Matches the implicit cast nodes of Clang's AST. /// /// This matches many different places, including function call return value /// eliding, as well as any type conversions. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr> implicitCastExpr; /// Matches any cast nodes of Clang's AST. 
/// /// Example: castExpr() matches each of the following: /// \code /// (int) 3; /// const_cast<Expr *>(SubExpr); /// char c = 0; /// \endcode /// but does not match /// \code /// int i = (0); /// int k = 0; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr; /// Matches functional cast expressions /// /// Example: Matches Foo(bar); /// \code /// Foo f = bar; /// Foo g = (Foo) bar; /// Foo h = Foo(bar); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr> cxxFunctionalCastExpr; /// Matches functional cast expressions having N != 1 arguments /// /// Example: Matches Foo(bar, bar) /// \code /// Foo h = Foo(bar, bar); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr> cxxTemporaryObjectExpr; /// Matches predefined identifier expressions [C99 6.4.2.2]. /// /// Example: Matches __func__ /// \code /// printf("%s", __func__); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr> predefinedExpr; /// Matches C99 designated initializer expressions [C99 6.7.8]. /// /// Example: Matches { [2].y = 1.0, [0].x = 1.0 } /// \code /// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr> designatedInitExpr; /// Matches designated initializer expressions that contain /// a specific number of designators. /// /// Example: Given /// \code /// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 }; /// point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }; /// \endcode /// designatorCountIs(2) /// matches '{ [2].y = 1.0, [0].x = 1.0 }', /// but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'. AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) { return Node.size() == N; } /// Matches \c QualTypes in the clang AST. extern const internal::VariadicAllOfMatcher<QualType> qualType; /// Matches \c Types in the clang AST. 
extern const internal::VariadicAllOfMatcher<Type> type; /// Matches \c TypeLocs in the clang AST. extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc; /// Matches if any of the given matchers matches. /// /// Unlike \c anyOf, \c eachOf will generate a match result for each /// matching submatcher. /// /// For example, in: /// \code /// class A { int a; int b; }; /// \endcode /// The matcher: /// \code /// cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")), /// has(fieldDecl(hasName("b")).bind("v")))) /// \endcode /// will generate two results binding "v", the first of which binds /// the field declaration of \c a, the second the field declaration of /// \c b. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> eachOf; /// Matches if any of the given matchers matches. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> anyOf; /// Matches if all given matchers match. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> allOf; /// Matches any node regardless of the submatcher. /// /// However, \c optionally will retain any bindings generated by the submatcher. /// Useful when additional information which may or may not present about a main /// matching node is desired. /// /// For example, in: /// \code /// class Foo { /// int bar; /// } /// \endcode /// The matcher: /// \code /// cxxRecordDecl( /// optionally(has( /// fieldDecl(hasName("bar")).bind("var") /// ))).bind("record") /// \endcode /// will produce a result binding for both "record" and "var". /// The matcher will produce a "record" binding for even if there is no data /// member named "bar" in that class. 
/// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc<1, 1> optionally; /// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL) /// /// Given /// \code /// Foo x = bar; /// int y = sizeof(x) + alignof(x); /// \endcode /// unaryExprOrTypeTraitExpr() /// matches \c sizeof(x) and \c alignof(x) extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryExprOrTypeTraitExpr> unaryExprOrTypeTraitExpr; /// Matches any of the \p NodeMatchers with InnerMatchers nested within /// /// Given /// \code /// if (true); /// for (; true; ); /// \endcode /// with the matcher /// \code /// mapAnyOf(ifStmt, forStmt).with( /// hasCondition(cxxBoolLiteralExpr(equals(true))) /// ).bind("trueCond") /// \endcode /// matches the \c if and the \c for. It is equivalent to: /// \code /// auto trueCond = hasCondition(cxxBoolLiteralExpr(equals(true))); /// anyOf( /// ifStmt(trueCond).bind("trueCond"), /// forStmt(trueCond).bind("trueCond") /// ); /// \endcode /// /// The with() chain-call accepts zero or more matchers which are combined /// as-if with allOf() in each of the node matchers. /// Usable as: Any Matcher template <typename T, typename... U> auto mapAnyOf(internal::VariadicDynCastAllOfMatcher<T, U> const &...) { return internal::MapAnyOfHelper<U...>(); } /// Matches nodes which can be used with binary operators. 
/// /// The code /// \code /// var1 != var2; /// \endcode /// might be represented in the clang AST as a binaryOperator, a /// cxxOperatorCallExpr or a cxxRewrittenBinaryOperator, depending on /// /// * whether the types of var1 and var2 are fundamental (binaryOperator) or at /// least one is a class type (cxxOperatorCallExpr) /// * whether the code appears in a template declaration, if at least one of the /// vars is a dependent-type (binaryOperator) /// * whether the code relies on a rewritten binary operator, such as a /// spaceship operator or an inverted equality operator /// (cxxRewrittenBinaryOperator) /// /// This matcher elides details in places where the matchers for the nodes are /// compatible. /// /// Given /// \code /// binaryOperation( /// hasOperatorName("!="), /// hasLHS(expr().bind("lhs")), /// hasRHS(expr().bind("rhs")) /// ) /// \endcode /// matches each use of "!=" in: /// \code /// struct S{ /// bool operator!=(const S&) const; /// }; /// /// void foo() /// { /// 1 != 2; /// S() != S(); /// } /// /// template<typename T> /// void templ() /// { /// 1 != 2; /// T() != S(); /// } /// struct HasOpEq /// { /// bool operator==(const HasOpEq &) const; /// }; /// /// void inverse() /// { /// HasOpEq s1; /// HasOpEq s2; /// if (s1 != s2) /// return; /// } /// /// struct HasSpaceship /// { /// bool operator<=>(const HasOpEq &) const; /// }; /// /// void use_spaceship() /// { /// HasSpaceship s1; /// HasSpaceship s2; /// if (s1 != s2) /// return; /// } /// \endcode extern const internal::MapAnyOfMatcher<BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator> binaryOperation; /// Matches function calls and constructor calls /// /// Because CallExpr and CXXConstructExpr do not share a common /// base class with API accessing arguments etc, AST Matchers for code /// which should match both are typically duplicated. This matcher /// removes the need for duplication. 
/// /// Given code /// \code /// struct ConstructorTakesInt /// { /// ConstructorTakesInt(int i) {} /// }; /// /// void callTakesInt(int i) /// { /// } /// /// void doCall() /// { /// callTakesInt(42); /// } /// /// void doConstruct() /// { /// ConstructorTakesInt cti(42); /// } /// \endcode /// /// The matcher /// \code /// invocation(hasArgument(0, integerLiteral(equals(42)))) /// \endcode /// matches the expression in both doCall and doConstruct extern const internal::MapAnyOfMatcher<CallExpr, CXXConstructExpr> invocation; /// Matches unary expressions that have a specific type of argument. /// /// Given /// \code /// int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c); /// \endcode /// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")) /// matches \c sizeof(a) and \c alignof(c) AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType, internal::Matcher<QualType>, InnerMatcher) { const QualType ArgumentType = Node.getTypeOfArgument(); return InnerMatcher.matches(ArgumentType, Finder, Builder); } /// Matches unary expressions of a certain kind. /// /// Given /// \code /// int x; /// int s = sizeof(x) + alignof(x) /// \endcode /// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf)) /// matches \c sizeof(x) /// /// If the matcher is use from clang-query, UnaryExprOrTypeTrait parameter /// should be passed as a quoted string. e.g., ofKind("UETT_SizeOf"). AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) { return Node.getKind() == Kind; } /// Same as unaryExprOrTypeTraitExpr, but only matching /// alignof. inline internal::BindableMatcher<Stmt> alignOfExpr( const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) { return stmt(unaryExprOrTypeTraitExpr( allOf(anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf)), InnerMatcher))); } /// Same as unaryExprOrTypeTraitExpr, but only matching /// sizeof. 
inline internal::BindableMatcher<Stmt> sizeOfExpr( const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) { return stmt(unaryExprOrTypeTraitExpr( allOf(ofKind(UETT_SizeOf), InnerMatcher))); } /// Matches NamedDecl nodes that have the specified name. /// /// Supports specifying enclosing namespaces or classes by prefixing the name /// with '<enclosing>::'. /// Does not match typedefs of an underlying type with the given name. /// /// Example matches X (Name == "X") /// \code /// class X; /// \endcode /// /// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X") /// \code /// namespace a { namespace b { class X; } } /// \endcode inline internal::Matcher<NamedDecl> hasName(StringRef Name) { return internal::Matcher<NamedDecl>( new internal::HasNameMatcher({std::string(Name)})); } /// Matches NamedDecl nodes that have any of the specified names. /// /// This matcher is only provided as a performance optimization of hasName. /// \code /// hasAnyName(a, b, c) /// \endcode /// is equivalent to, but faster than /// \code /// anyOf(hasName(a), hasName(b), hasName(c)) /// \endcode extern const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef, internal::hasAnyNameFunc> hasAnyName; /// Matches NamedDecl nodes whose fully qualified names contain /// a substring matched by the given RegExp. /// /// Supports specifying enclosing namespaces or classes by /// prefixing the name with '<enclosing>::'. Does not match typedefs /// of an underlying type with the given name. /// /// Example matches X (regexp == "::X") /// \code /// class X; /// \endcode /// /// Example matches X (regexp is one of "::X", "^foo::.*X", among others) /// \code /// namespace foo { namespace bar { class X; } } /// \endcode AST_MATCHER_REGEX(NamedDecl, matchesName, RegExp) { std::string FullNameString = "::" + Node.getQualifiedNameAsString(); return RegExp->match(FullNameString); } /// Matches overloaded operator names. 
/// /// Matches overloaded operator names specified in strings without the /// "operator" prefix: e.g. "<<". /// /// Given: /// \code /// class A { int operator*(); }; /// const A &operator<<(const A &a, const A &b); /// A a; /// a << a; // <-- This matches /// \endcode /// /// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<"))) matches the /// specified line and /// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*"))) /// matches the declaration of \c A. /// /// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl> inline internal::PolymorphicMatcher< internal::HasOverloadedOperatorNameMatcher, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl), std::vector<std::string>> hasOverloadedOperatorName(StringRef Name) { return internal::PolymorphicMatcher< internal::HasOverloadedOperatorNameMatcher, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl), std::vector<std::string>>({std::string(Name)}); } /// Matches overloaded operator names. /// /// Matches overloaded operator names specified in strings without the /// "operator" prefix: e.g. "<<". /// /// hasAnyOverloadedOperatorName("+", "-") /// Is equivalent to /// anyOf(hasOverloadedOperatorName("+"), hasOverloadedOperatorName("-")) extern const internal::VariadicFunction< internal::PolymorphicMatcher<internal::HasOverloadedOperatorNameMatcher, AST_POLYMORPHIC_SUPPORTED_TYPES( CXXOperatorCallExpr, FunctionDecl), std::vector<std::string>>, StringRef, internal::hasAnyOverloadedOperatorNameFunc> hasAnyOverloadedOperatorName; /// Matches template-dependent, but known, member names. /// /// In template declarations, dependent members are not resolved and so can /// not be matched to particular named declarations. /// /// This matcher allows to match on the known name of members. 
/// /// Given /// \code /// template <typename T> /// struct S { /// void mem(); /// }; /// template <typename T> /// void x() { /// S<T> s; /// s.mem(); /// } /// \endcode /// \c cxxDependentScopeMemberExpr(hasMemberName("mem")) matches `s.mem()` AST_MATCHER_P(CXXDependentScopeMemberExpr, hasMemberName, std::string, N) { return Node.getMember().getAsString() == N; } /// Matches template-dependent, but known, member names against an already-bound /// node /// /// In template declarations, dependent members are not resolved and so can /// not be matched to particular named declarations. /// /// This matcher allows to match on the name of already-bound VarDecl, FieldDecl /// and CXXMethodDecl nodes. /// /// Given /// \code /// template <typename T> /// struct S { /// void mem(); /// }; /// template <typename T> /// void x() { /// S<T> s; /// s.mem(); /// } /// \endcode /// The matcher /// @code /// \c cxxDependentScopeMemberExpr( /// hasObjectExpression(declRefExpr(hasType(templateSpecializationType( /// hasDeclaration(classTemplateDecl(has(cxxRecordDecl(has( /// cxxMethodDecl(hasName("mem")).bind("templMem") /// ))))) /// )))), /// memberHasSameNameAsBoundNode("templMem") /// ) /// @endcode /// first matches and binds the @c mem member of the @c S template, then /// compares its name to the usage in @c s.mem() in the @c x function template AST_MATCHER_P(CXXDependentScopeMemberExpr, memberHasSameNameAsBoundNode, std::string, BindingID) { auto MemberName = Node.getMember().getAsString(); return Builder->removeBindings( [this, MemberName](const BoundNodesMap &Nodes) { const auto &BN = Nodes.getNode(this->BindingID); if (const auto *ND = BN.get<NamedDecl>()) { if (!isa<FieldDecl, CXXMethodDecl, VarDecl>(ND)) return true; return ND->getName() != MemberName; } return true; }); } /// Matches C++ classes that are directly or indirectly derived from a class /// matching \c Base, or Objective-C classes that directly or indirectly /// subclass a class matching \c Base. 
/// /// Note that a class is not considered to be derived from itself. /// /// Example matches Y, Z, C (Base == hasName("X")) /// \code /// class X; /// class Y : public X {}; // directly derived /// class Z : public Y {}; // indirectly derived /// typedef X A; /// typedef A B; /// class C : public B {}; // derived from a typedef of X /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("X")): /// \code /// class Foo; /// typedef Foo X; /// class Bar : public Foo {}; // derived from a type that X is a typedef of /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("NSObject")) /// \code /// @interface NSObject @end /// @interface Bar : NSObject @end /// \endcode /// /// Usable as: Matcher<CXXRecordDecl>, Matcher<ObjCInterfaceDecl> AST_POLYMORPHIC_MATCHER_P( isDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), internal::Matcher<NamedDecl>, Base) { // Check if the node is a C++ struct/union/class. if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/false); // The node must be an Objective-C class. const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder, /*Directly=*/false); } /// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)). AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), std::string, BaseName, 1) { if (BaseName.empty()) return false; const auto M = isDerivedFrom(hasName(BaseName)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Matches C++ classes that have a direct or indirect base matching \p /// BaseSpecMatcher. 
/// /// Example: /// matcher hasAnyBase(hasType(cxxRecordDecl(hasName("SpecialBase")))) /// \code /// class Foo; /// class Bar : Foo {}; /// class Baz : Bar {}; /// class SpecialBase; /// class Proxy : SpecialBase {}; // matches Proxy /// class IndirectlyDerived : Proxy {}; //matches IndirectlyDerived /// \endcode /// // FIXME: Refactor this and isDerivedFrom to reuse implementation. AST_MATCHER_P(CXXRecordDecl, hasAnyBase, internal::Matcher<CXXBaseSpecifier>, BaseSpecMatcher) { return internal::matchesAnyBase(Node, BaseSpecMatcher, Finder, Builder); } /// Matches C++ classes that have a direct base matching \p BaseSpecMatcher. /// /// Example: /// matcher hasDirectBase(hasType(cxxRecordDecl(hasName("SpecialBase")))) /// \code /// class Foo; /// class Bar : Foo {}; /// class Baz : Bar {}; /// class SpecialBase; /// class Proxy : SpecialBase {}; // matches Proxy /// class IndirectlyDerived : Proxy {}; // doesn't match /// \endcode AST_MATCHER_P(CXXRecordDecl, hasDirectBase, internal::Matcher<CXXBaseSpecifier>, BaseSpecMatcher) { return Node.hasDefinition() && llvm::any_of(Node.bases(), [&](const CXXBaseSpecifier &Base) { return BaseSpecMatcher.matches(Base, Finder, Builder); }); } /// Similar to \c isDerivedFrom(), but also matches classes that directly /// match \c Base. AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isSameOrDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), internal::Matcher<NamedDecl>, Base, 0) { const auto M = anyOf(Base, isDerivedFrom(Base)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Overloaded method as shortcut for /// \c isSameOrDerivedFrom(hasName(...)). 
AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isSameOrDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), std::string, BaseName, 1) { if (BaseName.empty()) return false; const auto M = isSameOrDerivedFrom(hasName(BaseName)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Matches C++ or Objective-C classes that are directly derived from a class /// matching \c Base. /// /// Note that a class is not considered to be derived from itself. /// /// Example matches Y, C (Base == hasName("X")) /// \code /// class X; /// class Y : public X {}; // directly derived /// class Z : public Y {}; // indirectly derived /// typedef X A; /// typedef A B; /// class C : public B {}; // derived from a typedef of X /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("X")): /// \code /// class Foo; /// typedef Foo X; /// class Bar : public Foo {}; // derived from a type that X is a typedef of /// \endcode AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isDirectlyDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), internal::Matcher<NamedDecl>, Base, 0) { // Check if the node is a C++ struct/union/class. if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/true); // The node must be an Objective-C class. const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder, /*Directly=*/true); } /// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)). 
AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isDirectlyDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), std::string, BaseName, 1) { if (BaseName.empty()) return false; const auto M = isDirectlyDerivedFrom(hasName(BaseName)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Matches the first method of a class or struct that satisfies \c /// InnerMatcher. /// /// Given: /// \code /// class A { void func(); }; /// class B { void member(); }; /// \endcode /// /// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of /// \c A but not \c B. AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>, InnerMatcher) { BoundNodesTreeBuilder Result(*Builder); auto MatchIt = matchesFirstInPointerRange(InnerMatcher, Node.method_begin(), Node.method_end(), Finder, &Result); if (MatchIt == Node.method_end()) return false; if (Finder->isTraversalIgnoringImplicitNodes() && (*MatchIt)->isImplicit()) return false; *Builder = std::move(Result); return true; } /// Matches the generated class of lambda expressions. /// /// Given: /// \code /// auto x = []{}; /// \endcode /// /// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of /// \c decltype(x) AST_MATCHER(CXXRecordDecl, isLambda) { return Node.isLambda(); } /// Matches AST nodes that have child AST nodes that match the /// provided matcher. /// /// Example matches X, Y /// (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X"))) /// \code /// class X {}; // Matches X, because X::X is a class of name X inside X. /// class Y { class X {}; }; /// class Z { class Y { class X {}; }; }; // Does not match Z. /// \endcode /// /// ChildT must be an AST base type. 
/// /// Usable as: Any Matcher /// Note that has is direct matcher, so it also matches things like implicit /// casts and paren casts. If you are matching with expr then you should /// probably consider using ignoringParenImpCasts like: /// has(ignoringParenImpCasts(expr())). extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Z /// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; // Matches X, because X::X is a class of name X inside X. /// class Y { class X {}; }; /// class Z { class Y { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasDescendantMatcher> hasDescendant; /// Matches AST nodes that have child AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Y::X, Z::Y, Z::Y::X /// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X"))) /// \code /// class X {}; /// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X /// // inside Y. /// class Z { class Y { class X {}; }; }; // Does not match Z. /// \endcode /// /// ChildT must be an AST base type. /// /// As opposed to 'has', 'forEach' will cause a match for each result that /// matches instead of only on the first one. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher> forEach; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. /// /// Example matches X, A, A::X, B, B::C, B::C::X /// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; /// class A { class X {}; }; // Matches A, because A::X is a class of name /// // X inside A. 
/// class B { class C { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for /// each result that matches instead of only on the first one. /// /// Note: Recursively combined ForEachDescendant can cause many matches: /// cxxRecordDecl(forEachDescendant(cxxRecordDecl( /// forEachDescendant(cxxRecordDecl()) /// ))) /// will match 10 times (plus injected class name matches) on: /// \code /// class A { class B { class C { class D { class E {}; }; }; }; }; /// \endcode /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::ForEachDescendantMatcher> forEachDescendant; /// Matches if the node or any descendant matches. /// /// Generates results for each match. /// /// For example, in: /// \code /// class A { class B {}; class C {}; }; /// \endcode /// The matcher: /// \code /// cxxRecordDecl(hasName("::A"), /// findAll(cxxRecordDecl(isDefinition()).bind("m"))) /// \endcode /// will generate results for \c A, \c B and \c C. /// /// Usable as: Any Matcher template <typename T> internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) { return eachOf(Matcher, forEachDescendant(Matcher)); } /// Matches AST nodes that have a parent that matches the provided /// matcher. /// /// Given /// \code /// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } } /// \endcode /// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }". /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasParentMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasParent; /// Matches AST nodes that have an ancestor that matches the provided /// matcher. 
/// /// Given /// \code /// void f() { if (true) { int x = 42; } } /// void g() { for (;;) { int x = 43; } } /// \endcode /// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasAncestorMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasAncestor; /// Matches if the provided matcher does not match. /// /// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X")))) /// \code /// class X {}; /// class Y {}; /// \endcode /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc<1, 1> unless; /// Matches a node if the declaration associated with that node /// matches the given matcher. /// /// The associated declaration is: /// - for type nodes, the declaration of the underlying type /// - for CallExpr, the declaration of the callee /// - for MemberExpr, the declaration of the referenced member /// - for CXXConstructExpr, the declaration of the constructor /// - for CXXNewExpr, the declaration of the operator new /// - for ObjCIvarExpr, the declaration of the ivar /// /// For type nodes, hasDeclaration will generally match the declaration of the /// sugared type. Given /// \code /// class X {}; /// typedef X Y; /// Y y; /// \endcode /// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the /// typedefDecl. A common use case is to match the underlying, desugared type. /// This can be achieved by using the hasUnqualifiedDesugaredType matcher: /// \code /// varDecl(hasType(hasUnqualifiedDesugaredType( /// recordType(hasDeclaration(decl()))))) /// \endcode /// In this matcher, the decl will match the CXXRecordDecl of class X. 
/// /// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>, /// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>, /// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>, /// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>, /// Matcher<TagType>, Matcher<TemplateSpecializationType>, /// Matcher<TemplateTypeParmType>, Matcher<TypedefType>, /// Matcher<UnresolvedUsingType> inline internal::PolymorphicMatcher< internal::HasDeclarationMatcher, void(internal::HasDeclarationSupportedTypes), internal::Matcher<Decl>> hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) { return internal::PolymorphicMatcher< internal::HasDeclarationMatcher, void(internal::HasDeclarationSupportedTypes), internal::Matcher<Decl>>( InnerMatcher); } /// Matches a \c NamedDecl whose underlying declaration matches the given /// matcher. /// /// Given /// \code /// namespace N { template<class T> void f(T t); } /// template <class T> void g() { using N::f; f(T()); } /// \endcode /// \c unresolvedLookupExpr(hasAnyDeclaration( /// namedDecl(hasUnderlyingDecl(hasName("::N::f"))))) /// matches the use of \c f in \c g() . AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>, InnerMatcher) { const NamedDecl *UnderlyingDecl = Node.getUnderlyingDecl(); return UnderlyingDecl != nullptr && InnerMatcher.matches(*UnderlyingDecl, Finder, Builder); } /// Matches on the implicit object argument of a member call expression, after /// stripping off any parentheses or implicit casts. /// /// Given /// \code /// class Y { public: void m(); }; /// Y g(); /// class X : public Y {}; /// void z(Y y, X x) { y.m(); (g()).m(); x.m(); } /// \endcode /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y"))))) /// matches `y.m()` and `(g()).m()`. /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X"))))) /// matches `x.m()`. /// cxxMemberCallExpr(on(callExpr())) /// matches `(g()).m()`. 
/// /// FIXME: Overload to allow directly matching types? AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>, InnerMatcher) { const Expr *ExprNode = Node.getImplicitObjectArgument() ->IgnoreParenImpCasts(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches on the receiver of an ObjectiveC Message expression. /// /// Example /// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *"))); /// matches the [webView ...] message invocation. /// \code /// NSString *webViewJavaScript = ... /// UIWebView *webView = ... /// [webView stringByEvaluatingJavaScriptFromString:webViewJavascript]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>, InnerMatcher) { const QualType TypeDecl = Node.getReceiverType(); return InnerMatcher.matches(TypeDecl, Finder, Builder); } /// Returns true when the Objective-C method declaration is a class method. /// /// Example /// matcher = objcMethodDecl(isClassMethod()) /// matches /// \code /// @interface I + (void)foo; @end /// \endcode /// but not /// \code /// @interface I - (void)bar; @end /// \endcode AST_MATCHER(ObjCMethodDecl, isClassMethod) { return Node.isClassMethod(); } /// Returns true when the Objective-C method declaration is an instance method. /// /// Example /// matcher = objcMethodDecl(isInstanceMethod()) /// matches /// \code /// @interface I - (void)bar; @end /// \endcode /// but not /// \code /// @interface I + (void)foo; @end /// \endcode AST_MATCHER(ObjCMethodDecl, isInstanceMethod) { return Node.isInstanceMethod(); } /// Returns true when the Objective-C message is sent to a class. 
/// /// Example /// matcher = objcMessageExpr(isClassMessage()) /// matches /// \code /// [NSString stringWithFormat:@"format"]; /// \endcode /// but not /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode AST_MATCHER(ObjCMessageExpr, isClassMessage) { return Node.isClassMessage(); } /// Returns true when the Objective-C message is sent to an instance. /// /// Example /// matcher = objcMessageExpr(isInstanceMessage()) /// matches /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode /// but not /// \code /// [NSString stringWithFormat:@"format"]; /// \endcode AST_MATCHER(ObjCMessageExpr, isInstanceMessage) { return Node.isInstanceMessage(); } /// Matches if the Objective-C message is sent to an instance, /// and the inner matcher matches on that instance. /// /// For example the method call in /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode /// is matched by /// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x")))))) AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>, InnerMatcher) { const Expr *ReceiverNode = Node.getInstanceReceiver(); return (ReceiverNode != nullptr && InnerMatcher.matches(*ReceiverNode->IgnoreParenImpCasts(), Finder, Builder)); } /// Matches when BaseName == Selector.getAsString() /// /// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:")); /// matches the outer message expr in the code below, but NOT the message /// invocation for self.bodyView. 
/// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) { Selector Sel = Node.getSelector(); return BaseName.compare(Sel.getAsString()) == 0; } /// Matches when at least one of the supplied string equals to the /// Selector.getAsString() /// /// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:")); /// matches both of the expressions below: /// \code /// [myObj methodA:argA]; /// [myObj methodB:argB]; /// \endcode extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>, StringRef, internal::hasAnySelectorFunc> hasAnySelector; /// Matches ObjC selectors whose name contains /// a substring matched by the given RegExp. /// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?")); /// matches the outer message expr in the code below, but NOT the message /// invocation for self.bodyView. /// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_REGEX(ObjCMessageExpr, matchesSelector, RegExp) { std::string SelectorString = Node.getSelector().getAsString(); return RegExp->match(SelectorString); } /// Matches when the selector is the empty selector /// /// Matches only when the selector of the objCMessageExpr is NULL. This may /// represent an error condition in the tree! AST_MATCHER(ObjCMessageExpr, hasNullSelector) { return Node.getSelector().isNull(); } /// Matches when the selector is a Unary Selector /// /// matcher = objCMessageExpr(matchesSelector(hasUnarySelector()); /// matches self.bodyView in the code below, but NOT the outer message /// invocation of "loadHTMLString:baseURL:". 
/// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER(ObjCMessageExpr, hasUnarySelector) { return Node.getSelector().isUnarySelector(); } /// Matches when the selector is a keyword selector /// /// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame /// message expression in /// /// \code /// UIWebView *webView = ...; /// CGRect bodyFrame = webView.frame; /// bodyFrame.size.height = self.bodyContentHeight; /// webView.frame = bodyFrame; /// // ^---- matches here /// \endcode AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) { return Node.getSelector().isKeywordSelector(); } /// Matches when the selector has the specified number of arguments /// /// matcher = objCMessageExpr(numSelectorArgs(0)); /// matches self.bodyView in the code below /// /// matcher = objCMessageExpr(numSelectorArgs(2)); /// matches the invocation of "loadHTMLString:baseURL:" but not that /// of self.bodyView /// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) { return Node.getSelector().getNumArgs() == N; } /// Matches if the call expression's callee expression matches. /// /// Given /// \code /// class Y { void x() { this->x(); x(); Y y; y.x(); } }; /// void f() { f(); } /// \endcode /// callExpr(callee(expr())) /// matches this->x(), x(), y.x(), f() /// with callee(...) /// matching this->x, x, y.x, f respectively /// /// Note: Callee cannot take the more general internal::Matcher<Expr> /// because this introduces ambiguous overloads with calls to Callee taking a /// internal::Matcher<Decl>, as the matcher hierarchy is purely /// implemented in terms of implicit casts. AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>, InnerMatcher) { const Expr *ExprNode = Node.getCallee(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches if the call expression's callee's declaration matches the /// given matcher. 
/// /// Example matches y.x() (matcher = callExpr(callee( /// cxxMethodDecl(hasName("x"))))) /// \code /// class Y { public: void x(); }; /// void z() { Y y; y.x(); } /// \endcode AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher, 1) { return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder); } /// Matches if the expression's or declaration's type matches a type /// matcher. /// /// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X"))))) /// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X"))))) /// and U (matcher = typedefDecl(hasType(asString("int"))) /// and friend class X (matcher = friendDecl(hasType("X")) /// and public virtual X (matcher = cxxBaseSpecifier(hasType( /// asString("class X"))) /// \code /// class X {}; /// void y(X &x) { x; X z; } /// typedef int U; /// class Y { friend class X; }; /// class Z : public virtual X {}; /// \endcode AST_POLYMORPHIC_MATCHER_P_OVERLOAD( hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl, ValueDecl, CXXBaseSpecifier), internal::Matcher<QualType>, InnerMatcher, 0) { QualType QT = internal::getUnderlyingType(Node); if (!QT.isNull()) return InnerMatcher.matches(QT, Finder, Builder); return false; } /// Overloaded to match the declaration of the expression's or value /// declaration's type. /// /// In case of a value declaration (for example a variable declaration), /// this resolves one layer of indirection. For example, in the value /// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of /// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the /// declaration of x. 
/// /// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X"))))) /// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X"))))) /// and friend class X (matcher = friendDecl(hasType("X")) /// and public virtual X (matcher = cxxBaseSpecifier(hasType( /// cxxRecordDecl(hasName("X")))) /// \code /// class X {}; /// void y(X &x) { x; X z; } /// class Y { friend class X; }; /// class Z : public virtual X {}; /// \endcode /// /// Example matches class Derived /// (matcher = cxxRecordDecl(hasAnyBase(hasType(cxxRecordDecl(hasName("Base")))))) /// \code /// class Base {}; /// class Derived : Base {}; /// \endcode /// /// Usable as: Matcher<Expr>, Matcher<FriendDecl>, Matcher<ValueDecl>, /// Matcher<CXXBaseSpecifier> AST_POLYMORPHIC_MATCHER_P_OVERLOAD( hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl, CXXBaseSpecifier), internal::Matcher<Decl>, InnerMatcher, 1) { QualType QT = internal::getUnderlyingType(Node); if (!QT.isNull()) return qualType(hasDeclaration(InnerMatcher)).matches(QT, Finder, Builder); return false; } /// Matches if the type location of a node matches the inner matcher. 
/// /// Examples: /// \code /// int x; /// \endcode /// declaratorDecl(hasTypeLoc(loc(asString("int")))) /// matches int x /// /// \code /// auto x = int(3); /// \code /// cxxTemporaryObjectExpr(hasTypeLoc(loc(asString("int")))) /// matches int(3) /// /// \code /// struct Foo { Foo(int, int); }; /// auto x = Foo(1, 2); /// \code /// cxxFunctionalCastExpr(hasTypeLoc(loc(asString("struct Foo")))) /// matches Foo(1, 2) /// /// Usable as: Matcher<BlockDecl>, Matcher<CXXBaseSpecifier>, /// Matcher<CXXCtorInitializer>, Matcher<CXXFunctionalCastExpr>, /// Matcher<CXXNewExpr>, Matcher<CXXTemporaryObjectExpr>, /// Matcher<CXXUnresolvedConstructExpr>, /// Matcher<ClassTemplateSpecializationDecl>, Matcher<CompoundLiteralExpr>, /// Matcher<DeclaratorDecl>, Matcher<ExplicitCastExpr>, /// Matcher<ObjCPropertyDecl>, Matcher<TemplateArgumentLoc>, /// Matcher<TypedefNameDecl> AST_POLYMORPHIC_MATCHER_P( hasTypeLoc, AST_POLYMORPHIC_SUPPORTED_TYPES( BlockDecl, CXXBaseSpecifier, CXXCtorInitializer, CXXFunctionalCastExpr, CXXNewExpr, CXXTemporaryObjectExpr, CXXUnresolvedConstructExpr, ClassTemplateSpecializationDecl, CompoundLiteralExpr, DeclaratorDecl, ExplicitCastExpr, ObjCPropertyDecl, TemplateArgumentLoc, TypedefNameDecl), internal::Matcher<TypeLoc>, Inner) { TypeSourceInfo *source = internal::GetTypeSourceInfo(Node); if (source == nullptr) { // This happens for example for implicit destructors. return false; } return Inner.matches(source->getTypeLoc(), Finder, Builder); } /// Matches if the matched type is represented by the given string. /// /// Given /// \code /// class Y { public: void x(); }; /// void z() { Y* y; y->x(); } /// \endcode /// cxxMemberCallExpr(on(hasType(asString("class Y *")))) /// matches y->x() AST_MATCHER_P(QualType, asString, std::string, Name) { return Name == Node.getAsString(); } /// Matches if the matched type is a pointer type and the pointee type /// matches the specified matcher. 
/// /// Example matches y->x() /// (matcher = cxxMemberCallExpr(on(hasType(pointsTo /// cxxRecordDecl(hasName("Y"))))))) /// \code /// class Y { public: void x(); }; /// void z() { Y *y; y->x(); } /// \endcode AST_MATCHER_P( QualType, pointsTo, internal::Matcher<QualType>, InnerMatcher) { return (!Node.isNull() && Node->isAnyPointerType() && InnerMatcher.matches(Node->getPointeeType(), Finder, Builder)); } /// Overloaded to match the pointee type's declaration. AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>, InnerMatcher, 1) { return pointsTo(qualType(hasDeclaration(InnerMatcher))) .matches(Node, Finder, Builder); } /// Matches if the matched type matches the unqualified desugared /// type of the matched node. /// /// For example, in: /// \code /// class A {}; /// using B = A; /// \endcode /// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches /// both B and A. AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>, InnerMatcher) { return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder, Builder); } /// Matches if the matched type is a reference type and the referenced /// type matches the specified matcher. /// /// Example matches X &x and const X &y /// (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X")))))) /// \code /// class X { /// void a(X b) { /// X &x = b; /// const X &y = b; /// } /// }; /// \endcode AST_MATCHER_P(QualType, references, internal::Matcher<QualType>, InnerMatcher) { return (!Node.isNull() && Node->isReferenceType() && InnerMatcher.matches(Node->getPointeeType(), Finder, Builder)); } /// Matches QualTypes whose canonical type matches InnerMatcher. /// /// Given: /// \code /// typedef int &int_ref; /// int a; /// int_ref b = a; /// \endcode /// /// \c varDecl(hasType(qualType(referenceType()))))) will not match the /// declaration of b but \c /// varDecl(hasType(qualType(hasCanonicalType(referenceType())))))) does. 
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>, InnerMatcher) { if (Node.isNull()) return false; return InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder); } /// Overloaded to match the referenced type's declaration. AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>, InnerMatcher, 1) { return references(qualType(hasDeclaration(InnerMatcher))) .matches(Node, Finder, Builder); } /// Matches on the implicit object argument of a member call expression. Unlike /// `on`, matches the argument directly without stripping away anything. /// /// Given /// \code /// class Y { public: void m(); }; /// Y g(); /// class X : public Y { void g(); }; /// void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); } /// \endcode /// cxxMemberCallExpr(onImplicitObjectArgument(hasType( /// cxxRecordDecl(hasName("Y"))))) /// matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`. /// cxxMemberCallExpr(on(callExpr())) /// does not match `(g()).m()`, because the parens are not ignored. /// /// FIXME: Overload to allow directly matching types? AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument, internal::Matcher<Expr>, InnerMatcher) { const Expr *ExprNode = Node.getImplicitObjectArgument(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches if the type of the expression's implicit object argument either /// matches the InnerMatcher, or is a pointer to a type that matches the /// InnerMatcher. /// /// Given /// \code /// class Y { public: void m(); }; /// class X : public Y { void g(); }; /// void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); } /// \endcode /// cxxMemberCallExpr(thisPointerType(hasDeclaration( /// cxxRecordDecl(hasName("Y"))))) /// matches `y.m()`, `p->m()` and `x.m()`. /// cxxMemberCallExpr(thisPointerType(hasDeclaration( /// cxxRecordDecl(hasName("X"))))) /// matches `x.g()`. 
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType, internal::Matcher<QualType>, InnerMatcher, 0) { return onImplicitObjectArgument( anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher)))) .matches(Node, Finder, Builder); } /// Overloaded to match the type's declaration. AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType, internal::Matcher<Decl>, InnerMatcher, 1) { return onImplicitObjectArgument( anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher)))) .matches(Node, Finder, Builder); } /// Matches a DeclRefExpr that refers to a declaration that matches the /// specified matcher. /// /// Example matches x in if(x) /// (matcher = declRefExpr(to(varDecl(hasName("x"))))) /// \code /// bool x; /// if (x) {} /// \endcode AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>, InnerMatcher) { const Decl *DeclNode = Node.getDecl(); return (DeclNode != nullptr && InnerMatcher.matches(*DeclNode, Finder, Builder)); } /// Matches a \c DeclRefExpr that refers to a declaration through a /// specific using shadow declaration. /// /// Given /// \code /// namespace a { void f() {} } /// using a::f; /// void g() { /// f(); // Matches this .. /// a::f(); // .. but not this. /// } /// \endcode /// declRefExpr(throughUsingDecl(anything())) /// matches \c f() AST_MATCHER_P(DeclRefExpr, throughUsingDecl, internal::Matcher<UsingShadowDecl>, InnerMatcher) { const NamedDecl *FoundDecl = Node.getFoundDecl(); if (const UsingShadowDecl *UsingDecl = dyn_cast<UsingShadowDecl>(FoundDecl)) return InnerMatcher.matches(*UsingDecl, Finder, Builder); return false; } /// Matches an \c OverloadExpr if any of the declarations in the set of /// overloads matches the given matcher. 
/// /// Given /// \code /// template <typename T> void foo(T); /// template <typename T> void bar(T); /// template <typename T> void baz(T t) { /// foo(t); /// bar(t); /// } /// \endcode /// unresolvedLookupExpr(hasAnyDeclaration( /// functionTemplateDecl(hasName("foo")))) /// matches \c foo in \c foo(t); but not \c bar in \c bar(t); AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(), Node.decls_end(), Finder, Builder) != Node.decls_end(); } /// Matches the Decl of a DeclStmt which has a single declaration. /// /// Given /// \code /// int a, b; /// int c; /// \endcode /// declStmt(hasSingleDecl(anything())) /// matches 'int c;' but not 'int a, b;'. AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) { if (Node.isSingleDecl()) { const Decl *FoundDecl = Node.getSingleDecl(); return InnerMatcher.matches(*FoundDecl, Finder, Builder); } return false; } /// Matches a variable declaration that has an initializer expression /// that matches the given matcher. /// /// Example matches x (matcher = varDecl(hasInitializer(callExpr()))) /// \code /// bool y() { return true; } /// bool x = y(); /// \endcode AST_MATCHER_P( VarDecl, hasInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr *Initializer = Node.getAnyInitializer(); return (Initializer != nullptr && InnerMatcher.matches(*Initializer, Finder, Builder)); } /// \brief Matches a static variable with local scope. /// /// Example matches y (matcher = varDecl(isStaticLocal())) /// \code /// void f() { /// int x; /// static int y; /// } /// static int z; /// \endcode AST_MATCHER(VarDecl, isStaticLocal) { return Node.isStaticLocal(); } /// Matches a variable declaration that has function scope and is a /// non-static local variable. 
/// /// Example matches x (matcher = varDecl(hasLocalStorage()) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode AST_MATCHER(VarDecl, hasLocalStorage) { return Node.hasLocalStorage(); } /// Matches a variable declaration that does not have local storage. /// /// Example matches y and z (matcher = varDecl(hasGlobalStorage()) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode AST_MATCHER(VarDecl, hasGlobalStorage) { return Node.hasGlobalStorage(); } /// Matches a variable declaration that has automatic storage duration. /// /// Example matches x, but not y, z, or a. /// (matcher = varDecl(hasAutomaticStorageDuration()) /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// \endcode AST_MATCHER(VarDecl, hasAutomaticStorageDuration) { return Node.getStorageDuration() == SD_Automatic; } /// Matches a variable declaration that has static storage duration. /// It includes the variable declared at namespace scope and those declared /// with "static" and "extern" storage class specifiers. /// /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// static int b; /// extern int c; /// varDecl(hasStaticStorageDuration()) /// matches the function declaration y, a, b and c. /// \endcode AST_MATCHER(VarDecl, hasStaticStorageDuration) { return Node.getStorageDuration() == SD_Static; } /// Matches a variable declaration that has thread storage duration. /// /// Example matches z, but not x, z, or a. /// (matcher = varDecl(hasThreadStorageDuration()) /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// \endcode AST_MATCHER(VarDecl, hasThreadStorageDuration) { return Node.getStorageDuration() == SD_Thread; } /// Matches a variable declaration that is an exception variable from /// a C++ catch block, or an Objective-C \@catch statement. 
/// /// Example matches x (matcher = varDecl(isExceptionVariable()) /// \code /// void f(int y) { /// try { /// } catch (int x) { /// } /// } /// \endcode AST_MATCHER(VarDecl, isExceptionVariable) { return Node.isExceptionVariable(); } /// Checks that a call expression or a constructor call expression has /// a specific number of arguments (including absent default arguments). /// /// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2))) /// \code /// void f(int x, int y); /// f(0, 0); /// \endcode AST_POLYMORPHIC_MATCHER_P(argumentCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES( CallExpr, CXXConstructExpr, CXXUnresolvedConstructExpr, ObjCMessageExpr), unsigned, N) { unsigned NumArgs = Node.getNumArgs(); if (!Finder->isTraversalIgnoringImplicitNodes()) return NumArgs == N; while (NumArgs) { if (!isa<CXXDefaultArgExpr>(Node.getArg(NumArgs - 1))) break; --NumArgs; } return NumArgs == N; } /// Matches the n'th argument of a call expression or a constructor /// call expression. /// /// Example matches y in x(y) /// (matcher = callExpr(hasArgument(0, declRefExpr()))) /// \code /// void x(int) { int y; x(y); } /// \endcode AST_POLYMORPHIC_MATCHER_P2(hasArgument, AST_POLYMORPHIC_SUPPORTED_TYPES( CallExpr, CXXConstructExpr, CXXUnresolvedConstructExpr, ObjCMessageExpr), unsigned, N, internal::Matcher<Expr>, InnerMatcher) { if (N >= Node.getNumArgs()) return false; const Expr *Arg = Node.getArg(N); if (Finder->isTraversalIgnoringImplicitNodes() && isa<CXXDefaultArgExpr>(Arg)) return false; return InnerMatcher.matches(*Arg->IgnoreParenImpCasts(), Finder, Builder); } /// Matches the n'th item of an initializer list expression. /// /// Example matches y. /// (matcher = initListExpr(hasInit(0, expr()))) /// \code /// int x{y}. 
/// \endcode AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { return N < Node.getNumInits() && InnerMatcher.matches(*Node.getInit(N), Finder, Builder); } /// Matches declaration statements that contain a specific number of /// declarations. /// /// Example: Given /// \code /// int a, b; /// int c; /// int d = 2, e; /// \endcode /// declCountIs(2) /// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'. AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) { return std::distance(Node.decl_begin(), Node.decl_end()) == (ptrdiff_t)N; } /// Matches the n'th declaration of a declaration statement. /// /// Note that this does not work for global declarations because the AST /// breaks up multiple-declaration DeclStmt's into multiple single-declaration /// DeclStmt's. /// Example: Given non-global declarations /// \code /// int a, b = 0; /// int c; /// int d = 2, e; /// \endcode /// declStmt(containsDeclaration( /// 0, varDecl(hasInitializer(anything())))) /// matches only 'int d = 2, e;', and /// declStmt(containsDeclaration(1, varDecl())) /// \code /// matches 'int a, b = 0' as well as 'int d = 2, e;' /// but 'int c;' is not matched. /// \endcode AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N, internal::Matcher<Decl>, InnerMatcher) { const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end()); if (N >= NumDecls) return false; DeclStmt::const_decl_iterator Iterator = Node.decl_begin(); std::advance(Iterator, N); return InnerMatcher.matches(**Iterator, Finder, Builder); } /// Matches a C++ catch statement that has a catch-all handler. /// /// Given /// \code /// try { /// // ... /// } catch (int) { /// // ... /// } catch (...) { /// // ... /// } /// \endcode /// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int). AST_MATCHER(CXXCatchStmt, isCatchAll) { return Node.getExceptionDecl() == nullptr; } /// Matches a constructor initializer. 
/// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl( /// hasAnyConstructorInitializer(anything()) /// ))) /// record matches Foo, hasAnyConstructorInitializer matches foo_(1) AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { auto MatchIt = matchesFirstInPointerRange(InnerMatcher, Node.init_begin(), Node.init_end(), Finder, Builder); if (MatchIt == Node.init_end()) return false; return (*MatchIt)->isWritten() || !Finder->isTraversalIgnoringImplicitNodes(); } /// Matches the field declaration of a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// forField(hasName("foo_")))))) /// matches Foo /// with forField matching foo_ AST_MATCHER_P(CXXCtorInitializer, forField, internal::Matcher<FieldDecl>, InnerMatcher) { const FieldDecl *NodeAsDecl = Node.getAnyMember(); return (NodeAsDecl != nullptr && InnerMatcher.matches(*NodeAsDecl, Finder, Builder)); } /// Matches the initializer expression of a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// withInitializer(integerLiteral(equals(1))))))) /// matches Foo /// with withInitializer matching (1) AST_MATCHER_P(CXXCtorInitializer, withInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr* NodeAsExpr = Node.getInit(); return (NodeAsExpr != nullptr && InnerMatcher.matches(*NodeAsExpr, Finder, Builder)); } /// Matches a constructor initializer if it is explicitly written in /// code (as opposed to implicitly added by the compiler). 
/// /// Given /// \code /// struct Foo { /// Foo() { } /// Foo(int) : foo_("A") { } /// string foo_; /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten())) /// will match Foo(int), but not Foo() AST_MATCHER(CXXCtorInitializer, isWritten) { return Node.isWritten(); } /// Matches a constructor initializer if it is initializing a base, as /// opposed to a member. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer())) /// will match E(), but not match D(int). AST_MATCHER(CXXCtorInitializer, isBaseInitializer) { return Node.isBaseInitializer(); } /// Matches a constructor initializer if it is initializing a member, as /// opposed to a base. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer())) /// will match D(int), but not match E(). AST_MATCHER(CXXCtorInitializer, isMemberInitializer) { return Node.isMemberInitializer(); } /// Matches any argument of a call expression or a constructor call /// expression, or an ObjC-message-send expression. /// /// Given /// \code /// void x(int, int, int) { int y; x(1, y, 42); } /// \endcode /// callExpr(hasAnyArgument(declRefExpr())) /// matches x(1, y, 42) /// with hasAnyArgument(...) 
/// matching y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// void foo(I *i) { [i f:12]; } /// \endcode /// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12)))) /// matches [i f:12] AST_POLYMORPHIC_MATCHER_P(hasAnyArgument, AST_POLYMORPHIC_SUPPORTED_TYPES( CallExpr, CXXConstructExpr, CXXUnresolvedConstructExpr, ObjCMessageExpr), internal::Matcher<Expr>, InnerMatcher) { for (const Expr *Arg : Node.arguments()) { if (Finder->isTraversalIgnoringImplicitNodes() && isa<CXXDefaultArgExpr>(Arg)) break; BoundNodesTreeBuilder Result(*Builder); if (InnerMatcher.matches(*Arg, Finder, &Result)) { *Builder = std::move(Result); return true; } } return false; } /// Matches any capture of a lambda expression. /// /// Given /// \code /// void foo() { /// int x; /// auto f = [x](){}; /// } /// \endcode /// lambdaExpr(hasAnyCapture(anything())) /// matches [x](){}; AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<VarDecl>, InnerMatcher, 0) { for (const LambdaCapture &Capture : Node.captures()) { if (Capture.capturesVariable()) { BoundNodesTreeBuilder Result(*Builder); if (InnerMatcher.matches(*Capture.getCapturedVar(), Finder, &Result)) { *Builder = std::move(Result); return true; } } } return false; } /// Matches any capture of 'this' in a lambda expression. /// /// Given /// \code /// struct foo { /// void bar() { /// auto f = [this](){}; /// } /// } /// \endcode /// lambdaExpr(hasAnyCapture(cxxThisExpr())) /// matches [this](){}; AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<CXXThisExpr>, InnerMatcher, 1) { return llvm::any_of(Node.captures(), [](const LambdaCapture &LC) { return LC.capturesThis(); }); } /// Matches a constructor call expression which uses list initialization. AST_MATCHER(CXXConstructExpr, isListInitialization) { return Node.isListInitialization(); } /// Matches a constructor call expression which requires /// zero initialization. 
/// /// Given /// \code /// void foo() { /// struct point { double x; double y; }; /// point pt[2] = { { 1.0, 2.0 } }; /// } /// \endcode /// initListExpr(has(cxxConstructExpr(requiresZeroInitialization())) /// will match the implicit array filler for pt[1]. AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) { return Node.requiresZeroInitialization(); } /// Matches the n'th parameter of a function or an ObjC method /// declaration or a block. /// /// Given /// \code /// class X { void f(int x) {} }; /// \endcode /// cxxMethodDecl(hasParameter(0, hasType(varDecl()))) /// matches f(int x) {} /// with hasParameter(...) /// matching int x /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// \endcode // /// the matcher objcMethodDecl(hasParameter(0, hasName("y"))) /// matches the declaration of method f with hasParameter /// matching y. AST_POLYMORPHIC_MATCHER_P2(hasParameter, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, ObjCMethodDecl, BlockDecl), unsigned, N, internal::Matcher<ParmVarDecl>, InnerMatcher) { return (N < Node.parameters().size() && InnerMatcher.matches(*Node.parameters()[N], Finder, Builder)); } /// Matches all arguments and their respective ParmVarDecl. /// /// Given /// \code /// void f(int i); /// int y; /// f(y); /// \endcode /// callExpr( /// forEachArgumentWithParam( /// declRefExpr(to(varDecl(hasName("y")))), /// parmVarDecl(hasType(isInteger())) /// )) /// matches f(y); /// with declRefExpr(...) /// matching int y /// and parmVarDecl(...) /// matching int i AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam, AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr, CXXConstructExpr), internal::Matcher<Expr>, ArgMatcher, internal::Matcher<ParmVarDecl>, ParamMatcher) { BoundNodesTreeBuilder Result; // The first argument of an overloaded member operator is the implicit object // argument of the method which should not be matched against a parameter, so // we skip over it here. 
  BoundNodesTreeBuilder Matches;
  // Skip the implicit object argument of an overloaded member operator call.
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                              .matches(Node, Finder, &Matches)
                          ? 1
                          : 0;
  int ParamIndex = 0;
  bool Matched = false;
  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()), Finder,
                           &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, ParamMatcher)))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, ParamMatcher))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
      }
    }
    ++ParamIndex;
  }
  *Builder = std::move(Result);
  return Matched;
}

/// Matches all arguments and their respective types for a \c CallExpr or
/// \c CXXConstructExpr. It is very similar to \c forEachArgumentWithParam but
/// it works on calls through function pointers as well.
///
/// The difference is, that function pointers do not provide access to a
/// \c ParmVarDecl, but only the \c QualType for each argument.
///
/// Given
/// \code
/// void f(int i);
/// int y;
/// f(y);
/// void (*f_ptr)(int) = f;
/// f_ptr(y);
/// \endcode
/// callExpr(
///   forEachArgumentWithParamType(
///     declRefExpr(to(varDecl(hasName("y")))),
///     qualType(isInteger()).bind("type")
/// ))
///   matches f(y) and f_ptr(y)
/// with declRefExpr(...)
///   matching int y
/// and qualType(...)
///   matching int
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParamType,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<QualType>, ParamMatcher) {
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
BoundNodesTreeBuilder Matches; unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl())) .matches(Node, Finder, &Matches) ? 1 : 0; const FunctionProtoType *FProto = nullptr; if (const auto *Call = dyn_cast<CallExpr>(&Node)) { if (const auto *Value = dyn_cast_or_null<ValueDecl>(Call->getCalleeDecl())) { QualType QT = Value->getType().getCanonicalType(); // This does not necessarily lead to a `FunctionProtoType`, // e.g. K&R functions do not have a function prototype. if (QT->isFunctionPointerType()) FProto = QT->getPointeeType()->getAs<FunctionProtoType>(); if (QT->isMemberFunctionPointerType()) { const auto *MP = QT->getAs<MemberPointerType>(); assert(MP && "Must be member-pointer if its a memberfunctionpointer"); FProto = MP->getPointeeType()->getAs<FunctionProtoType>(); assert(FProto && "The call must have happened through a member function " "pointer"); } } } int ParamIndex = 0; bool Matched = false; unsigned NumArgs = Node.getNumArgs(); if (FProto && FProto->isVariadic()) NumArgs = std::min(NumArgs, FProto->getNumParams()); for (; ArgIndex < NumArgs; ++ArgIndex, ++ParamIndex) { BoundNodesTreeBuilder ArgMatches(*Builder); if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()), Finder, &ArgMatches)) { BoundNodesTreeBuilder ParamMatches(ArgMatches); // This test is cheaper compared to the big matcher in the next if. // Therefore, please keep this order. 
if (FProto) { QualType ParamType = FProto->getParamType(ParamIndex); if (ParamMatcher.matches(ParamType, Finder, &ParamMatches)) { Result.addMatch(ParamMatches); Matched = true; continue; } } if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl( hasParameter(ParamIndex, hasType(ParamMatcher))))), callExpr(callee(functionDecl( hasParameter(ParamIndex, hasType(ParamMatcher))))))) .matches(Node, Finder, &ParamMatches)) { Result.addMatch(ParamMatches); Matched = true; continue; } } } *Builder = std::move(Result); return Matched; } /// Matches the ParmVarDecl nodes that are at the N'th position in the parameter /// list. The parameter list could be that of either a block, function, or /// objc-method. /// /// /// Given /// /// \code /// void f(int a, int b, int c) { /// } /// \endcode /// /// ``parmVarDecl(isAtPosition(0))`` matches ``int a``. /// /// ``parmVarDecl(isAtPosition(1))`` matches ``int b``. AST_MATCHER_P(ParmVarDecl, isAtPosition, unsigned, N) { const clang::DeclContext *Context = Node.getParentFunctionOrMethod(); if (const auto *Decl = dyn_cast_or_null<FunctionDecl>(Context)) return N < Decl->param_size() && Decl->getParamDecl(N) == &Node; if (const auto *Decl = dyn_cast_or_null<BlockDecl>(Context)) return N < Decl->param_size() && Decl->getParamDecl(N) == &Node; if (const auto *Decl = dyn_cast_or_null<ObjCMethodDecl>(Context)) return N < Decl->param_size() && Decl->getParamDecl(N) == &Node; return false; } /// Matches any parameter of a function or an ObjC method declaration or a /// block. /// /// Does not match the 'this' parameter of a method. /// /// Given /// \code /// class X { void f(int x, int y, int z) {} }; /// \endcode /// cxxMethodDecl(hasAnyParameter(hasName("y"))) /// matches f(int x, int y, int z) {} /// with hasAnyParameter(...) 
/// matching int y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// \endcode // /// the matcher objcMethodDecl(hasAnyParameter(hasName("y"))) /// matches the declaration of method f with hasParameter /// matching y. /// /// For blocks, given /// \code /// b = ^(int y) { printf("%d", y) }; /// \endcode /// /// the matcher blockDecl(hasAnyParameter(hasName("y"))) /// matches the declaration of the block b with hasParameter /// matching y. AST_POLYMORPHIC_MATCHER_P(hasAnyParameter, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, ObjCMethodDecl, BlockDecl), internal::Matcher<ParmVarDecl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(), Node.param_end(), Finder, Builder) != Node.param_end(); } /// Matches \c FunctionDecls and \c FunctionProtoTypes that have a /// specific parameter count. /// /// Given /// \code /// void f(int i) {} /// void g(int i, int j) {} /// void h(int i, int j); /// void j(int i); /// void k(int x, int y, int z, ...); /// \endcode /// functionDecl(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(3)) /// matches \c k AST_POLYMORPHIC_MATCHER_P(parameterCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType), unsigned, N) { return Node.getNumParams() == N; } /// Matches \c FunctionDecls that have a noreturn attribute. /// /// Given /// \code /// void nope(); /// [[noreturn]] void a(); /// __attribute__((noreturn)) void b(); /// struct c { [[noreturn]] c(); }; /// \endcode /// functionDecl(isNoReturn()) /// matches all of those except /// \code /// void nope(); /// \endcode AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); } /// Matches the return type of a function declaration. 
/// /// Given: /// \code /// class X { int f() { return 1; } }; /// \endcode /// cxxMethodDecl(returns(asString("int"))) /// matches int f() { return 1; } AST_MATCHER_P(FunctionDecl, returns, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getReturnType(), Finder, Builder); } /// Matches extern "C" function or variable declarations. /// /// Given: /// \code /// extern "C" void f() {} /// extern "C" { void g() {} } /// void h() {} /// extern "C" int x = 1; /// extern "C" int y = 2; /// int z = 3; /// \endcode /// functionDecl(isExternC()) /// matches the declaration of f and g, but not the declaration of h. /// varDecl(isExternC()) /// matches the declaration of x and y, but not the declaration of z. AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.isExternC(); } /// Matches variable/function declarations that have "static" storage /// class specifier ("static" keyword) written in the source. /// /// Given: /// \code /// static void f() {} /// static int i = 0; /// extern int j; /// int k; /// \endcode /// functionDecl(isStaticStorageClass()) /// matches the function declaration f. /// varDecl(isStaticStorageClass()) /// matches the variable declaration i. AST_POLYMORPHIC_MATCHER(isStaticStorageClass, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.getStorageClass() == SC_Static; } /// Matches deleted function declarations. /// /// Given: /// \code /// void Func(); /// void DeletedFunc() = delete; /// \endcode /// functionDecl(isDeleted()) /// matches the declaration of DeletedFunc, but not Func. AST_MATCHER(FunctionDecl, isDeleted) { return Node.isDeleted(); } /// Matches defaulted function declarations. /// /// Given: /// \code /// class A { ~A(); }; /// class B { ~B() = default; }; /// \endcode /// functionDecl(isDefaulted()) /// matches the declaration of ~B, but not ~A. 
AST_MATCHER(FunctionDecl, isDefaulted) { return Node.isDefaulted(); } /// Matches weak function declarations. /// /// Given: /// \code /// void foo() __attribute__((__weakref__("__foo"))); /// void bar(); /// \endcode /// functionDecl(isWeak()) /// matches the weak declaration "foo", but not "bar". AST_MATCHER(FunctionDecl, isWeak) { return Node.isWeak(); } /// Matches functions that have a dynamic exception specification. /// /// Given: /// \code /// void f(); /// void g() noexcept; /// void h() noexcept(true); /// void i() noexcept(false); /// void j() throw(); /// void k() throw(int); /// void l() throw(...); /// \endcode /// functionDecl(hasDynamicExceptionSpec()) and /// functionProtoType(hasDynamicExceptionSpec()) /// match the declarations of j, k, and l, but not f, g, h, or i. AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType)) { if (const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node)) return FnTy->hasDynamicExceptionSpec(); return false; } /// Matches functions that have a non-throwing exception specification. /// /// Given: /// \code /// void f(); /// void g() noexcept; /// void h() throw(); /// void i() throw(int); /// void j() noexcept(false); /// \endcode /// functionDecl(isNoThrow()) and functionProtoType(isNoThrow()) /// match the declarations of g, and h, but not f, i or j. AST_POLYMORPHIC_MATCHER(isNoThrow, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType)) { const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node); // If the function does not have a prototype, then it is assumed to be a // throwing function (as it would if the function did not have any exception // specification). if (!FnTy) return false; // Assume the best for any unresolved exception specification. 
if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType())) return true; return FnTy->isNothrow(); } /// Matches constexpr variable and function declarations, /// and if constexpr. /// /// Given: /// \code /// constexpr int foo = 42; /// constexpr int bar(); /// void baz() { if constexpr(1 > 0) {} } /// \endcode /// varDecl(isConstexpr()) /// matches the declaration of foo. /// functionDecl(isConstexpr()) /// matches the declaration of bar. /// ifStmt(isConstexpr()) /// matches the if statement in baz. AST_POLYMORPHIC_MATCHER(isConstexpr, AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl, FunctionDecl, IfStmt)) { return Node.isConstexpr(); } /// Matches selection statements with initializer. /// /// Given: /// \code /// void foo() { /// if (int i = foobar(); i > 0) {} /// switch (int i = foobar(); i) {} /// for (auto& a = get_range(); auto& x : a) {} /// } /// void bar() { /// if (foobar() > 0) {} /// switch (foobar()) {} /// for (auto& x : get_range()) {} /// } /// \endcode /// ifStmt(hasInitStatement(anything())) /// matches the if statement in foo but not in bar. /// switchStmt(hasInitStatement(anything())) /// matches the switch statement in foo but not in bar. /// cxxForRangeStmt(hasInitStatement(anything())) /// matches the range for statement in foo but not in bar. AST_POLYMORPHIC_MATCHER_P(hasInitStatement, AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, SwitchStmt, CXXForRangeStmt), internal::Matcher<Stmt>, InnerMatcher) { const Stmt *Init = Node.getInit(); return Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder); } /// Matches the condition expression of an if statement, for loop, /// switch statement or conditional operator. 
/// /// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true)))) /// \code /// if (true) {} /// \endcode AST_POLYMORPHIC_MATCHER_P( hasCondition, AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt, SwitchStmt, AbstractConditionalOperator), internal::Matcher<Expr>, InnerMatcher) { const Expr *const Condition = Node.getCond(); return (Condition != nullptr && InnerMatcher.matches(*Condition, Finder, Builder)); } /// Matches the then-statement of an if statement. /// /// Examples matches the if statement /// (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true))))) /// \code /// if (false) true; else false; /// \endcode AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Then = Node.getThen(); return (Then != nullptr && InnerMatcher.matches(*Then, Finder, Builder)); } /// Matches the else-statement of an if statement. /// /// Examples matches the if statement /// (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true))))) /// \code /// if (false) false; else true; /// \endcode AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Else = Node.getElse(); return (Else != nullptr && InnerMatcher.matches(*Else, Finder, Builder)); } /// Matches if a node equals a previously bound node. /// /// Matches a node if it equals the node previously bound to \p ID. /// /// Given /// \code /// class X { int a; int b; }; /// \endcode /// cxxRecordDecl( /// has(fieldDecl(hasName("a"), hasType(type().bind("t")))), /// has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t")))))) /// matches the class \c X, as \c a and \c b have the same type. /// /// Note that when multiple matches are involved via \c forEach* matchers, /// \c equalsBoundNodes acts as a filter. 
/// For example: /// compoundStmt( /// forEachDescendant(varDecl().bind("d")), /// forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d")))))) /// will trigger a match for each combination of variable declaration /// and reference to that variable declaration within a compound statement. AST_POLYMORPHIC_MATCHER_P(equalsBoundNode, AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type, QualType), std::string, ID) { // FIXME: Figure out whether it makes sense to allow this // on any other node types. // For *Loc it probably does not make sense, as those seem // unique. For NestedNameSepcifier it might make sense, as // those also have pointer identity, but I'm not sure whether // they're ever reused. internal::NotEqualsBoundNodePredicate Predicate; Predicate.ID = ID; Predicate.Node = DynTypedNode::create(Node); return Builder->removeBindings(Predicate); } /// Matches the condition variable statement in an if statement. /// /// Given /// \code /// if (A* a = GetAPointer()) {} /// \endcode /// hasConditionVariableStatement(...) /// matches 'A* a = GetAPointer()'. AST_MATCHER_P(IfStmt, hasConditionVariableStatement, internal::Matcher<DeclStmt>, InnerMatcher) { const DeclStmt* const DeclarationStatement = Node.getConditionVariableDeclStmt(); return DeclarationStatement != nullptr && InnerMatcher.matches(*DeclarationStatement, Finder, Builder); } /// Matches the index expression of an array subscript expression. /// /// Given /// \code /// int i[5]; /// void f() { i[1] = 42; } /// \endcode /// arraySubscriptExpression(hasIndex(integerLiteral())) /// matches \c i[1] with the \c integerLiteral() matching \c 1 AST_MATCHER_P(ArraySubscriptExpr, hasIndex, internal::Matcher<Expr>, InnerMatcher) { if (const Expr* Expression = Node.getIdx()) return InnerMatcher.matches(*Expression, Finder, Builder); return false; } /// Matches the base expression of an array subscript expression. 
/// /// Given /// \code /// int i[5]; /// void f() { i[1] = 42; } /// \endcode /// arraySubscriptExpression(hasBase(implicitCastExpr( /// hasSourceExpression(declRefExpr())))) /// matches \c i[1] with the \c declRefExpr() matching \c i AST_MATCHER_P(ArraySubscriptExpr, hasBase, internal::Matcher<Expr>, InnerMatcher) { if (const Expr* Expression = Node.getBase()) return InnerMatcher.matches(*Expression, Finder, Builder); return false; } /// Matches a 'for', 'while', 'do while' statement or a function /// definition that has a given body. Note that in case of functions /// this matcher only matches the definition itself and not the other /// declarations of the same function. /// /// Given /// \code /// for (;;) {} /// \endcode /// hasBody(compoundStmt()) /// matches 'for (;;) {}' /// with compoundStmt() /// matching '{}' /// /// Given /// \code /// void f(); /// void f() {} /// \endcode /// hasBody(functionDecl()) /// matches 'void f() {}' /// with compoundStmt() /// matching '{}' /// but does not match 'void f();' AST_POLYMORPHIC_MATCHER_P(hasBody, AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt, WhileStmt, CXXForRangeStmt, FunctionDecl), internal::Matcher<Stmt>, InnerMatcher) { if (Finder->isTraversalIgnoringImplicitNodes() && isDefaultedHelper(&Node)) return false; const Stmt *const Statement = internal::GetBodyMatcher<NodeType>::get(Node); return (Statement != nullptr && InnerMatcher.matches(*Statement, Finder, Builder)); } /// Matches a function declaration that has a given body present in the AST. /// Note that this matcher matches all the declarations of a function whose /// body is present in the AST. 
/// /// Given /// \code /// void f(); /// void f() {} /// void g(); /// \endcode /// functionDecl(hasAnyBody(compoundStmt())) /// matches both 'void f();' /// and 'void f() {}' /// with compoundStmt() /// matching '{}' /// but does not match 'void g();' AST_MATCHER_P(FunctionDecl, hasAnyBody, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Statement = Node.getBody(); return (Statement != nullptr && InnerMatcher.matches(*Statement, Finder, Builder)); } /// Matches compound statements where at least one substatement matches /// a given matcher. Also matches StmtExprs that have CompoundStmt as children. /// /// Given /// \code /// { {}; 1+2; } /// \endcode /// hasAnySubstatement(compoundStmt()) /// matches '{ {}; 1+2; }' /// with compoundStmt() /// matching '{}' AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement, AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt, StmtExpr), internal::Matcher<Stmt>, InnerMatcher) { const CompoundStmt *CS = CompoundStmtMatcher<NodeType>::get(Node); return CS && matchesFirstInPointerRange(InnerMatcher, CS->body_begin(), CS->body_end(), Finder, Builder) != CS->body_end(); } /// Checks that a compound statement contains a specific number of /// child statements. /// /// Example: Given /// \code /// { for (;;) {} } /// \endcode /// compoundStmt(statementCountIs(0))) /// matches '{}' /// but does not match the outer compound statement. AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) { return Node.size() == N; } /// Matches literals that are equal to the given value of type ValueT. 
/// /// Given /// \code /// f('\0', false, 3.14, 42); /// \endcode /// characterLiteral(equals(0)) /// matches '\0' /// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0)) /// match false /// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2)) /// match 3.14 /// integerLiteral(equals(42)) /// matches 42 /// /// Note that you cannot directly match a negative numeric literal because the /// minus sign is not part of the literal: It is a unary operator whose operand /// is the positive numeric literal. Instead, you must use a unaryOperator() /// matcher to match the minus sign: /// /// unaryOperator(hasOperatorName("-"), /// hasUnaryOperand(integerLiteral(equals(13)))) /// /// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>, /// Matcher<FloatingLiteral>, Matcher<IntegerLiteral> template <typename ValueT> internal::PolymorphicMatcher<internal::ValueEqualsMatcher, void(internal::AllNodeBaseTypes), ValueT> equals(const ValueT &Value) { return internal::PolymorphicMatcher<internal::ValueEqualsMatcher, void(internal::AllNodeBaseTypes), ValueT>( Value); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, IntegerLiteral), bool, Value, 0) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, IntegerLiteral), unsigned, Value, 1) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, FloatingLiteral, IntegerLiteral), double, Value, 2) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } /// Matches the operator Name of operator expressions (binary or /// unary). 
/// /// Example matches a || b (matcher = binaryOperator(hasOperatorName("||"))) /// \code /// !(a || b) /// \endcode AST_POLYMORPHIC_MATCHER_P( hasOperatorName, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator, UnaryOperator), std::string, Name) { if (Optional<StringRef> OpName = internal::getOpName(Node)) return *OpName == Name; return false; } /// Matches operator expressions (binary or unary) that have any of the /// specified names. /// /// hasAnyOperatorName("+", "-") /// Is equivalent to /// anyOf(hasOperatorName("+"), hasOperatorName("-")) extern const internal::VariadicFunction< internal::PolymorphicMatcher<internal::HasAnyOperatorNameMatcher, AST_POLYMORPHIC_SUPPORTED_TYPES( BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator, UnaryOperator), std::vector<std::string>>, StringRef, internal::hasAnyOperatorNameFunc> hasAnyOperatorName; /// Matches all kinds of assignment operators. /// /// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator())) /// \code /// if (a == b) /// a += b; /// \endcode /// /// Example 2: matches s1 = s2 /// (matcher = cxxOperatorCallExpr(isAssignmentOperator())) /// \code /// struct S { S& operator=(const S&); }; /// void x() { S s1, s2; s1 = s2; } /// \endcode AST_POLYMORPHIC_MATCHER( isAssignmentOperator, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator)) { return Node.isAssignmentOp(); } /// Matches comparison operators. 
/// /// Example 1: matches a == b (matcher = binaryOperator(isComparisonOperator())) /// \code /// if (a == b) /// a += b; /// \endcode /// /// Example 2: matches s1 < s2 /// (matcher = cxxOperatorCallExpr(isComparisonOperator())) /// \code /// struct S { bool operator<(const S& other); }; /// void x(S s1, S s2) { bool b1 = s1 < s2; } /// \endcode AST_POLYMORPHIC_MATCHER( isComparisonOperator, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator)) { return Node.isComparisonOp(); } /// Matches the left hand side of binary operator expressions. /// /// Example matches a (matcher = binaryOperator(hasLHS())) /// \code /// a || b /// \endcode AST_POLYMORPHIC_MATCHER_P(hasLHS, AST_POLYMORPHIC_SUPPORTED_TYPES( BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator, ArraySubscriptExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *LeftHandSide = internal::getLHS(Node); return (LeftHandSide != nullptr && InnerMatcher.matches(*LeftHandSide, Finder, Builder)); } /// Matches the right hand side of binary operator expressions. /// /// Example matches b (matcher = binaryOperator(hasRHS())) /// \code /// a || b /// \endcode AST_POLYMORPHIC_MATCHER_P(hasRHS, AST_POLYMORPHIC_SUPPORTED_TYPES( BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator, ArraySubscriptExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *RightHandSide = internal::getRHS(Node); return (RightHandSide != nullptr && InnerMatcher.matches(*RightHandSide, Finder, Builder)); } /// Matches if either the left hand side or the right hand side of a /// binary operator matches. 
AST_POLYMORPHIC_MATCHER_P(
    hasEitherOperand,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
                                    CXXRewrittenBinaryOperator),
    internal::Matcher<Expr>, InnerMatcher) {
  return internal::VariadicDynCastAllOfMatcher<Stmt, NodeType>()(
             anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher)))
      .matches(Node, Finder, Builder);
}

/// Matches if both matchers match with opposite sides of the binary operator.
///
/// Example matcher = binaryOperator(hasOperands(integerLiteral(equals(1)),
///                                              integerLiteral(equals(2))))
/// \code
///   1 + 2 // Match
///   2 + 1 // Match
///   1 + 1 // No match
///   2 + 2 // No match
/// \endcode
AST_POLYMORPHIC_MATCHER_P2(
    hasOperands,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
                                    CXXRewrittenBinaryOperator),
    internal::Matcher<Expr>, Matcher1, internal::Matcher<Expr>, Matcher2) {
  return internal::VariadicDynCastAllOfMatcher<Stmt, NodeType>()(
             anyOf(allOf(hasLHS(Matcher1), hasRHS(Matcher2)),
                   allOf(hasLHS(Matcher2), hasRHS(Matcher1))))
      .matches(Node, Finder, Builder);
}

/// Matches if the operand of a unary operator matches.
///
/// Example matches true (matcher = hasUnaryOperand(
///                                   cxxBoolLiteral(equals(true))))
/// \code
///   !true
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasUnaryOperand,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(UnaryOperator,
                                                          CXXOperatorCallExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  const Expr *const Operand = internal::getSubExpr(Node);
  return (Operand != nullptr &&
          InnerMatcher.matches(*Operand, Finder, Builder));
}

/// Matches if the cast's source expression
/// or opaque value's source expression matches the given matcher.
/// /// Example 1: matches "a string" /// (matcher = castExpr(hasSourceExpression(cxxConstructExpr()))) /// \code /// class URL { URL(string); }; /// URL url = "a string"; /// \endcode /// /// Example 2: matches 'b' (matcher = /// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr()))) /// \code /// int a = b ?: 1; /// \endcode AST_POLYMORPHIC_MATCHER_P(hasSourceExpression, AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr, OpaqueValueExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *const SubExpression = internal::GetSourceExpressionMatcher<NodeType>::get(Node); return (SubExpression != nullptr && InnerMatcher.matches(*SubExpression, Finder, Builder)); } /// Matches casts that has a given cast kind. /// /// Example: matches the implicit cast around \c 0 /// (matcher = castExpr(hasCastKind(CK_NullToPointer))) /// \code /// int *p = 0; /// \endcode /// /// If the matcher is use from clang-query, CastKind parameter /// should be passed as a quoted string. e.g., hasCastKind("CK_NullToPointer"). AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) { return Node.getCastKind() == Kind; } /// Matches casts whose destination type matches a given matcher. /// /// (Note: Clang's AST refers to other conversions as "casts" too, and calls /// actual casts "explicit" casts.) AST_MATCHER_P(ExplicitCastExpr, hasDestinationType, internal::Matcher<QualType>, InnerMatcher) { const QualType NodeType = Node.getTypeAsWritten(); return InnerMatcher.matches(NodeType, Finder, Builder); } /// Matches implicit casts whose destination type matches a given /// matcher. /// /// FIXME: Unit test this matcher AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getType(), Finder, Builder); } /// Matches TagDecl object that are spelled with "struct." /// /// Example matches S, but not C, U or E. 
/// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isStruct) { return Node.isStruct(); } /// Matches TagDecl object that are spelled with "union." /// /// Example matches U, but not C, S or E. /// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isUnion) { return Node.isUnion(); } /// Matches TagDecl object that are spelled with "class." /// /// Example matches C, but not S, U or E. /// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isClass) { return Node.isClass(); } /// Matches TagDecl object that are spelled with "enum." /// /// Example matches E, but not C, S or U. /// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isEnum) { return Node.isEnum(); } /// Matches the true branch expression of a conditional operator. /// /// Example 1 (conditional ternary operator): matches a /// \code /// condition ? a : b /// \endcode /// /// Example 2 (conditional binary operator): matches opaqueValueExpr(condition) /// \code /// condition ?: b /// \endcode AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression, internal::Matcher<Expr>, InnerMatcher) { const Expr *Expression = Node.getTrueExpr(); return (Expression != nullptr && InnerMatcher.matches(*Expression, Finder, Builder)); } /// Matches the false branch expression of a conditional operator /// (binary or ternary). /// /// Example matches b /// \code /// condition ? a : b /// condition ?: b /// \endcode AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression, internal::Matcher<Expr>, InnerMatcher) { const Expr *Expression = Node.getFalseExpr(); return (Expression != nullptr && InnerMatcher.matches(*Expression, Finder, Builder)); } /// Matches if a declaration has a body attached. /// /// Example matches A, va, fa /// \code /// class A {}; /// class B; // Doesn't match, as it has no body. 
/// int va; /// extern int vb; // Doesn't match, as it doesn't define the variable. /// void fa() {} /// void fb(); // Doesn't match, as it has no body. /// @interface X /// - (void)ma; // Doesn't match, interface is declaration. /// @end /// @implementation X /// - (void)ma {} /// @end /// \endcode /// /// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>, /// Matcher<ObjCMethodDecl> AST_POLYMORPHIC_MATCHER(isDefinition, AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl, ObjCMethodDecl, FunctionDecl)) { return Node.isThisDeclarationADefinition(); } /// Matches if a function declaration is variadic. /// /// Example matches f, but not g or h. The function i will not match, even when /// compiled in C mode. /// \code /// void f(...); /// void g(int); /// template <typename... Ts> void h(Ts...); /// void i(); /// \endcode AST_MATCHER(FunctionDecl, isVariadic) { return Node.isVariadic(); } /// Matches the class declaration that the given method declaration /// belongs to. /// /// FIXME: Generalize this for other kinds of declarations. /// FIXME: What other kind of declarations would we need to generalize /// this to? /// /// Example matches A() in the last line /// (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl( /// ofClass(hasName("A")))))) /// \code /// class A { /// public: /// A(); /// }; /// A a = A(); /// \endcode AST_MATCHER_P(CXXMethodDecl, ofClass, internal::Matcher<CXXRecordDecl>, InnerMatcher) { ASTChildrenNotSpelledInSourceScope RAII(Finder, false); const CXXRecordDecl *Parent = Node.getParent(); return (Parent != nullptr && InnerMatcher.matches(*Parent, Finder, Builder)); } /// Matches each method overridden by the given method. This matcher may /// produce multiple matches. 
/// /// Given /// \code /// class A { virtual void f(); }; /// class B : public A { void f(); }; /// class C : public B { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches once, with "b" binding "A::f" and "d" binding "C::f" (Note /// that B::f is not overridden by C::f). /// /// The check can produce multiple matches in case of multiple inheritance, e.g. /// \code /// class A1 { virtual void f(); }; /// class A2 { virtual void f(); }; /// class C : public A1, public A2 { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and /// once with "b" binding "A2::f" and "d" binding "C::f". AST_MATCHER_P(CXXMethodDecl, forEachOverridden, internal::Matcher<CXXMethodDecl>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *Overridden : Node.overridden_methods()) { BoundNodesTreeBuilder OverriddenBuilder(*Builder); const bool OverriddenMatched = InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder); if (OverriddenMatched) { Matched = true; Result.addMatch(OverriddenBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches declarations of virtual methods and C++ base specifers that specify /// virtual inheritance. /// /// Example: /// \code /// class A { /// public: /// virtual void x(); // matches x /// }; /// \endcode /// /// Example: /// \code /// class Base {}; /// class DirectlyDerived : virtual Base {}; // matches Base /// class IndirectlyDerived : DirectlyDerived, Base {}; // matches Base /// \endcode /// /// Usable as: Matcher<CXXMethodDecl>, Matcher<CXXBaseSpecifier> AST_POLYMORPHIC_MATCHER(isVirtual, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXMethodDecl, CXXBaseSpecifier)) { return Node.isVirtual(); } /// Matches if the given method declaration has an explicit "virtual". 
///
/// Given
/// \code
///   class A {
///    public:
///     virtual void x();
///   };
///   class B : public A {
///    public:
///     void x();
///   };
/// \endcode
///   matches A::x but not B::x
AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) {
  return Node.isVirtualAsWritten();
}

/// Matches if the given method or class declaration is final.
///
/// Given:
/// \code
///   class A final {};
///
///   struct B {
///     virtual void f();
///   };
///
///   struct C : B {
///     void f() final;
///   };
/// \endcode
/// matches A and C::f, but not B, C, or B::f
AST_POLYMORPHIC_MATCHER(isFinal,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl,
                                                        CXXMethodDecl)) {
  return Node.template hasAttr<FinalAttr>();
}

/// Matches if the given method declaration is pure.
///
/// Given
/// \code
///   class A {
///    public:
///     virtual void x() = 0;
///   };
/// \endcode
///   matches A::x
AST_MATCHER(CXXMethodDecl, isPure) {
  return Node.isPure();
}

/// Matches if the given method declaration is const.
///
/// Given
/// \code
/// struct A {
///   void foo() const;
///   void bar();
/// };
/// \endcode
///
/// cxxMethodDecl(isConst()) matches A::foo() but not A::bar()
AST_MATCHER(CXXMethodDecl, isConst) {
  return Node.isConst();
}

/// Matches if the given method declaration declares a copy assignment
/// operator.
///
/// Given
/// \code
/// struct A {
///   A &operator=(const A &);
///   A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not
/// the second one.
AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) {
  return Node.isCopyAssignmentOperator();
}

/// Matches if the given method declaration declares a move assignment
/// operator.
///
/// Given
/// \code
/// struct A {
///   A &operator=(const A &);
///   A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not
/// the first one.
AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) {
  return Node.isMoveAssignmentOperator();
}

/// Matches if the given method declaration overrides another method.
///
/// Given
/// \code
///   class A {
///    public:
///     virtual void x();
///   };
///   class B : public A {
///    public:
///     virtual void x();
///   };
/// \endcode
///   matches B::x
AST_MATCHER(CXXMethodDecl, isOverride) {
  return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>();
}

/// Matches method declarations that are user-provided.
///
/// Given
/// \code
///   struct S {
///     S(); // #1
///     S(const S &) = default; // #2
///     S(S &&) = delete; // #3
///   };
/// \endcode
/// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3.
AST_MATCHER(CXXMethodDecl, isUserProvided) {
  return Node.isUserProvided();
}

/// Matches member expressions that are called with '->' as opposed
/// to '.'.
///
/// Member calls on the implicit this pointer match as called with '->'.
///
/// Given
/// \code
///   class Y {
///     void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
///     template <class T> void f() { this->f<T>(); f<T>(); }
///     int a;
///     static int b;
///   };
///   template <class T>
///   class Z {
///     void x() { this->m; }
///   };
/// \endcode
/// memberExpr(isArrow())
///   matches this->x, x, y.x, a, this->b
/// cxxDependentScopeMemberExpr(isArrow())
///   matches this->m
/// unresolvedMemberExpr(isArrow())
///   matches this->f<T>, f<T>
// NOTE(review): 'y.x' in the memberExpr(isArrow()) list above is a dot
// access, not an arrow access — verify whether it belongs in that list.
AST_POLYMORPHIC_MATCHER(
    isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                             CXXDependentScopeMemberExpr)) {
  return Node.isArrow();
}

/// Matches QualType nodes that are of integer type.
///
/// Given
/// \code
///   void a(int);
///   void b(long);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isInteger())))
/// matches "a(int)", "b(long)", but not "c(double)".
AST_MATCHER(QualType, isInteger) {
    return Node->isIntegerType();
}

/// Matches QualType nodes that are of unsigned integer type.
///
/// Given
/// \code
///   void a(int);
///   void b(unsigned long);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isUnsignedInteger())))
/// matches "b(unsigned long)", but not "a(int)" and "c(double)".
AST_MATCHER(QualType, isUnsignedInteger) {
    return Node->isUnsignedIntegerType();
}

/// Matches QualType nodes that are of signed integer type.
///
/// Given
/// \code
///   void a(int);
///   void b(unsigned long);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isSignedInteger())))
/// matches "a(int)", but not "b(unsigned long)" and "c(double)".
AST_MATCHER(QualType, isSignedInteger) {
    return Node->isSignedIntegerType();
}

/// Matches QualType nodes that are of character type.
///
/// Given
/// \code
///   void a(char);
///   void b(wchar_t);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isAnyCharacter())))
/// matches "a(char)", "b(wchar_t)", but not "c(double)".
AST_MATCHER(QualType, isAnyCharacter) {
    return Node->isAnyCharacterType();
}

/// Matches QualType nodes that are of any pointer type; this includes
/// the Objective-C object pointer type, which is different despite being
/// syntactically similar.
///
/// Given
/// \code
///   int *i = nullptr;
///
///   @interface Foo
///   @end
///   Foo *f;
///
///   int j;
/// \endcode
/// varDecl(hasType(isAnyPointer()))
///   matches "int *i" and "Foo *f", but not "int j".
AST_MATCHER(QualType, isAnyPointer) {
  return Node->isAnyPointerType();
}

/// Matches QualType nodes that are const-qualified, i.e., that
/// include "top-level" const.
///
/// Given
/// \code
///   void a(int);
///   void b(int const);
///   void c(const int);
///   void d(const int*);
///   void e(int const) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isConstQualified())))
///   matches "void b(int const)", "void c(const int)" and
///   "void e(int const) {}". It does not match d as there
///   is no top-level const on the parameter type "const int *".
AST_MATCHER(QualType, isConstQualified) {
  return Node.isConstQualified();
}

/// Matches QualType nodes that are volatile-qualified, i.e., that
/// include "top-level" volatile.
///
/// Given
/// \code
///   void a(int);
///   void b(int volatile);
///   void c(volatile int);
///   void d(volatile int*);
///   void e(int volatile) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isVolatileQualified())))
///   matches "void b(int volatile)", "void c(volatile int)" and
///   "void e(int volatile) {}". It does not match d as there
///   is no top-level volatile on the parameter type "volatile int *".
AST_MATCHER(QualType, isVolatileQualified) {
  return Node.isVolatileQualified();
}

/// Matches QualType nodes that have local CV-qualifiers attached to
/// the node, not hidden within a typedef.
///
/// Given
/// \code
///   typedef const int const_int;
///   const_int i;
///   int *const j;
///   int *volatile k;
///   int m;
/// \endcode
/// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k.
/// \c i is const-qualified but the qualifier is not local.
AST_MATCHER(QualType, hasLocalQualifiers) {
  return Node.hasLocalQualifiers();
}

/// Matches a member expression where the member is matched by a
/// given matcher.
///
/// Given
/// \code
///   struct { int first, second; } first, second;
///   int i(second.first);
///   int j(first.second);
/// \endcode
/// memberExpr(member(hasName("first")))
///   matches second.first
///   but not first.second (because the member name there is "second").
AST_MATCHER_P(MemberExpr, member,
              internal::Matcher<ValueDecl>, InnerMatcher) {
  return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder);
}

/// Matches a member expression where the object expression is matched by a
/// given matcher. Implicit object expressions are included; that is, it matches
/// use of implicit `this`.
///
/// Given
/// \code
///   struct X {
///     int m;
///     int f(X x) { x.m; return m; }
///   };
/// \endcode
/// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X")))))
///   matches `x.m`, but not `m`; however,
/// memberExpr(hasObjectExpression(hasType(pointsTo(
///     cxxRecordDecl(hasName("X"))))))
///   matches `m` (aka. `this->m`), but not `x.m`.
AST_POLYMORPHIC_MATCHER_P(
    hasObjectExpression,
    AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                    CXXDependentScopeMemberExpr),
    internal::Matcher<Expr>, InnerMatcher) {
  if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  return InnerMatcher.matches(*Node.getBase(), Finder, Builder);
}

/// Matches any using shadow declaration.
///
/// Given
/// \code
///   namespace X { void b(); }
///   using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasName("b"))))
///   matches \code using X::b \endcode
AST_MATCHER_P(BaseUsingDecl, hasAnyUsingShadowDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(),
                                    Node.shadow_end(), Finder,
                                    Builder) != Node.shadow_end();
}

/// Matches a using shadow declaration where the target declaration is
/// matched by the given matcher.
///
/// Given
/// \code
///   namespace X { int a; void b(); }
///   using X::a;
///   using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl())))
///   matches \code using X::b \endcode
///   but not \code using X::a \endcode
AST_MATCHER_P(UsingShadowDecl, hasTargetDecl,
              internal::Matcher<NamedDecl>, InnerMatcher) {
  return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder);
}

/// Matches template instantiations of function, class, or static
/// member variable template instantiations.
/// /// Given /// \code /// template <typename T> class X {}; class A {}; X<A> x; /// \endcode /// or /// \code /// template <typename T> class X {}; class A {}; template class X<A>; /// \endcode /// or /// \code /// template <typename T> class X {}; class A {}; extern template class X<A>; /// \endcode /// cxxRecordDecl(hasName("::X"), isTemplateInstantiation()) /// matches the template instantiation of X<A>. /// /// But given /// \code /// template <typename T> class X {}; class A {}; /// template <> class X<A> {}; X<A> x; /// \endcode /// cxxRecordDecl(hasName("::X"), isTemplateInstantiation()) /// does not match, as X<A> is an explicit template specialization. /// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isTemplateInstantiation, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ImplicitInstantiation || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDefinition || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDeclaration); } /// Matches declarations that are template instantiations or are inside /// template instantiations. /// /// Given /// \code /// template<typename T> void A(T t) { T i; } /// A(0); /// A(0U); /// \endcode /// functionDecl(isInstantiated()) /// matches 'A(int) {...};' and 'A(unsigned) {...}'. AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) { auto IsInstantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))); return decl(anyOf(IsInstantiation, hasAncestor(IsInstantiation))); } /// Matches statements inside of a template instantiation. /// /// Given /// \code /// int j; /// template<typename T> void A(T t) { T i; j += 42;} /// A(0); /// A(0U); /// \endcode /// declStmt(isInTemplateInstantiation()) /// matches 'int i;' and 'unsigned i'. 
/// unless(stmt(isInTemplateInstantiation())) /// will NOT match j += 42; as it's shared between the template definition and /// instantiation. AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) { return stmt( hasAncestor(decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))))); } /// Matches explicit template specializations of function, class, or /// static member variable template instantiations. /// /// Given /// \code /// template<typename T> void A(T t) { } /// template<> void A(int N) { } /// \endcode /// functionDecl(isExplicitTemplateSpecialization()) /// matches the specialization A<int>(). /// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization); } /// Matches \c TypeLocs for which the given inner /// QualType-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc, internal::Matcher<QualType>, InnerMatcher, 0) { return internal::BindableMatcher<TypeLoc>( new internal::TypeLocTypeMatcher(InnerMatcher)); } /// Matches type \c bool. /// /// Given /// \code /// struct S { bool func(); }; /// \endcode /// functionDecl(returns(booleanType())) /// matches "bool func();" AST_MATCHER(Type, booleanType) { return Node.isBooleanType(); } /// Matches type \c void. /// /// Given /// \code /// struct S { void func(); }; /// \endcode /// functionDecl(returns(voidType())) /// matches "void func();" AST_MATCHER(Type, voidType) { return Node.isVoidType(); } template <typename NodeType> using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>; /// Matches builtin Types. 
/// /// Given /// \code /// struct A {}; /// A a; /// int b; /// float c; /// bool d; /// \endcode /// builtinType() /// matches "int b", "float c" and "bool d" extern const AstTypeMatcher<BuiltinType> builtinType; /// Matches all kinds of arrays. /// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[4]; /// void f() { int c[a[0]]; } /// \endcode /// arrayType() /// matches "int a[]", "int b[4]" and "int c[a[0]]"; extern const AstTypeMatcher<ArrayType> arrayType; /// Matches C99 complex types. /// /// Given /// \code /// _Complex float f; /// \endcode /// complexType() /// matches "_Complex float f" extern const AstTypeMatcher<ComplexType> complexType; /// Matches any real floating-point type (float, double, long double). /// /// Given /// \code /// int i; /// float f; /// \endcode /// realFloatingPointType() /// matches "float f" but not "int i" AST_MATCHER(Type, realFloatingPointType) { return Node.isRealFloatingType(); } /// Matches arrays and C99 complex types that have a specific element /// type. /// /// Given /// \code /// struct A {}; /// A a[7]; /// int b[7]; /// \endcode /// arrayType(hasElementType(builtinType())) /// matches "int b[7]" /// /// Usable as: Matcher<ArrayType>, Matcher<ComplexType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement, AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType, ComplexType)); /// Matches C arrays with a specified constant size. /// /// Given /// \code /// void() { /// int a[2]; /// int b[] = { 2, 3 }; /// int c[b[0]]; /// } /// \endcode /// constantArrayType() /// matches "int a[2]" extern const AstTypeMatcher<ConstantArrayType> constantArrayType; /// Matches nodes that have the specified size. 
/// /// Given /// \code /// int a[42]; /// int b[2 * 21]; /// int c[41], d[43]; /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// char *w = "a"; /// \endcode /// constantArrayType(hasSize(42)) /// matches "int a[42]" and "int b[2 * 21]" /// stringLiteral(hasSize(4)) /// matches "abcd", L"abcd" AST_POLYMORPHIC_MATCHER_P(hasSize, AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType, StringLiteral), unsigned, N) { return internal::HasSizeMatcher<NodeType>::hasSize(Node, N); } /// Matches C++ arrays whose size is a value-dependent expression. /// /// Given /// \code /// template<typename T, int Size> /// class array { /// T data[Size]; /// }; /// \endcode /// dependentSizedArrayType /// matches "T data[Size]" extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType; /// Matches C arrays with unspecified size. /// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[42]; /// void f(int c[]) { int d[a[0]]; }; /// \endcode /// incompleteArrayType() /// matches "int a[]" and "int c[]" extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType; /// Matches C arrays with a specified size that is not an /// integer-constant-expression. /// /// Given /// \code /// void f() { /// int a[] = { 2, 3 } /// int b[42]; /// int c[a[0]]; /// } /// \endcode /// variableArrayType() /// matches "int c[a[0]]" extern const AstTypeMatcher<VariableArrayType> variableArrayType; /// Matches \c VariableArrayType nodes that have a specific size /// expression. /// /// Given /// \code /// void f(int b) { /// int a[b]; /// } /// \endcode /// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to( /// varDecl(hasName("b"))))))) /// matches "int a[b]" AST_MATCHER_P(VariableArrayType, hasSizeExpr, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder); } /// Matches atomic types. 
/// /// Given /// \code /// _Atomic(int) i; /// \endcode /// atomicType() /// matches "_Atomic(int) i" extern const AstTypeMatcher<AtomicType> atomicType; /// Matches atomic types with a specific value type. /// /// Given /// \code /// _Atomic(int) i; /// _Atomic(float) f; /// \endcode /// atomicType(hasValueType(isInteger())) /// matches "_Atomic(int) i" /// /// Usable as: Matcher<AtomicType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue, AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType)); /// Matches types nodes representing C++11 auto types. /// /// Given: /// \code /// auto n = 4; /// int v[] = { 2, 3 } /// for (auto i : v) { } /// \endcode /// autoType() /// matches "auto n" and "auto i" extern const AstTypeMatcher<AutoType> autoType; /// Matches types nodes representing C++11 decltype(<expr>) types. /// /// Given: /// \code /// short i = 1; /// int j = 42; /// decltype(i + j) result = i + j; /// \endcode /// decltypeType() /// matches "decltype(i + j)" extern const AstTypeMatcher<DecltypeType> decltypeType; /// Matches \c AutoType nodes where the deduced type is a specific type. /// /// Note: There is no \c TypeLoc for the deduced type and thus no /// \c getDeducedLoc() matcher. /// /// Given /// \code /// auto a = 1; /// auto b = 2.0; /// \endcode /// autoType(hasDeducedType(isInteger())) /// matches "auto a" /// /// Usable as: Matcher<AutoType> AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType, AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType)); /// Matches \c DecltypeType nodes to find out the underlying type. /// /// Given /// \code /// decltype(1) a = 1; /// decltype(2.0) b = 2.0; /// \endcode /// decltypeType(hasUnderlyingType(isInteger())) /// matches the type of "a" /// /// Usable as: Matcher<DecltypeType> AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType, AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType)); /// Matches \c FunctionType nodes. 
/// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionType() /// matches "int (*f)(int)" and the type of "g". extern const AstTypeMatcher<FunctionType> functionType; /// Matches \c FunctionProtoType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionProtoType() /// matches "int (*f)(int)" and the type of "g" in C++ mode. /// In C mode, "g" is not matched because it does not contain a prototype. extern const AstTypeMatcher<FunctionProtoType> functionProtoType; /// Matches \c ParenType nodes. /// /// Given /// \code /// int (*ptr_to_array)[4]; /// int *array_of_ptrs[4]; /// \endcode /// /// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not /// \c array_of_ptrs. extern const AstTypeMatcher<ParenType> parenType; /// Matches \c ParenType nodes where the inner type is a specific type. /// /// Given /// \code /// int (*ptr_to_array)[4]; /// int (*ptr_to_func)(int); /// \endcode /// /// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches /// \c ptr_to_func but not \c ptr_to_array. /// /// Usable as: Matcher<ParenType> AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType, AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType)); /// Matches block pointer types, i.e. types syntactically represented as /// "void (^)(int)". /// /// The \c pointee is always required to be a \c FunctionType. extern const AstTypeMatcher<BlockPointerType> blockPointerType; /// Matches member pointer types. /// Given /// \code /// struct A { int i; } /// A::* ptr = A::i; /// \endcode /// memberPointerType() /// matches "A::* ptr" extern const AstTypeMatcher<MemberPointerType> memberPointerType; /// Matches pointer types, but does not match Objective-C object pointer /// types. /// /// Given /// \code /// int *a; /// int &b = *a; /// int c = 5; /// /// @interface Foo /// @end /// Foo *f; /// \endcode /// pointerType() /// matches "int *a", but does not match "Foo *f". 
extern const AstTypeMatcher<PointerType> pointerType; /// Matches an Objective-C object pointer type, which is different from /// a pointer type, despite being syntactically similar. /// /// Given /// \code /// int *a; /// /// @interface Foo /// @end /// Foo *f; /// \endcode /// pointerType() /// matches "Foo *f", but does not match "int *a". extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType; /// Matches both lvalue and rvalue reference types. /// /// Given /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f. extern const AstTypeMatcher<ReferenceType> referenceType; /// Matches lvalue reference types. /// /// Given: /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is /// matched since the type is deduced as int& by reference collapsing rules. extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType; /// Matches rvalue reference types. /// /// Given: /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not /// matched as it is deduced to int& by reference collapsing rules. extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType; /// Narrows PointerType (and similar) matchers to those where the /// \c pointee matches a given matcher. 
/// /// Given /// \code /// int *a; /// int const *b; /// float const *f; /// \endcode /// pointerType(pointee(isConstQualified(), isInteger())) /// matches "int const *b" /// /// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>, /// Matcher<PointerType>, Matcher<ReferenceType> AST_TYPELOC_TRAVERSE_MATCHER_DECL( pointee, getPointee, AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType, PointerType, ReferenceType)); /// Matches typedef types. /// /// Given /// \code /// typedef int X; /// \endcode /// typedefType() /// matches "typedef int X" extern const AstTypeMatcher<TypedefType> typedefType; /// Matches enum types. /// /// Given /// \code /// enum C { Green }; /// enum class S { Red }; /// /// C c; /// S s; /// \endcode // /// \c enumType() matches the type of the variable declarations of both \c c and /// \c s. extern const AstTypeMatcher<EnumType> enumType; /// Matches template specialization types. /// /// Given /// \code /// template <typename T> /// class C { }; /// /// template class C<int>; // A /// C<char> var; // B /// \endcode /// /// \c templateSpecializationType() matches the type of the explicit /// instantiation in \c A and the type of the variable declaration in \c B. extern const AstTypeMatcher<TemplateSpecializationType> templateSpecializationType; /// Matches C++17 deduced template specialization types, e.g. deduced class /// template types. /// /// Given /// \code /// template <typename T> /// class C { public: C(T); }; /// /// C c(123); /// \endcode /// \c deducedTemplateSpecializationType() matches the type in the declaration /// of the variable \c c. extern const AstTypeMatcher<DeducedTemplateSpecializationType> deducedTemplateSpecializationType; /// Matches types nodes representing unary type transformations. 
/// /// Given: /// \code /// typedef __underlying_type(T) type; /// \endcode /// unaryTransformType() /// matches "__underlying_type(T)" extern const AstTypeMatcher<UnaryTransformType> unaryTransformType; /// Matches record types (e.g. structs, classes). /// /// Given /// \code /// class C {}; /// struct S {}; /// /// C c; /// S s; /// \endcode /// /// \c recordType() matches the type of the variable declarations of both \c c /// and \c s. extern const AstTypeMatcher<RecordType> recordType; /// Matches tag types (record and enum types). /// /// Given /// \code /// enum E {}; /// class C {}; /// /// E e; /// C c; /// \endcode /// /// \c tagType() matches the type of the variable declarations of both \c e /// and \c c. extern const AstTypeMatcher<TagType> tagType; /// Matches types specified with an elaborated type keyword or with a /// qualified name. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// class C {}; /// /// class C c; /// N::M::D d; /// \endcode /// /// \c elaboratedType() matches the type of the variable declarations of both /// \c c and \c d. extern const AstTypeMatcher<ElaboratedType> elaboratedType; /// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier, /// matches \c InnerMatcher if the qualifier exists. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// N::M::D d; /// \endcode /// /// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N")))) /// matches the type of the variable declaration of \c d. AST_MATCHER_P(ElaboratedType, hasQualifier, internal::Matcher<NestedNameSpecifier>, InnerMatcher) { if (const NestedNameSpecifier *Qualifier = Node.getQualifier()) return InnerMatcher.matches(*Qualifier, Finder, Builder); return false; } /// Matches ElaboratedTypes whose named type matches \c InnerMatcher. 
/// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// N::M::D d; /// \endcode /// /// \c elaboratedType(namesType(recordType( /// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable /// declaration of \c d. AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getNamedType(), Finder, Builder); } /// Matches types that represent the result of substituting a type for a /// template type parameter. /// /// Given /// \code /// template <typename T> /// void F(T t) { /// int i = 1 + t; /// } /// \endcode /// /// \c substTemplateTypeParmType() matches the type of 't' but not '1' extern const AstTypeMatcher<SubstTemplateTypeParmType> substTemplateTypeParmType; /// Matches template type parameter substitutions that have a replacement /// type that matches the provided matcher. /// /// Given /// \code /// template <typename T> /// double F(T t); /// int i; /// double j = F(i); /// \endcode /// /// \c substTemplateTypeParmType(hasReplacementType(type())) matches int AST_TYPE_TRAVERSE_MATCHER( hasReplacementType, getReplacementType, AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType)); /// Matches template type parameter types. /// /// Example matches T, but not int. /// (matcher = templateTypeParmType()) /// \code /// template <typename T> void f(int i); /// \endcode extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType; /// Matches injected class name types. /// /// Example matches S s, but not S<T> s. /// (matcher = parmVarDecl(hasType(injectedClassNameType()))) /// \code /// template <typename T> struct S { /// void f(S s); /// void g(S<T> s); /// }; /// \endcode extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType; /// Matches decayed type /// Example matches i[] in declaration of f. /// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType()))))) /// Example matches i[1]. 
/// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType()))))) /// \code /// void f(int i[]) { /// i[1] = 0; /// } /// \endcode extern const AstTypeMatcher<DecayedType> decayedType; /// Matches the decayed type, whoes decayed type matches \c InnerMatcher AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>, InnerType) { return InnerType.matches(Node.getDecayedType(), Finder, Builder); } /// Matches declarations whose declaration context, interpreted as a /// Decl, matches \c InnerMatcher. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// \endcode /// /// \c cxxRcordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the /// declaration of \c class \c D. AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) { const DeclContext *DC = Node.getDeclContext(); if (!DC) return false; return InnerMatcher.matches(*Decl::castFromDeclContext(DC), Finder, Builder); } /// Matches nested name specifiers. /// /// Given /// \code /// namespace ns { /// struct A { static void f(); }; /// void A::f() {} /// void g() { A::f(); } /// } /// ns::A a; /// \endcode /// nestedNameSpecifier() /// matches "ns::" and both "A::" extern const internal::VariadicAllOfMatcher<NestedNameSpecifier> nestedNameSpecifier; /// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc. extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc> nestedNameSpecifierLoc; /// Matches \c NestedNameSpecifierLocs for which the given inner /// NestedNameSpecifier-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD( internal::BindableMatcher<NestedNameSpecifierLoc>, loc, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) { return internal::BindableMatcher<NestedNameSpecifierLoc>( new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>( InnerMatcher)); } /// Matches nested name specifiers that specify a type matching the /// given \c QualType matcher without qualifiers. 
/// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(specifiesType( /// hasDeclaration(cxxRecordDecl(hasName("A"))) /// )) /// matches "A::" AST_MATCHER_P(NestedNameSpecifier, specifiesType, internal::Matcher<QualType>, InnerMatcher) { if (!Node.getAsType()) return false; return InnerMatcher.matches(QualType(Node.getAsType(), 0), Finder, Builder); } /// Matches nested name specifier locs that specify a type matching the /// given \c TypeLoc. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type( /// hasDeclaration(cxxRecordDecl(hasName("A"))))))) /// matches "A::" AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc, internal::Matcher<TypeLoc>, InnerMatcher) { return Node && Node.getNestedNameSpecifier()->getAsType() && InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifier. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 0) { const NestedNameSpecifier *NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(*NextNode, Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifierLoc. 
/// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A"))))) /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix, internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher, 1) { NestedNameSpecifierLoc NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(NextNode, Finder, Builder); } /// Matches nested name specifiers that specify a namespace matching the /// given namespace matcher. /// /// Given /// \code /// namespace ns { struct A {}; } /// ns::A a; /// \endcode /// nestedNameSpecifier(specifiesNamespace(hasName("ns"))) /// matches "ns::" AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace, internal::Matcher<NamespaceDecl>, InnerMatcher) { if (!Node.getAsNamespace()) return false; return InnerMatcher.matches(*Node.getAsNamespace(), Finder, Builder); } /// Overloads for the \c equalsNode matcher. /// FIXME: Implement for other node types. /// @{ /// Matches if a node equals another node. /// /// \c Decl has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Stmt has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Type has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) { return &Node == Other; } /// @} /// Matches each case or default statement belonging to the given switch /// statement. This matcher may produce multiple matches. 
/// /// Given /// \code /// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } } /// \endcode /// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s") /// matches four times, with "c" binding each of "case 1:", "case 2:", /// "case 3:" and "case 4:", and "s" respectively binding "switch (1)", /// "switch (1)", "switch (2)" and "switch (2)". AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>, InnerMatcher) { BoundNodesTreeBuilder Result; // FIXME: getSwitchCaseList() does not necessarily guarantee a stable // iteration order. We should use the more general iterating matchers once // they are capable of expressing this matcher (for example, it should ignore // case statements belonging to nested switch statements). bool Matched = false; for (const SwitchCase *SC = Node.getSwitchCaseList(); SC; SC = SC->getNextSwitchCase()) { BoundNodesTreeBuilder CaseBuilder(*Builder); bool CaseMatched = InnerMatcher.matches(*SC, Finder, &CaseBuilder); if (CaseMatched) { Matched = true; Result.addMatch(CaseBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches each constructor initializer in a constructor definition. /// /// Given /// \code /// class A { A() : i(42), j(42) {} int i; int j; }; /// \endcode /// cxxConstructorDecl(forEachConstructorInitializer( /// forField(decl().bind("x")) /// )) /// will trigger two matches, binding for 'i' and 'j' respectively. AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *I : Node.inits()) { if (Finder->isTraversalIgnoringImplicitNodes() && !I->isWritten()) continue; BoundNodesTreeBuilder InitBuilder(*Builder); if (InnerMatcher.matches(*I, Finder, &InitBuilder)) { Matched = true; Result.addMatch(InitBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches constructor declarations that are copy constructors. 
/// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3. AST_MATCHER(CXXConstructorDecl, isCopyConstructor) { return Node.isCopyConstructor(); } /// Matches constructor declarations that are move constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2. AST_MATCHER(CXXConstructorDecl, isMoveConstructor) { return Node.isMoveConstructor(); } /// Matches constructor declarations that are default constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3. AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) { return Node.isDefaultConstructor(); } /// Matches constructors that delegate to another constructor. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(int) {} // #2 /// S(S &&) : S() {} // #3 /// }; /// S::S() : S(0) {} // #4 /// \endcode /// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not /// #1 or #2. AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) { return Node.isDelegatingConstructor(); } /// Matches constructor, conversion function, and deduction guide declarations /// that have an explicit specifier if this explicit specifier is resolved to /// true. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9. 
/// cxxConversionDecl(isExplicit()) will match #4, but not #3. /// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5. AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES( CXXConstructorDecl, CXXConversionDecl, CXXDeductionGuideDecl)) { return Node.isExplicit(); } /// Matches the expression in an explicit specifier if present in the given /// declaration. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2. /// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4. /// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6. AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>, InnerMatcher) { ExplicitSpecifier ES = ExplicitSpecifier::getFromDecl(&Node); if (!ES.getExpr()) return false; ASTChildrenNotSpelledInSourceScope RAII(Finder, false); return InnerMatcher.matches(*ES.getExpr(), Finder, Builder); } /// Matches function and namespace declarations that are marked with /// the inline keyword. /// /// Given /// \code /// inline void f(); /// void g(); /// namespace n { /// inline namespace m {} /// } /// \endcode /// functionDecl(isInline()) will match ::f(). /// namespaceDecl(isInline()) will match n::m. AST_POLYMORPHIC_MATCHER(isInline, AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl, FunctionDecl)) { // This is required because the spelling of the function used to determine // whether inline is specified or not differs between the polymorphic types. 
if (const auto *FD = dyn_cast<FunctionDecl>(&Node)) return FD->isInlineSpecified(); else if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node)) return NSD->isInline(); llvm_unreachable("Not a valid polymorphic type"); } /// Matches anonymous namespace declarations. /// /// Given /// \code /// namespace n { /// namespace {} // #1 /// } /// \endcode /// namespaceDecl(isAnonymous()) will match #1 but not ::n. AST_MATCHER(NamespaceDecl, isAnonymous) { return Node.isAnonymousNamespace(); } /// Matches declarations in the namespace `std`, but not in nested namespaces. /// /// Given /// \code /// class vector {}; /// namespace foo { /// class vector {}; /// namespace std { /// class vector {}; /// } /// } /// namespace std { /// inline namespace __1 { /// class vector {}; // #1 /// namespace experimental { /// class vector {}; /// } /// } /// } /// \endcode /// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1. AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); } /// If the given case statement does not use the GNU case range /// extension, matches the constant given in the statement. /// /// Given /// \code /// switch (1) { case 1: case 1+1: case 3 ... 4: ; } /// \endcode /// caseStmt(hasCaseConstant(integerLiteral())) /// matches "case 1:" AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>, InnerMatcher) { if (Node.getRHS()) return false; return InnerMatcher.matches(*Node.getLHS(), Finder, Builder); } /// Matches declaration that has a given attribute. /// /// Given /// \code /// __attribute__((device)) void f() { ... } /// \endcode /// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of /// f. If the matcher is used from clang-query, attr::Kind parameter should be /// passed as a quoted string. e.g., hasAttr("attr::CUDADevice"). 
AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) {
  // True iff any attribute attached to this declaration has kind AttrKind.
  for (const auto *Attr : Node.attrs()) {
    if (Attr->getKind() == AttrKind)
      return true;
  }
  return false;
}

/// Matches the return value expression of a return statement
///
/// Given
/// \code
///   return a + b;
/// \endcode
/// hasReturnValue(binaryOperator())
///   matches 'return a + b'
/// with binaryOperator()
///   matching 'a + b'
AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>,
              InnerMatcher) {
  // A value-less 'return;' has no return-value expression and never matches.
  if (const auto *RetValue = Node.getRetValue())
    return InnerMatcher.matches(*RetValue, Finder, Builder);
  return false;
}

/// Matches CUDA kernel call expression.
///
/// Example matches,
/// \code
///   kernel<<<i,j>>>();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr>
    cudaKernelCallExpr;

/// Matches expressions that resolve to a null pointer constant, such as
/// GNU's __null, C++11's nullptr, or C's NULL macro.
///
/// Given:
/// \code
///   void *v1 = NULL;
///   void *v2 = nullptr;
///   void *v3 = __null; // GNU extension
///   char *cp = (char *)0;
///   int *ip = 0;
///   int i = 0;
/// \endcode
/// expr(nullPointerConstant())
///   matches the initializer for v1, v2, v3, cp, and ip. Does not match the
///   initializer for i.
AST_MATCHER_FUNCTION(internal::Matcher<Expr>, nullPointerConstant) {
  // An integer literal 0 only counts as a null pointer constant when its
  // parent expression has pointer type (e.g. an initializer of a pointer).
  return anyOf(
      gnuNullExpr(), cxxNullPtrLiteralExpr(),
      integerLiteral(equals(0), hasParent(expr(hasType(pointerType())))));
}

/// Matches the DecompositionDecl the binding belongs to.
///
/// For example, in:
/// \code
/// void foo()
/// {
///     int arr[3];
///     auto &[f, s, t] = arr;
///
///     f = 42;
/// }
/// \endcode
/// The matcher:
/// \code
///   bindingDecl(hasName("f"),
///                 forDecomposition(decompositionDecl())
/// \endcode
/// matches 'f' in 'auto &[f, s, t]'.
AST_MATCHER_P(BindingDecl, forDecomposition, internal::Matcher<ValueDecl>,
              InnerMatcher) {
  // A BindingDecl outside a decomposition has no decomposed decl; no match.
  if (const ValueDecl *VD = Node.getDecomposedDecl())
    return InnerMatcher.matches(*VD, Finder, Builder);
  return false;
}

/// Matches the Nth binding of a DecompositionDecl.
///
/// For example, in:
/// \code
/// void foo()
/// {
///     int arr[3];
///     auto &[f, s, t] = arr;
///
///     f = 42;
/// }
/// \endcode
/// The matcher:
/// \code
///   decompositionDecl(hasBinding(0,
///     bindingDecl(hasName("f").bind("fBinding"))))
/// \endcode
/// matches the decomposition decl with 'f' bound to "fBinding".
AST_MATCHER_P2(DecompositionDecl, hasBinding, unsigned, N,
               internal::Matcher<BindingDecl>, InnerMatcher) {
  // Out-of-range index never matches.
  if (Node.bindings().size() <= N)
    return false;
  return InnerMatcher.matches(*Node.bindings()[N], Finder, Builder);
}

/// Matches any binding of a DecompositionDecl.
///
/// For example, in:
/// \code
/// void foo()
/// {
///     int arr[3];
///     auto &[f, s, t] = arr;
///
///     f = 42;
/// }
/// \endcode
/// The matcher:
/// \code
///   decompositionDecl(hasAnyBinding(bindingDecl(hasName("f").bind("fBinding"))))
/// \endcode
/// matches the decomposition decl with 'f' bound to "fBinding".
AST_MATCHER_P(DecompositionDecl, hasAnyBinding, internal::Matcher<BindingDecl>,
              InnerMatcher) {
  return llvm::any_of(Node.bindings(), [&](const auto *Binding) {
    return InnerMatcher.matches(*Binding, Finder, Builder);
  });
}

/// Matches declaration of the function the statement belongs to.
///
/// Deprecated. Use forCallable() to correctly handle the situation when
/// the declaration is not a function (but a block or an Objective-C method).
/// forFunction() not only fails to take non-functions into account but also
/// may match the wrong declaration in their presence.
///
/// Given:
/// \code
/// F& operator=(const F& o) {
///   std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
///   return *this;
/// }
/// \endcode
/// returnStmt(forFunction(hasName("operator=")))
///   matches 'return *this'
///   but does not match 'return v > 0'
AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>,
              InnerMatcher) {
  // Walk up the parent chain depth-first until a FunctionDecl or LambdaExpr
  // is found; other parent kinds are expanded further.
  const auto &Parents = Finder->getASTContext().getParents(Node);

  llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end());
  while (!Stack.empty()) {
    const auto &CurNode = Stack.back();
    Stack.pop_back();
    if (const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
      if (InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) {
        return true;
      }
    } else if (const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
      // For lambdas, match against the generated call operator.
      if (InnerMatcher.matches(*LambdaExprNode->getCallOperator(), Finder,
                               Builder)) {
        return true;
      }
    } else {
      for (const auto &Parent : Finder->getASTContext().getParents(CurNode))
        Stack.push_back(Parent);
    }
  }
  return false;
}

/// Matches declaration of the function, method, or block the statement
/// belongs to.
///
/// Given:
/// \code
/// F& operator=(const F& o) {
///   std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
///   return *this;
/// }
/// \endcode
/// returnStmt(forCallable(functionDecl(hasName("operator="))))
///   matches 'return *this'
///   but does not match 'return v > 0'
///
/// Given:
/// \code
/// -(void) foo {
///   int x = 1;
///   dispatch_sync(queue, ^{ int y = 2; });
/// }
/// \endcode
/// declStmt(forCallable(objcMethodDecl()))
///   matches 'int x = 1'
///   but does not match 'int y = 2'.
/// whereas declStmt(forCallable(blockDecl()))
///   matches 'int y = 2'
///   but does not match 'int x = 1'.
AST_MATCHER_P(Stmt, forCallable, internal::Matcher<Decl>, InnerMatcher) {
  // Same ancestor walk as forFunction(), but additionally stops at
  // Objective-C methods and blocks instead of walking through them.
  const auto &Parents = Finder->getASTContext().getParents(Node);

  llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end());
  while (!Stack.empty()) {
    const auto &CurNode = Stack.back();
    Stack.pop_back();
    if (const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
      if (InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) {
        return true;
      }
    } else if (const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
      if (InnerMatcher.matches(*LambdaExprNode->getCallOperator(), Finder,
                               Builder)) {
        return true;
      }
    } else if (const auto *ObjCMethodDeclNode = CurNode.get<ObjCMethodDecl>()) {
      if (InnerMatcher.matches(*ObjCMethodDeclNode, Finder, Builder)) {
        return true;
      }
    } else if (const auto *BlockDeclNode = CurNode.get<BlockDecl>()) {
      if (InnerMatcher.matches(*BlockDeclNode, Finder, Builder)) {
        return true;
      }
    } else {
      for (const auto &Parent : Finder->getASTContext().getParents(CurNode))
        Stack.push_back(Parent);
    }
  }
  return false;
}

/// Matches a declaration that has external formal linkage.
///
/// Example matches only z (matcher = varDecl(hasExternalFormalLinkage()))
/// \code
/// void f() {
///   int x;
///   static int y;
/// }
/// int z;
/// \endcode
///
/// Example matches f() because it has external formal linkage despite being
/// unique to the translation unit as though it has internal linkage
/// (matcher = functionDecl(hasExternalFormalLinkage()))
///
/// \code
/// namespace {
/// void f() {}
/// }
/// \endcode
AST_MATCHER(NamedDecl, hasExternalFormalLinkage) {
  return Node.hasExternalFormalLinkage();
}

/// Matches a declaration that has default arguments.
///
/// Example matches y (matcher = parmVarDecl(hasDefaultArgument()))
/// \code
/// void x(int val) {}
/// void y(int val = 0) {}
/// \endcode
///
/// Deprecated. Use hasInitializer() instead to be able to
/// match on the contents of the default argument.  For example:
///
/// \code
/// void x(int val = 7) {}
/// void y(int val = 42) {}
/// \endcode
/// parmVarDecl(hasInitializer(integerLiteral(equals(42))))
///   matches the parameter of y
///
/// A matcher such as
///   parmVarDecl(hasInitializer(anything()))
/// is equivalent to parmVarDecl(hasDefaultArgument()).
AST_MATCHER(ParmVarDecl, hasDefaultArgument) {
  return Node.hasDefaultArg();
}

/// Matches array new expressions.
///
/// Given:
/// \code
///   MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(isArray())
///   matches the expression 'new MyClass[10]'.
AST_MATCHER(CXXNewExpr, isArray) {
  return Node.isArray();
}

/// Matches placement new expression arguments.
///
/// Given:
/// \code
///   MyClass *p1 = new (Storage, 16) MyClass();
/// \endcode
/// cxxNewExpr(hasPlacementArg(1, integerLiteral(equals(16))))
///   matches the expression 'new (Storage, 16) MyClass()'.
AST_MATCHER_P2(CXXNewExpr, hasPlacementArg, unsigned, Index,
               internal::Matcher<Expr>, InnerMatcher) {
  // Bounds-check first; Index is 0-based into the placement argument list.
  return Node.getNumPlacementArgs() > Index &&
         InnerMatcher.matches(*Node.getPlacementArg(Index), Finder, Builder);
}

/// Matches any placement new expression arguments.
///
/// Given:
/// \code
///   MyClass *p1 = new (Storage) MyClass();
/// \endcode
/// cxxNewExpr(hasAnyPlacementArg(anything()))
///   matches the expression 'new (Storage, 16) MyClass()'.
AST_MATCHER_P(CXXNewExpr, hasAnyPlacementArg, internal::Matcher<Expr>,
              InnerMatcher) {
  return llvm::any_of(Node.placement_arguments(), [&](const Expr *Arg) {
    return InnerMatcher.matches(*Arg, Finder, Builder);
  });
}

/// Matches array new expressions with a given array size.
///
/// Given:
/// \code
///   MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(hasArraySize(integerLiteral(equals(10))))
///   matches the expression 'new MyClass[10]'.
AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) {
  // The size operand is optional even for array new; require its presence.
  return Node.isArray() && *Node.getArraySize() &&
         InnerMatcher.matches(**Node.getArraySize(), Finder, Builder);
}

/// Matches a class declaration that is defined.
///
/// Example matches x (matcher = cxxRecordDecl(hasDefinition()))
/// \code
/// class x {};
/// class y;
/// \endcode
AST_MATCHER(CXXRecordDecl, hasDefinition) {
  return Node.hasDefinition();
}

/// Matches C++11 scoped enum declaration.
///
/// Example matches Y (matcher = enumDecl(isScoped()))
/// \code
/// enum X {};
/// enum class Y {};
/// \endcode
AST_MATCHER(EnumDecl, isScoped) {
  return Node.isScoped();
}

/// Matches a function declared with a trailing return type.
///
/// Example matches Y (matcher = functionDecl(hasTrailingReturn()))
/// \code
/// int X() {}
/// auto Y() -> int {}
/// \endcode
AST_MATCHER(FunctionDecl, hasTrailingReturn) {
  // Only prototyped functions carry trailing-return information.
  if (const auto *F = Node.getType()->getAs<FunctionProtoType>())
    return F->hasTrailingReturn();
  return false;
}

/// Matches expressions that match InnerMatcher that are possibly wrapped in an
/// elidable constructor and other corresponding bookkeeping nodes.
///
/// In C++17, elidable copy constructors are no longer being generated in the
/// AST as it is not permitted by the standard. They are, however, part of the
/// AST in C++14 and earlier. So, a matcher must abstract over these differences
/// to work in all language modes. This matcher skips elidable constructor-call
/// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and
/// various implicit nodes inside the constructor calls, all of which will not
/// appear in the C++17 AST.
///
/// Given
///
/// \code
/// struct H {};
/// H G();
/// void f() {
///   H D = G();
/// }
/// \endcode
///
/// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))``
/// matches ``H D = G()`` in C++11 through C++17 (and beyond).
AST_MATCHER_P(Expr, ignoringElidableConstructorCall,
              ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  // E tracks the node that we are examining.
  const Expr *E = &Node;

  // If present, remove an outer `ExprWithCleanups` corresponding to the
  // underlying `CXXConstructExpr`. This check won't cover all cases of added
  // `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the
  // EWC is placed on the outermost node of the expression, which this may not
  // be), but, it still improves the coverage of this matcher.
  if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node))
    E = CleanupsExpr->getSubExpr();

  if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) {
    if (CtorExpr->isElidable()) {
      if (const auto *MaterializeTemp =
              dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) {
        return InnerMatcher.matches(*MaterializeTemp->getSubExpr(), Finder,
                                    Builder);
      }
    }
  }
  return InnerMatcher.matches(Node, Finder, Builder);
}

//----------------------------------------------------------------------------//
// OpenMP handling.
//----------------------------------------------------------------------------//

/// Matches any ``#pragma omp`` executable directive.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel default(none)
///   #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective()`` matches ``omp parallel``,
/// ``omp parallel default(none)`` and ``omp taskyield``.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective>
    ompExecutableDirective;

/// Matches standalone OpenMP directives,
/// i.e., directives that can't have a structured block.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   {}
///   #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective(isStandaloneDirective()))`` matches
/// ``omp taskyield``.
AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) {
  return Node.isStandaloneDirective();
}

/// Matches the structured-block of the OpenMP executable directive
///
/// Prerequisite: the executable directive must not be standalone directive.
/// If it is, it will never match.
///
/// Given
///
/// \code
///    #pragma omp parallel
///    ;
///    #pragma omp parallel
///    {}
/// \endcode
///
/// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;``
AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock,
              internal::Matcher<Stmt>, InnerMatcher) {
  if (Node.isStandaloneDirective())
    return false; // Standalone directives have no structured blocks.
  return InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder);
}

/// Matches any clause in an OpenMP directive.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel default(none)
/// \endcode
///
/// ``ompExecutableDirective(hasAnyClause(anything()))`` matches
/// ``omp parallel default(none)``.
AST_MATCHER_P(OMPExecutableDirective, hasAnyClause,
              internal::Matcher<OMPClause>, InnerMatcher) {
  ArrayRef<OMPClause *> Clauses = Node.clauses();
  return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(),
                                    Clauses.end(), Finder,
                                    Builder) != Clauses.end();
}

/// Matches OpenMP ``default`` clause.
///
/// Given
///
/// \code
///   #pragma omp parallel default(none)
///   #pragma omp parallel default(shared)
///   #pragma omp parallel default(firstprivate)
///   #pragma omp parallel
/// \endcode
///
/// ``ompDefaultClause()`` matches ``default(none)``, ``default(shared)``, and
/// ``default(firstprivate)``
extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause>
    ompDefaultClause;

/// Matches if the OpenMP ``default`` clause has ``none`` kind specified.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel default(none)
///   #pragma omp parallel default(shared)
///   #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``.
AST_MATCHER(OMPDefaultClause, isNoneKind) {
  return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_none;
}

/// Matches if the OpenMP ``default`` clause has ``shared`` kind specified.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel default(none)
///   #pragma omp parallel default(shared)
///   #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``.
AST_MATCHER(OMPDefaultClause, isSharedKind) {
  return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_shared;
}

/// Matches if the OpenMP ``default`` clause has ``firstprivate`` kind
/// specified.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel default(none)
///   #pragma omp parallel default(shared)
///   #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isFirstPrivateKind())`` matches only
/// ``default(firstprivate)``.
AST_MATCHER(OMPDefaultClause, isFirstPrivateKind) {
  return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_firstprivate;
}

/// Matches if the OpenMP directive is allowed to contain the specified OpenMP
/// clause kind.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel for
///   #pragma omp          for
/// \endcode
///
/// `ompExecutableDirective(isAllowedToContainClause(OMPC_default))`` matches
/// ``omp parallel`` and ``omp parallel for``.
///
/// If the matcher is use from clang-query, ``OpenMPClauseKind`` parameter
/// should be passed as a quoted string. e.g.,
/// ``isAllowedToContainClauseKind("OMPC_default").``
AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind,
              OpenMPClauseKind, CKind) {
  // Delegates to the OpenMP directive/clause compatibility table; the current
  // OpenMP language version affects which clauses are permitted.
  return llvm::omp::isAllowedClauseForDirective(
      Node.getDirectiveKind(), CKind,
      Finder->getASTContext().getLangOpts().OpenMP);
}

//----------------------------------------------------------------------------//
// End OpenMP handling.
//----------------------------------------------------------------------------//

} // namespace ast_matchers
} // namespace clang

#endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
configurator.c
/* Simple tool to create config.h.
 * Would be much easier with ccan modules, but deliberately standalone.
 *
 * Copyright 2011 Rusty Russell <rusty@rustcorp.com.au>.  MIT license.
 *
 * c12r_err, c12r_errx functions copied from ccan/err/err.c
 * Copyright Rusty Russell <rusty@rustcorp.com.au>. CC0 (Public domain) License.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#define _POSIX_C_SOURCE 200809L /* For pclose, popen, strdup */

/* Exit codes: distinguish bad invocation from failures while running tests. */
#define EXIT_BAD_USAGE 1
#define EXIT_TROUBLE_RUNNING 2
#define EXIT_BAD_TEST 3
#define EXIT_BAD_INPUT 4

#include <errno.h>
#include <stdio.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#ifdef _MSC_VER
#define popen _popen
#define pclose _pclose
#endif

#ifdef _MSC_VER
#define DEFAULT_COMPILER "cl"
/* Note: Dash options avoid POSIX path conversion when used under msys bash
 * and are therefore preferred to slash (e.g. -nologo over /nologo)
 * Note: Disable Warning 4200 "nonstandard extension used : zero-sized array
 * in struct/union" for flexible array members.
 */
#define DEFAULT_FLAGS "-nologo -Zi -W4 -wd4200 " \
	"-D_CRT_NONSTDC_NO_WARNINGS -D_CRT_SECURE_NO_WARNINGS"
#define DEFAULT_OUTPUT_EXE_FLAG "-Fe:"
#else
#define DEFAULT_COMPILER "cc"
#define DEFAULT_FLAGS "-g3 -ggdb -Wall -Wundef -Wmissing-prototypes -Wmissing-declarations -Wstrict-prototypes -Wold-style-definition"
#define DEFAULT_OUTPUT_EXE_FLAG "-o"
#endif

/* Scratch files reused for every probe compile/run. */
#define OUTPUT_FILE "configurator.out"
#define INPUT_FILE "configuratortest.c"

#ifdef _WIN32
#define DIR_SEP "\\"
#else
#define DIR_SEP "/"
#endif

static const char *progname = "";
static int verbose;
static bool like_a_libtool = false;

/* One feature probe: a named code fragment plus how to build/run it. */
struct test {
	const char *name;
	const char *desc;
	/*
	 * Template style flags (pick one):
	 * OUTSIDE_MAIN:
	 * - put a simple boilerplate main below it.
	 * DEFINES_FUNC:
	 * - defines a static function called func; adds ref to avoid warnings
	 * INSIDE_MAIN:
	 * - put this inside main().
	 * DEFINES_EVERYTHING:
	 * - don't add any boilerplate at all.
	 *
	 * Execution flags:
	 * EXECUTE:
	 * - a runtime test; must compile, exit 0 means flag is set.
	 * MAY_NOT_COMPILE:
	 * - Only useful with EXECUTE: don't get upset if it doesn't compile.
	 * <nothing>:
	 * - a compile test, if it compiles must run and exit 0.
	 */
	const char *style;
	const char *depends;
	const char *link;
	const char *fragment;
	const char *flags;
	const char *overrides; /* On success, force this to '1' */
	bool done;
	bool answer;
};

/* Terminated by a NULL name */
static struct test *tests;

/* Built-in probe table; field order: name, desc, style, depends, link,
 * fragment[, flags[, overrides]].  Trailing struct fields default to 0. */
static const struct test base_tests[] = {
	{ "HAVE_32BIT_OFF_T", "off_t is 32 bits",
	  "DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE", NULL, NULL,
	  "#include <sys/types.h>\n"
	  "int main(void) {\n"
	  " return sizeof(off_t) == 4 ? 0 : 1;\n"
	  "}\n" },
	{ "HAVE_ALIGNOF", "__alignof__ support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __alignof__(double) > 0 ? 0 : 1;" },
	{ "HAVE_ASPRINTF", "asprintf() declaration",
	  "DEFINES_FUNC", NULL, NULL,
	  "#ifndef _GNU_SOURCE\n"
	  "#define _GNU_SOURCE\n"
	  "#endif\n"
	  "#include <stdio.h>\n"
	  "static char *func(int x) {"
	  " char *p;\n"
	  " if (asprintf(&p, \"%u\", x) == -1) \n"
	  " p = NULL;\n"
	  " return p;\n"
	  "}" },
	{ "HAVE_ATTRIBUTE_COLD", "__attribute__((cold)) support",
	  "DEFINES_FUNC", NULL, NULL,
	  "static int __attribute__((cold)) func(int x) { return x; }" },
	{ "HAVE_ATTRIBUTE_CONST", "__attribute__((const)) support",
	  "DEFINES_FUNC", NULL, NULL,
	  "static int __attribute__((const)) func(int x) { return x; }" },
	{ "HAVE_ATTRIBUTE_DEPRECATED", "__attribute__((deprecated)) support",
	  "DEFINES_FUNC", NULL, NULL,
	  "static int __attribute__((deprecated)) func(int x) { return x; }" },
	{ "HAVE_ATTRIBUTE_NONNULL", "__attribute__((nonnull)) support",
	  "DEFINES_FUNC", NULL, NULL,
	  "static char *__attribute__((nonnull)) func(char *p) { return p; }" },
	{ "HAVE_ATTRIBUTE_SENTINEL", "__attribute__((sentinel)) support",
	  "DEFINES_FUNC", NULL, NULL,
	  "static int __attribute__((sentinel)) func(int i, ...) { return i; }" },
	{ "HAVE_ATTRIBUTE_PURE", "__attribute__((pure)) support",
	  "DEFINES_FUNC", NULL, NULL,
	  "static int __attribute__((pure)) func(int x) { return x; }" },
	{ "HAVE_ATTRIBUTE_MAY_ALIAS", "__attribute__((may_alias)) support",
	  "OUTSIDE_MAIN", NULL, NULL,
	  "typedef short __attribute__((__may_alias__)) short_a;" },
	{ "HAVE_ATTRIBUTE_NORETURN", "__attribute__((noreturn)) support",
	  "DEFINES_FUNC", NULL, NULL,
	  "#include <stdlib.h>\n"
	  "static void __attribute__((noreturn)) func(int x) { exit(x); }" },
	{ "HAVE_ATTRIBUTE_PRINTF", "__attribute__ format printf support",
	  "DEFINES_FUNC", NULL, NULL,
	  "static void __attribute__((format(__printf__, 1, 2))) func(const char *fmt, ...) { (void)fmt; }" },
	{ "HAVE_ATTRIBUTE_UNUSED", "__attribute__((unused)) support",
	  "OUTSIDE_MAIN", NULL, NULL,
	  "static int __attribute__((unused)) func(int x) { return x; }" },
	{ "HAVE_ATTRIBUTE_USED", "__attribute__((used)) support",
	  "OUTSIDE_MAIN", NULL, NULL,
	  "static int __attribute__((used)) func(int x) { return x; }" },
	{ "HAVE_BACKTRACE", "backtrace() in <execinfo.h>",
	  "DEFINES_FUNC", NULL, NULL,
	  "#include <execinfo.h>\n"
	  "static int func(int x) {"
	  " void *bt[10];\n"
	  " return backtrace(bt, 10) < x;\n"
	  "}" },
	{ "HAVE_BIG_ENDIAN", "big endian",
	  "INSIDE_MAIN|EXECUTE", NULL, NULL,
	  "union { int i; char c[sizeof(int)]; } u;\n"
	  "u.i = 0x01020304;\n"
	  "return u.c[0] == 0x01 && u.c[1] == 0x02 && u.c[2] == 0x03 && u.c[3] == 0x04 ? 0 : 1;" },
	{ "HAVE_BSWAP_64", "bswap64 in byteswap.h",
	  "DEFINES_FUNC", "HAVE_BYTESWAP_H", NULL,
	  "#include <byteswap.h>\n"
	  "static int func(int x) { return bswap_64(x); }" },
	{ "HAVE_BUILTIN_CHOOSE_EXPR", "__builtin_choose_expr support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_choose_expr(1, 0, \"garbage\");" },
	{ "HAVE_BUILTIN_CLZ", "__builtin_clz support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_clz(1) == (sizeof(int)*8 - 1) ? 0 : 1;" },
	{ "HAVE_BUILTIN_CLZL", "__builtin_clzl support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_clzl(1) == (sizeof(long)*8 - 1) ? 0 : 1;" },
	{ "HAVE_BUILTIN_CLZLL", "__builtin_clzll support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_clzll(1) == (sizeof(long long)*8 - 1) ? 0 : 1;" },
	{ "HAVE_BUILTIN_CTZ", "__builtin_ctz support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_ctz(1 << (sizeof(int)*8 - 1)) == (sizeof(int)*8 - 1) ? 0 : 1;" },
	{ "HAVE_BUILTIN_CTZL", "__builtin_ctzl support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_ctzl(1UL << (sizeof(long)*8 - 1)) == (sizeof(long)*8 - 1) ? 0 : 1;" },
	{ "HAVE_BUILTIN_CTZLL", "__builtin_ctzll support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_ctzll(1ULL << (sizeof(long long)*8 - 1)) == (sizeof(long long)*8 - 1) ? 0 : 1;" },
	{ "HAVE_BUILTIN_CONSTANT_P", "__builtin_constant_p support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_constant_p(1) ? 0 : 1;" },
	{ "HAVE_BUILTIN_EXPECT", "__builtin_expect support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_expect(argc == 1, 1) ? 0 : 1;" },
	{ "HAVE_BUILTIN_FFS", "__builtin_ffs support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_ffs(0) == 0 ? 0 : 1;" },
	{ "HAVE_BUILTIN_FFSL", "__builtin_ffsl support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_ffsl(0L) == 0 ? 0 : 1;" },
	{ "HAVE_BUILTIN_FFSLL", "__builtin_ffsll support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_ffsll(0LL) == 0 ? 0 : 1;" },
	{ "HAVE_BUILTIN_POPCOUNT", "__builtin_popcount support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_popcount(255) == 8 ? 0 : 1;" },
	{ "HAVE_BUILTIN_POPCOUNTL", "__builtin_popcountl support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_popcountl(255L) == 8 ? 0 : 1;" },
	{ "HAVE_BUILTIN_POPCOUNTLL", "__builtin_popcountll support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_popcountll(255LL) == 8 ? 0 : 1;" },
	{ "HAVE_BUILTIN_TYPES_COMPATIBLE_P", "__builtin_types_compatible_p support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return __builtin_types_compatible_p(char *, int) ? 1 : 0;" },
	{ "HAVE_ICCARM_INTRINSICS", "<intrinsics.h>",
	  "DEFINES_FUNC", NULL, NULL,
	  "#include <intrinsics.h>\n"
	  "int func(int v) {\n"
	  " return __CLZ(__RBIT(v));\n"
	  "}" },
	{ "HAVE_BYTESWAP_H", "<byteswap.h>",
	  "OUTSIDE_MAIN", NULL, NULL,
	  "#include <byteswap.h>\n" },
	{ "HAVE_CLOCK_GETTIME", "clock_gettime() declaration",
	  "DEFINES_FUNC", "HAVE_STRUCT_TIMESPEC", NULL,
	  "#include <time.h>\n"
	  "static struct timespec func(void) {\n"
	  " struct timespec ts;\n"
	  " clock_gettime(CLOCK_REALTIME, &ts);\n"
	  " return ts;\n"
	  "}\n" },
	{ "HAVE_CLOCK_GETTIME_IN_LIBRT", "clock_gettime() in librt",
	  "DEFINES_FUNC", "HAVE_STRUCT_TIMESPEC !HAVE_CLOCK_GETTIME", "-lrt",
	  "#include <time.h>\n"
	  "static struct timespec func(void) {\n"
	  " struct timespec ts;\n"
	  " clock_gettime(CLOCK_REALTIME, &ts);\n"
	  " return ts;\n"
	  "}\n",
	  /* This means HAVE_CLOCK_GETTIME, too */
	  "HAVE_CLOCK_GETTIME" },
	{ "HAVE_COMPOUND_LITERALS", "compound literal support",
	  "INSIDE_MAIN", NULL, NULL,
	  "int *foo = (int[]) { 1, 2, 3, 4 };\n"
	  "return foo[0] ? 0 : 1;" },
	{ "HAVE_FCHDIR", "fchdir support",
	  "DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE", NULL, NULL,
	  "#include <sys/types.h>\n"
	  "#include <sys/stat.h>\n"
	  "#include <fcntl.h>\n"
	  "#include <unistd.h>\n"
	  "int main(void) {\n"
	  " int fd = open(\"..\", O_RDONLY);\n"
	  " return fchdir(fd) == 0 ? 0 : 1;\n"
	  "}\n" },
	{ "HAVE_ERR_H", "<err.h>",
	  "DEFINES_FUNC", NULL, NULL,
	  "#include <err.h>\n"
	  "static void func(int arg) {\n"
	  " if (arg == 0)\n"
	  " err(1, \"err %u\", arg);\n"
	  " if (arg == 1)\n"
	  " errx(1, \"err %u\", arg);\n"
	  " if (arg == 3)\n"
	  " warn(\"warn %u\", arg);\n"
	  " if (arg == 4)\n"
	  " warnx(\"warn %u\", arg);\n"
	  "}\n" },
	{ "HAVE_FILE_OFFSET_BITS", "_FILE_OFFSET_BITS to get 64-bit offsets",
	  "DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE", "HAVE_32BIT_OFF_T", NULL,
	  "#define _FILE_OFFSET_BITS 64\n"
	  "#include <sys/types.h>\n"
	  "int main(void) {\n"
	  " return sizeof(off_t) == 8 ? 0 : 1;\n"
	  "}\n" },
	{ "HAVE_FOR_LOOP_DECLARATION", "for loop declaration support",
	  "INSIDE_MAIN", NULL, NULL,
	  "int ret = 1;\n"
	  "for (int i = 0; i < argc; i++) { ret = 0; };\n"
	  "return ret;" },
	{ "HAVE_FLEXIBLE_ARRAY_MEMBER", "flexible array member support",
	  "OUTSIDE_MAIN", NULL, NULL,
	  "struct foo { unsigned int x; int arr[]; };" },
	{ "HAVE_GETPAGESIZE", "getpagesize() in <unistd.h>",
	  "DEFINES_FUNC", NULL, NULL,
	  "#include <unistd.h>\n"
	  "static int func(void) { return getpagesize(); }" },
	{ "HAVE_ISBLANK", "isblank() in <ctype.h>",
	  "DEFINES_FUNC", NULL, NULL,
	  "#ifndef _GNU_SOURCE\n"
	  "#define _GNU_SOURCE\n"
	  "#endif\n"
	  "#include <ctype.h>\n"
	  "static int func(void) { return isblank(' '); }" },
	{ "HAVE_LITTLE_ENDIAN", "little endian",
	  "INSIDE_MAIN|EXECUTE", NULL, NULL,
	  "union { int i; char c[sizeof(int)]; } u;\n"
	  "u.i = 0x01020304;\n"
	  "return u.c[0] == 0x04 && u.c[1] == 0x03 && u.c[2] == 0x02 && u.c[3] == 0x01 ? 0 : 1;" },
	{ "HAVE_MEMMEM", "memmem in <string.h>",
	  "DEFINES_FUNC", NULL, NULL,
	  "#ifndef _GNU_SOURCE\n"
	  "#define _GNU_SOURCE\n"
	  "#endif\n"
	  "#include <string.h>\n"
	  "static void *func(void *h, size_t hl, void *n, size_t nl) {\n"
	  "return memmem(h, hl, n, nl);"
	  "}\n", },
	{ "HAVE_MEMRCHR", "memrchr in <string.h>",
	  "DEFINES_FUNC", NULL, NULL,
	  "#ifndef _GNU_SOURCE\n"
	  "#define _GNU_SOURCE\n"
	  "#endif\n"
	  "#include <string.h>\n"
	  "static void *func(void *s, int c, size_t n) {\n"
	  "return memrchr(s, c, n);"
	  "}\n", },
	{ "HAVE_MMAP", "mmap() declaration",
	  "DEFINES_FUNC", NULL, NULL,
	  "#include <sys/mman.h>\n"
	  "static void *func(int fd) {\n"
	  " return mmap(0, 65536, PROT_READ, MAP_SHARED, fd, 0);\n"
	  "}" },
	{ "HAVE_PROC_SELF_MAPS", "/proc/self/maps exists",
	  "DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE", NULL, NULL,
	  "#include <sys/types.h>\n"
	  "#include <sys/stat.h>\n"
	  "#include <fcntl.h>\n"
	  "int main(void) {\n"
	  " return open(\"/proc/self/maps\", O_RDONLY) != -1 ? 0 : 1;\n"
	  "}\n" },
	{ "HAVE_QSORT_R_PRIVATE_LAST",
	  "qsort_r cmp takes trailing arg",
	  "DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE", NULL, NULL,
	  "#ifndef _GNU_SOURCE\n"
	  "#define _GNU_SOURCE\n"
	  "#endif\n"
	  "#include <stdlib.h>\n"
	  "static int cmp(const void *lp, const void *rp, void *priv) {\n"
	  " *(unsigned int *)priv = 1;\n"
	  " return *(const int *)lp - *(const int *)rp; }\n"
	  "int main(void) {\n"
	  " int array[] = { 9, 2, 5 };\n"
	  " unsigned int called = 0;\n"
	  " qsort_r(array, 3, sizeof(int), cmp, &called);\n"
	  " return called && array[0] == 2 && array[1] == 5 && array[2] == 9 ? 0 : 1;\n"
	  "}\n" },
	{ "HAVE_STRUCT_TIMESPEC",
	  "struct timespec declaration",
	  "DEFINES_FUNC", NULL, NULL,
	  "#include <time.h>\n"
	  "static void func(void) {\n"
	  " struct timespec ts;\n"
	  " ts.tv_sec = ts.tv_nsec = 1;\n"
	  "}\n" },
	{ "HAVE_SECTION_START_STOP",
	  "__attribute__((section)) and __start/__stop",
	  "DEFINES_FUNC", NULL, NULL,
	  "static void *__attribute__((__section__(\"mysec\"))) p = &p;\n"
	  "static int func(void) {\n"
	  " extern void *__start_mysec[], *__stop_mysec[];\n"
	  " return __stop_mysec - __start_mysec;\n"
	  "}\n" },
	{ "HAVE_STACK_GROWS_UPWARDS", "stack grows upwards",
	  "DEFINES_EVERYTHING|EXECUTE", NULL, NULL,
	  "#include <stddef.h>\n"
	  "static ptrdiff_t nest(const void *base, unsigned int i)\n"
	  "{\n"
	  " if (i == 0)\n"
	  " return (const char *)&i - (const char *)base;\n"
	  " return nest(base, i-1);\n"
	  "}\n"
	  "int main(int argc, char *argv[]) {\n"
	  " (void)argv;\n"
	  " return (nest(&argc, argc) > 0) ? 0 : 1;\n"
	  "}\n" },
	{ "HAVE_STATEMENT_EXPR", "statement expression support",
	  "INSIDE_MAIN", NULL, NULL,
	  "return ({ int x = argc; x == argc ? 0 : 1; });" },
	{ "HAVE_SYS_FILIO_H", "<sys/filio.h>",
	  "OUTSIDE_MAIN", NULL, NULL, /* Solaris needs this for FIONREAD */
	  "#include <sys/filio.h>\n" },
	{ "HAVE_SYS_TERMIOS_H", "<sys/termios.h>",
	  "OUTSIDE_MAIN", NULL, NULL,
	  "#include <sys/termios.h>\n" },
	{ "HAVE_SYS_UNISTD_H", "<sys/unistd.h>",
	  "OUTSIDE_MAIN", NULL, NULL,
	  "#include <sys/unistd.h>\n" },
	{ "HAVE_TYPEOF", "__typeof__ support",
	  "INSIDE_MAIN", NULL, NULL,
	  "__typeof__(argc) i; i = argc; return i == argc ? 0 : 1;" },
	{ "HAVE_UNALIGNED_ACCESS", "unaligned access to int",
	  "DEFINES_EVERYTHING|EXECUTE", NULL, NULL,
	  "#include <string.h>\n"
	  "int main(int argc, char *argv[]) {\n"
	  " (void)argc;\n"
	  " char pad[sizeof(int *) * 1];\n"
	  " strncpy(pad, argv[0], sizeof(pad));\n"
	  " int *x = (int *)pad, *y = (int *)(pad + 1);\n"
	  " return *x == *y;\n"
	  "}\n" },
	{ "HAVE_UTIME", "utime() declaration",
	  "DEFINES_FUNC", NULL, NULL,
	  "#include <sys/types.h>\n"
	  "#include <utime.h>\n"
	  "static int func(const char *filename) {\n"
	  " struct utimbuf times = { 0 };\n"
	  " return utime(filename, &times);\n"
	  "}" },
	{ "HAVE_WARN_UNUSED_RESULT", "__attribute__((warn_unused_result))",
	  "DEFINES_FUNC", NULL, NULL,
	  "#include <sys/types.h>\n"
	  "#include <utime.h>\n"
	  "static __attribute__((warn_unused_result)) int func(int i) {\n"
	  " return i + 1;\n"
	  "}" },
	{ "HAVE_OPENMP", "#pragma omp and -fopenmp support",
	  "INSIDE_MAIN|EXECUTE|MAY_NOT_COMPILE", NULL, NULL,
	  "int i;\n"
	  "#pragma omp parallel for\n"
	  "for(i = 0; i < 0; i++) {};\n"
	  "return 0;\n",
	  "-Werror -fopenmp" },
	{ "HAVE_VALGRIND_MEMCHECK_H", "<valgrind/memcheck.h>",
	  "OUTSIDE_MAIN", NULL, NULL,
	  "#include <valgrind/memcheck.h>\n" },
	{ "HAVE_UCONTEXT", "working <ucontext.h",
	  "DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE", NULL, NULL,
	  "#include <ucontext.h>\n"
	  "static int x = 0;\n"
	  "static char stack[2048];\n"
	  "static ucontext_t a, b;\n"
	  "static void fn(void) {\n"
	  " x |= 2;\n"
	  " setcontext(&b);\n"
	  " x |= 4;\n"
	  "}\n"
	  "int main(void) {\n"
	  " x |= 1;\n"
	  " getcontext(&a);\n"
	  " a.uc_stack.ss_sp = stack;\n"
	  " a.uc_stack.ss_size = sizeof(stack);\n"
	  " makecontext(&a, fn, 0);\n"
	  " swapcontext(&b, &a);\n"
	  " return (x == 3) ? 0 : 1;\n"
	  "}\n" },
	{ "HAVE_POINTER_SAFE_MAKECONTEXT",
	  "passing pointers via makecontext()",
	  "DEFINES_EVERYTHING|EXECUTE|MAY_NOT_COMPILE", "HAVE_UCONTEXT", NULL,
	  "#include <stddef.h>\n"
	  "#include <ucontext.h>\n"
	  "static int worked = 0;\n"
	  "static char stack[1024];\n"
	  "static ucontext_t a, b;\n"
	  "static void fn(void *p, void *q) {\n"
	  " void *cp = &worked;\n"
	  " void *cq = (void *)(~((ptrdiff_t)cp));\n"
	  " if ((p == cp) && (q == cq))\n"
	  " worked = 1;\n"
	  " setcontext(&b);\n"
	  "}\n"
	  "int main(void) {\n"
	  " void *ap = &worked;\n"
	  " void *aq = (void *)(~((ptrdiff_t)ap));\n"
	  " getcontext(&a);\n"
	  " a.uc_stack.ss_sp = stack;\n"
	  " a.uc_stack.ss_size = sizeof(stack);\n"
	  " makecontext(&a, (void (*)(void))fn, 2, ap, aq);\n"
	  " swapcontext(&b, &a);\n"
	  " return worked ? 0 : 1;\n"
	  "}\n" },
	{ "HAVE_BUILTIN_CPU_SUPPORTS", "__builtin_cpu_supports()",
	  "DEFINES_FUNC", NULL, NULL,
	  "#include <stdbool.h>\n"
	  "static bool func(void) {\n"
	  " return __builtin_cpu_supports(\"mmx\");\n"
	  "}" },
};

/* err()-alike: prefix progname, print the formatted message plus the
 * saved errno string, then exit with the given status. */
static void c12r_err(int eval, const char *fmt, ...)
{
	int err_errno = errno;
	va_list ap;

	fprintf(stderr, "%s: ", progname);
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
	fprintf(stderr, ": %s\n", strerror(err_errno));
	exit(eval);
}

/* errx()-alike: same as c12r_err but without the errno string. */
static void c12r_errx(int eval, const char *fmt, ...)
{
	va_list ap;

	fprintf(stderr, "%s: ", progname);
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
	fprintf(stderr, "\n");
	exit(eval);
}

/* Print a libtool-style "checking..." line (only in --like-a-libtool mode). */
static void start_test(const char *what, const char *why)
{
	if (like_a_libtool) {
		printf("%s%s... ", what, why);
		fflush(stdout);
	}
}

/* Finish a libtool-style "checking..." line with yes/no. */
static void end_test(bool result)
{
	if (like_a_libtool)
		printf("%s\n", result ?
"yes" : "no");
}

/* Copy the remainder of fsrc to fdst; returns the number of bytes written.
 * Stops early on a short write. */
static size_t fcopy(FILE *fsrc, FILE *fdst)
{
	char buffer[BUFSIZ];
	size_t rsize, wsize;
	size_t copied = 0;

	while ((rsize = fread(buffer, 1, BUFSIZ, fsrc)) > 0) {
		wsize = fwrite(buffer, 1, rsize, fdst);
		copied += wsize;
		if (wsize != rsize)
			break;
	}
	return copied;
}

/* Read an entire stream into a freshly malloc'd, NUL-terminated buffer,
 * doubling the buffer as needed.  Exits on read error.
 * NOTE(review): malloc/realloc results are unchecked here — on allocation
 * failure the following dereference would crash rather than report OOM. */
static char *grab_stream(FILE *file)
{
	size_t max, ret, size = 0;
	char *buffer;

	max = BUFSIZ;
	buffer = malloc(max);
	while ((ret = fread(buffer+size, 1, max - size, file)) == max - size) {
		size += ret;
		buffer = realloc(buffer, max *= 2);
	}
	size += ret;
	if (ferror(file))
		c12r_err(EXIT_TROUBLE_RUNNING, "reading from command");
	buffer[size] = '\0';
	return buffer;
}

/* Run cmd via popen with stderr folded into stdout ("2>&1"); return its
 * combined output (caller frees) and store the pclose status in *exitstatus. */
static char *run(const char *cmd, int *exitstatus)
{
	static const char redir[] = " 2>&1";
	size_t cmdlen;
	char *cmdredir;
	FILE *cmdout;
	char *ret;

	cmdlen = strlen(cmd);
	cmdredir = malloc(cmdlen + sizeof(redir));
	memcpy(cmdredir, cmd, cmdlen);
	memcpy(cmdredir + cmdlen, redir, sizeof(redir));

	cmdout = popen(cmdredir, "r");
	if (!cmdout)
		c12r_err(EXIT_TROUBLE_RUNNING, "popen \"%s\"", cmdredir);

	free(cmdredir);

	ret = grab_stream(cmdout);
	*exitstatus = pclose(cmdout);
	return ret;
}

/* Join argv[1..] with spaces, then append outflag and files, into one
 * malloc'd command-line string (caller frees). */
static char *connect_args(const char *argv[], const char *outflag,
		const char *files)
{
	unsigned int i;
	char *ret;
	size_t len = strlen(outflag) + strlen(files) + 1;

	for (i = 1; argv[i]; i++)
		len += 1 + strlen(argv[i]);

	ret = malloc(len);
	len = 0;
	for (i = 1; argv[i]; i++) {
		strcpy(ret + len, argv[i]);
		len += strlen(argv[i]);
		/* Separate from the next token unless this is the last and
		 * outflag is empty. */
		if (argv[i+1] || *outflag)
			ret[len++] = ' ';
	}
	strcpy(ret + len, outflag);
	len += strlen(outflag);
	strcpy(ret + len, files);
	return ret;
}

/* Look up a test by name in the global tests table; exits if unknown. */
static struct test *find_test(const char *name)
{
	unsigned int i;

	for (i = 0; tests[i].name; i++) {
		if (strcmp(tests[i].name, name) == 0)
			return &tests[i];
	}
	c12r_errx(EXIT_BAD_TEST, "Unknown test %s", name);
	abort();
}

/* Boilerplate wrapped around each test fragment, per its style flags. */
#define PRE_BOILERPLATE "/* Test program generated by configurator. */\n"
#define MAIN_START_BOILERPLATE \
	"int main(int argc, char *argv[]) {\n" \
	" (void)argc;\n" \
	" (void)argv;\n"
#define USE_FUNC_BOILERPLATE "(void)func;\n"
#define MAIN_BODY_BOILERPLATE "return 0;\n"
#define MAIN_END_BOILERPLATE "}\n"

/* Evaluate one test (memoized via test->done/test->answer): first recurse
 * into its space-separated dependencies ('!' negates one), then write the
 * fragment plus boilerplate to INPUT_FILE and compile/run it. */
static bool run_test(const char *cmd, struct test *test)
{
	char *output, *newcmd;
	FILE *outf;
	int status;

	if (test->done)
		return test->answer;

	if (test->depends) {
		size_t len;
		const char *deps = test->depends;
		char *dep;

		/* Space-separated dependencies, could be ! for inverse. */
		while ((len = strcspn(deps, " ")) != 0) {
			bool positive = true;
			if (deps[len]) {
				dep = strdup(deps);
				dep[len] = '\0';
			} else {
				dep = (char *)deps;
			}

			if (dep[0] == '!') {
				/* NOTE(review): dep is advanced past '!' here,
				 * so the free(dep) below frees an interior
				 * pointer when this token was strdup'd (i.e. a
				 * negated dependency that is not the last one)
				 * — undefined behavior; dep also leaks on the
				 * early return below. */
				dep++;
				positive = false;
			}
			if (run_test(cmd, find_test(dep)) != positive) {
				test->answer = false;
				test->done = true;
				return test->answer;
			}
			if (deps[len])
				free(dep);
			deps += len;
			deps += strspn(deps, " ");
		}
	}

	/* "w+" lets the verbose path re-read what was just written. */
	outf = fopen(INPUT_FILE, verbose > 1 ? "w+" : "w");
	if (!outf)
		c12r_err(EXIT_TROUBLE_RUNNING, "creating %s", INPUT_FILE);

	fprintf(outf, "%s", PRE_BOILERPLATE);

	if (strstr(test->style, "INSIDE_MAIN")) {
		fprintf(outf, "%s", MAIN_START_BOILERPLATE);
		fprintf(outf, "%s", test->fragment);
		fprintf(outf, "%s", MAIN_END_BOILERPLATE);
	} else if (strstr(test->style, "OUTSIDE_MAIN")) {
		fprintf(outf, "%s", test->fragment);
		fprintf(outf, "%s", MAIN_START_BOILERPLATE);
		fprintf(outf, "%s", MAIN_BODY_BOILERPLATE);
		fprintf(outf, "%s", MAIN_END_BOILERPLATE);
	} else if (strstr(test->style, "DEFINES_FUNC")) {
		fprintf(outf, "%s", test->fragment);
		fprintf(outf, "%s", MAIN_START_BOILERPLATE);
		fprintf(outf, "%s", USE_FUNC_BOILERPLATE);
		fprintf(outf, "%s", MAIN_BODY_BOILERPLATE);
		fprintf(outf, "%s", MAIN_END_BOILERPLATE);
	} else if (strstr(test->style, "DEFINES_EVERYTHING")) {
		fprintf(outf, "%s", test->fragment);
	} else
		c12r_errx(EXIT_BAD_TEST, "Unknown style for test %s: %s",
			  test->name, test->style);

	if (verbose > 1) {
		fseek(outf, 0, SEEK_SET);
		fcopy(outf, stdout);
	}

	fclose(outf);

	newcmd = strdup(cmd);

	if
(test->flags) { newcmd = realloc(newcmd, strlen(newcmd) + strlen(" ") + strlen(test->flags) + 1); strcat(newcmd, " "); strcat(newcmd, test->flags); if (verbose > 1) printf("Extra flags line: %s", newcmd); } if (test->link) { newcmd = realloc(newcmd, strlen(newcmd) + strlen(" ") + strlen(test->link) + 1); strcat(newcmd, " "); strcat(newcmd, test->link); if (verbose > 1) printf("Extra link line: %s", newcmd); } start_test("checking for ", test->desc); output = run(newcmd, &status); free(newcmd); if (status != 0 || strstr(output, "warning")) { if (verbose) printf("Compile %s for %s, status %i: %s\n", status ? "fail" : "warning", test->name, status, output); if (strstr(test->style, "EXECUTE") && !strstr(test->style, "MAY_NOT_COMPILE")) c12r_errx(EXIT_BAD_TEST, "Test for %s did not compile:\n%s", test->name, output); test->answer = false; free(output); } else { /* Compile succeeded. */ free(output); /* We run INSIDE_MAIN tests for sanity checking. */ if (strstr(test->style, "EXECUTE") || strstr(test->style, "INSIDE_MAIN")) { output = run("." 
DIR_SEP OUTPUT_FILE, &status); if (!strstr(test->style, "EXECUTE") && status != 0) c12r_errx(EXIT_BAD_TEST, "Test for %s failed with %i:\n%s", test->name, status, output); if (verbose && status) printf("%s exited %i\n", test->name, status); free(output); } test->answer = (status == 0); } test->done = true; end_test(test->answer); if (test->answer && test->overrides) { struct test *override = find_test(test->overrides); override->done = true; override->answer = true; } return test->answer; } static char *any_field(char **fieldname) { char buf[1000]; for (;;) { char *p, *eq; if (!fgets(buf, sizeof(buf), stdin)) return NULL; p = buf; /* Ignore whitespace, lines starting with # */ while (*p == ' ' || *p == '\t') p++; if (*p == '#' || *p == '\n') continue; eq = strchr(p, '='); if (!eq) c12r_errx(EXIT_BAD_INPUT, "no = in line: %s", p); *eq = '\0'; *fieldname = strdup(p); p = eq + 1; if (strlen(p) && p[strlen(p)-1] == '\n') p[strlen(p)-1] = '\0'; return strdup(p); } } static char *read_field(const char *name, bool compulsory) { char *fieldname, *value; value = any_field(&fieldname); if (!value) { if (!compulsory) return NULL; c12r_errx(EXIT_BAD_INPUT, "Could not read field %s", name); } if (strcmp(fieldname, name) != 0) c12r_errx(EXIT_BAD_INPUT, "Expected field %s not %s", name, fieldname); return value; } /* Test descriptions from stdin: * Lines starting with # or whitespace-only are ignored. * * First three non-ignored lines must be: * var=<varname> * desc=<description-for-autotools-style> * style=OUTSIDE_MAIN DEFINES_FUNC INSIDE_MAIN DEFINES_EVERYTHING EXECUTE MAY_NOT_COMPILE * * Followed by optional lines: * depends=<space-separated-testnames, ! 
to invert> * link=<extra args for link line> * flags=<extra args for compile line> * overrides=<testname-to-force> * * Finally a code line, either: * code=<oneline> OR * code= * <lines of code> * <end-comment> * * And <end-comment> looks like this next comment: */ /*END*/ static bool read_test(struct test *test) { char *field, *value; char buf[1000]; memset(test, 0, sizeof(*test)); test->name = read_field("var", false); if (!test->name) return false; test->desc = read_field("desc", true); test->style = read_field("style", true); /* Read any optional fields. */ while ((value = any_field(&field)) != NULL) { if (strcmp(field, "depends") == 0) test->depends = value; else if (strcmp(field, "link") == 0) test->link = value; else if (strcmp(field, "flags") == 0) test->flags = value; else if (strcmp(field, "overrides") == 0) test->overrides = value; else if (strcmp(field, "code") == 0) break; else c12r_errx(EXIT_BAD_INPUT, "Unknown field %s in %s", field, test->name); } if (!value) c12r_errx(EXIT_BAD_INPUT, "Missing code in %s", test->name); if (strlen(value) == 0) { /* Multiline program, read to END comment */ while (fgets(buf, sizeof(buf), stdin) != 0) { size_t n; if (strncmp(buf, "/*END*/", 7) == 0) break; n = strlen(value); value = realloc(value, n + strlen(buf) + 1); strcpy(value + n, buf); n += strlen(buf); } } test->fragment = value; return true; } static void read_tests(size_t num_tests) { while (read_test(tests + num_tests)) { num_tests++; tests = realloc(tests, (num_tests + 1) * sizeof(tests[0])); tests[num_tests].name = NULL; } } int main(int argc, const char *argv[]) { char *cmd; unsigned int i; const char *default_args[] = { "", DEFAULT_COMPILER, DEFAULT_FLAGS, NULL }; const char *outflag = DEFAULT_OUTPUT_EXE_FLAG; const char *configurator_cc = NULL; const char *orig_cc; const char *varfile = NULL; const char *headerfile = NULL; bool extra_tests = false; FILE *outf; if (argc > 0) progname = argv[0]; while (argc > 1) { if (strcmp(argv[1], "--help") == 0) { 
printf("Usage: configurator [-v] [--var-file=<filename>] [-O<outflag>] [--configurator-cc=<compiler-for-tests>] [--autotools-style] [--extra-tests] [<compiler> <flags>...]\n" " <compiler> <flags> will have \"<outflag> <outfile> <infile.c>\" appended\n" "Default: %s %s %s\n", DEFAULT_COMPILER, DEFAULT_FLAGS, DEFAULT_OUTPUT_EXE_FLAG); exit(0); } if (strncmp(argv[1], "-O", 2) == 0) { argc--; argv++; outflag = argv[1] + 2; if (!*outflag) { fprintf(stderr, "%s: option requires an argument -- O\n", argv[0]); exit(EXIT_BAD_USAGE); } } else if (strcmp(argv[1], "-v") == 0) { argc--; argv++; verbose++; } else if (strcmp(argv[1], "-vv") == 0) { argc--; argv++; verbose += 2; } else if (strncmp(argv[1], "--configurator-cc=", 18) == 0) { configurator_cc = argv[1] + 18; argc--; argv++; } else if (strncmp(argv[1], "--var-file=", 11) == 0) { varfile = argv[1] + 11; argc--; argv++; } else if (strcmp(argv[1], "--autotools-style") == 0) { like_a_libtool = true; argc--; argv++; } else if (strncmp(argv[1], "--header-file=", 14) == 0) { headerfile = argv[1] + 14; argc--; argv++; } else if (strcmp(argv[1], "--extra-tests") == 0) { extra_tests = true; argc--; argv++; } else if (strcmp(argv[1], "--") == 0) { break; } else if (argv[1][0] == '-') { c12r_errx(EXIT_BAD_USAGE, "Unknown option %s", argv[1]); } else { break; } } if (argc == 1) argv = default_args; /* Copy with NULL entry at end */ tests = calloc(sizeof(base_tests)/sizeof(base_tests[0]) + 1, sizeof(base_tests[0])); memcpy(tests, base_tests, sizeof(base_tests)); if (extra_tests) read_tests(sizeof(base_tests)/sizeof(base_tests[0])); orig_cc = argv[1]; if (configurator_cc) argv[1] = configurator_cc; cmd = connect_args(argv, outflag, OUTPUT_FILE " " INPUT_FILE); if (like_a_libtool) { start_test("Making autoconf users comfortable", ""); sleep(1); end_test(1); } for (i = 0; tests[i].name; i++) run_test(cmd, &tests[i]); free(cmd); remove(OUTPUT_FILE); remove(INPUT_FILE); if (varfile) { FILE *vars; if (strcmp(varfile, "-") == 0) vars = 
stdout; else { start_test("Writing variables to ", varfile); vars = fopen(varfile, "a"); if (!vars) c12r_err(EXIT_TROUBLE_RUNNING, "Could not open %s", varfile); } for (i = 0; tests[i].name; i++) fprintf(vars, "%s=%u\n", tests[i].name, tests[i].answer); if (vars != stdout) { if (fclose(vars) != 0) c12r_err(EXIT_TROUBLE_RUNNING, "Closing %s", varfile); end_test(1); } } if (headerfile) { start_test("Writing header to ", headerfile); outf = fopen(headerfile, "w"); if (!outf) c12r_err(EXIT_TROUBLE_RUNNING, "Could not open %s", headerfile); } else outf = stdout; fprintf(outf, "/* Generated by CCAN configurator */\n" "#ifndef CCAN_CONFIG_H\n" "#define CCAN_CONFIG_H\n"); fprintf(outf, "#ifndef _GNU_SOURCE\n"); fprintf(outf, "#define _GNU_SOURCE /* Always use GNU extensions. */\n"); fprintf(outf, "#endif\n"); fprintf(outf, "#define CCAN_COMPILER \"%s\"\n", orig_cc); cmd = connect_args(argv + 1, "", ""); fprintf(outf, "#define CCAN_CFLAGS \"%s\"\n", cmd); free(cmd); fprintf(outf, "#define CCAN_OUTPUT_EXE_CFLAG \"%s\"\n\n", outflag); /* This one implies "#include <ccan/..." works, eg. for tdb2.h */ fprintf(outf, "#define HAVE_CCAN 1\n"); for (i = 0; tests[i].name; i++) fprintf(outf, "#define %s %u\n", tests[i].name, tests[i].answer); fprintf(outf, "#endif /* CCAN_CONFIG_H */\n"); if (headerfile) { if (fclose(outf) != 0) c12r_err(EXIT_TROUBLE_RUNNING, "Closing %s", headerfile); end_test(1); } return 0; }
GB_unop__abs_fp64_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__abs_fp64_fp64) // op(A') function: GB (_unop_tran__abs_fp64_fp64) // C type: double // A type: double // cast: double cij = aij // unaryop: cij = fabs (aij) #define GB_ATYPE \ double #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = fabs (x) ; // casting #define GB_CAST(z, aij) \ double z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = aij ; \ Cx [pC] = fabs (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__abs_fp64_fp64) ( double *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < 
anz ; p++)
        {
            // z = |Ax [p]| ; Cx and Ax may be aliased (element-wise, in place safe)
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = fabs (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = fabs (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__abs_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel is shared via textual inclusion; it expands the
    // GB_CAST_OP macro defined above for this type/operator combination
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
memdbg.c
/* * This software was written by Jim Fougeron jfoug AT cox dot net * in 2013. No copyright is claimed, and the software is hereby * placed in the public domain. In case this attempt to disclaim * copyright and place the software in the public domain is deemed * null and void, then the software is Copyright (c) 2013 Jim Fougeron * and it is hereby released to the general public under the following * terms: * * This software may be modified, redistributed, and used for any * purpose, in source and binary forms, with or without modification. */ /* * memdbg.c * Memory management debugging (at runtime) * * memdbg.c contains routines detect, and report memory * problems, such as double frees, passing bad pointers to * free, most buffer overwrites. Also, tracking of non-freed * data, showing memory leaks, can also be shown. * * Compilation Options (provided from Makefile CFLAGS) * * MEMDBG_ON If this is NOT defined, then memdbg will * get out of your way, and most normal memory functions * will be called with no overhead at all. * * MEMDBG_EXTRA_CHECKS If defined, then we do not 'really' free * the memory. We simply set the fence posts to deleted status, * and proceed. This allows us finding double frees, and other * usages of smashes. NOTE, when this is set, and there are a * LOT of memory alloc/frees, then at some point the calls to * free will fail. If this happens, there is code in place that * frees the oldest freed block (really frees it), and does that * over and over again, until either we have no freed blocks left * OR the app is able to allocate this new buffer. In this situation * we do lose track of those older freed blocks of memory, but it * allows the application to continue forward, even though this * debugging code exausted all memory. 
*/

#if defined (MEMDBG_ON)

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "common.h"
#define __MEMDBG__
#include "memdbg.h"
#include "pseudo_intrinsics.h"
#include "jumbo.h"

#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * This function ALWAYS must be defined. It is (HAS) to be used if there is code which
 * has some library code that allocates memory which was NOT handled by one of the allocation
 * functions within this wrapper class, BUT which really needs to be freed. Thus the user code
 * really needs to have straight access to the libc function free(). We give them that access,
 * but they have to call this function, and not the 'free' function, which would get wrapped
 * and call into MEMDBG_free(p, filename, fileline).
 */
/* Direct pass-through to libc free(), bypassing the memdbg wrappers. */
void MEMDBG_libc_free(void *p) {
	free(p);
}

/* Direct pass-through to libc malloc(), bypassing the memdbg wrappers. */
void *MEMDBG_libc_alloc(size_t size) {
	return malloc(size);
}

/* Direct pass-through to libc calloc(), bypassing the memdbg wrappers. */
void *MEMDBG_libc_calloc(size_t count, size_t size) {
	return calloc(count, size);
}

#ifdef _MSC_VER
/* MSVC: route the raw allocator calls below through the 16-byte aligned
 * variants (note: this remaps malloc/realloc/free for the rest of this
 * translation unit only, after the libc pass-throughs above). */
#define malloc(a) _aligned_malloc(a,16)
#define realloc(a,b) _aligned_realloc(a,b,16)
#define free(a) _aligned_free(a)
#endif

/*
 * these fence posts (first fence post guarding underflow), are:
 *  MEMFPOST == allocated memory
 *  MEMFPOSTt == allocated 'tiny' memory (allocated with mem_alloc_tiny() from memory.c)
 *  MEMFPOSTd == freed (deleted) memory.  Will only be set this way, and stored in the
 *               freed_memlist, if MEMDBG_EXTRA_CHECKS is set.
 */
const char *cpMEMFPOST  = "\xa5\xa5\xa5\xa5";
const char *cpMEMFPOSTd = "\x5a\x5a\x5a\x5a";
const char *cpMEMFPOSTt = "\xa5\x55\xa5\xa5";

/*
 * this structure will contain data that is butted RIGHT against
 * the tail end of the allocated block. We put a fence post here,
 * and thus can detect buffer overwrite.
 */
typedef struct _hdr2 {
	/* we use a unsigned char, and do not care about alignment. We ALWAYS treat this var with
	 * a memcpy, memcmp, etc, so that this works the same on aligned required CPU or non-aligned required.
*/ unsigned char mdbg_fpst[4]; } MEMDBG_HDR2; /* * This structure is carefully crafted to keep it in proper alignment. * We later will put the HDR2 RIGHT against the head end and tail end * of the buffer. This allows us to catch 1 byte over or underflow. */ typedef struct _hdr { struct _hdr *mdbg_next; struct _hdr *mdbg_prev; /* points to just 'right' before allocated memory, for underflow catching */ MEMDBG_HDR2 *mdbg_hdr1; /* points to just 'right' after allocated memory, for overflow catching */ MEMDBG_HDR2 *mdbg_hdr2; const char *mdbg_file; ARCH_WORD_32 mdbg_line; ARCH_WORD_32 mdbg_cnt; ARCH_WORD_32 mdbg_size; } MEMDBG_HDR; static size_t mem_size = 0; static size_t max_mem_size = 0; static size_t mem_sizet = 0; static size_t max_mem_sizet = 0; static MEMDBG_HDR *memlist = NULL; static unsigned long alloc_cnt = 0; #ifdef MEMDBG_EXTRA_CHECKS static MEMDBG_HDR *freed_memlist = NULL; static size_t freed_mem_size = 0; static unsigned long freed_cnt = 0; #endif #define RESERVE_SZ (sizeof(MEMDBG_HDR) + sizeof(MEMDBG_HDR*) + 4 + 16) #define RESERVE_SZ_AL(a) (sizeof(MEMDBG_HDR) + sizeof(MEMDBG_HDR*) + 4 + 16 + a*2) #define CLIENT_2_HDR_PTR(a) ((MEMDBG_HDR *) (((char *) ((ARCH_WORD)(((char *)a)-4-sizeof(MEMDBG_HDR*)) & ~0xF)))) #define CLIENT_2_HDR(a) ((MEMDBG_HDR *) (((char *) ((ARCH_WORD)(((char *)a)-4-sizeof(MEMDBG_HDR*)) & ~0xF))))->mdbg_next #define HDR_2_CLIENT(a) ((void *) (((char*)((MEMDBG_HDR *) (a->mdbg_hdr1))) + 4)) static void mem_fence_post_err_fp (void *, const char *, int, char *fp, int line); static void mem_fence_post_err_ne_fp (void *, const char *, int, char *fp, int line); static void mem_fence_post_errd_fp (void *, const char *, int, char *fp, int line); static void mem_fence_post_errd_ne_fp(void *, const char *, int, char *fp, int line); #define mem_fence_post_err(a,b,c) mem_fence_post_err_fp(a,b,c,__FILE__,__LINE__) #define mem_fence_post_err_ne(a,b,c) mem_fence_post_err_ne_fp(a,b,c,__FILE__,__LINE__) #define mem_fence_post_errd(a,b,c) 
mem_fence_post_errd_fp(a,b,c,__FILE__,__LINE__) #define mem_fence_post_errd_ne(a,b,c) mem_fence_post_errd_ne_fp(a,b,c,__FILE__,__LINE__) #ifdef MEMDBG_EXTRA_CHECKS /* NOTE, which this function is called, the memory (client memory) gets SMASHED */ /* If this starts causing the program to crash, then it is likely that the client */ /* code is using dangling pointers by accessing the memory after a free or realloc */ static void MEMDBG_FREEDLIST_add(MEMDBG_HDR *); #endif /* * these are now macros. This makes it easier for doing omp critical * sections. It is illegal to branch into or out of a CRITICAL block */ #define MEMDBG_LIST_delete(p) \ if (p->mdbg_next != NULL) \ p->mdbg_next->mdbg_prev = p->mdbg_prev; \ if (p->mdbg_prev != NULL) \ p->mdbg_prev->mdbg_next = p->mdbg_next; \ else \ memlist = p->mdbg_next #define MEMDBG_LIST_add(p) \ p->mdbg_next = memlist; \ p->mdbg_prev = NULL; \ if (memlist != NULL) \ memlist->mdbg_prev = p; \ memlist = p /* * This function can be called directly by client code. * it lists how much memory is currently allocated. * a good check before program exit, is are there 0 * bytes allocated. */ size_t MemDbg_Used(int show_freed) { #ifdef MEMDBG_EXTRA_CHECKS if (show_freed) return freed_mem_size; #endif return mem_size+mem_sizet; } /* * This function can be called directly by client code. * It writes out all non-freed memory. 
*/ void MemDbg_Display(FILE *fp) { MEMDBG_HDR *p; int idx; if (!(mem_size+mem_sizet) && !getenv("MEMDBG")) return; fprintf(fp, "\n------------------------------\n"); fprintf(fp, "MEMDBG: allocation information (display):\n"); fprintf(fp, " current normal alloc mem (leaks)"LLu" max normal mem allocated: "LLu"\n", (unsigned long long)mem_size, (unsigned long long)max_mem_size); fprintf(fp, " current 'tiny' alloc mem (leaks)"LLu" max tiny mem allocated: "LLu"\n", (unsigned long long)mem_sizet, (unsigned long long)max_mem_sizet); #ifdef MEMDBG_EXTRA_CHECKS fprintf(fp, " Freed mem size: "LLu" (freed cnt: %lu)", (unsigned long long)freed_mem_size, freed_cnt); #endif if (!(mem_size+mem_sizet)) return; fprintf(fp, "\n"); fprintf(fp, "Index : alloc# : Size : File(Line) [first 20 bytes, or size of bytes]\n"); idx = 0; p = memlist; while (p != NULL) { int bfreed = 0, bbad=0; fprintf(fp, "%-5d : %-6d : %6llu : %s(%u)", idx++, p->mdbg_cnt, (unsigned long long)p->mdbg_size, p->mdbg_file, p->mdbg_line); if (memcmp(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOST, 4) && memcmp(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOSTt, 4)) { bbad=1; if (!memcmp(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOSTd, 4)) { fprintf(fp, " INVALID ( freed already? 
)"); bfreed = 1; } else fprintf(fp, " INVALID ( buffer underflow )"); } if (memcmp(p->mdbg_hdr2->mdbg_fpst, cpMEMFPOST, 4)) { if (bfreed && !memcmp(p->mdbg_hdr2->mdbg_fpst, cpMEMFPOSTd, 4)) { bbad=1; fprintf(fp, " YES Data was freed."); } else { unsigned i; char *cp = ((char*)p)+RESERVE_SZ; fprintf(fp, " INVALID (buffer overflow) tail of block: "); cp = (char*)p->mdbg_hdr2->mdbg_fpst; cp -= 16; for (i = 0; i < 20; ++i) { if(*cp < ' ' || *cp > '~') fprintf(fp, "."); else fprintf(fp, "%c", *cp); ++cp; } fprintf(fp, " and the head of the block was: "); } } if (!bbad) { unsigned i; char *cp = ((char*)p)+RESERVE_SZ; fprintf(fp, " "); for (i = 0; i < 20 && i < p->mdbg_size; ++i) { if(*cp < ' ' || *cp > '~') fprintf(fp, "."); else fprintf(fp, "%c", *cp); ++cp; } } fprintf(fp, "\n"); p = p->mdbg_next; } } /* * This function can be called directly by client code. * It will walk the list of memory, 'looking' for errors. */ void MemDbg_Validate(int level) { MemDbg_Validate_msg2(level, NULL, 0); } void MemDbg_Validate_msg(int level, const char *pMsg) { MemDbg_Validate_msg2(level, pMsg, 0); } void MemDbg_Validate_msg2(int level, const char *pMsg, int bShowExMessages) { /* Level 0 we ALWAYS walk the alloc list, looking for over/underwrite, and validate a few other items. */ MEMDBG_HDR *p = memlist; int error = 0; int cnt=0; #ifdef MEMDBG_EXTRA_CHECKS unsigned char *cp; unsigned i; #endif if (bShowExMessages) { if (pMsg) fprintf(stderr, "%s\n", pMsg); fprintf(stderr, "MemDbg_Validate level 0 checking"); } while (p) { if (memcmp(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOST, 4) && memcmp(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOSTt, 4)) { ++cnt; if (cnt < 100) { if (!memcmp(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOSTd, 4)) fprintf(stderr, "\nDeleted memory still in chain\n"); else { fprintf(stderr, "\nMemory buffer underwrite found! 
Will try to list what file/line allocated the buffer\n"); mem_fence_post_err_ne(p, p->mdbg_file, p->mdbg_line); } } error = 1; } if (memcmp(p->mdbg_hdr2->mdbg_fpst, cpMEMFPOST, 4)) { ++cnt; if (cnt < 100) { if (!memcmp(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOSTd, 4)) { } else { fprintf(stderr, "\nMemory buffer overwrite found! Will try to list what file/line allocated the buffer\n"); mem_fence_post_err_ne(p, p->mdbg_file, p->mdbg_line); } } error = 1; } // Loop detect code { MEMDBG_HDR volatile *p2 = p->mdbg_next; while (p2) { if (p2 == p || p2 == p2->mdbg_next) { fprintf (stderr, "Error, internal loop in the memdbg linked list, aborting\n"); break; } p2 = p2->mdbg_next; } } if (cnt > 1000) break; p = p->mdbg_next; } if (error) { fprintf(stderr, "\nExiting due to the error detected\n"); if (cnt > 100) fprintf(stderr, "There were %d total errors, only first 100 shown\n", cnt); exit(1); } if (bShowExMessages) fprintf(stderr, " Passed\n"); if (level == MEMDBG_VALIDATE_MIN) return; #ifdef MEMDBG_EXTRA_CHECKS // Ok, we have a list of all freed items. We will do work on this. p = freed_memlist; if (!p) return; cnt = 0; if (bShowExMessages) fprintf(stderr, "MemDbg_Validate level 1 checking"); while (p) { if (memcmp(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOSTd, 4)) { ++cnt; if (cnt < 100) fprintf(stderr, "\nFreed Memory buffer underwrite found! Will try to list what file/line allocated the buffer\n"); mem_fence_post_err_ne(p, p->mdbg_file, p->mdbg_line); error = 1; } if (memcmp(p->mdbg_hdr2->mdbg_fpst, cpMEMFPOSTd, 4)) { ++cnt; if (cnt < 100) fprintf(stderr, "\nFreed Memory buffer overwrite found! 
Will try to list what file/line allocated the buffer\n"); mem_fence_post_err_ne(p, p->mdbg_file, p->mdbg_line); error = 1; } // Loop detect code { MEMDBG_HDR *p2 = p->mdbg_next; while (p2) { if (p2 == p || p2 == p2->mdbg_next) { fprintf (stderr, "Error, internal loop in the memdbg linked list, aborting\n"); break; } p2 = p2->mdbg_next; } } if (cnt > 1000) break; p = p->mdbg_next; } if (error) { fprintf(stderr, "\nExiting due to the error detected\n"); if (cnt > 100) fprintf(stderr, "There were %d total errors, only first 100 shown\n", cnt); exit(1); } if (bShowExMessages) fprintf(stderr, " Passed\n"); if (level == MEMDBG_VALIDATE_DEEP) return; p = freed_memlist; cnt = 0; if (bShowExMessages) fprintf(stderr, "MemDbg_Validate level 2 checking"); while (p) { cp = (unsigned char*)HDR_2_CLIENT(p); if (p->mdbg_size != p->mdbg_hdr2->mdbg_fpst - cp) { fprintf(stderr, "\nFreed Memory buffer underwrite found (size var busted)! Will try to list what file/line allocated the buffer\n"); mem_fence_post_err_ne(p, p->mdbg_file, p->mdbg_line); error = 1; } else { for (i = 0; i < p->mdbg_size; ++i) { // in 'deeper' mode, we only look at first 8 bytes. If these are not overwritten, it is less likely that the buffer // has been written to. It 'can' be written to later on, and if we use deepest, we will look at the FULL buffer. if (i == 8) break; if (*cp++ != 0xCD) { ++cnt; if (cnt < 100) fprintf(stderr, "\nFreed Memory buffer modification found! 
Will try to list what file/line allocated the buffer\n"); mem_fence_post_err_ne(p, p->mdbg_file, p->mdbg_line); error = 1; break; } } } // Loop detect code { MEMDBG_HDR *p2 = p->mdbg_next; while (p2) { if (p2 == p || p2 == p2->mdbg_next) { fprintf (stderr, "Error, internal loop in the memdbg linked list, aborting\n"); break; } p2 = p2->mdbg_next; } } if (cnt > 1000) break; p = p->mdbg_next; } if (error) { fprintf(stderr, "\nExiting due to the error detected\n"); if (cnt > 100) fprintf(stderr, "There were %d total errors, only first 100 shown\n", cnt); exit(1); } if (bShowExMessages) fprintf(stderr, " Passed\n"); if (level == MEMDBG_VALIDATE_DEEPER) return; p = freed_memlist; cnt = 0; if (bShowExMessages) fprintf(stderr, "MemDbg_Validate level 3 checking"); while (p) { cp = (unsigned char*)HDR_2_CLIENT(p); // in this deepest mode, we look at the ENTIRE buffer. In deeper, we looked at first 8, so here, we just start from 8 and look forward. for (i = 8; i < p->mdbg_size; ++i) { if (*cp++ != 0xCD) { ++cnt; if (cnt < 100) fprintf(stderr, "\nFreed Memory buffer modification found! Will try to list what file/line allocated the buffer\n"); mem_fence_post_err_ne(p, p->mdbg_file, p->mdbg_line); error = 1; break; } } // Loop detect code { MEMDBG_HDR *p2 = p->mdbg_next; while (p2) { if (p2 == p || p2 == p2->mdbg_next) { fprintf (stderr, "Error, internal loop in the memdbg linked list, aborting\n"); break; } p2 = p2->mdbg_next; } } if (cnt > 1000) break; p = p->mdbg_next; } if (error) { fprintf(stderr, "\nExiting due to the error detected\n"); if (cnt > 100) fprintf(stderr, "There were %d total errors, only first 100 shown\n", cnt); exit(1); } if (bShowExMessages) fprintf(stderr, " Passed\n"); #endif } #ifdef MEMDBG_EXTRA_CHECKS /* Ok, if we are out of memory, due to keeping too much freed memory around, then free * up oldest blocks until we can malloc this block. 
the rar format is a bad actor, * as could be many of the 'non-hash' (old zip for sure), as these have to decrypt * a full file, to be assured the password is correct. */ static void release_oldest_freed_block() { MEMDBG_HDR *p = freed_memlist, *pp; if (!p) return; #ifdef _OPENMP #pragma omp critical (memdbg_crit) #endif { p = freed_memlist; while (p->mdbg_next) p = p->mdbg_next; // now unlink it. freed_mem_size -= p->mdbg_size; --freed_cnt; p->mdbg_prev->mdbg_next = NULL; pp = p->mdbg_prev; } // now free it free(p); if (freed_cnt > 10) { // free one more. #ifdef _OPENMP #pragma omp critical (memdbg_crit) { // NOTE, we can not be assured that pp was still pointing // to the last item in the list. We have to look AGAIN, // within a critical section. pp = freed_memlist; while (pp->mdbg_next) pp = pp->mdbg_next; #endif freed_mem_size -= pp->mdbg_size; --freed_cnt; pp->mdbg_prev->mdbg_next = NULL; #ifdef _OPENMP } #endif // now free it free(pp); } } #endif void * MEMDBG_calloc(size_t count, size_t size, char *file, int line) { char *p; size *= count; if ( ((signed long long)mem_size) < 0) fprintf(stderr, "MEMDBG_calloc "LLd" %s:%d mem:"LLd"\n", (unsigned long long)size, file, line, (unsigned long long)mem_size); p = (char*)MEMDBG_alloc(size,file,line); memset(p, 0, size); return p; } /* * MEMDBG_alloc * Allocate a memory block. makes a protected call to malloc(), allocating * extra data, and adding data to all required structures. */ void * MEMDBG_alloc(size_t size, char *file, int line) { MEMDBG_HDR *p, *p2; if ( ((signed long long)mem_size) < 0) fprintf(stderr, "MEMDBG_alloc "LLd" %s:%d mem:"LLd"\n", (unsigned long long)size, file, line, (unsigned long long)mem_size); // TODO: we have to compute proper size here. 
p = (MEMDBG_HDR*)malloc(RESERVE_SZ + size + 4); #ifdef MEMDBG_EXTRA_CHECKS #ifdef _OPENMP { int i = 0; do { #pragma omp critical (memdbg_crit) { if (!p && freed_mem_size > (RESERVE_SZ + size + 4) && !p && freed_cnt) i = 1; } if (i) { release_oldest_freed_block(); p = (MEMDBG_HDR*)malloc(RESERVE_SZ + size + 4); } } while (i && !p); } #else /* this is the 'right' block, but hard to do with the restrictions of no branching out that omp critical places on us */ if (!p && freed_mem_size > (RESERVE_SZ + size + 4)) { while (!p && freed_cnt) { release_oldest_freed_block(); p = (MEMDBG_HDR*)malloc(RESERVE_SZ + size + 4); } } #endif #endif if (!p) { if ( ((signed long long)mem_size) < 0) fprintf(stderr, "MEMDBG_alloc (end) "LLd" %s:%d mem:"LLd"\n", (unsigned long long)size, file, line, (unsigned long long)mem_size); return NULL; } p->mdbg_hdr1 = (MEMDBG_HDR2*)(((char*)p)+RESERVE_SZ-4); p2 = CLIENT_2_HDR_PTR(p->mdbg_hdr1+4); memcpy(p2, &p, sizeof(p)); memcpy(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOST, 4); p->mdbg_size = size; p->mdbg_file = file; p->mdbg_line = line; p->mdbg_hdr2 = (MEMDBG_HDR2*)(((char*)p->mdbg_hdr1)+4 + size); memcpy(p->mdbg_hdr2, cpMEMFPOST, 4); #ifdef _OPENMP #pragma omp critical (memdbg_crit) #endif { p->mdbg_cnt = ++alloc_cnt; mem_size += size; if (mem_size > max_mem_size) max_mem_size = mem_size; MEMDBG_LIST_add(p); } if ( ((signed long long)mem_size) < 0) fprintf(stderr, "MEMDBG_alloc (end) "LLd" %s:%d mem:"LLd"\n", (unsigned long long)size, file, line, (unsigned long long)mem_size); return HDR_2_CLIENT(p); } /* * MEMDBG_alloc_align * Allocate a memory block. makes a protected call to malloc(), allocating * extra data, and adding data to all required structures. 
*/ void * MEMDBG_alloc_align(size_t size, int align, char *file, int line) { MEMDBG_HDR *p, *p2; char *p3; if ( ((signed long long)mem_size) < 0) fprintf(stderr, "MEMDBG_alloc_align "LLd" %s:%d mem:"LLd"\n", (unsigned long long)size, file, line, (unsigned long long)mem_size); p = (MEMDBG_HDR*)malloc(RESERVE_SZ_AL(align) + size + 4); if (!p) { if ( ((signed long long)mem_size) < 0) fprintf(stderr, "MEMDBG_alloc_align (end) "LLd" %s:%d mem:"LLd"\n", (unsigned long long)size, file, line, (unsigned long long)mem_size); return NULL; } p3 = ((char*)p)+RESERVE_SZ+align-1-4; p3 -= ((size_t)p3)%align; if ( (((size_t)p3)/align) % align == 0) p3 += align; p->mdbg_hdr1 = (MEMDBG_HDR2*)(p3-4); p2 = CLIENT_2_HDR_PTR(p3); memcpy(p2, &p, sizeof(p)); memcpy(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOST, 4); p->mdbg_size = size; p->mdbg_file = file; p->mdbg_line = line; p->mdbg_hdr2 = (MEMDBG_HDR2*)(p3 + size); memcpy(p->mdbg_hdr2, cpMEMFPOST, 4); #ifdef _OPENMP #pragma omp critical (memdbg_crit) #endif { p->mdbg_cnt = ++alloc_cnt; mem_size += size; if (mem_size > max_mem_size) max_mem_size = mem_size; MEMDBG_LIST_add(p); } if ( ((signed long long)mem_size) < 0) fprintf(stderr, "MEMDBG_alloc_align (end) "LLd" %s:%d mem:"LLd"\n", (unsigned long long)size, file, line, (unsigned long long)mem_size); return HDR_2_CLIENT(p); } /* * MEMDBG_realloc * Reallocate a memory block makes a protected call to realloc(), allocating * extra data, and adding data to all required structures. * *** realloc is a NASTY function. The code here has taken a few turns, and * has reduced this to simply allocating a new block (or freeing if size is 0) * and copying the 'known' amount of data to the new block, and then freeing * the prior block. If the realloc is larger than before, then then undefined * data at end of the block is set to 0xcd. NOTE, this code was changed in * this manner due to not being able to find the bug in the original re-alloc * and bug #2062 in the rar format. 
*/ void * MEMDBG_realloc(void *ptr, size_t size, char *file, int line) { MEMDBG_HDR *p; unsigned char *v; if ( ((signed long long)mem_size) < 0) fprintf(stderr, "MEMDBG_realloc("LLd") %s:%d mem:"LLd"\n", (unsigned long long)size, file, line, (unsigned long long)mem_size); /* if ptr is null, this function works just like alloc, so simply use alloc */ if (!ptr) return MEMDBG_alloc(size, file, line); if (!size) { MEM_FREE(ptr); return NULL; } v = (unsigned char*)MEMDBG_alloc(size, file, line); p = CLIENT_2_HDR(ptr); if (size > p->mdbg_size) { memcpy(v, ((unsigned char*)(p->mdbg_hdr1))+4, p->mdbg_size); memset(v+p->mdbg_size, 0xcd, size-p->mdbg_size); } else memcpy(v, ((unsigned char*)(p->mdbg_hdr1))+4, size); MEMDBG_free(ptr,file,line); return v; } /* * MEMDBG_strdup * Duplicate a ASCIIZ string in memory, with a protected call to strdup, * allocating extra data, and adding data to all required structures. */ char *MEMDBG_strdup(const char *str, char *file, int line) { char * s; if ( ((signed long long)mem_size) < 0) fprintf(stderr, "MEMDBG_strdup(%ld) %s:%d mem:"LLd"\n", (long)strlen(str), file, line, (unsigned long long)mem_size); s = (char*)MEMDBG_alloc(strlen(str)+1, file, line); if (s != NULL) strcpy(s, str); return s; } /* * Return the count 'id' count of an allocated block. This will match the * value shown on a leak report, and may help to line up exactly which * block is leaking */ unsigned MEMDBG_get_cnt (const void *ptr, const char **err_msg) { MEMDBG_HDR *p = CLIENT_2_HDR(ptr); *err_msg = "valid memdbg block"; if (memcmp(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOSTt, 4)) *err_msg = "INVALID memdbg memory (possible underflow), mdbg_cnt returned may not be correct!"; return (unsigned)p->mdbg_cnt; } /* * Return the size of the allocated buffer. The size here is the size of data * that the user would see. This is not the full memdbg buffer size. This * would be the size reported in a leak report. 
*/
size_t MEMDBG_get_size(const void *ptr, const char **err_msg)
{
	MEMDBG_HDR *p = CLIENT_2_HDR(ptr);

	*err_msg = "valid memdbg block";
	/* NOTE(review): validated against the 'tiny' signature only — confirm intended */
	if (memcmp(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOSTt, 4))
		*err_msg = "INVALID memdbg memory (possible underflow), mdbg_size returned may not be correct!";
	return p->mdbg_size;
}
/*
 * Return the file name and line number of the caller code that allocated
 * this buffer (as recorded at allocation time).
 */
const char *MEMDBG_get_file(const void *ptr, const char **err_msg)
{
	MEMDBG_HDR *p = CLIENT_2_HDR(ptr);

	*err_msg = "valid memdbg block";
	if (memcmp(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOSTt, 4))
		*err_msg = "INVALID memdbg memory (possible underflow), mdbg_file returned may not be correct!";
	return p->mdbg_file;
}
unsigned MEMDBG_get_line(const void *ptr, const char **err_msg)
{
	MEMDBG_HDR *p = CLIENT_2_HDR(ptr);

	*err_msg = "valid memdbg block";
	if (memcmp(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOSTt, 4))
		*err_msg = "INVALID memdbg memory (possible underflow), mdbg_line returned may not be correct!";
	return (unsigned)p->mdbg_line;
}
/*
 * MEMDBG_free
 * Free a memory block, checking a lot of data, which would have been
 * set at allocation time.
 * err codes: 0 = ok, 1 = fence post corrupted (under/overflow),
 *            2 = both fence posts carry the 'deleted' signature (double free).
 */
void MEMDBG_free(const void *ptr, char *file, int line)
{
	MEMDBG_HDR *p;
	int err=0, i;
#ifdef _OPENMP
#pragma omp critical (memdbg_crit)
#endif
	{
		p = CLIENT_2_HDR(ptr);
		/* is this correctly allocated memory: both fence posts must carry
		   the normal allocation signature */
		for (i = 0; i < 4; ++i)
			if ( ((char*)(p->mdbg_hdr1->mdbg_fpst))[i] != cpMEMFPOST[i] || ((char*)(p->mdbg_hdr2->mdbg_fpst))[i] != cpMEMFPOST[i])
				break;
		if (i == 4)
			/* yes, correctly allocated memory */
			mem_size -= p->mdbg_size;
		else {
			/* it could be a 'tiny' allocated block: front post carries the
			   tiny signature, rear post the normal one */
			for (i = 0; i < 4; ++i)
				if ( ((char*)(p->mdbg_hdr1->mdbg_fpst))[i] != cpMEMFPOSTt[i] || ((char*)(p->mdbg_hdr2->mdbg_fpst))[i] != cpMEMFPOST[i])
					break;
			if (i == 4)
				/* yes, and valid tiny block */
				mem_sizet -= p->mdbg_size;
			else {
				/* some error, i.e. bad block */
				err = 1;
				/* both posts carrying the 'deleted' signature means this
				   block was already freed once */
				for (i = 0; i < 4; ++i) {
					if (((char*)(p->mdbg_hdr1->mdbg_fpst))[i] != cpMEMFPOSTd[i] || ((char*)(p->mdbg_hdr2->mdbg_fpst))[i] != cpMEMFPOSTd[i]) {
						break;
					}
				}
				if (i == 4)
					err = 2; /* double free */
			}
		}
		if (!err) {
			MEMDBG_LIST_delete(p);
			/* overwrite both fence posts with the 'deleted' signature so a
			   later double free is detected */
			for (i = 0; i < 4; ++i) {
				((char*)(p->mdbg_hdr2->mdbg_fpst))[i] = cpMEMFPOSTd[i];
				((char*)(p->mdbg_hdr1->mdbg_fpst))[i] = cpMEMFPOSTd[i];
			}
		}
	}
	/* report outside the critical section; these helpers may exit() */
	if (err) {
		if (err == 2)
			mem_fence_post_errd(p, file, line);
		else
			mem_fence_post_err(p, file, line);
		return;
	}
#ifndef MEMDBG_EXTRA_CHECKS
	free(p);
#else
	/* in extra-checks mode the block is quarantined, not returned to libc */
	MEMDBG_FREEDLIST_add(p);
#endif
	if ( ((signed long long)mem_size) < 0)
		fprintf(stderr, "MEMDBG_free (end) %s:%d  mem:"LLd"\n", file, line, (unsigned long long)mem_size);
}
#ifdef MEMDBG_EXTRA_CHECKS
/* NOTE, there is no LIST_delete() for the freed list.  We only put
 * data onto this list, it is kept for full runtime.  We may want to
 * later add some way for the app to clean it up, but for now, we
 * add it, and keep it all.
 */
static void MEMDBG_FREEDLIST_add(MEMDBG_HDR *p)
{
	unsigned char *cp;
	size_t i;
#ifdef _OPENMP
#pragma omp critical (memdbg_crit)
#endif
	{
		freed_mem_size += p->mdbg_size;
		++freed_cnt;
		/* push onto the head of the freed list */
		p->mdbg_next = freed_memlist;
		p->mdbg_prev = NULL;
		if (freed_memlist != NULL)
			freed_memlist->mdbg_prev = p;
		freed_memlist = p;
		/* Ok, now 'DEADBEEF' the original data buffer, so a use-after-free
		   read returns an obvious 0xCD pattern */
		cp = (unsigned char*)HDR_2_CLIENT(p);
		for (i = 0; i < p->mdbg_size; ++i)
			*cp++ = 0xCD;
	}
}
#endif
/*
 *these functions allow taking a memory snapshot,
 * calling some code, then validating that memory
 * is the same after the code.  This will help
 * catch memory leaks and other such problems, within
 * formats and such.  Simply get the snapshot,
 * run self tests (or other), when it exits, check
 * the snapshot to make sure nothing leaked.
*/
/* Capture the current allocation counters; blocks allocated after this
 * snapshot (mdbg_cnt > h.alloc_cnt) are treated as potential leaks. */
MEMDBG_HANDLE MEMDBG_getSnapshot(int id)
{
	MEMDBG_HANDLE h;
	h.id = id;
	h.mem_size = mem_size;
	h.alloc_cnt = alloc_cnt;
	return h;
}
void MEMDBG_checkSnapshot(MEMDBG_HANDLE h)
{
	/* call the real function, but list do not exit on leak */
	MEMDBG_checkSnapshot_possible_exit_on_error(h,0);
}
/* NOT needed to be thread safe, must be called from single threaded code */
void MEMDBG_checkSnapshot_possible_exit_on_error(MEMDBG_HANDLE h, int exit_on_any_leaks)
{
	/* ok, we do several things.
	 * 1 walk allocation change, showing any memory 'newer' than in the handle (not tiny alloc stuff).
	 * 2 validate allocation chain (and free chain if in extra mode).
	 * if there were any errors in #2, then exit.
	 * if any memory leaks (#1) and exit_on_any_leaks true, we also exit.
	 * NOTE(review): in the code below exit(1) fires on any leak regardless
	 * of exit_on_any_leaks — confirm whether the parameter is still meant
	 * to gate the exit.
	 */
	MEMDBG_HDR *p = memlist;
	int leak = 0;

	/* first step, walk allocation list, looking for leaks */
	while (p) {
		if (p->mdbg_cnt > h.alloc_cnt) {
			/* only blocks still carrying the normal signature count;
			   'tiny' tagged blocks are deliberately skipped */
			if (!memcmp(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOST, 4)) {
				leak = 1;
				fprintf(stderr, "Mem leak: "LLu" bytes, alloc_num %d, file %s, line %d\n", (unsigned long long)p->mdbg_size, p->mdbg_cnt, p->mdbg_file, p->mdbg_line);
			}
			//else fprintf(stderr, "Mem : "LLu" bytes, alloc_num %d, file %s, line %d\n", (unsigned long long)p->mdbg_size, p->mdbg_cnt, p->mdbg_file, p->mdbg_line);
		}
		p = p->mdbg_next;
	}
	/* step 2: validate chain integrity; exits itself on corruption */
	MemDbg_Validate_msg2(3, "MEMDBG_checkSnapshot", 0);
	if (leak) {
		exit(1);
	}
}
/* MUST be thread safe.
 * Re-tag a block as belonging to the 'tiny' allocator pool, moving its
 * size from the normal accounting into the tiny accounting. */
void MEMDBG_tag_mem_from_alloc_tiny(void *ptr)
{
	MEMDBG_HDR *p;

	p = CLIENT_2_HDR(ptr);
#ifdef _OPENMP
#pragma omp critical (memdbg_crit)
#endif
	{
		if (!memcmp(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOST, 4)) {
			/* swap the front fence signature to the 'tiny' variant */
			memcpy(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOSTt, 4);
			mem_size -= p->mdbg_size;
			mem_sizet += p->mdbg_size;
			if (mem_sizet > max_mem_sizet)
				max_mem_sizet = mem_sizet;
		}
	}
}
/* report a fence-post corruption, dump the full MemDbg state, and exit */
static void mem_fence_post_err_fp(void *p, const char *file, int line, char *fp, int line2)
{
	mem_fence_post_err_ne_fp(p, file, line,fp,line2);
	MemDbg_Display(stderr);
	exit(1);
}
/* report a double free, dump the full MemDbg state, and exit */
static void mem_fence_post_errd_fp(void *p, const char *file, int line, char *fp, int line2)
{
	mem_fence_post_errd_ne_fp(p, file, line,fp,line2);
	MemDbg_Display(stderr);
	exit(1);
}
/* non-exiting fence-post report: prints the first 16 bytes at p both as
 * printable ASCII (non-printables shown as '.') and as hex.
 * buf usage: 16 chars + 1 space + 16*3 hex chars + NUL = 66 < 120, safe.
 * NOTE(review): the 'fp' parameter is unused here — confirm intended. */
static void mem_fence_post_err_ne_fp(void *p, const char *file, int line, char *fp, int line2)
{
	char buf[120], *cp=buf, *ip;
	int i;
	ip = (char*) p;
	for (i = 0; i < 16; ++i) {
		if (ip[i] >= ' ' && ip[i] <= '~')
			*cp++ = ip[i];
		else
			*cp++ = '.';
	}
	*cp++ = ' ';
	for (i = 0; i < 16; ++i)
		cp += sprintf(cp, " %02x", (unsigned char)ip[i]);
	fprintf(stderr, "Memory fence_post error - %p - %s(%d) (%d)\n\tdata:  (%s)\n", p, file, line, line2, buf);
}
/* non-exiting double-free report */
static void mem_fence_post_errd_ne_fp(void *p, const char *file, int line, char *fp, int line2)
{
	fprintf(stderr, "Memory fence_postd error, memory double freed - %p - %s(%d) (%d)\n", p, file, line, line2);
}
#endif /* MEMDBG_ON */
aix_ssha_fmt_plug.c
/* AIX ssha cracker patch for JtR. Hacked together during April of 2013 by Dhiru * Kholia <dhiru at openwall.com> and magnum. * * Thanks to atom (of hashcat project) and philsmd for discovering and * publishing the details of various AIX hashing algorithms. * * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and * magnum, and * it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_aixssha1; extern struct fmt_main fmt_aixssha256; extern struct fmt_main fmt_aixssha512; #elif FMT_REGISTERS_H john_register_one(&fmt_aixssha1); john_register_one(&fmt_aixssha256); john_register_one(&fmt_aixssha512); #else #include <string.h> #include <assert.h> #include <errno.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #ifdef _OPENMP static int omp_t = 1; #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 8 // Tuned on i7 w/HT for SHA-256 #endif #endif #include "pbkdf2_hmac_sha1.h" #include "pbkdf2_hmac_sha256.h" #include "pbkdf2_hmac_sha512.h" #include "memdbg.h" #define FORMAT_LABEL_SHA1 "aix-ssha1" #define FORMAT_LABEL_SHA256 "aix-ssha256" #define FORMAT_LABEL_SHA512 "aix-ssha512" #define FORMAT_NAME_SHA1 "AIX LPA {ssha1}" #define FORMAT_NAME_SHA256 "AIX LPA {ssha256}" #define FORMAT_NAME_SHA512 "AIX LPA {ssha512}" #define FORMAT_TAG1 "{ssha1}" #define FORMAT_TAG256 "{ssha256}" #define FORMAT_TAG512 "{ssha512}" #define FORMAT_TAG1_LEN (sizeof(FORMAT_TAG1)-1) #define FORMAT_TAG256_LEN (sizeof(FORMAT_TAG256)-1) #define FORMAT_TAG512_LEN (sizeof(FORMAT_TAG512)-1) #ifdef SIMD_COEF_32 #define ALGORITHM_NAME_SHA1 "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME #else #define ALGORITHM_NAME_SHA1 "PBKDF2-SHA1 32/" ARCH_BITS_STR #endif #ifdef SIMD_COEF_32 #define ALGORITHM_NAME_SHA256 "PBKDF2-SHA256 " SHA256_ALGORITHM_NAME #else #define 
ALGORITHM_NAME_SHA256 "PBKDF2-SHA256 32/" ARCH_BITS_STR #endif #ifdef SIMD_COEF_64 #define ALGORITHM_NAME_SHA512 "PBKDF2-SHA512 " SHA512_ALGORITHM_NAME #else #define ALGORITHM_NAME_SHA512 "PBKDF2-SHA512 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 /* actual max in AIX is 255 */ #define BINARY_SIZE 20 #define BINARY_ALIGN 4 #define CMP_SIZE BINARY_SIZE - 2 #define LARGEST_BINARY_SIZE 64 #define MAX_SALT_SIZE 24 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN 4 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static struct fmt_tests aixssha_tests1[] = { {"{ssha1}06$T6numGi8BRLzTYnF$AdXq1t6baevg9/cu5QBBk8Xg.se", "whatdoyouwantfornothing$$$$$$"}, {"{ssha1}06$6cZ2YrFYwVQPAVNb$1agAljwERjlin9RxFxzKl.E0.sJ", "gentoo=>meh"}, /* Full 125 byte PW (longest JtR will handle). generated by pass_gen.pl */ {"{ssha1}06$uOYCzfO5dt0EdnwG$CK81ljQknzEAcfwg97cocEwz.gv", "12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345"}, {NULL} }; static struct fmt_tests aixssha_tests256[] = { {"{ssha256}06$YPhynOx/iJQaJOeV$EXQbOSYZftEo3k01uoanAbA7jEKZRUU9LCCs/tyU.wG", "verylongbutnotverystrongpassword"}, {"{ssha256}06$5lsi4pETf/0p/12k$xACBftDMh30RqgrM5Sppl.Txgho41u0oPoD21E1b.QT", "I<3JtR"}, /* Full 125 byte PW (longest JtR will handle). 
generated by pass_gen.pl */ {"{ssha256}06$qcXPTOQzDAqZuiHc$pS/1HC4uI5jIERNerX8.Zd0G/gDepZuqR7S5WHEn.AW", "12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345"}, {NULL} }; static struct fmt_tests aixssha_tests512[] = { {"{ssha512}06$y2/.O4drNJd3ecgJ$DhNk3sS28lkIo7XZaXWSkFOIdP2Zsd9DIKdYDSuSU5tsnl29Q7xTc3f64eAGMpcMJCVp/SXZ4Xgx3jlHVIOr..", "solarisalwaysbusyitseems"}, {"{ssha512}06$Dz/dDr1qa8JJm0UB$DFNu2y8US18fW37ht8WRiwhSeOqAMJTJ6mLDW03D/SeQpdI50GJMYb1fBog5/ZU3oM9qsSr9w6u22.OjjufV..", "idontbelievethatyourpasswordislongerthanthisone"}, /* hash posted on john-users */ {"{ssha512}06$................$0egLaF88SUk6GAFIMN/vTwa/IYB.KlubYmjiaWvmQ975vHvgC3rf0I6ZYzgyUiQftS8qs7ULLQpRLrA3LA....", "44"}, {"{ssha512}06$aXayEJGxA02Bl4d2$TWfWx34oD.UjrS/Qtco6Ij2XPY1CPYJfdk3CcxEjnMZvQw2p5obHYH7SI2wxcJgaS9.S9Hz948R.GdGwsvR...", "test"}, /* http://www.ibmsystemsmag.com/aix/administrator/security/password_hash/?page=2 <== partially corrupted hash? */ {"{ssha512}06$otYx2eSXx.OkEY4F$No5ZvSfhYuB1MSkBhhcKJIjS0.q//awdkcZwF9/TXi3EnL6QeronmS0jCc3P2aEV9WLi5arzN1YjVwkx8bng..", "colorado"}, /* Full 125 byte PW (longest JtR will handle). 
generated by pass_gen.pl */ {"{ssha512}06$w6THk2lJbkqW0rXv$yH6VWp3X9ad2l8nhYi22lrrmWskXvEU.PONbWUAZHrjhgQjdU83jtRnYmpRZIJzTVC3RFcoqpbtd63n/UdlS..", "12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)]; static struct custom_salt { int iterations; int type; unsigned char salt[MAX_SALT_SIZE + 1]; } *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc_align(sizeof(*saved_key), self->params.max_keys_per_crypt, MEM_ALIGN_WORD); crypt_out = mem_calloc_align(sizeof(*crypt_out), self->params.max_keys_per_crypt, MEM_ALIGN_WORD); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static inline int valid_common(char *ciphertext, struct fmt_main *self, int b64len, char *sig, int siglen) { char *p = ciphertext; int len; if (!strncmp(p, sig, siglen)) p += siglen; else return 0; len = strspn(p, DIGITCHARS); /* iterations, exactly two digits */ if (len != 2 || atoi(p) > 31) /* actual range is 4..31 */ return 0; p += 2; if (*p++ != '$') return 0; len = strspn(p, BASE64_CRYPT); /* salt, 8..24 base64 chars */ if (len < 8 || len > MAX_SALT_SIZE) return 0; p += len; if (*p++ != '$') return 0; len = strspn(p, BASE64_CRYPT); /* hash */ if (len != b64len) return 0; if (p[len] != 0) /* nothing more allowed */ return 0; return 1; } static int valid_sha1(char *ciphertext, struct fmt_main *self) { return valid_common(ciphertext, self, 27, FORMAT_TAG1, FORMAT_TAG1_LEN); } static int valid_sha256(char *ciphertext, struct fmt_main *self) { return valid_common(ciphertext, self, 43, FORMAT_TAG256, FORMAT_TAG256_LEN); } static int valid_sha512(char *ciphertext, struct fmt_main *self) { return 
valid_common(ciphertext, self, 86, FORMAT_TAG512, FORMAT_TAG512_LEN); } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; static struct custom_salt cs; keeptr = ctcopy; memset(&cs, 0, sizeof(cs)); if ((strncmp(ciphertext, FORMAT_TAG1, FORMAT_TAG1_LEN) == 0)) { cs.type = 1; ctcopy += FORMAT_TAG1_LEN; } else if ((strncmp(ciphertext, FORMAT_TAG256, FORMAT_TAG256_LEN) == 0)) { cs.type = 256; ctcopy += FORMAT_TAG256_LEN; } else { cs.type = 512; ctcopy += FORMAT_TAG512_LEN; } p = strtokm(ctcopy, "$"); cs.iterations = 1 << atoi(p); p = strtokm(NULL, "$"); strncpy((char*)cs.salt, p, 17); MEM_FREE(keeptr); return (void *)&cs; } #define TO_BINARY(b1, b2, b3) { \ value = (uint32_t)atoi64[ARCH_INDEX(pos[0])] | \ ((uint32_t)atoi64[ARCH_INDEX(pos[1])] << 6) | \ ((uint32_t)atoi64[ARCH_INDEX(pos[2])] << 12) | \ ((uint32_t)atoi64[ARCH_INDEX(pos[3])] << 18); \ pos += 4; \ out.c[b1] = value >> 16; \ out.c[b2] = value >> 8; \ out.c[b3] = value; } static void *get_binary(char *ciphertext) { static union { unsigned char c[LARGEST_BINARY_SIZE+3]; uint64_t dummy; } out; uint32_t value; char *pos = strrchr(ciphertext, '$') + 1; int len = strlen(pos); int i; memset(&out, 0, sizeof(out)); for (i = 0; i < len/4*3; i += 3) TO_BINARY(i, i + 1, i + 2); if (len % 3 == 1) { value = (uint32_t)atoi64[ARCH_INDEX(pos[0])] | ((uint32_t)atoi64[ARCH_INDEX(pos[1])] << 6); out.c[i] = value; } else if (len % 3 == 2) { /* sha-1, sha-256 */ value = (uint32_t)atoi64[ARCH_INDEX(pos[0])] | ((uint32_t)atoi64[ARCH_INDEX(pos[1])] << 6) | ((uint32_t)atoi64[ARCH_INDEX(pos[2])] << 12); out.c[i++] = value >> 8; out.c[i++] = value; } return (void *)out.c; } #define COMMON_GET_HASH_VAR crypt_out #include "common-get-hash.h" static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int inc=1, index = 0; switch(cur_salt->type) { case 1: #ifdef SSE_GROUP_SZ_SHA1 
inc = SSE_GROUP_SZ_SHA1; #endif break; case 256: #ifdef SSE_GROUP_SZ_SHA256 inc = SSE_GROUP_SZ_SHA256; #endif break; default: #ifdef SSE_GROUP_SZ_SHA512 inc = SSE_GROUP_SZ_SHA512; #endif break; } #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index += inc) { int j = index; while (j < index + inc) { if (cur_salt->type == 1) { #ifdef SSE_GROUP_SZ_SHA1 int lens[SSE_GROUP_SZ_SHA1], i; unsigned char *pin[SSE_GROUP_SZ_SHA1]; union { uint32_t *pout[SSE_GROUP_SZ_SHA1]; unsigned char *poutc; } x; for (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) { lens[i] = strlen(saved_key[j]); pin[i] = (unsigned char*)(saved_key[j]); x.pout[i] = crypt_out[j]; ++j; } pbkdf2_sha1_sse((const unsigned char **)pin, lens, cur_salt->salt, strlen((char*)cur_salt->salt), cur_salt->iterations, &(x.poutc), BINARY_SIZE, 0); #else pbkdf2_sha1((const unsigned char*)(saved_key[j]), strlen(saved_key[j]), cur_salt->salt, strlen((char*)cur_salt->salt), cur_salt->iterations, (unsigned char*)crypt_out[j], BINARY_SIZE, 0); ++j; #endif } else if (cur_salt->type == 256) { #ifdef SSE_GROUP_SZ_SHA256 int lens[SSE_GROUP_SZ_SHA256], i; unsigned char *pin[SSE_GROUP_SZ_SHA256]; union { uint32_t *pout[SSE_GROUP_SZ_SHA256]; unsigned char *poutc; } x; for (i = 0; i < SSE_GROUP_SZ_SHA256; ++i) { lens[i] = strlen(saved_key[j]); pin[i] = (unsigned char*)saved_key[j]; x.pout[i] = crypt_out[j]; ++j; } pbkdf2_sha256_sse((const unsigned char **)pin, lens, cur_salt->salt, strlen((char*)cur_salt->salt), cur_salt->iterations, &(x.poutc), BINARY_SIZE, 0); #else pbkdf2_sha256((const unsigned char*)(saved_key[j]), strlen(saved_key[j]), cur_salt->salt, strlen((char*)cur_salt->salt), cur_salt->iterations, (unsigned char*)crypt_out[j], BINARY_SIZE, 0); ++j; #endif } else { #ifdef SSE_GROUP_SZ_SHA512 int lens[SSE_GROUP_SZ_SHA512], i; unsigned char *pin[SSE_GROUP_SZ_SHA512]; union { uint32_t *pout[SSE_GROUP_SZ_SHA512]; unsigned char *poutc; } x; for (i = 0; i < SSE_GROUP_SZ_SHA512; ++i) { lens[i] = 
strlen(saved_key[j]); pin[i] = (unsigned char*)saved_key[j]; x.pout[i] = crypt_out[j]; ++j; } pbkdf2_sha512_sse((const unsigned char **)pin, lens, cur_salt->salt, strlen((char*)cur_salt->salt), cur_salt->iterations, &(x.poutc), BINARY_SIZE, 0); #else pbkdf2_sha512((const unsigned char*)(saved_key[j]), strlen(saved_key[j]), cur_salt->salt, strlen((char*)cur_salt->salt), cur_salt->iterations, (unsigned char*)crypt_out[j], BINARY_SIZE, 0); ++j; #endif } } } return count; } static int cmp_all(void *binary, int count) { int index = 0; //dump_stuff_msg("\nbinary ", binary, CMP_SIZE); for (; index < count; index++) { //dump_stuff_msg("crypt_out", crypt_out[index], CMP_SIZE); if (!memcmp(binary, crypt_out[index], CMP_SIZE-2)) return 1; } return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], CMP_SIZE-2); } static int cmp_exact(char *source, int index) { return 1; } static void aixssha_set_key(char *key, int index) { strnzcpy(saved_key[index], key, sizeof(*saved_key)); } static char *get_key(int index) { return saved_key[index]; } /* report iteration count as tunable cost value */ static unsigned int aixssha_iteration_count(void *salt) { struct custom_salt *my_salt; my_salt = salt; return (unsigned int) my_salt->iterations; } struct fmt_main fmt_aixssha1 = { { FORMAT_LABEL_SHA1, FORMAT_NAME_SHA1, ALGORITHM_NAME_SHA1, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, #ifdef SIMD_COEF_32 SSE_GROUP_SZ_SHA1, SSE_GROUP_SZ_SHA1, #else MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #endif FMT_CASE | FMT_8_BIT | FMT_OMP, { "iteration count", }, { FORMAT_TAG1 }, aixssha_tests1 }, { init, done, fmt_default_reset, fmt_default_prepare, valid_sha1, fmt_default_split, get_binary, get_salt, { aixssha_iteration_count, }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, 
fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, aixssha_set_key, get_key, fmt_default_clear_keys, crypt_all, { #define COMMON_GET_HASH_LINK #include "common-get-hash.h" }, cmp_all, cmp_one, cmp_exact } }; struct fmt_main fmt_aixssha256 = { { FORMAT_LABEL_SHA256, FORMAT_NAME_SHA256, ALGORITHM_NAME_SHA256, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, #ifdef SIMD_COEF_32 SSE_GROUP_SZ_SHA256, SSE_GROUP_SZ_SHA256, #else MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #endif FMT_CASE | FMT_8_BIT | FMT_OMP, { "iteration count", }, { FORMAT_TAG256 }, aixssha_tests256 }, { init, done, fmt_default_reset, fmt_default_prepare, valid_sha256, fmt_default_split, get_binary, get_salt, { aixssha_iteration_count, }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, aixssha_set_key, get_key, fmt_default_clear_keys, crypt_all, { #define COMMON_GET_HASH_LINK #include "common-get-hash.h" }, cmp_all, cmp_one, cmp_exact } }; struct fmt_main fmt_aixssha512 = { { FORMAT_LABEL_SHA512, FORMAT_NAME_SHA512, ALGORITHM_NAME_SHA512, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, #ifdef SIMD_COEF_64 SSE_GROUP_SZ_SHA512, SSE_GROUP_SZ_SHA512, #else MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #endif FMT_CASE | FMT_8_BIT | FMT_OMP, { "iteration count", }, { FORMAT_TAG512 }, aixssha_tests512 }, { init, done, fmt_default_reset, fmt_default_prepare, valid_sha512, fmt_default_split, get_binary, get_salt, { aixssha_iteration_count, }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, 
fmt_default_salt_hash, NULL, set_salt, aixssha_set_key, get_key, fmt_default_clear_keys, crypt_all, { #define COMMON_GET_HASH_LINK #include "common-get-hash.h" }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
omp-matvect-mult.c
/***************************************************************************** Example : omp-matvect-mult.c Objective : Write an OpenMP program for Matrix vector multiplication This example demonstrates the use of PARALLEL FOR Directive and Private clause .It uses loop work-sharing construct i.e. distribution of columns of matrix Input : a) Number of threads b) Size of matrix (rows and columns) c) Vector size Output : Each thread computes the assigned row vector multiplication and master thread prints the final output and time taken . Created :Aug 2011 Author : RarchK *********************************************************************************/ #include <stdio.h> #include<sys/time.h> #include <omp.h> #include <stdlib.h> /* Main Program */ main(int argc,char **argv) { int NoofRows, NoofCols, Vectorsize, i, j,Noofthreads; /*float **Matrix, *Vector, *Result, *Checkoutput;*/ double **Matrix, *Vector, *Result, *Checkoutput; struct timeval TimeValue_Start; struct timezone TimeZone_Start; struct timeval TimeValue_Final; struct timezone TimeZone_Final; long time_start, time_end; double time_overhead; printf("\n\t\t---------------------------------------------------------------------------"); printf("\n\t\t Email : RarchK"); printf("\n\t\t---------------------------------------------------------------------------"); printf("\n\t\t Objective : Matrix-Vector Computations (Floating Point Operations)\n "); printf("\n\t\t Matrix into Vector Multiplication using "); printf("\n\t\t OpenMP one PARALLEL for directive and Private Clause;"); printf("\n\t\t..........................................................................\n"); /* Checking for command line arguments */ if( argc != 5 ){ printf("\t\t Very Few Arguments\n "); printf("\t\t Syntax : exec <Threads> <NoOfRows> <NoofColumns> <vector-size>\n"); exit(-1); } Noofthreads=atoi(argv[1]); if ((Noofthreads!=1) && (Noofthreads!=2) && (Noofthreads!=4) && (Noofthreads!=8) && (Noofthreads!= 16) ) { printf("\n Number of threads 
should be 1,2,4,8 or 16 for the execution of program. \n\n"); exit(-1); } NoofRows=atoi(argv[2]); NoofCols=atoi(argv[3]); Vectorsize=atoi(argv[4]); /* printf("\n\t\t Read the matrix size no. of rows and columns and vectorsize\n"); scanf("%d%d%d", &NoofRows, &NoofCols, &Vectorsize);*/ if (NoofRows <= 0 || NoofCols <= 0 || Vectorsize <= 0) { printf("\n\t\t The Matrix and Vectorsize should be of positive sign\n"); exit(1); } /* Checking For Matrix Vector Computation Necessary Condition */ if (NoofCols != Vectorsize) { printf("\n\t\t Matrix Vector computation cannot be possible \n"); exit(1); } /* Dynamic Memory Allocation And Initialization Of Matrix Elements */ Matrix = (double **) malloc(sizeof(double) * NoofRows); for (i = 0; i < NoofRows; i++) { Matrix[i] = (double *) malloc(sizeof(double) * NoofCols); for (j = 0; j < NoofCols; j++) Matrix[i][j] = i + j; } /* Dynamic Memory Allocation */ Vector = (double *) malloc(sizeof(double) * Vectorsize); /* vector Initialization */ for (i = 0; i < Vectorsize; i++) Vector[i] = i; printf("\n"); printf("\n\t\t Threads : %d ",Noofthreads); printf("\n\t\t Matrix Size : %d X %d ",NoofRows,NoofCols); printf("\n\t\t Vector Size : %d\n",Vectorsize); /* Dynamic Memory Allocation */ Result = (double *) malloc(sizeof(double) * NoofRows); Checkoutput = (double *) malloc(sizeof(double) * NoofRows); for (i = 0; i < NoofRows; i = i + 1) { Result[i]=0.0; Checkoutput[i]=0.0; } gettimeofday(&TimeValue_Start, &TimeZone_Start); omp_set_num_threads(Noofthreads); /* OpenMP Parallel for Directive : Fork a team of threads giving them their own copies of variables */ #pragma omp parallel for private(j) for (i = 0; i < NoofRows; i = i + 1) { for (j = 0; j < NoofCols; j = j + 1) { Result[i] = Result[i] + Matrix[i][j] * Vector[j]; } }/* All thread join Master thread */ gettimeofday(&TimeValue_Final, &TimeZone_Final); /* Serial Computation */ for (i = 0; i < NoofRows; i = i + 1) for (j = 0; j < NoofCols; j = j + 1) Checkoutput[i] = Checkoutput[i] + 
Matrix[i][j] * Vector[j]; /* Checking with the serial calculation */ for (i = 0; i < NoofRows; i = i + 1) if (Checkoutput[i] == Result[i]) continue; else { printf("\n\t\t There is a difference from Serial and Parallel Computation \n"); exit(1); } time_end = TimeValue_Final.tv_sec * 1000000 + TimeValue_Final.tv_usec; time_start = TimeValue_Start.tv_sec * 1000000 + TimeValue_Start.tv_usec; time_overhead = (time_end - time_start)/1000000.0; printf("\n\t\t Matrix into Vector Multiplication using OpenMP Parallel for directive ......Done \n"); /*printf("\n\t\t Calculated PI : \t%1.15lf \n\t\t Error : \t%1.16lf\n", totalsum, fabs(totalsum - PI));*/ printf("\n\t\t Time in Seconds (T) : %lf",time_overhead); printf("\n\n\t\t ( T represents the Time taken for computation )"); printf("\n\t\t..........................................................................\n"); /* Freeing The Memory Allocations */ free(Vector); free(Result); free(Matrix); free(Checkoutput); }
toimg.c
/* Copyright 2013-2018 The Regents of the University of California. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: * 2013, 2015 Martin Uecker <uecker@eecs.berkeley.edu> * 2015, 2018 Jon Tamir <jtamir@eecs.berkeley.edu> */ #include <stdlib.h> #include <assert.h> #include <stdio.h> #include <stdint.h> #include <strings.h> #include <complex.h> #include <stdbool.h> #include <math.h> #include "num/multind.h" #include "num/init.h" #include "num/flpmath.h" #include "misc/misc.h" #include "misc/debug.h" #include "misc/mmio.h" #include "misc/png.h" #include "misc/dicom.h" #include "misc/opts.h" #ifndef DIMS #define DIMS 16 #endif #ifndef CFL_SIZE #define CFL_SIZE sizeof(complex float) #endif static const char usage_str[] = "[-h] <input> <output_prefix>"; static const char help_str[] = "Create magnitude images as png or proto-dicom.\n" "The first two non-singleton dimensions will\n" "be used for the image, and the other dimensions\n" "will be looped over.\n"; // from view:src/draw.c static double clamp(double a, double b, double x) { return (x < a) ? a : ((x > b) ? b : x); } static double windowing(double g, double a, double b, double x) { return pow(clamp(0., 1., (x - a) / (b - a)), g); } static void toimg(bool dicom, bool use_windowing, const char* name, long inum, float gamma, float contrast, float window, float scale, long h, long w, const complex float* data) { int len = strlen(name); assert(len >= 1); int nr_bytes = dicom ? 2 : 3; unsigned char (*buf)[h][w][nr_bytes] = TYPE_ALLOC(unsigned char[h][w][nr_bytes]); float max_val = dicom ? 65535. : 255.; for (int i = 0; i < h; i++) { for (int j = 0; j < w; j++) { double val = cabsf(data[j * h + i]) / scale; unsigned int value = (unsigned int)(max_val * (use_windowing ? 
windowing(gamma, contrast, window, val) : val)); if (!dicom) { (*buf)[i][j][0] = value; (*buf)[i][j][1] = value; (*buf)[i][j][2] = value; } else { (*buf)[i][j][0] = (value >> 0) & 0xFF; (*buf)[i][j][1] = (value >> 8) & 0xFF; } } } (dicom ? dicom_write : png_write_rgb24)(name, w, h, inum, &(*buf)[0][0][0]); free(buf); } static void toimg_stack(const char* name, bool dicom, bool single_scale, bool use_windowing, float gamma, float contrast, float window, const long dims[DIMS], const complex float* data) { long data_size = md_calc_size(DIMS, dims); long sq_dims[DIMS] = { [0 ... DIMS - 1] = 1 }; int l = 0; for (int i = 0; i < DIMS; i++) if (1 != dims[i]) sq_dims[l++] = dims[i]; float max = 0.; for (long i = 0; i < data_size; i++) max = MAX(cabsf(data[i]), max); int len = strlen(name); assert(len >= 1); long num_imgs = md_calc_size(DIMS - 2, sq_dims + 2); long img_size = md_calc_size(2, sq_dims); debug_printf(DP_INFO, "Writing %d image(s)...", num_imgs); #pragma omp parallel for for (long i = 0; i < num_imgs; i++) { char name_i[len + 10]; // extra space for ".0000.png" if (num_imgs > 1) sprintf(name_i, "%s-%04ld.%s", name, i, dicom ? "dcm" : "png"); else sprintf(name_i, "%s.%s", name, dicom ? "dcm" : "png"); float scale = 0.; if (use_windowing) scale = md_znorm(2, sq_dims, data + i * img_size) / md_calc_size(2, sq_dims); else if (single_scale) scale = max; else for (long j = 0; j < md_calc_size(2, sq_dims); j++) scale = MAX(cabsf(data[i * img_size + j]), scale); if (0. 
== scale) scale = 1.; toimg(dicom, use_windowing, name_i, i, gamma, contrast, window, scale, sq_dims[0], sq_dims[1], data + i * img_size); } debug_printf(DP_INFO, "done.\n", num_imgs); } int main_toimg(int argc, char* argv[argc]) { float gamma = 1.; float contrast = 0.; float window = 750.; bool use_windowing = false; bool single_scale = true; bool dicom = false; const struct opt_s opts[] = { OPT_FLOAT('g', &gamma, "gamma", "gamma level"), OPT_FLOAT('c', &contrast, "contrast", "contrast level"), OPT_FLOAT('w', &window, "window", "window level"), OPT_SET('d', &dicom, "write to dicom format (deprecated, use extension .dcm)"), OPT_CLEAR('m', &single_scale, "re-scale each image"), OPT_SET('W', &use_windowing, "use dynamic windowing"), }; cmdline(&argc, argv, 2, 2, usage_str, help_str, ARRAY_SIZE(opts), opts); num_init(); char* ext = rindex(argv[2], '.'); if (NULL != ext) { assert(!dicom); if (0 == strcmp(ext, ".dcm")) dicom = true; else if (0 != strcmp(ext, ".png")) error("Unknown file extension."); *ext = '\0'; } long dims[DIMS]; complex float* data = load_cfl(argv[1], DIMS, dims); toimg_stack(argv[2], dicom, single_scale, use_windowing, gamma, contrast, window, dims, data); unmap_cfl(DIMS, dims, data); return 0; }
struct_copy.c
/*BHEADER**********************************************************************
 * Copyright (c) 2008, Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE. See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision$
 ***********************************************************************EHEADER*/

/******************************************************************************
 *
 * Structured copy routine
 *
 *****************************************************************************/

#include "_hypre_struct_mv.h"

/*--------------------------------------------------------------------------
 * hypre_StructCopy
 *
 * Full structured copy: y := x over every box of y's grid.
 * Iterates the boxes of y's grid and, for each, copies the corresponding
 * data region of x into y with unit stride.  x and y are assumed to live
 * on compatible grids/data spaces (same box array index i addresses the
 * corresponding box in both vectors).
 *
 * Returns hypre_error_flag (accumulated hypre error state).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_StructCopy( hypre_StructVector *x,
                  hypre_StructVector *y )
{
   hypre_Box       *x_data_box;   /* data extents of x on the current box */
   hypre_Box       *y_data_box;   /* data extents of y on the current box */

   HYPRE_Int        xi;           /* linear data index into xp (set by BoxLoop) */
   HYPRE_Int        yi;           /* linear data index into yp (set by BoxLoop) */

   HYPRE_Complex   *xp;           /* raw data pointer of x on the current box */
   HYPRE_Complex   *yp;           /* raw data pointer of y on the current box */

   hypre_BoxArray  *boxes;        /* boxes of y's grid to traverse */
   hypre_Box       *box;          /* current grid box */
   hypre_Index      loop_size;    /* extent of the current box */
   hypre_IndexRef   start;        /* lower corner of the current box */
   hypre_Index      unit_stride;  /* stride (1,1,...) in every dimension */

   HYPRE_Int        i;            /* box index */

   hypre_SetIndex(unit_stride, 1);

   boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(y));
   hypre_ForBoxI(i, boxes)
   {
      box   = hypre_BoxArrayBox(boxes, i);
      start = hypre_BoxIMin(box);

      /* data boxes may include ghost layers, hence separate from grid box */
      x_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i);
      y_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(y), i);

      xp = hypre_StructVectorBoxData(x, i);
      yp = hypre_StructVectorBoxData(y, i);

      hypre_BoxGetSize(box, loop_size);

      /* BoxLoop2 walks both data boxes in lockstep; xi/yi are the per-point
         linear offsets into xp/yp respectively */
      hypre_BoxLoop2Begin(hypre_StructVectorNDim(x), loop_size,
                          x_data_box, start, unit_stride, xi,
                          y_data_box, start, unit_stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi,yi) HYPRE_SMP_SCHEDULE
#endif
      hypre_BoxLoop2For(xi, yi)
      {
         yp[yi] = xp[xi];
      }
      hypre_BoxLoop2End(xi, yi);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_StructPartialCopy: copy only the components on a subset of the grid.
 * A BoxArrayArray of boxes are needed- for each box of x, only an array
 * of subboxes (i.e., a boxarray for each box of x) are copied.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_StructPartialCopy( hypre_StructVector  *x,
                         hypre_StructVector  *y,
                         hypre_BoxArrayArray *array_boxes )
{
   hypre_Box       *x_data_box;   /* data extents of x on box i */
   hypre_Box       *y_data_box;   /* data extents of y on box i */

   HYPRE_Int        xi;           /* linear data index into xp (set by BoxLoop) */
   HYPRE_Int        yi;           /* linear data index into yp (set by BoxLoop) */

   HYPRE_Complex   *xp;
   HYPRE_Complex   *yp;

   hypre_BoxArray  *boxes;        /* sub-boxes associated with vector box i */
   hypre_Box       *box;          /* current sub-box */
   hypre_Index      loop_size;
   hypre_IndexRef   start;
   hypre_Index      unit_stride;

   HYPRE_Int        i, j ;        /* i: vector box index, j: sub-box index */

   hypre_SetIndex(unit_stride, 1);

   hypre_ForBoxArrayI(i, array_boxes)
   {
      boxes = hypre_BoxArrayArrayBoxArray(array_boxes, i);

      x_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i);
      y_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(y), i);

      xp = hypre_StructVectorBoxData(x, i);
      yp = hypre_StructVectorBoxData(y, i);

      /* array of sub_boxes of box_i of the vector */
      hypre_ForBoxI(j, boxes)
      {
         box = hypre_BoxArrayBox(boxes, j);

         start = hypre_BoxIMin(box);

         hypre_BoxGetSize(box, loop_size);

         hypre_BoxLoop2Begin(hypre_StructVectorNDim(x), loop_size,
                             x_data_box, start, unit_stride, xi,
                             y_data_box, start, unit_stride, yi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,xi,yi) HYPRE_SMP_SCHEDULE
#endif
         hypre_BoxLoop2For(xi, yi)
         {
            yp[yi] = xp[xi];
         }
         hypre_BoxLoop2End(xi, yi);
      }
   }

   return hypre_error_flag;
}
ft_ao.c
#include <stdlib.h>
#include <complex.h>

/* Max number of contracted functions per shell pair buffered at once */
#define NCTRMAX 72

/* Translate all atom coordinates by lattice vector L.
 * xyz holds the reference coordinates (nxyz atoms, row-major x,y,z);
 * ptr_coords[i] is the offset of atom i's coordinates inside env,
 * which is overwritten in place. */
static void shift_bas(double *xyz, int *ptr_coords, double *L, int nxyz,
                      double *env)
{
        int i, p;
        for (i = 0; i < nxyz; i++) {
                p = ptr_coords[i];
                env[p+0] = xyz[i*3+0] + L[0];
                env[p+1] = xyz[i*3+1] + L[1];
                env[p+2] = xyz[i*3+2] + L[2];
        }
}

/* Accumulate one (di x dj) integral tile into the full s1 (square) output:
 * out[ik][off + (j*ni+i)*nGv + n] += in[(j*di+i)*nGv + n] * exp_Lk[ik]
 * for every k-point ik.  exp_Lk carries the lattice-sum phase factor. */
static void axpy_s1(double complex **out, double complex *in,
                    double complex *exp_Lk, int nkpts, size_t off,
                    size_t nGv, int ni, int nj, int ip, int di, int dj)
{
        int i, j, n, ik;  /* NOTE(review): n is int vs size_t nGv — assumes nGv fits in int */
        double complex *pin, *pout;
        for (ik = 0; ik < nkpts; ik++) {
        for (j = 0; j < dj; j++) {
        for (i = 0; i < di; i++) {
                pout = out[ik] + off + (j*ni+i) * nGv;
                pin  = in + (j*di+i) * nGv;
                for (n = 0; n < nGv; n++) {
                        pout[n] += pin[n] * exp_Lk[ik];
                }
        } } }
}

/* Accumulate a strictly lower-triangle tile (shell i > shell j) into the
 * s2 triangular-packed output.  Row r of the packed storage starts after
 * r*(r+1)/2 rows; pout advances by (ip1 + i)*nGv per AO row. */
static void axpy_igtj(double complex **out, double complex *in,
                      double complex *exp_Lk, int nkpts, size_t off,
                      size_t nGv, int ni, int nj, int ip, int di, int dj)
{
        const size_t ip1 = ip + 1;
        int i, j, n, ik;
        double complex *pin, *pout;
        for (ik = 0; ik < nkpts; ik++) {
                pout = out[ik] + off;
                for (i = 0; i < di; i++) {
                        for (j = 0; j < dj; j++) {
                                pin = in + (j*di+i) * nGv;
                                for (n = 0; n < nGv; n++) {
                                        pout[j*nGv+n] += pin[n] * exp_Lk[ik];
                                }
                        }
                        pout += (ip1 + i) * nGv;
                }
        }
}

/* Same as axpy_igtj but for a diagonal tile (shell i == shell j):
 * only the j <= i half of the tile belongs to the packed triangle. */
static void axpy_ieqj(double complex **out, double complex *in,
                      double complex *exp_Lk, int nkpts, size_t off,
                      size_t nGv, int ni, int nj, int ip, int di, int dj)
{
        const size_t ip1 = ip + 1;
        int i, j, n, ik;
        double complex *pin, *pout;
        for (ik = 0; ik < nkpts; ik++) {
                pout = out[ik] + off;
                for (i = 0; i < di; i++) {
                        for (j = 0; j <= i; j++) {
                                pin = in + (j*di+i) * nGv;
                                for (n = 0; n < nGv; n++) {
                                        pout[j*nGv+n] += pin[n] * exp_Lk[ik];
                                }
                        }
                        pout += (ip1 + i) * nGv;
                }
        }
}

/* Evaluate the FT integral tile for shell pair (ish, jsh) via intor and
 * scatter it into the square (s1) output for all k-points.  ish/jsh are
 * relative to the shls_slice window; intor returns nonzero when the tile
 * is non-vanishing and was written to buf. */
void PBC_ft_fill_s1(int (*intor)(), void (*eval_gz)(),
                    double complex **out, double complex *exp_Lk, int nkpts,
                    int ish, int jsh, double complex *buf,
                    int *shls_slice, int *ao_loc, double complex fac,
                    double *Gv, double *b, int *gxyz, int *gs, int nGv,
                    int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        ish += ish0;
        jsh += jsh0;
        const int nrow = ao_loc[ish1] - ao_loc[ish0];  /* total AO rows in slice */
        const int ncol = ao_loc[jsh1] - ao_loc[jsh0];  /* total AO cols in slice */
        /* flat (row, col) offset of this tile inside the nrow x ncol matrix */
        const size_t off = ao_loc[ish] - ao_loc[ish0]
                + (ao_loc[jsh] - ao_loc[jsh0]) * nrow;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int ip = ao_loc[ish] - ao_loc[ish0];
        int shls[2] = {ish, jsh};
        int dims[2] = {di, dj};
        if ((*intor)(buf, shls, dims, NULL, eval_gz, fac, Gv, b, gxyz, gs, nGv,
                     atm, natm, bas, nbas, env)) {
                axpy_s1(out, buf, exp_Lk, nkpts, off*nGv, nGv,
                        nrow, ncol, ip, di, dj);
        }
}

/* s1 fill restricted to ip >= jp so the Hermitian counterpart can be
 * reconstructed later.
 * NOTE(review): ip uses ao_loc[ish+ish0] without subtracting ao_loc[ish0],
 * while jp subtracts ao_loc[jsh0] — presumably valid only when the i-slice
 * starts at the beginning of ao_loc; confirm against callers. */
void PBC_ft_fill_s1hermi(int (*intor)(), void (*eval_gz)(),
                         double complex **out, double complex *exp_Lk, int nkpts,
                         int ish, int jsh, double complex *buf,
                         int *shls_slice, int *ao_loc, double complex fac,
                         double *Gv, double *b, int *gxyz, int *gs, int nGv,
                         int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int jsh0 = shls_slice[2];
        const int ip = ao_loc[ish+ish0];
        const int jp = ao_loc[jsh+jsh0] - ao_loc[jsh0];
        if (ip >= jp) {
                PBC_ft_fill_s1(intor, eval_gz, out, exp_Lk, nkpts, ish, jsh,
                               buf, shls_slice, ao_loc, fac, Gv, b, gxyz, gs, nGv,
                               atm, natm, bas, nbas, env);
        }
}

/* Fill for the triangular-packed (s2) output: only tiles with ip >= jp
 * contribute; diagonal tiles use the half-tile scatter. */
void PBC_ft_fill_s2(int (*intor)(), void (*eval_gz)(),
                    double complex **out, double complex *exp_Lk, int nkpts,
                    int ish, int jsh, double complex *buf,
                    int *shls_slice, int *ao_loc, double complex fac,
                    double *Gv, double *b, int *gxyz, int *gs, int nGv,
                    int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        ish += ish0;
        jsh += jsh0;
        const int ip = ao_loc[ish];
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        if (ip < jp) {
                return;  /* upper triangle: handled by the transposed tile */
        }
        const int nrow = ao_loc[ish1] - ao_loc[ish0];
        const int ncol = ao_loc[jsh1] - ao_loc[jsh0];
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int i0 = ao_loc[ish0];
        /* packed-triangle offset: rows below ip minus rows below the slice
         * start, plus the column position jp */
        const size_t off = ip * (ip + 1) / 2 - i0 * (i0 + 1) / 2 + jp;
        int shls[2] = {ish, jsh};
        int dims[2] = {di, dj};
        if ((*intor)(buf, shls, dims, NULL, eval_gz, fac, Gv, b, gxyz, gs, nGv,
                     atm, natm, bas, nbas, env)) {
                if (ip != jp) {
                        axpy_igtj(out, buf, exp_Lk, nkpts, off*nGv, nGv,
                                  nrow, ncol, ip, di, dj);
                } else {
                        axpy_ieqj(out, buf, exp_Lk, nkpts, off*nGv, nGv,
                                  nrow, ncol, ip, di, dj);
                }
        }
}

/* Compute FT overlap integrals for all shell pairs in shls_slice at all
 * k-points, dispatching tiles to fill in an OpenMP dynamic loop.  Each
 * thread owns a scratch buffer sized for the largest tile.
 * NOTE(review): malloc result is not checked before use. */
void ft_ovlp_kpts(int (*intor)(), void (*eval_gz)(), void (*fill)(),
                  double complex **out, double complex *exp_Lk, int nkpts,
                  int *shls_slice, int *ao_loc,
                  double *Gv, double *b, int *gxyz, int *gs, int nGv,
                  int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int nish = ish1 - ish0;
        const int njsh = jsh1 - jsh0;
        const double complex fac = 1;

#pragma omp parallel default(none) \
        shared(intor, eval_gz, fill, out, exp_Lk, nkpts, Gv, b, gxyz, gs, nGv,\
               shls_slice, ao_loc, atm, natm, bas, nbas, env)
{
        int i, j, ij;
        double complex *buf = malloc(sizeof(double complex)
                                     * nGv*NCTRMAX*NCTRMAX);
#pragma omp for schedule(dynamic)
        for (ij = 0; ij < nish*njsh; ij++) {
                i = ij / njsh;
                j = ij % njsh;
                (*fill)(intor, eval_gz, out, exp_Lk, nkpts, i, j, buf,
                        shls_slice, ao_loc, fac, Gv, b, gxyz, gs, nGv,
                        atm, natm, bas, nbas, env);
        }
        free(buf);
}
}

/* Lattice-sum driver: for each image m, shift the basis by lattice vector
 * Ls[m] (mutating env in place) and accumulate the k-point FT overlaps
 * with the phase factors exp_Lk for that image. */
void PBC_ft_latsum_kpts(int (*intor)(), void (*eval_gz)(), void (*fill)(),
                        double complex **out, double *xyz, int *ptr_coords, int nxyz,
                        double *Ls, int nimgs, double complex *exp_Lk, int nkpts,
                        int *shls_slice, int *ao_loc,
                        double *Gv, double *b, int *gxyz, int *gs, int nGv,
                        int *atm, int natm, int *bas, int nbas, double *env)
{
        int m;
        for (m = 0; m < nimgs; m++) {
                shift_bas(xyz, ptr_coords, Ls+m*3, nxyz, env);
                ft_ovlp_kpts(intor, eval_gz, fill, out, exp_Lk+m*nkpts, nkpts,
                             shls_slice, ao_loc, Gv, b, gxyz, gs, nGv,
                             atm, natm, bas, nbas, env);
        }
}
HSetMaintainer.h
#ifndef HSET_MAINTAINER_H
#define HSET_MAINTAINER_H

/*************************************************************
 * Author   : Markus Schordan                                *
 *************************************************************/

#include <boost/unordered_set.hpp>

//#define HSET_MAINTAINER_DEBUG_MODE

/*!
 * \author Markus Schordan
 * \date 2012.
 *
 * Maintains a hash set of heap-allocated KeyType objects, ensuring each
 * distinct value is stored exactly once (hash-consing).  The set stores
 * KeyType* but hashes/compares the pointed-to values via HashFun and
 * EqualToPred.  All mutating/lookup operations are serialized with an
 * OpenMP critical section named HASHSET, so the container can be shared
 * across OpenMP threads.  By default the maintainer owns its elements
 * and deletes them on destruction.
 */
template<typename KeyType,typename HashFun, typename EqualToPred>
class HSetMaintainer : public boost::unordered_set<KeyType*,HashFun,EqualToPred> {
 public:
  // first: true iff the element was newly inserted; second: canonical pointer
  typedef std::pair<bool,const KeyType*> ProcessingResult;

  /*!
   * \author Marc Jasper
   * \date 2016.
   */
  // Default: elements are owned and deleted by the destructor.
  HSetMaintainer() {
    _keepStatesDuringDeconstruction = false;
  }

  /*!
   * \author Marc Jasper
   * \date 2016.
   */
  // keepStates==true leaves elements alive after destruction (caller owns them).
  HSetMaintainer(bool keepStates) {
    _keepStatesDuringDeconstruction = keepStates;
  }

  /*!
   * \author Marc Jasper
   * \date 2016.
   */
  virtual ~HSetMaintainer() {
    if (!_keepStatesDuringDeconstruction){
      typename HSetMaintainer::iterator i;
      for (i=this->begin(); i!=this->end(); ++i) {
        delete (*i);
      }
    }
  }

  // True iff an element equal to s is already maintained.
  bool exists(KeyType& s) {
    return determine(s)!=0;
  }

  // Returns the position of s in iteration order (O(n): unordered_set
  // iterators lack operator-).  Throws a C-string if s is not present.
  // NOTE(review): find(s) is called with a KeyType, but the set's key type
  // is KeyType* — presumably relies on an implicit conversion or a
  // transparent lookup; verify this overload actually resolves.
  size_t id(const KeyType& s) {
    typename boost::unordered_set<KeyType*,HashFun,EqualToPred>::const_iterator i;
    i=HSetMaintainer<KeyType,HashFun,EqualToPred>::find(s);
    if(i!=HSetMaintainer<KeyType,HashFun,EqualToPred>::end()) {
      // in lack of operator '-' we compute the distance
      size_t pos=0;
      typename boost::unordered_set<KeyType*,HashFun,EqualToPred>::const_iterator b;
      b=HSetMaintainer<KeyType,HashFun,EqualToPred>::begin();
      while(b!=i) {
        pos++;
        ++b;
      }
      return pos;
    } else throw "Error: unknown value. Maintainer cannot determine an id.";
  }

  // NOTE(review): stray class-scope member; every method declares its own
  // local iterator, so this appears dead — candidate for removal.
  typename HSetMaintainer<KeyType,HashFun,EqualToPred>::iterator i;

  // Returns the canonical stored pointer equal to s, or 0 if absent.
  // Thread-safe via the HASHSET critical section.
  KeyType* determine(KeyType& s) {
    KeyType* ret=0;
    typename HSetMaintainer<KeyType,HashFun,EqualToPred>::iterator i;
#pragma omp critical(HASHSET)
    {
      i=HSetMaintainer<KeyType,HashFun,EqualToPred>::find(&s);
      if(i!=HSetMaintainer<KeyType,HashFun,EqualToPred>::end()) {
        ret=const_cast<KeyType*>(*i);
      } else {
        ret=0;
      }
    }
    return ret;
  }

  // Const overload of determine; same contract and locking.
  const KeyType* determine(const KeyType& s) {
    const KeyType* ret=0;
    typename HSetMaintainer<KeyType,HashFun,EqualToPred>::iterator i;
#pragma omp critical(HASHSET)
    {
      i=HSetMaintainer<KeyType,HashFun,EqualToPred>::find(const_cast<KeyType*>(&s));
      if(i!=HSetMaintainer<KeyType,HashFun,EqualToPred>::end()) {
        ret=const_cast<KeyType*>(*i);
      } else {
        ret=0;
      }
    }
    return ret;
  }

  // Inserts key if no equal element exists; returns (inserted?, canonical ptr).
  // Ownership of *key transfers to the maintainer only when inserted.
  ProcessingResult process(const KeyType* key) {
    ProcessingResult res2;
#pragma omp critical(HASHSET)
    {
      std::pair<typename HSetMaintainer::iterator, bool> res;
      typename HSetMaintainer::iterator iter=this->find(const_cast<KeyType*>(key)); // TODO: eliminate const_cast
      if(iter!=this->end()) {
        // found it!
        res=std::make_pair(iter,false);
      } else {
        res=this->insert(const_cast<KeyType*>(key)); // TODO: eliminate const_cast
      }
      res2=std::make_pair(res.second,*res.first);
    }
    return res2;
  }

  // Convenience: returns the canonical pointer whether or not s was new.
  const KeyType* processNewOrExisting(const KeyType* s) {
    ProcessingResult res=process(s);
    return res.second;
  }

  //! <true,const KeyType> if new element was inserted
  //! <false,const KeyType> if element already existed
  // By-value variant: if key is new, it is copied to the heap and the
  // heap copy is stored.
  ProcessingResult process(KeyType key) {
    ProcessingResult res2;
#pragma omp critical(HASHSET)
    {
      std::pair<typename HSetMaintainer::iterator, bool> res;
      typename HSetMaintainer::iterator iter=this->find(&key);
      if(iter!=this->end()) {
        // found it!
        res=std::make_pair(iter,false);
      } else {
        // converting the stack allocated object to heap allocated
        // this copies the entire object
        // TODO: this can be avoided by providing a process function with a pointer arg
        // this requires a more detailed result: pointer exists, alternate pointer with equal object exists, does not exist
        KeyType* keyPtr=new KeyType();
        *keyPtr=key;
        res=this->insert(keyPtr);
        if (!res.second) {
          // this case should never occur, condition "iter!=this->end()" above would have been satisfied and
          // this else branch would have therefore been ignored
          std::cerr << "ERROR: HSetMaintainer: Element was not inserted even though it could not be found in the set." << std::endl;
          ROSE_ASSERT(0);
          delete keyPtr;
          keyPtr = NULL;
        }
      }
#ifdef HSET_MAINTAINER_DEBUG_MODE
      // NOTE(review): debug path inserts KeyType by value into a set of
      // KeyType* — unlikely to compile if the macro is ever enabled.
      std::pair<typename HSetMaintainer::iterator, bool> res1;
      res1=this->insert(key);
      std::pair<typename HSetMaintainer::iterator, bool> res2;
      res2=this->insert(key);
      if(!(res1==res2)) {
        std::cerr<< "Error: HsetMaintainer failed:"<<std::endl;
        std::cerr<< "res1:"<<(*res1.first).toString()<<":"<<res1.second<<std::endl;
        std::cerr<< "res2:"<<(*res2.first).toString()<<":"<<res2.second<<std::endl;
        exit(1);
      }
      std::cerr << "HSET insert OK"<<std::endl;
#endif
      res2=std::make_pair(res.second,*res.first);
    }
    return res2;
  }

  // Inserts s and requires it to be new; exits the program otherwise.
  const KeyType* processNew(KeyType& s) {
    //std::pair<typename HSetMaintainer::iterator, bool> res=process(s);
    ProcessingResult res=process(s);
    if(res.first!=true) {
      std::cerr<< "Error: HsetMaintainer::processNew failed:"<<std::endl;
      std::cerr<< "res:";
      std::cout <<":"<<res.first<<std::endl;
      std::cout <<res.second->toString();
      exit(1);
    }
    return res.second;
  }

  // Convenience: returns the canonical pointer whether or not s was new.
  const KeyType* processNewOrExisting(KeyType& s) {
    ProcessingResult res=process(s);
    return res.second;
  }

  // Number of maintained elements.
  long numberOf() { return HSetMaintainer<KeyType,HashFun,EqualToPred>::size(); }

  // Largest bucket size — a measure of hash collision quality.
  long maxCollisions() {
    size_t max=0;
    for(size_t i=0; i<HSetMaintainer<KeyType,HashFun,EqualToPred>::bucket_count();++i) {
      if(HSetMaintainer<KeyType,HashFun,EqualToPred>::bucket_size(i)>max) {
        max=HSetMaintainer<KeyType,HashFun,EqualToPred>::bucket_size(i);
      }
    }
    return max;
  }

  double loadFactor() {
    return HSetMaintainer<KeyType,HashFun,EqualToPred>::load_factor();
  }

  // Approximate total memory footprint: per-element memorySize() plus the
  // pointer storage plus the container object itself.
  long memorySize() const {
    long mem=0;
    for(typename HSetMaintainer<KeyType,HashFun,EqualToPred>::const_iterator i
          =HSetMaintainer<KeyType,HashFun,EqualToPred>::begin();
        i!=HSetMaintainer<KeyType,HashFun,EqualToPred>::end();
        ++i) {
      mem+=(*i)->memorySize();
      mem+=sizeof(*i);
    }
    return mem+sizeof(*this);
  }

 private:
  //const KeyType* ptr(KeyType& s) {}
  // If true, the destructor does not delete the maintained elements.
  bool _keepStatesDuringDeconstruction;
};

#endif
csyr2k.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zsyr2k.c, normal z -> c, Fri Sep 28 17:38:03 2018 * **/ #include "plasma.h" #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_tuning.h" #include "plasma_types.h" #include "plasma_workspace.h" /***************************************************************************//** * * @ingroup plasma_syr2k * * Performs one of the symmetric rank 2k operations * * \f[ C = \alpha A \times B^T + \alpha B \times A^T + \beta C, \f] * or * \f[ C = \alpha A^T \times B + \alpha B^T \times A + \beta C, \f] * * where alpha and beta are scalars, * C is an n-by-n symmetric matrix, and A and B are n-by-k matrices * in the first case and k-by-n matrices in the second case. * ******************************************************************************* * * @param[in] uplo * - PlasmaUpper: Upper triangle of C is stored; * - PlasmaLower: Lower triangle of C is stored. * * @param[in] trans * - PlasmaNoTrans: * \f[ C = \alpha A \times B^T + \alpha B \times A^T + \beta C; \f] * - PlasmaTrans: * \f[ C = \alpha A^T \times B + \alpha B^T \times A + \beta C. \f] * * @param[in] n * The order of the matrix C. n >= zero. * * @param[in] k * If trans = PlasmaNoTrans, number of columns of the A and B matrices; * if trans = PlasmaTrans, number of rows of the A and B matrices. * * @param[in] alpha * The scalar alpha. * * @param[in] pA * An lda-by-ka matrix. * If trans = PlasmaNoTrans, ka = k; * if trans = PlasmaTrans, ka = n. * * @param[in] lda * The leading dimension of the array A. * If trans = PlasmaNoTrans, lda >= max(1, n); * if trans = PlasmaTrans, lda >= max(1, k). * * @param[in] pB * An ldb-by-kb matrix. * If trans = PlasmaNoTrans, kb = k; * if trans = PlasmaTrans, kb = n. 
* * @param[in] ldb * The leading dimension of the array B. * If trans = PlasmaNoTrans, ldb >= max(1, n); * if trans = PlasmaTrans, ldb >= max(1, k). * * @param[in] beta * The scalar beta. * * @param[in,out] pC * An ldc-by-n matrix. * On exit, the uplo part of the matrix is overwritten * by the uplo part of the updated matrix. * * @param[in] ldc * The leading dimension of the array C. ldc >= max(1, n). * ******************************************************************************* * * @retval PlasmaSuccess successful exit * ******************************************************************************* * * @sa plasma_omp_csyr2k * @sa plasma_csyr2k * @sa plasma_dsyr2k * @sa plasma_ssyr2k * ******************************************************************************/ int plasma_csyr2k(plasma_enum_t uplo, plasma_enum_t trans, int n, int k, plasma_complex32_t alpha, plasma_complex32_t *pA, int lda, plasma_complex32_t *pB, int ldb, plasma_complex32_t beta, plasma_complex32_t *pC, int ldc) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_fatal_error("PLASMA not initialized"); return PlasmaErrorNotInitialized; } // Check input arguments. 
if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) { plasma_error("illegal value of uplo"); return -1; } if ((trans != PlasmaNoTrans) && (trans != PlasmaTrans)) { plasma_error("illegal value of trans"); return -2; } if (n < 0) { plasma_error("illegal value of n"); return -3; } if (k < 0) { plasma_error("illegal value of k"); return -4; } int am, an; int bm, bn; if (trans == PlasmaNoTrans) { am = n; an = k; bm = n; bn = k; } else { am = k; an = n; bm = k; bn = n; } if (lda < imax(1, am)) { plasma_error("illegal value of lda"); return -7; } if (ldb < imax(1, bm)) { plasma_error("illegal value of ldb"); return -9; } if (ldc < imax(1, n)) { plasma_error("illegal value of ldc"); return -12; } // quick return if (n == 0 || ((alpha == 0.0 || k == 0.0) && beta == 1.0)) return PlasmaSuccess; // Tune parameters. if (plasma->tuning) plasma_tune_syr2k(plasma, PlasmaComplexFloat, n, k); // Set tiling parameters. int nb = plasma->nb; // Create tile matrices. plasma_desc_t A; plasma_desc_t B; plasma_desc_t C; int retval; retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb, am, an, 0, 0, am, an, &A); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); return retval; } retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb, bm, bn, 0, 0, bm, bn, &B); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&A); return retval; } retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb, n, n, 0, 0, n, n, &C); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&A); plasma_desc_destroy(&B); return retval; } // Initialize sequence. plasma_sequence_t sequence; retval = plasma_sequence_init(&sequence); // Initialize request. plasma_request_t request; retval = plasma_request_init(&request); // asynchronous block #pragma omp parallel #pragma omp master { // Translate to tile layout. 
plasma_omp_cge2desc(pA, lda, A, &sequence, &request); plasma_omp_cge2desc(pB, ldb, B, &sequence, &request); plasma_omp_cge2desc(pC, ldc, C, &sequence, &request); // Call the tile async function. plasma_omp_csyr2k(uplo, trans, alpha, A, B, beta, C, &sequence, &request); // Translate back to LAPACK layout. plasma_omp_cdesc2ge(C, pC, ldc, &sequence, &request); } // implicit synchronization // Free matrices in tile layout. plasma_desc_destroy(&A); plasma_desc_destroy(&B); plasma_desc_destroy(&C); // Return status. int status = sequence.status; return status; } /***************************************************************************//** * * @ingroup plasma_syr2k * * Performs rank 2k update. * Non-blocking tile version of plasma_csyr2k(). * May return before the computation is finished. * Operates on matrices stored by tiles. * All matrices are passed through descriptors. * All dimensions are taken from the descriptors. * Allows for pipelining of operations at runtime. * ******************************************************************************* * * @param[in] uplo * - PlasmaUpper: Upper triangle of C is stored; * - PlasmaLower: Lower triangle of C is stored. * * @param[in] trans * - PlasmaNoTrans: * \f[ C = \alpha A \times B^T + \alpha B \times A^T + \beta C; \f] * - PlasmaTrans: * \f[ C = \alpha A^T \times B + \alpha B^T \times A + \beta C. \f] * * @param[in] alpha * The scalar alpha. * * @param[in] A * Descriptor of matrix A. * *@param[in] B * Descriptor of matrix B. * * @param[in] beta * The scalar beta. * * @param[in,out] C * Descriptor of matrix C. * * @param[in] sequence * Identifies the sequence of function calls that this call belongs to * (for completion checks and exception handling purposes). Check * the sequence->status for errors. * * @param[out] request * Identifies this function call (for exception handling purposes). * * @retval void * Errors are returned by setting sequence->status and * request->status to error values. 
The sequence->status and * request->status should never be set to PlasmaSuccess (the * initial values) since another async call may be setting a * failure value at the same time. * ******************************************************************************* * * @sa plasma_csyr2k * @sa plasma_omp_csyr2k * @sa plasma_omp_csyr2k * ******************************************************************************/ void plasma_omp_csyr2k(plasma_enum_t uplo, plasma_enum_t trans, plasma_complex32_t alpha, plasma_desc_t A, plasma_desc_t B, plasma_complex32_t beta, plasma_desc_t C, plasma_sequence_t *sequence, plasma_request_t *request) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_fatal_error("PLASMA not initialized"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // Check input arguments. if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) { plasma_error("illegal value of uplo"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if ((trans != PlasmaNoTrans) && (trans != PlasmaTrans)) { plasma_error("illegal value of trans"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(A) != PlasmaSuccess) { plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); plasma_error("invalid A"); return; } if (plasma_desc_check(B) != PlasmaSuccess) { plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); plasma_error("invalid B"); return; } if (plasma_desc_check(C) != PlasmaSuccess) { plasma_error("invalid C"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (sequence == NULL) { plasma_error("NULL sequence"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (request == NULL) { plasma_error("NULL request"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // quick return int k = trans == PlasmaNoTrans ? 
A.n : A.m; if (C.m == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0)) return; // Call the parallel function. plasma_pcsyr2k(uplo, trans, alpha, A, B, beta, C, sequence, request); }
CriticalBodyLink.c
/* OpenMP compiler-test fixture: exercises code generation / outlining of
 * `#pragma omp critical` bodies.  The no-effect statement and the shadowing
 * declaration below are intentional test inputs — do not "clean them up". */
int x;

int main ()
{
#pragma omp critical
  {
    100;      /* expression statement with no effect — deliberately so */
  }

#pragma omp critical
  {
    int x;    /* local declaration shadowing the file-scope x — deliberate */
  }
}
diagmv_x_csr_n.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif

/* Diagonal matrix-vector product for a CSR matrix (non-unit diagonal):
 *   y[i] = beta * y[i] + alpha * A[i][i] * x[i]
 * Only the diagonal entry of each row contributes; off-diagonal values of A
 * are ignored.  Rows with no stored diagonal entry contribute 0 (tmp stays
 * zero), so those rows reduce to y[i] = beta * y[i].
 * Rows are independent, hence the parallel-for over i is race-free. */
static alphasparse_status_t
diagmv_x_csr_n_omp(const ALPHA_Number alpha,
                   const ALPHA_SPMAT_CSR *A,
                   const ALPHA_Number *x,
                   const ALPHA_Number beta,
                   ALPHA_Number *y)
{
    const ALPHA_INT m = A->rows;
    const ALPHA_INT thread_num = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < m; ++i)
    {
        register ALPHA_Number tmp;
        alpha_setzero(tmp);
        /* scan row i for the diagonal entry; CSR columns are not assumed
           sorted, so the whole row may need to be searched */
        for (ALPHA_INT ai = A->rows_start[i]; ai < A->rows_end[i]; ++ai)
        {
            if (A->col_indx[ai] == i)
            {
                alpha_mul(tmp, alpha, A->values[ai]);  /* tmp = alpha * A[i][i] */
                alpha_mule(tmp, x[i]);                 /* tmp *= x[i] */
                break;  /* at most one diagonal entry per row */
            }
        }
        alpha_madd(y[i], beta, y[i], tmp); // y[i] = beta * y[i] + tmp;
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}

/* Public entry point (name generated via ONAME): delegates to the OpenMP
 * kernel above.  Parameters and semantics are identical. */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
      const ALPHA_SPMAT_CSR *A,
      const ALPHA_Number *x,
      const ALPHA_Number beta,
      ALPHA_Number *y)
{
    return diagmv_x_csr_n_omp(alpha, A, x, beta, y);
}
elemwise_binary_scalar_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2016 by Contributors * \file elemwise_binary_scalar_op.h * \brief Function definition of elementwise binary scalar operators */ #ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_ #define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_ #include <mxnet/operator_util.h> #include <vector> #include <utility> #include "../mshadow_op.h" #include "../elemwise_op_common.h" #include "elemwise_unary_op.h" namespace mxnet { namespace op { class BinaryScalarOp : public UnaryOp { /*! 
\brief Tensor operation against a scalar with a dense result */ template<typename OP, typename DType, typename IType> static void ComputeExDenseResultRsp(mshadow::Stream<cpu> *stream, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &input, const OpReqType req, const NDArray &output) { const double alpha = nnvm::get<double>(attrs.parsed); CHECK_EQ(output.shape(), input.shape()); const int64_t row_count = output.shape()[0]; const int64_t items_per_row = output.shape().Size() / row_count; const DType result_for_zero = OP::Map(DType(0), DType(alpha)); mshadow::Tensor<cpu, 1, DType> input_data = input.data().FlatTo1D<cpu, DType>(stream); mshadow::Tensor<cpu, 1, DType> output_data = output.data().FlatTo1D<cpu, DType>(stream); const int64_t sparse_row_count = input.aux_shape(rowsparse::kIdx).Size(); if (sparse_row_count != row_count) { mshadow::Tensor<cpu, 1, IType> row_indexes = input.aux_data( rowsparse::kIdx).FlatTo1D<cpu, IType>(stream); int64_t input_iter = 0; int64_t output_row = 0; IType next_input_row = 0; while (output_row < row_count) { next_input_row = input_iter < sparse_row_count ? 
int64_t(row_indexes[input_iter]) : row_count; // Split up into blocks of contiguous data and do those together // Do contiguous dense blocks const int64_t dense_block_count = next_input_row - output_row; if (dense_block_count > 0) { MXNET_ASSIGN_REQ_SWITCH(req, Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, cpu>::Launch( stream, items_per_row * dense_block_count, output_data.dptr_ + items_per_row * output_row, result_for_zero); }); output_row += dense_block_count; continue; } // Do contiguous sparse blocks int64_t next_non_contiguous_sparse = input_iter; while (next_non_contiguous_sparse < sparse_row_count - 1) { if (row_indexes[next_non_contiguous_sparse + 1] != row_indexes[next_non_contiguous_sparse] + 1) { break; } ++next_non_contiguous_sparse; } const int64_t sparse_block_count = next_non_contiguous_sparse - input_iter + 1; if (sparse_block_count > 0) { MXNET_ASSIGN_REQ_SWITCH(req, Req, { mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch( stream, items_per_row * sparse_block_count, &output_data.dptr_[items_per_row * output_row], &input_data.dptr_[items_per_row * input_iter], DType(alpha)); }); output_row += sparse_block_count; input_iter += sparse_block_count; continue; } } } else { // All rows exist (eventually we don't have to do complex // things to call GPU kernels because we don't need to access row indices) MXNET_ASSIGN_REQ_SWITCH(req, Req, { mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch( stream, items_per_row * row_count, output_data.dptr_, input_data.dptr_, DType(alpha)); }); } } /*! \brief Tensor operation against a scalar with a dense result */ template<typename OP, typename DType, typename IType> static void ComputeExDenseResultRsp(mshadow::Stream<gpu> *stream, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &input, const OpReqType req, const NDArray &output) { LOG(FATAL) << "NOT IMPLEMENTED"; } /*! 
\brief Tensor operation against a scalar with a dense result */
  // CPU specialization for CSR input: every stored entry aij becomes
  // OP(aij, alpha) in the dense output; positions with no stored entry
  // receive the pre-filled value OP(0, alpha).
  template<typename OP, typename DType, typename IType, typename CType>
  static void ComputeExDenseResultCsr(mshadow::Stream<cpu> *stream,
                                      const nnvm::NodeAttrs &attrs,
                                      const OpContext &ctx,
                                      const NDArray &input,
                                      const OpReqType req,
                                      const NDArray &output) {
    CHECK_EQ(output.shape(), input.shape());
    const double alpha = nnvm::get<double>(attrs.parsed);
    // Value taken by every output position with no stored input entry.
    const DType dense_fill_val = OP::Map(DType(0), DType(alpha));
    const TBlob column_indexes = input.aux_data(csr::kIdx);
    const size_t item_count = column_indexes.Size();
    // Pre-fill dense with 0-input/output value
    FillDense<DType>(stream, output.shape().Size(), dense_fill_val, req,
                     output.data().dptr<DType>());
    mshadow::Tensor<cpu, 2, DType> out = AsRowise2D<DType>(stream, output.data());
    if (item_count) {
      const DType *in = input.data().dptr<DType>();
      const IType *column_indexes_ptr = column_indexes.dptr<IType>();
      const auto row_count = static_cast<size_t>(input.shape()[0]);
      const TBlob row_starts = input.aux_data(csr::kIndPtr);
      const CType *row_starts_ptr = row_starts.dptr<CType>();
      #pragma omp parallel for
      for (int i = 0; i < static_cast<int>(row_count); ++i) {
        const bool last_row = i == static_cast<int>(row_count) - 1;
        // Split up into blocks of contiguous data and do those together
        const size_t row_item_start_iter = row_starts_ptr[i];
        // Last row's extent comes from the total nnz count rather than the
        // next row pointer.
        const size_t input_items_this_row = !last_row ?
          static_cast<size_t>(row_starts_ptr[i + 1]) - row_item_start_iter
          : item_count - row_item_start_iter;
        if (input_items_this_row) {
          const IType *this_row_column_indexes = column_indexes_ptr + row_item_start_iter;
          const DType *row_data_start = in + row_item_start_iter;
          DType *output_this_row = out[i].dptr_;
          // More overhead to use OMP for small loops, so don't
          if (input_items_this_row > 1000) {
            // NOTE(review): this parallel-for is nested inside the row-level
            // parallel-for above; it only fans out further when nested OpenMP
            // parallelism is enabled — confirm intent.
            #pragma omp parallel for
            for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) {
              const IType col = this_row_column_indexes[j];
              const DType val = row_data_start[j];
              output_this_row[col] = OP::Map(val, DType(alpha));
            }
          } else {
            for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) {
              const IType col = this_row_column_indexes[j];
              const DType val = row_data_start[j];
              output_this_row[col] = OP::Map(val, DType(alpha));
            }
          }
        }
      }
    }
  }

  /*! \brief Tensor operation against a scalar with a dense result */
  // GPU specialization is not implemented: aborts at runtime if reached.
  template<typename OP, typename DType, typename IType, typename CType>
  static void ComputeExDenseResultCsr(mshadow::Stream<gpu> *stream,
                                      const nnvm::NodeAttrs &attrs,
                                      const OpContext &ctx,
                                      const NDArray &input,
                                      const OpReqType req,
                                      const NDArray &output) {
    LOG(FATAL) << "NOT IMPLEMENTED";
  }

  // Dispatches on the sparse storage type of 'input' (row-sparse or CSR);
  // 'output' must already be dense (kDefaultStorage).
  template<typename xpu, typename OP, typename DType, typename IType>
  static void ComputeExDenseResult(const nnvm::NodeAttrs &attrs,
                                   const OpContext &ctx,
                                   const NDArray &input,
                                   const OpReqType req,
                                   const NDArray output) {
    mshadow::Stream<xpu> *stream = ctx.get_stream<xpu>();
    CHECK_EQ(output.storage_type(), kDefaultStorage);
    switch (input.storage_type()) {
      case kRowSparseStorage: {
        ComputeExDenseResultRsp<OP, DType, IType>(stream, attrs, ctx, input, req, output);
        break;
      }
      case kCSRStorage: {
        // The CSR row-pointer element type is resolved at runtime.
        MSHADOW_IDX_TYPE_SWITCH(input.aux_data(csr::kIndPtr).type_flag_, CType, {
          ComputeExDenseResultCsr<OP, DType, IType, CType>(stream, attrs, ctx, input, req, output);
        });
        break;
      }
      default:
        CHECK(false) << "Unsupported sparse storage type";
        break;
    }
  }

 public:
  // Dense elementwise OP(input, alpha) on CPU, where alpha is the parsed
  // "scalar" attribute.
  template<typename OP>
  static void Compute_(const nnvm::NodeAttrs &attrs,
                       mshadow::Stream<cpu>* s,
                       const std::vector<TBlob> &inputs,
                       const std::vector<OpReqType> &req,
                       const std::vector<TBlob> &outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    using namespace mshadow;
    using namespace mshadow::expr;
    const double alpha = nnvm::get<double>(attrs.parsed);
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
          s, inputs[0].Size(), outputs[0].dptr<DType>(),
          inputs[0].dptr<DType>(), DType(alpha));
      });
    });
  }

#if MXNET_USE_CUDA
  // GPU overload; presumably defined in the .cuh counterpart included below.
  template<typename OP>
  static void Compute_(const nnvm::NodeAttrs &attrs,
                       mshadow::Stream<gpu>* s,
                       const std::vector<TBlob> &inputs,
                       const std::vector<OpReqType> &req,
                       const std::vector<TBlob> &outputs);
#endif

  // FCompute entry point: forwards to the stream-specific Compute_.
  template<typename xpu, typename OP>
  static void Compute(const nnvm::NodeAttrs &attrs,
                      const OpContext &ctx,
                      const std::vector<TBlob> &inputs,
                      const std::vector<OpReqType> &req,
                      const std::vector<TBlob> &outputs) {
    mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
    Compute_<OP>(attrs, s, inputs, req, outputs);
  }

  // Same as Compute, but only switches over integer output types.
  template<typename xpu, typename OP>
  static void ComputeInt(const nnvm::NodeAttrs &attrs,
                         const OpContext &ctx,
                         const std::vector<TBlob> &inputs,
                         const std::vector<OpReqType> &req,
                         const std::vector<TBlob> &outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    const double alpha = nnvm::get<double>(attrs.parsed);
    MXNET_INT_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
          s, inputs[0].Size(), outputs[0].dptr<DType>(),
          inputs[0].dptr<DType>(), DType(alpha));
      });
    });
  }

  // Comparison-style ops: the output buffer is written as bool regardless of
  // the input element type.
  template<typename xpu, typename OP>
  static void ComputeLogic(const nnvm::NodeAttrs &attrs,
                           const OpContext &ctx,
                           const std::vector<TBlob> &inputs,
                           const std::vector<OpReqType> &req,
                           const std::vector<TBlob> &outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    const double alpha = nnvm::get<double>(attrs.parsed);
    MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
          s, inputs[0].Size(), outputs[0].dptr<bool>(),
          inputs[0].dptr<DType>(), DType(alpha));
      });
    });
  }

  // FComputeEx entry point: same-storage sparse->sparse maps onto the dense
  // kernel over the value array; sparse->dense goes through
  // ComputeExDenseResult; anything else is reported as unimplemented.
  template<typename xpu, typename OP>
  static void ComputeEx(const nnvm::NodeAttrs &attrs,
                        const OpContext &ctx,
                        const std::vector<NDArray> &inputs,
                        const std::vector<OpReqType> &req,
                        const std::vector<NDArray> &outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    const auto in_stype = inputs[0].storage_type();
    const auto out_stype = outputs[0].storage_type();
    if (req[0] == kNullOp) {
      return;
    }
    if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) ||
        (in_stype == kCSRStorage && out_stype == kCSRStorage)) {
      // csr -> csr, or rsp -> rsp
      UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>);
    } else if (out_stype == kDefaultStorage &&
               (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
      // NOTE(review): aux_type(rowsparse::kIdx) is queried even on the CSR
      // path — confirm the aux slot indices coincide for both storage types.
      MSHADOW_TYPE_SWITCH(outputs[0].data().type_flag_, DType, {
        MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, {
          ComputeExDenseResult<xpu, OP, DType, IType>(attrs, ctx, inputs[0], req[0], outputs[0]);
        });
      });
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }

  // FComputeEx for logic ops: only same-storage sparse->sparse is supported.
  template<typename xpu, typename OP>
  static void LogicComputeEx(const nnvm::NodeAttrs &attrs,
                             const OpContext &ctx,
                             const std::vector<NDArray> &inputs,
                             const std::vector<OpReqType> &req,
                             const std::vector<NDArray> &outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    const auto in_stype = inputs[0].storage_type();
    const auto out_stype = outputs[0].storage_type();
    if (req[0] == kNullOp) {
      return;
    }
    if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) ||
        (in_stype == kCSRStorage && out_stype == kCSRStorage)) {
      // csr -> csr, or rsp -> rsp
      UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>);
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }

  // CPU backward: launches the backward_grad_tuned<OP> kernel over inputs[0]
  // and inputs[1] with scalar alpha (argument roles per the kernel's
  // definition — presumably head gradient then forward input; confirm there).
  template<typename OP>
  static void Backward_(const nnvm::NodeAttrs &attrs,
                        mshadow::Stream<cpu>* s,
                        const std::vector<TBlob> &inputs,
                        const std::vector<OpReqType> &req,
                        const std::vector<TBlob> &outputs) {
    using namespace mshadow;
    using namespace mshadow::expr;
    const double alpha = nnvm::get<double>(attrs.parsed);
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        mxnet::op::mxnet_op::Kernel<mxnet::op::mxnet_op::op_with_req<
          mxnet::op::mxnet_op::backward_grad_tuned<OP>, Req>, cpu>::
          Launch(s, inputs[0].Size(), outputs[0].dptr<DType>(),
                 inputs[0].dptr<DType>(), inputs[1].dptr<DType>(), DType(alpha));
      });
    });
  }

#if MXNET_USE_CUDA
  // GPU overload; presumably defined in the .cuh counterpart included below.
  template<typename OP>
  static void Backward_(const nnvm::NodeAttrs &attrs,
                        mshadow::Stream<gpu>* s,
                        const std::vector<TBlob> &inputs,
                        const std::vector<OpReqType> &req,
                        const std::vector<TBlob> &outputs);
#endif

  // FCompute backward entry point: forwards to the stream-specific Backward_.
  template<typename xpu, typename OP>
  static void Backward(const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const std::vector<TBlob> &inputs,
                       const std::vector<OpReqType> &req,
                       const std::vector<TBlob> &outputs) {
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    Backward_<OP>(attrs, s, inputs, req, outputs);
  }
};

// Registers a one-input/one-output operator whose "scalar" attribute is
// parsed to a double and which may be computed in place over its input.
#define MXNET_OPERATOR_REGISTER_BINARY_SCALAR(name)                   \
  NNVM_REGISTER_OP(name)                                              \
  .set_num_inputs(1)                                                  \
  .set_num_outputs(1)                                                 \
  .set_attr_parser([](NodeAttrs* attrs) {                             \
      attrs->parsed = std::stod(attrs->dict["scalar"]);               \
    })                                                                \
  .set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<1, 1>)   \
  .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>)       \
  .set_attr<nnvm::FInplaceOption>("FInplaceOption",                   \
    [](const NodeAttrs& attrs){                                       \
      return std::vector<std::pair<int, int> >{{0, 0}};               \
    })                                                                \
  .add_argument("data", "NDArray-or-Symbol", "source input")          \
  .add_argument("scalar", "float", "scalar input")

} // namespace op
} // namespace mxnet

#ifdef __CUDACC__
#include "elemwise_binary_scalar_op.cuh"
#endif

#endif  // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
GB_binop__eq_int64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):       GB (_AaddB__eq_int64)
// A.*B function (eWiseMult):     GB (_AemultB_01__eq_int64)
// A.*B function (eWiseMult):     GB (_AemultB_02__eq_int64)
// A.*B function (eWiseMult):     GB (_AemultB_03__eq_int64)
// A.*B function (eWiseMult):     GB (_AemultB_bitmap__eq_int64)
// A*D function (colscale):       GB (_AxD__eq_int64)
// D*A function (rowscale):       GB (_DxB__eq_int64)
// C+=B function (dense accum):   GB (_Cdense_accumB__eq_int64)
// C+=b function (dense accum):   GB (_Cdense_accumb__eq_int64)
// C+=A+B function (dense ewise3):GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_int64)
// C=scalar+B                     GB (_bind1st__eq_int64)
// C=scalar+B'                    GB (_bind1st_tran__eq_int64)
// C=A+scalar                     GB (_bind2nd__eq_int64)
// C=A'+scalar                    GB (_bind2nd_tran__eq_int64)

// C type:   bool
// A type:   int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij == bij)

#define GB_ATYPE \
    int64_t

#define GB_BTYPE \
    int64_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int64_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x == y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EQ || GxB_NO_INT64 || GxB_NO_EQ_INT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__eq_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__eq_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__eq_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__eq_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__eq_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int64_t bij = Bx [p] ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__eq_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int64_t aij = Ax [p] ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)       \
{                               \
    int64_t aij = Ax [pA] ;     \
    Cx [pC] = (x == aij) ;      \
}

GrB_Info GB (_bind1st_tran__eq_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)       \
{                               \
    int64_t aij = Ax [pA] ;     \
    Cx [pC] = (aij == y) ;      \
}

GrB_Info GB (_bind2nd_tran__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
declare_variant_messages.c
// RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp -x c -std=c99 -fms-extensions -Wno-pragma-pack %s
// RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp-simd -x c -std=c99 -fms-extensions -Wno-pragma-pack %s

// NOTE(review): clang '-verify' diagnostics test for '#pragma omp declare
// variant'. Each diagnostic annotation must stay on the same line as the
// construct it checks; do not rewrap these lines.

#pragma omp declare // expected-error {{expected an OpenMP directive}}
int foo(void);

#pragma omp declare variant // expected-error {{expected '(' after 'declare variant'}}
#pragma omp declare variant( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo // expected-error {{expected ')'}} expected-error {{expected 'match' clause on 'omp declare variant' directive}} expected-note {{to match this '('}}
#pragma omp declare variant(x) // expected-error {{use of undeclared identifier 'x'}}
#pragma omp declare variant(foo) // expected-error {{expected 'match' clause on 'omp declare variant' directive}}
#pragma omp declare variant(foo) // expected-error {{expected 'match' clause on 'omp declare variant' directive}}
#pragma omp declare variant(foo) xxx // expected-error {{expected 'match' clause on 'omp declare variant' directive}}
#pragma omp declare variant(foo) match // expected-error {{expected '(' after 'match'}}
#pragma omp declare variant(foo) match( // expected-error {{expected ')'}} expected-warning {{expected identifier or string literal describing a context set; set skipped}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match() // expected-warning {{expected identifier or string literal describing a context set; set skipped}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx=) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx=yyy) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx=yyy}) // expected-error {{expected ')'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(xxx={) // expected-error {{expected ')'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx={vvv, vvv}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx={vvv} xxx) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(xxx={vvv}) xxx // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp declare variant(foo) match(implementation={xxx}) // expected-warning {{'xxx' is not a valid context selector for the context set 'implementation'; selector ignored}} expected-note {{context selector options are: 'vendor' 'extension' 'unified_address' 'unified_shared_memory' 'reverse_offload' 'dynamic_allocators' 'atomic_default_mem_order'}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(implementation={vendor}) // expected-warning {{the context selector 'vendor' in context set 'implementation' requires a context property defined in parentheses; selector ignored}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(implementation={vendor(}) // expected-error {{expected ')'}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(implementation={vendor()}) // expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}}
#pragma omp declare variant(foo) match(implementation={vendor(score ibm)}) // expected-error {{expected '(' after 'score'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}}
#pragma omp declare variant(foo) match(implementation={vendor(score( ibm)}) // expected-error {{use of undeclared identifier 'ibm'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(implementation={vendor(score(2 ibm)}) // expected-error {{expected ')'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{to match this '('}} expected-note {{context property options are: 'amd' 'arm' 'bsc' 'cray' 'fujitsu' 'gnu' 'ibm' 'intel' 'llvm' 'pgi' 'ti' 'unknown'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(implementation={vendor(score(foo()) ibm)}) // expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{score expressions in the OpenMP context selector need to be constant; foo() is not and will be ignored}}
#pragma omp declare variant(foo) match(implementation={vendor(score(5): ibm), vendor(llvm)}) // expected-warning {{the context selector 'vendor' was used already in the same 'omp declare variant' directive; selector ignored}} expected-note {{the previous context selector 'vendor' used here}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(implementation={vendor(score(5): ibm), kind(cpu)}) // expected-warning {{the context selector 'kind' is not valid for the context set 'implementation'; selector ignored}} expected-note {{the context selector 'kind' can be nested in the context set 'device'; try 'match(device={kind(property)})'}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(device={xxx}) // expected-warning {{'xxx' is not a valid context selector for the context set 'device'; selector ignored}} expected-note {{context selector options are: 'kind' 'isa' 'arch'}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(device={kind}) // expected-warning {{the context selector 'kind' in context set 'device' requires a context property defined in parentheses; selector ignored}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(device={kind(}) // expected-error {{expected ')'}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(device={kind()}) // expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}}
#pragma omp declare variant(foo) match(device={kind(score cpu)}) // expected-error {{expected '(' after 'score'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('<invalid>'); score ignored}}
#pragma omp declare variant(foo) match(device={kind(score( ibm)}) // expected-error {{use of undeclared identifier 'ibm'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('<invalid>'); score ignored}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(device={kind(score(2 gpu)}) // expected-error {{expected ')'}} expected-error {{expected ')'}} expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('2'); score ignored}} expected-warning {{expected identifier or string literal describing a context property; property skipped}} expected-note {{to match this '('}} expected-note {{context property options are: 'host' 'nohost' 'cpu' 'gpu' 'fpga' 'any'}} expected-note {{to match this '('}}
#pragma omp declare variant(foo) match(device={kind(score(foo()) ibm)}) // expected-warning {{expected '':'' after the score expression; '':'' assumed}} expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('foo()'); score ignored}} expected-warning {{'ibm' is not a valid context property for the context selector 'kind' and the context set 'device'; property ignored}} expected-note {{try 'match(implementation={vendor(ibm)})'}} expected-note {{the ignored property spans until here}}
#pragma omp declare variant(foo) match(device={kind(score(5): host), kind(llvm)}) // expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('5'); score ignored}} expected-warning {{the context selector 'kind' was used already in the same 'omp declare variant' directive; selector ignored}} expected-note {{the previous context selector 'kind' used here}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(device={kind(score(5): nohost), vendor(llvm)}) // expected-warning {{the context selector 'kind' in the context set 'device' cannot have a score ('5'); score ignored}} expected-warning {{the context selector 'vendor' is not valid for the context set 'device'; selector ignored}} expected-note {{the context selector 'vendor' can be nested in the context set 'implementation'; try 'match(implementation={vendor(property)})'}} expected-note {{the ignored selector spans until here}}
#pragma omp declare variant(foo) match(implementation={extension("aaa")}) // expected-warning {{'aaa' is not a valid context property for the context selector 'extension' and the context set 'implementation'; property ignored}} expected-note {{context property options are: 'match_all' 'match_any' 'match_none'}} expected-note {{the ignored property spans until here}}
int bar(void);

#pragma omp declare variant(foo) match(implementation = {vendor(score(foo) :llvm)}) // expected-warning {{score expressions in the OpenMP context selector need to be constant; foo is not and will be ignored}}
#pragma omp declare variant(foo) match(implementation = {vendor(score(foo()) :llvm)}) // expected-warning {{score expressions in the OpenMP context selector need to be constant; foo() is not and will be ignored}}
#pragma omp declare variant(foo) match(implementation = {vendor(score(<expr>) :llvm)}) // expected-error {{expected expression}} expected-error {{use of undeclared identifier 'expr'}} expected-error {{expected expression}}
#pragma omp declare variant(foo) match(user = {condition(foo)}) // expected-error {{the user condition in the OpenMP context selector needs to be constant; foo is not}}
#pragma omp declare variant(foo) match(user = {condition(foo())}) // expected-error {{the user condition in the OpenMP context selector needs to be constant; foo() is not}}
#pragma omp declare variant(foo) match(user = {condition(<expr>)}) // expected-error {{expected expression}} expected-error {{use of undeclared identifier 'expr'}} expected-error {{expected expression}} expected-note {{the ignored selector spans until here}}
int score_and_cond_non_const();

#pragma omp declare variant(foo) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int a; // expected-error {{'#pragma omp declare variant' can only be applied to functions}}

#pragma omp declare variant(foo) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
#pragma omp threadprivate(a) // expected-error {{'#pragma omp declare variant' can only be applied to functions}}

int var;
#pragma omp threadprivate(var)

#pragma omp declare variant(foo) match(xxx={}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma omp declare // expected-error {{expected an OpenMP directive}}

#pragma omp declare variant(foo) match(xxx={}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma omp declare variant(foo) match(xxx={}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma options align=packed
int main();

#pragma omp declare variant(foo) match(implementation={vendor(llvm)}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma omp declare variant(foo) match(implementation={vendor(llvm)}) // expected-error {{function declaration is expected after 'declare variant' directive}}
#pragma init_seg(compiler)
int main();

#pragma omp declare variant(foo) match(xxx={}) // expected-error {{single declaration is expected after 'declare variant' directive}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int b, c;

int no_proto();
#pragma omp declare variant(no_proto) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int no_proto_too();

int proto1(int);
#pragma omp declare variant(proto1) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int diff_proto(); // expected-note {{previous declaration is here}}
int diff_proto(double); // expected-error {{conflicting types for 'diff_proto'}}

#pragma omp declare variant(no_proto) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int diff_proto1(double);

int after_use_variant(void);
int after_use();
int bar() { return after_use(); }

#pragma omp declare variant(after_use_variant) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-warning {{'#pragma omp declare variant' cannot be applied for function after first usage; the original function might be used}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int after_use(void);
#pragma omp declare variant(after_use_variant) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}}
int defined(void) { return 0; }
int
defined1(void) { return 0; } #pragma omp declare variant(after_use_variant) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-warning {{'#pragma omp declare variant' cannot be applied to the function that was defined already; the original function might be used}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} int defined1(void); int diff_cc_variant(void); #pragma omp declare variant(diff_cc_variant) match(xxx={}) // expected-error {{variant in '#pragma omp declare variant' with type 'int (void)' is incompatible with type 'int (void) __attribute__((vectorcall))'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} __vectorcall int diff_cc(void); int diff_ret_variant(void); #pragma omp declare variant(diff_ret_variant) match(xxx={}) // expected-error {{variant in '#pragma omp declare variant' with type 'int (void)' is incompatible with type 'void (void)'}} expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-note {{context set options are: 'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} void diff_ret(void); void marked(void); void not_marked(void); #pragma omp declare variant(not_marked) match(implementation={vendor(unknown)}, device={kind(cpu)}) // expected-note {{marked as 'declare variant' here}} void marked_variant(void); #pragma omp declare variant(marked_variant) match(xxx={}) // expected-warning {{'xxx' is not a valid context set in a `declare variant`; set ignored}} expected-warning {{variant function in '#pragma omp declare variant' is itself marked as '#pragma omp declare variant'}} expected-note {{context set options are: 
'construct' 'device' 'implementation' 'user'}} expected-note {{the ignored set spans until here}} void marked(void); #pragma omp declare variant // expected-error {{function declaration is expected after 'declare variant' directive}} #pragma omp declare variant // expected-error {{function declaration is expected after 'declare variant' directive}}
sequence.c
#include "sequence.h" #include <omp.h> #include <stdlib.h> #include <string.h> #include "type.h" unsigned char* read_msa(const char* path, int* ncol, int* nrow) { FILE* f = fopen(path, "r"); char buf[SEQ_BUFFER_SIZE]; int nc; *nrow = 0; *ncol = 0; while (fgets(buf, SEQ_BUFFER_SIZE, f)) { (*nrow)++; nc = strlen(buf); *ncol = nc > *ncol ? nc : *ncol; } *ncol -= 1; unsigned char* out = (unsigned char*)malloc(sizeof(unsigned char) * (*ncol * *nrow)); rewind(f); for (int i = 0; i < *nrow; i++) { fgets(buf, SEQ_BUFFER_SIZE, f); for (int j = 0; j < *ncol; j++) { out[i * *ncol + j] = aatoi(buf[j]); } } fclose(f); return out; } unsigned char aatoi(unsigned char aa) { char id; switch (aa) { case '-': id = 0; break; case 'A': id = 1; break; case 'C': id = 2; break; case 'D': id = 3; break; case 'E': id = 4; break; case 'F': id = 5; break; case 'G': id = 6; break; case 'H': id = 7; break; case 'I': id = 8; break; case 'K': id = 9; break; case 'L': id = 10; break; case 'M': id = 11; break; case 'N': id = 12; break; case 'P': id = 13; break; case 'Q': id = 14; break; case 'R': id = 15; break; case 'S': id = 16; break; case 'T': id = 17; break; case 'V': id = 18; break; case 'W': id = 19; break; case 'Y': id = 20; break; default: id = 0; } return id; } double cal_seq_weight(double* w, unsigned char* msa, int ncol, int nrow, double seq_id) { for (int i = 0; i < nrow; i++) { // printf("%d\n", i); w[i] = 1.0; } int i, j; double sim; #pragma omp parallel for default(shared) private(j, sim) for (i = 0; i < nrow; i++) { for (j = i + 1; j < nrow; j++) { sim = cal_seq_sim(msa + i * ncol, msa + j * ncol, ncol); if (sim >= seq_id) { #pragma omp critical w[i] += 1.0; w[j] += 1.0; } } } double neff = 0.0; for (int i = 0; i < nrow; i++) { w[i] = 1.0 / w[i]; neff += w[i]; } printf(">msa ncol= %d nrow= %d neff= %.2f\n", ncol, nrow, neff); return neff; } double cal_seq_sim(unsigned char* seq1, unsigned char* seq2, int ncol) { double sum = 0.0; for (int i = 0; i < ncol; i++) { sum += seq1[i] == 
seq2[i]; } return sum / ncol; }
rand_B1.c
#include "rand_B1.h" #include "rdtsc.h" //START_FUNC_DECL int rand_B1( uint64_t *X, uint64_t nX, RAND_B1_REC_TYPE *ptr_in, uint64_t idx ) //STOP_FUNC_DECL { int status = 0; static uint64_t l_sum; uint64_t seed = ptr_in->seed; double p = ptr_in->probability; if ( ( p < 0 ) || ( p > 1 ) ) { go_BYE(-1); } if ( idx == 0 ) { //seed has not yet been set l_sum = 0; if ( ptr_in->seed == 0 ) { ptr_in->seed = RDTSC(); } srand48_r(seed, &(ptr_in->buffer)); } //-- Initialize to 0 uint8_t *lX = (uint8_t *)X; uint64_t lnX = nX / 8; if ( ( lnX * 8 ) != nX ) { lnX++; } // #pragma omp parallel for for ( uint64_t i = 0; i < lnX; i++ ) { lX[i] = 0; } // #pragma omp parallel for for ( uint64_t i = 0; i < nX; i++ ) { uint64_t word_idx = i >> 6; /* divide by 64 */ uint64_t bit_idx = i & 0x3F; /* remainder after division by 64 */ double rval = drand48(); if ( rval <= p ) { uint64_t bval = ( (uint64_t)1 << bit_idx ); X[word_idx] |= bval; l_sum++; } } // fprintf(stderr, "randB1: %d, %llu, %lld, %lf \n", idx, nX, l_sum, p); BYE: return status; }
rt_dlansy.c
#include "runtime.h" void RT_CORE_dlansy(Quark *quark, Quark_Task_Flags *task_flags, int norm, PLASMA_enum uplo, int N, const double *A, int LDA, int szeA, int szeW, double *result) { plasma_context_t *plasma; plasma = plasma_context_self(); if (plasma->runtime == PLASMA_QUARK) { QUARK_CORE_dlansy( quark, task_flags, norm, uplo, N, A, LDA, szeA, szeW, result); } else if ( plasma->runtime == PLASMA_OMPSS ) { szeW = max(1, szeW); double *work = malloc(szeW * sizeof(double)); #pragma omp target device (smp) copy_deps #pragma omp task in([szeA]A) out([1]result) label(dlansy) *result = LAPACKE_dlansy_work(LAPACK_COL_MAJOR, lapack_const(norm), lapack_const(uplo), N, A, LDA, work); } } void RT_CORE_dlansy_f1(Quark *quark, Quark_Task_Flags *task_flags, PLASMA_enum norm, PLASMA_enum uplo, int N, const double *A, int LDA, int szeA, int szeW, double *result, double *fake, int szeF) { plasma_context_t *plasma; plasma = plasma_context_self(); if (plasma->runtime == PLASMA_QUARK) { QUARK_CORE_dlansy_f1( quark, task_flags, norm, uplo, N, A, LDA, szeA, szeW, result, fake, szeF); } else if ( plasma->runtime == PLASMA_OMPSS ) { szeW = max(1, szeW); double *work = malloc(szeW * sizeof(double)); if ( result == fake ) { #pragma omp target device (smp) copy_deps #pragma omp task in([szeA]A) out([1]result) label(dlansy_f1) *result = LAPACKE_dlansy_work(LAPACK_COL_MAJOR, lapack_const(norm), lapack_const(uplo), N, A, LDA, work); } else { #pragma omp target device (smp) copy_deps #pragma omp task in([szeA]A) out([1]result) fake([szeF]fake) label(dlansy_f1) *result = LAPACKE_dlansy_work(LAPACK_COL_MAJOR, lapack_const(norm), lapack_const(uplo), N, A, LDA, work); } } }
atomic.c
#include <omp.h>

/* Minimal demonstration of #pragma omp atomic: every thread in the parallel
 * region increments the shared counter exactly once, atomically. */
int main (void)
{
    /* BUG FIX: 'a' was uninitialized, so "a += 1" read an indeterminate
     * value (undefined behavior).  Start the counter at zero. */
    int a = 0;
#pragma omp parallel
    {
#pragma omp atomic
        a += 1;
    }
    return 0;
}
Source.c
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

#define PI 3.14159265358979323846264

/* Approximate pi with the midpoint rule applied to the integral of
 * 4/(1+x^2) over [0,1], parallelized with OpenMP, then print the elapsed
 * time, the estimate, and its distance from PI.
 *
 * iterations: number of midpoint samples
 * threads:    number of OpenMP threads to request
 */
void calcPI(int iterations, int threads)
{
	double mypi = 0.0;
	double start = omp_get_wtime();
	double m = 1.0 / (double)iterations;

	/* FIX: 'ni' is a per-iteration temporary.  It was previously declared at
	 * function scope with a meaningless reduction(+:ni) clause; declaring it
	 * inside the loop makes it naturally private to each iteration. */
#pragma omp parallel for num_threads(threads) reduction(+: mypi)
	for (int i = 0; i < iterations; i++)
	{
		double ni = ((double)i + 0.5) * m;
		mypi += 4.0 / (1.0 + ni * ni);
	}
	mypi *= m;

	double difTime = omp_get_wtime() - start;
	printf("Iterations %d - threads %d\tExecution time %f\n", iterations, threads, difTime);
	printf("\tMyPI = %.70f\n", mypi);
	printf("\tMyPI - PI = %.70f\n\n", (mypi - PI));
}

/* Benchmark driver: run calcPI for every (thread count, iteration count)
 * combination. */
int main(int argc, char* argv[])
{
	//int iteArr[3] = { 24000000, 48000000, 96000000 };
	int iteArr[3] = { 24000000, 48000000, 1000000000 };
	/* FIX: derive the bound from the array instead of the magic '3' the
	 * loop used before (iteLength was declared but never used). */
	int iteLength = (int)(sizeof iteArr / sizeof iteArr[0]);
	int threads[5] = { 1, 6, 12, 24, 48 };
	int thLength = (int)(sizeof threads / sizeof threads[0]);

	for (int thIndex = 0; thIndex < thLength; thIndex++)
	{
		for (int iteIndex = 0; iteIndex < iteLength; iteIndex++)
		{
			calcPI(iteArr[iteIndex], threads[thIndex]);
		}
	}
	return 0;
}
bml_submatrix_ellpack_typed.c
#ifdef BML_USE_MAGMA
#include "magma_v2.h"
#endif

#include "../../macros.h"
#include "../../typed.h"
#include "../bml_allocate.h"
#include "../bml_logger.h"
#include "../bml_submatrix.h"
#include "../bml_types.h"
#include "../dense/bml_allocate_dense.h"
#include "bml_allocate_ellpack.h"
#include "bml_submatrix_ellpack.h"
#include "bml_types_ellpack.h"

#include <complex.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#endif

/** Determine element indices for submatrix, given a set of nodes/orbitals.
 *
 * Builds core_halo_index as the core rows (from nodelist) followed by their
 * halo: neighbors found in the graph B, then in the Hamiltonian A, and
 * optionally a second graph hop ("double jump").
 *
 * \ingroup submatrix_group_C
 *
 * \param A Hamiltonian matrix A
 * \param B Graph matrix B
 * \param nodelist List of node/orbital indices
 * \param nsize Size of nodelist
 * \param core_halo_index List of core+halo indices
 * \param vsize Size of core_halo_index and number of cores
 * \param double_jump_flag Flag to use double jump (0=no, 1=yes)
 */
void TYPED_FUNC(
    bml_matrix2submatrix_index_ellpack) (
    bml_matrix_ellpack_t * A,
    bml_matrix_ellpack_t * B,
    int *nodelist,
    int nsize,
    int *core_halo_index,
    int *vsize,
    int double_jump_flag)
{
    int l, ll, ii, ls, k;

    int A_N = A->N;
    int A_M = A->M;
    int *A_nnz = A->nnz;
    int *A_index = A->index;

    int B_N = B->N;
    int B_M = B->M;
    int *B_nnz = B->nnz;
    int *B_index = B->index;

    /* visited marks, one per row; 0 = not yet collected.
     * NOTE(review): stack VLA of size A_N — assumes N is modest; confirm. */
    int ix[A_N];

    memset(ix, 0, A_N * sizeof(int));

    l = 0;                      /* total entries written to core_halo_index */
    ll = 0;                     /* number of core entries (list prefix) */

#ifdef USE_OMP_OFFLOAD
    /* refresh the host copies of the device-resident sparsity data */
#pragma omp target update from(A_nnz[:A_N], A_index[:A_N*A_M])
#pragma omp target update from(B_nnz[:B_N], B_index[:B_N*B_M])
#endif

    // Cores are first followed by halos
    for (int j = 0; j < nsize; j++)
    {
        ii = nodelist[j];
        if (ix[ii] == 0)
        {
            ix[ii] = ii + 1;
            core_halo_index[l] = ii;
            l++;
            ll++;
        }
    }

    // Collect halo indices from graph
    for (int j = 0; j < nsize; j++)
    {
        ii = nodelist[j];
        for (int jp = 0; jp < B_nnz[ii]; jp++)
        {
            k = B_index[ROWMAJOR(ii, jp, B_N, B_M)];
            if (ix[k] == 0)
            {
                ix[k] = ii + 1;
                core_halo_index[l] = k;
                l++;
            }
        }
    }

    // Add more halo elements from H
    for (int j = 0; j < nsize; j++)
    {
        ii = nodelist[j];
        for (int jp = 0; jp < A_nnz[ii]; jp++)
        {
            k = A_index[ROWMAJOR(ii, jp, A_N, A_M)];
            if (ix[k] == 0)
            {
                ix[k] = ii + 1;
                core_halo_index[l] = k;
                l++;
            }
        }
    }

    // Perform a "double jump" for extra halo elements
    // based on graph, like performing a symbolic X^2
    if (double_jump_flag == 1)
    {
        ls = l;                 /* freeze the list length before extending it */
        for (int j = 0; j < ls; j++)
        {
            ii = core_halo_index[j];
            for (int jp = 0; jp < B_nnz[ii]; jp++)
            {
                k = B_index[ROWMAJOR(ii, jp, B_N, B_M)];
                if (ix[k] == 0)
                {
                    ix[k] = ii + 1;
                    core_halo_index[l] = k;
                    l++;
                }
            }
        }
    }

    vsize[0] = l;
    vsize[1] = ll;
}

/** Determine element indices for submatrix, given a set of nodes/orbitals.
 *
 * Same as above but uses only the graph matrix B (no Hamiltonian pass).
 *
 * \ingroup submatrix_group_C
 *
 * \param B Graph matrix B
 * \param nodelist List of node/orbital indices
 * \param nsize Size of nodelist
 * \param core_halo_index List of core+halo indices
 * \param vsize Size of core_halo_index and number of cores
 * \param double_jump_flag Flag to use double jump (0=no, 1=yes)
 */
void TYPED_FUNC(
    bml_matrix2submatrix_index_graph_ellpack) (
    bml_matrix_ellpack_t * B,
    int *nodelist,
    int nsize,
    int *core_halo_index,
    int *vsize,
    int double_jump_flag)
{
    int l, ll, ii, ls, k;

    int B_N = B->N;
    int B_M = B->M;
    int *B_index = B->index;
    int *B_nnz = B->nnz;

    /* visited marks (stack VLA, see note above) */
    int ix[B_N];

    memset(ix, 0, B_N * sizeof(int));

    l = 0;                      /* total entries written */
    ll = 0;                     /* number of core entries */

#ifdef USE_OMP_OFFLOAD
#pragma omp target update from(B_nnz[:B_N], B_index[:B_N*B_M])
#endif

    // Cores are first followed by halos
    for (int j = 0; j < nsize; j++)
    {
        ii = nodelist[j];
        if (ix[ii] == 0)
        {
            ix[ii] = ii + 1;
            core_halo_index[l] = ii;
            l++;
            ll++;
        }
    }

    // Collect halo indices from graph
    for (int j = 0; j < nsize; j++)
    {
        ii = nodelist[j];
        for (int jp = 0; jp < B_nnz[ii]; jp++)
        {
            k = B_index[ROWMAJOR(ii, jp, B_N, B_M)];
            if (ix[k] == 0)
            {
                ix[k] = ii + 1;
                core_halo_index[l] = k;
                l++;
            }
        }
    }

    // Use graph for double jumps
    if (double_jump_flag == 1)
    {
        ls = l;
        for (int j = 0; j < ls; j++)
        {
            ii = core_halo_index[j];
            for (int jp = 0; jp < B_nnz[ii]; jp++)
            {
                k = B_index[ROWMAJOR(ii, jp, B_N, B_M)];
                if (ix[k] == 0)
                {
                    ix[k] = ii + 1;
                    core_halo_index[l] = k;
                    l++;
                }
            }
        }
    }

    vsize[0] = l;
    vsize[1] = ll;
}

/** Extract a submatrix from a matrix given a set of core+halo rows.
 *
 * The result is a dense lsize x lsize matrix whose (i,j) entry is
 * A(core_halo_index[i], core_halo_index[j]).
 *
 * \ingroup submatrix_group_C
 *
 * \param A Matrix A
 * \param B Submatrix B
 * \param core_halo_index Set of row indices for submatrix
 * \param lsize Number of indices
 */
void TYPED_FUNC(
    bml_matrix2submatrix_ellpack) (
    bml_matrix_ellpack_t * A,
    bml_matrix_dense_t * B,
    int *core_halo_index,
    int lsize)
{
    REAL_T *rvalue;
    int B_N = B->N;
#ifdef BML_USE_MAGMA
    /* with MAGMA, assemble on the host then upload to B->matrix below */
    REAL_T *B_matrix = bml_allocate_memory(sizeof(REAL_T) * B->N * B->N);
#else
    REAL_T *B_matrix = B->matrix;
#endif
#ifdef USE_OMP_OFFLOAD
    int A_N = A->N;
    int A_M = A->M;
    int *A_nnz = A->nnz;
    int *A_index = A->index;
#pragma omp target update from(A_nnz[:A_N], A_index[:A_N*A_M])
#endif
#pragma omp parallel for \
    private(rvalue) \
    shared(core_halo_index) \
    shared(A, B_matrix, B_N)
    for (int jb = 0; jb < lsize; jb++)
    {
        /* gather row core_halo_index[jb] of A at the requested columns */
        rvalue = TYPED_FUNC(bml_getVector_ellpack) (A, core_halo_index,
                                                    core_halo_index[jb],
                                                    lsize);
        for (int j = 0; j < lsize; j++)
        {
            B_matrix[ROWMAJOR(jb, j, B_N, B_N)] = rvalue[j];
        }
        bml_free_memory(rvalue);
    }
#ifdef BML_USE_MAGMA
    MAGMA(setmatrix) (B_N, B_N, (MAGMA_T *) B_matrix, B_N,
                      B->matrix, B->ld, bml_queue());
    bml_free_memory(B_matrix);
#endif
}

/** Assemble submatrix into a full matrix based on core+halo indeces.
 *
 * Scatters the first llsize (core) rows of the dense submatrix A back into
 * the ellpack matrix B, keeping only entries above threshold.
 *
 * \ingroup submatrix_group_C
 *
 * \param A Submatrix A
 * \param B Matrix B
 * \param core_halo_index Set of submatrix row indices
 * \param lsize Number of indices
 * \param llsize Number of core positions
 */
void TYPED_FUNC(
    bml_submatrix2matrix_ellpack) (
    bml_matrix_dense_t * A,
    bml_matrix_ellpack_t * B,
    int *core_halo_index,
    int lsize,
    int llsize,
    double threshold)
{
    int A_N = A->N;
#ifdef BML_USE_MAGMA
    /* download the device-resident dense submatrix to a host buffer */
    REAL_T *A_matrix = bml_allocate_memory(sizeof(REAL_T) * A->N * A->N);
    MAGMA(getmatrix) (A->N, A->N, A->matrix, A->ld,
                      (MAGMA_T *) A_matrix, A->N, bml_queue());
#else
    REAL_T *A_matrix = A->matrix;
#endif
    int B_N = B->N;
    int B_M = B->M;
    int *B_nnz = B->nnz;
    int *B_index = B->index;
    REAL_T *B_value = B->value;
    int ii, icol;
#ifdef USE_OMP_OFFLOAD
#pragma omp target update from(B_nnz[:B_N], B_index[:B_N*B_M], B_value[:B_N*B_M])
#endif
    /* NOTE(review): the icol > B_M check below fires only AFTER the row has
     * been written, so a row with more than B_M above-threshold entries has
     * already written past its ellpack row before the error is logged. */
#pragma omp parallel for \
    private(ii, icol) \
    shared(core_halo_index) \
    shared(A_N, A_matrix) \
    shared(B_N, B_M, B_nnz, B_index, B_value)
    for (int ja = 0; ja < llsize; ja++)
    {
        ii = core_halo_index[ja];   /* destination row in B */
        icol = 0;                   /* entries kept in this row */
        for (int jb = 0; jb < lsize; jb++)
        {
            if (ABS(A_matrix[ROWMAJOR(ja, jb, A_N, A_N)]) > threshold)
            {
                B_index[ROWMAJOR(ii, icol, B_N, B_M)] = core_halo_index[jb];
                B_value[ROWMAJOR(ii, icol, B_N, B_M)] =
                    A_matrix[ROWMAJOR(ja, jb, A_N, A_N)];
                icol++;
            }
        }
        if (icol > B_M)
        {
            LOG_ERROR("Number of non-zeroes per row >= M, Increase M\n");
        }
        B_nnz[ii] = icol;
    }
#ifdef BML_USE_MAGMA
    bml_free_memory(A_matrix);
#endif
#ifdef USE_OMP_OFFLOAD
#pragma omp target update to(B_nnz[:B_N], B_index[:B_N*B_M], B_value[:B_N*B_M])
#endif
}

// Get matching vector of values: returns a malloc'ed array of colCnt values
// where entry i is A(irow, jj[i]), or zero when that column is not stored.
// NOTE(review): rvalue[i] stays uninitialized (noinit allocation) when
// A_nnz[irow] == 0 — confirm callers never pass an empty row.
void *TYPED_FUNC(
    bml_getVector_ellpack) (
    bml_matrix_ellpack_t * A,
    int *jj,
    int irow,
    int colCnt)
{
    REAL_T ZERO = 0.0;
    int A_N = A->N;
    int A_M = A->M;
    int *A_nnz = A->nnz;
    int *A_index = A->index;
    REAL_T *A_value = A->value;
    REAL_T *rvalue = bml_noinit_allocate_memory(colCnt * sizeof(REAL_T));
    for (int i = 0; i < colCnt; i++)
    {
        for (int j = 0; j < A_nnz[irow]; j++)
        {
            if (A_index[ROWMAJOR(irow, j, A_N, A_M)] == jj[i])
            {
                rvalue[i] = A_value[ROWMAJOR(irow, j, A_N, A_M)];
                break;
            }
            rvalue[i] = ZERO;   /* overwritten on a later match, if any */
        }
    }
    return rvalue;
}

/** Assemble matrix based on groups of rows from a matrix.
 *
 * Builds an ngroups x ngroups connectivity matrix: groups i and j are
 * connected when some above-threshold entry of A (or its symmetric partner)
 * links a row of group i to a row of group j.  hindex holds 1-based group
 * start rows.
 *
 * \ingroup submatrix_group_C
 *
 * \param A Matrix A
 * \param hindex Indices of nodes
 * \param ngroups Number of groups
 * \param threshold Threshold for graph
 */
bml_matrix_ellpack_t
    * TYPED_FUNC(bml_group_matrix_ellpack) (bml_matrix_ellpack_t * A,
                                            int *hindex,
                                            int ngroups,
                                            double threshold)
{
    int A_N = A->N;
    int A_M = A->M;
    int *A_index = A->index;
    int *A_nnz = A->nnz;
    REAL_T *A_value = A->value;
#ifdef USE_OMP_OFFLOAD
#pragma omp target update from(A_nnz[:A_N], A_index[:A_N*A_M], A_value[:A_N*A_M])
#endif
#if !(defined(__IBMC_) || defined(__ibmxl__))
    /* per-thread visited marks (firstprivate below); the IBM compilers get a
     * per-iteration copy inside the loop instead.
     * NOTE(review): with firstprivate, marks set in one iteration persist
     * into later iterations run by the same thread and are never reset —
     * confirm the ix[ii] == 0 test is still correct across iterations. */
    int ix[ngroups];
    memset(ix, 0, sizeof(int) * ngroups);
#endif
    int hnode[A_N];             /* row -> group id map */
    int hend;
    bml_matrix_dimension_t matrix_dimension = { ngroups, ngroups, ngroups };
    bml_matrix_ellpack_t *B =
        TYPED_FUNC(bml_noinit_matrix_ellpack) (matrix_dimension,
                                               A->distribution_mode);
    int B_N = B->N;
    int B_M = B->M;
    int *B_index = B->index;
    int *B_nnz = B->nnz;
    REAL_T *B_value = B->value;

    /* fill hnode: every row in [hindex[i]-1, hend) belongs to group i */
#pragma omp parallel for \
    private(hend) \
    shared(hindex, hnode, A_N)
    for (int i = 0; i < ngroups; i++)
    {
        if (i == ngroups - 1)
            hend = A_N;
        else
            hend = hindex[i + 1] - 1;
        for (int j = hindex[i] - 1; j < hend; j++)
        {
            hnode[j] = i;
        }
    }

#if defined(__IBMC_) || defined(__ibmxl__)
#pragma omp parallel for \
    private(hend) \
    shared(hindex, hnode) \
    shared(A_nnz, A_index, A_value, A_N, A_M) \
    shared(B_nnz, B_index, B_value, B_N, B_M)
#else
#pragma omp parallel for \
    private(hend) \
    shared(hindex, hnode) \
    shared(A_nnz, A_index, A_value, A_N, A_M) \
    shared(B_nnz, B_index, B_value, B_N, B_M) \
    firstprivate(ix)
#endif
    for (int i = 0; i < B_N; i++)
    {
#if defined(__IBMC_) || defined(__ibmxl__)
        int ix[ngroups];
        memset(ix, 0, sizeof(int) * ngroups);
#endif
        /* each group is connected to itself (diagonal entry) */
        ix[i] = i + 1;
        B_index[ROWMAJOR(i, 0, B_N, B_M)] = i;
        B_value[ROWMAJOR(i, 0, B_N, B_M)] = 1.0;
        B_nnz[i] = 1;
        if (i == B_N - 1)
            hend = A_N;
        else
            hend = hindex[i + 1] - 1;
        /* scan every stored entry of every row belonging to group i */
        for (int j = hindex[i] - 1; j < hend; j++)
        {
            for (int k = 0; k < A_nnz[j]; k++)
            {
                int ii = hnode[A_index[ROWMAJOR(j, k, A_N, A_M)]];
                if (ix[ii] == 0 && ii != i)
                {
                    //printf("row = %d col = %d val = %e\n", j, A_index[ROWMAJOR(j, k, A_N, A_M)], A_value[ROWMAJOR(j, k, A_N, A_M)]);
                    if (is_above_threshold
                        (A_value[ROWMAJOR(j, k, A_N, A_M)], threshold))
                    {
                        ix[ii] = i + 1;
                        B_index[ROWMAJOR(i, B_nnz[i], B_N, B_M)] = ii;
                        B_value[ROWMAJOR(i, B_nnz[i], B_N, B_M)] = 1.0;
                        B_nnz[i]++;
                    }
                    else
                    {
                        /* entry below threshold: also check the symmetric
                         * partner row for a link back to group i */
                        int kk = A_index[ROWMAJOR(j, k, A_N, A_M)];
                        for (int l = 0; l < A_nnz[kk]; l++)
                        {
                            int jj = hnode[A_index[ROWMAJOR(kk, l, A_N, A_M)]];
                            if (jj == i)
                            {
                                //printf("sym row = %d col = %d val = %e\n", kk, A_index[ROWMAJOR(kk, l, A_N, A_M)], A_value[ROWMAJOR(kk, l, A_N, A_M)]);
                                if (is_above_threshold
                                    (A_value[ROWMAJOR(kk, l, A_N, A_M)],
                                     threshold))
                                {
                                    ix[ii] = i + 1;
                                    B_index[ROWMAJOR(i, B_nnz[i], B_N, B_M)] =
                                        ii;
                                    B_value[ROWMAJOR(i, B_nnz[i], B_N, B_M)] =
                                        1.0;
                                    B_nnz[i]++;
                                    break;
                                }
                            }
                        }
                    }
                }
            }
        }
    }
#ifdef USE_OMP_OFFLOAD
#pragma omp target update to(B_nnz[:B_N], B_index[:B_N*B_M], B_value[:B_N*B_M])
#endif
    return B;
}

/** Extract submatrix into new matrix of same format
 *
 * \ingroup submatrix_group_C
 *
 * \param A Matrix A to extract submatrix from
 * \param irow Index of first row to extract
 * \param icol Index of first column to extract
 * \param B_N Number of rows/columns to extract
 * \param B_M Max number of non-zero elements/row in extracted matrix
 */
bml_matrix_ellpack_t
    * TYPED_FUNC(bml_extract_submatrix_ellpack) (bml_matrix_ellpack_t * A,
                                                 int irow,
                                                 int icol,
                                                 int B_N,
                                                 int B_M)
{
    int A_N = A->N;
    int A_M = A->M;
    int *A_index = A->index;
    int *A_nnz = A->nnz;
    REAL_T *A_value = A->value;

    bml_matrix_ellpack_t *B;
    B = TYPED_FUNC(bml_zero_matrix_ellpack) (B_N, B_M, A->distribution_mode);

    int *B_index = B->index;
    int *B_nnz = B->nnz;
    REAL_T *B_value = B->value;

    // loop over subset of rows of A
    for (int i = irow; i < irow + B_N; i++)
    {
        for (int jp = 0; jp < A_nnz[i]; jp++)
        {
            int j = A_index[ROWMAJOR(i, jp, A_N, A_M)];
            /* keep only entries inside the B_N-wide column window */
            if (j >= icol && j < icol + B_N)
            {
                int iB = i - irow;
                B_index[ROWMAJOR(i - irow, B_nnz[iB], B_N, B_M)] = j - icol;
                B_value[ROWMAJOR(i - irow, B_nnz[iB], B_N, B_M)] =
                    A_value[ROWMAJOR(i, jp, A_N, A_M)];
                B_nnz[iB]++;
            }
        }
    }
    return B;
}

/** Assign a block B into matrix A
 *
 * Appends every stored entry of B to the corresponding (row+irow) of A with
 * its column shifted by icol.
 * NOTE(review): rows of A are extended without checking against A_M, and
 * duplicate columns are not merged — confirm callers guarantee capacity and
 * disjoint sparsity.
 *
 * \param A Matrix A
 * \param B Matrix B
 * \param irow First row where to insert block B
 * \param icol Offset column to insert block B
 */
void TYPED_FUNC(
    bml_assign_submatrix_ellpack) (
    bml_matrix_ellpack_t * A,
    bml_matrix_ellpack_t * B,
    int irow,
    int icol)
{
    int A_N = A->N;
    int A_M = A->M;
    int *A_index = A->index;
    int *A_nnz = A->nnz;
    REAL_T *A_value = A->value;

    int B_N = B->N;
    int B_M = B->M;
    int *B_index = B->index;
    int *B_nnz = B->nnz;
    REAL_T *B_value = B->value;

    // loop over rows of B
    for (int i = 0; i < B_N; i++)
    {
        for (int jp = 0; jp < B_nnz[i]; jp++)
        {
            int jB = B_index[ROWMAJOR(i, jp, B_N, B_M)];
            int jpA = A_nnz[i + irow];  /* append at the end of row i+irow */
            A_value[ROWMAJOR(i + irow, jpA, A_N, A_M)] =
                B_value[ROWMAJOR(i, jp, B_N, B_M)];
            A_index[ROWMAJOR(i + irow, jpA, A_N, A_M)] = jB + icol;
            A_nnz[i + irow]++;
        }
    }
}
DigraphTemplate.h
/////////////////////////////////////////////////////////////////////////////// // SOFTWARE COPYRIGHT NOTICE AGREEMENT // // This software and its documentation are copyright (2014) by the // // Broad Institute. All rights are reserved. This software is supplied // // without any warranty or guaranteed support whatsoever. The Broad // // Institute is not responsible for its use, misuse, or functionality. // /////////////////////////////////////////////////////////////////////////////// // This file contains some template functions from Digraph.h. They are here in // a separate file so that these functions do not have to be inlined, thereby // allowing for reduction of compilation time and executable size (in principle). // // See Digraph.h for notes about usage of this file. // In particular, do not include this file to resolve link errors. // The digraph-derived template classes are explicitly instantiated in a single // module for each template parameter. This is typically in the .cc file // associated with the .h file that defines the template parameter. So, for // example, all the explicit instantiations of methods of digraphE<KmerPath> // are declared in KmerPath.cc. To resolve link errors, find the right place to // explicitly instantiate the missing method, and add it to the list of explicit // instantiations at the end of that file. 
#ifndef DIGRAPH_TEMPLATE_H #define DIGRAPH_TEMPLATE_H // MakeDepend: library OMP // MakeDepend: cflags OMP_FLAGS #include "CoreTools.h" #include "Equiv.h" #include "FeudalMimic.h" #include "Set.h" #include "VecUtilities.h" #include "graph/Digraph.h" #include <cstddef> template<class E> vec<int> digraphE<E>:: EdgesBoundedBy( const int e1, const int e2, const vec<int>& to_left, const vec<int>& to_right ) const { int v = to_right[e1], w = to_left[e2]; vec<int> edges, verts; set<int> edgesx, vertsx; edges.push_back( e1, e2 ); edgesx.insert(e1), edgesx.insert(e2); verts.push_back( v, w ); vertsx.insert(v), vertsx.insert(w); for ( int i = 0; i < verts.isize( ); i++ ) { int x = verts[i]; for ( int j = 0; j < To(x).isize( ); j++ ) { int y = To(x)[j]; if ( Member( vertsx, y ) ) continue; int e = EdgeObjectIndexByIndexTo( x, j ); if ( e == e1 || e == e2 ) continue; verts.push_back(y); vertsx.insert(y); if ( Member( edgesx, e ) ) continue; edges.push_back(e); edgesx.insert(e); } for ( int j = 0; j < From(x).isize( ); j++ ) { int y = From(x)[j]; if ( Member( vertsx, y ) ) continue; int e = EdgeObjectIndexByIndexFrom( x, j ); if ( e == e1 || e == e2 ) continue; verts.push_back(y); vertsx.insert(y); if ( Member( edgesx, e ) ) continue; edges.push_back(e); edgesx.insert(e); } } return edges; } template<class E> void digraphE<E>::InitialEdges( vec<int>& v ) const { v.clear( ); for ( int x = 0; x < N( ); x++ ) { if ( To(x).empty( ) ) { for ( int j = 0; j < From(x).isize( ); j++ ) v.push_back( EdgeObjectIndexByIndexFrom( x, j ) ); } } } template<class E> void digraphE<E>::TerminalEdges( vec<int>& v ) const { v.clear( ); for ( int x = 0; x < N( ); x++ ) { if ( From(x).empty( ) ) { for ( int j = 0; j < To(x).isize( ); j++ ) v.push_back( EdgeObjectIndexByIndexTo( x, j ) ); } } } template<class E> digraphE<E>::digraphE( const ConstructorType2 constructor_type, const digraphE& g, const vec<int>& ed, const vec<int>& to_left, const vec<int>& to_right ) { ForceAssertEq( (int) 
constructor_type, (int) COMPLETE_SUBGRAPH_EDGES ); ForceAssert( ed.UniqueOrdered( ) ); edges_.resize( ed.size( ) ); for ( int i = 0; i < ed.isize( ); i++ ) edges_[i] = g.EdgeObject( ed[i] ); vec<int> verts; for ( int i = 0; i < ed.isize( ); i++ ) verts.push_back( to_left[ ed[i] ], to_right[ ed[i] ] ); UniqueSort(verts); int N = verts.size( ); from_.resize(N), to_.resize(N); from_edge_obj_.resize(N), to_edge_obj_.resize(N); for ( int i = 0; i < ed.isize( ); i++ ) { int e = ed[i]; int v = to_left[e], w = to_right[e]; int iv = BinPosition( verts, v ), iw = BinPosition( verts, w ); from_[iv].push_back(iw); from_edge_obj_[iv].push_back(i); to_[iw].push_back(iv); to_edge_obj_[iw].push_back(i); } } template<class E> void digraphE<E>::Initialize( const ConstructorType1 constructor_type, const digraphE& g, const vec<int>& v ) { ForceAssertEq( (int) constructor_type, (int) COMPLETE_SUBGRAPH ); from_.resize( v.size( ) ), to_.resize( v.size( ) ); from_edge_obj_.resize( v.size( ) ), to_edge_obj_.resize( v.size( ) ); int edgecount = 0; vec<int> vsorted(v), vindex( v.size( ), vec<int>::IDENTITY ); SortSync( vsorted, vindex ); for ( int i = 0; i < v.isize( ); i++ ) { int x = v[i]; for ( int j = 0; j < g.From(x).isize( ); j++ ) { int y = g.From(x)[j]; int p2 = BinPosition( vsorted, y ); if ( p2 < 0 ) continue; int i2 = vindex[p2]; from_[i].push_back(i2); to_[i2].push_back(i); from_edge_obj_[i].push_back(edgecount); to_edge_obj_[i2].push_back(edgecount); ++edgecount; } } edges_.reserve(edgecount); for ( int i = 0; i < v.isize( ); i++ ) { int x = v[i]; for ( int j = 0; j < g.From(x).isize( ); j++ ) { int y = g.From(x)[j]; int p2 = BinPosition( vsorted, y ); if ( p2 < 0 ) continue; int i2 = vindex[p2]; edges_.push_back( g.EdgeObjectByIndexFrom( x, j ) ); } } for ( int i = 0; i < v.isize( ); i++ ) { SortSync( from_[i], from_edge_obj_[i] ); SortSync( to_[i], to_edge_obj_[i] ); } } template<class E> digraphE<E>::digraphE( const ConstructorType1 constructor_type, const digraphE& g, const 
vec<int>& v ) { Initialize( constructor_type, g, v ); } template<class E> vec<int> digraphE<E>::EdgesConnectedTo( const vec<int>& v ) const { vec<int> G = VerticesConnectedTo(v), e; for ( int x = 0; x < G.isize( ); x++ ) { for ( int j = 0; j < From( G[x] ).isize( ); j++ ) e.push_back( EdgeObjectIndexByIndexFrom( G[x], j ) ); for ( int j = 0; j < To( G[x] ).isize( ); j++ ) e.push_back( EdgeObjectIndexByIndexTo( G[x], j ) ); } UniqueSort(e); return e; } template<class E> digraphE<E> digraphE<E>::Subgraph( const vec<int>& v ) const { digraphE result; result.from_.resize( v.size( ) ); result.to_.resize( v.size( ) ); result.from_edge_obj_.resize( v.size( ) ); result.to_edge_obj_.resize( v.size( ) ); int edgecount = 0; vec<int> vsorted(v), vindex( v.size( ), vec<int>::IDENTITY ); SortSync( vsorted, vindex ); for ( int i = 0; i < v.isize( ); i++ ) { int x = v[i]; for ( int j = 0; j < From(x).isize( ); j++ ) { int y = From(x)[j]; int p2 = BinPosition( vsorted, y ); if ( p2 < 0 ) continue; int i2 = vindex[p2]; result.from_[i].push_back(i2); result.to_[i2].push_back(i); result.from_edge_obj_[i].push_back(edgecount); result.to_edge_obj_[i2].push_back(edgecount); ++edgecount; } } result.edges_.reserve(edgecount); for ( int i = 0; i < v.isize( ); i++ ) { int x = v[i]; for ( int j = 0; j < From(x).isize( ); j++ ) { int y = From(x)[j]; int p2 = BinPosition( vsorted, y ); if ( p2 < 0 ) continue; int i2 = vindex[p2]; result.edges_.push_back( EdgeObjectByIndexFrom( x, j ) ); } } for ( int i = 0; i < v.isize( ); i++ ) { SortSync( result.from_[i], result.from_edge_obj_[i] ); SortSync( result.to_[i], result.to_edge_obj_[i] ); } return result; } template<class E> digraphE<E>::digraphE( const ConstructorName cname, const digraphE& g, const vec< vec<int> >& C ) { ForceAssert( cname == FROM_SUBS ); int nedges = 0; for ( int i = 0; i < C.isize( ); i++ ) nedges += C[i].size( ); edges_.reserve(nedges); vec<int> to_left, to_right; g.ToLeft(to_left), g.ToRight(to_right); for ( int i = 0; i < 
C.isize( ); i++ ) { for ( int j = 0; j < C[i].isize( ); j++ ) edges_.push_back( g.EdgeObject( C[i][j] ) ); } for ( int pass = 1; pass <= 2; pass++ ) { int nverts = 0, nedges = 0; for ( int i = 0; i < C.isize( ); i++ ) { vec<int> verts; for ( int j = 0; j < C[i].isize( ); j++ ) verts.push_back( to_left[ C[i][j] ], to_right[ C[i][j] ] ); UniqueSort(verts); if ( pass == 2 ) { for ( int j = 0; j < C[i].isize( ); j++ ) { int v = BinPosition( verts, to_left[ C[i][j] ] ); int w = BinPosition( verts, to_right[ C[i][j] ] ); from_[ nverts + v ].push_back( nverts + w ); to_[ nverts + w ].push_back( nverts + v ); from_edge_obj_[ nverts + v ].push_back(nedges + j); to_edge_obj_[ nverts + w ].push_back(nedges + j); } } nverts += verts.size( ); nedges += C[i].size( ); } if ( pass == 1 ) { from_.resize(nverts), to_.resize(nverts); from_edge_obj_.resize(nverts), to_edge_obj_.resize(nverts); } } for ( int v = 0; v < N( ); v++ ) { SortSync( from_[v], from_edge_obj_[v] ); SortSync( to_[v], to_edge_obj_[v] ); } } template<class E> digraphE<E>::digraphE( const digraphE& g, int n ) { equiv_rel e; g.ComponentRelation(e); vec<int> reps, o; e.OrbitRepsAlt(reps); ForceAssertLt( n, reps.isize( ) ); e.Orbit( reps[n], o ); *this = g.Subgraph(o); } template<class E> void digraphE<E>::Initialize( const int n ){ edges_.resize(n); from_.resize(n); to_.resize(n); from_edge_obj_.resize(n); to_edge_obj_.resize(n); } template<class F> void digraphE<F>::EdgeEquivConstructor( const vec<F>& edges, const equiv_rel& e ) { edges_ = edges; int ne = edges.size( ); vec<int> reps; e.OrbitReps(reps); int nv = 2 * reps.isize( ); to_edge_obj_.resize(nv); from_edge_obj_.resize(nv); to_.resize(nv); from_.resize(nv); for ( int i = 0; i < reps.isize( ); i++ ) { vec<int> o; e.Orbit( reps[i], o ); for ( int j = 0; j < o.isize( ); j++ ) { from_[ 2*i ].push_back( 2*i + 1 ); from_edge_obj_[ 2*i ].push_back( o[j] ); to_[ 2*i + 1 ].push_back( 2*i ); to_edge_obj_[ 2*i + 1 ].push_back( o[j] ); } } } template<class F> 
digraphE<F>::digraphE( const vec<F>& edges, const equiv_rel& e ) { EdgeEquivConstructor( edges, e ); } template<class F> digraphE<F>::digraphE( const vec<F>& edges, const ConstructorBehavior constructor_type ) { edges_ = edges; int ne = edges.size( ); int nv = ( constructor_type == EDGES_SEPARATE ? ne * 2 : ne + 1 ); to_edge_obj_.resize(nv); from_edge_obj_.resize(nv); to_.resize(nv); from_.resize(nv); if ( constructor_type == EDGES_SEPARATE ) { for ( int i = 0; i < ne; i++ ) { from_[ 2*i ].push_back( 2*i + 1 ); from_edge_obj_[ 2*i ].push_back(i); to_[ 2*i + 1 ].push_back( 2*i ); to_edge_obj_[ 2*i + 1 ].push_back(i); } } else if ( constructor_type == EDGES_IN_LINE ) { for ( int i = 0; i <= ne; i++ ) { if ( i < ne ) { from_[i].push_back(i+1); from_edge_obj_[i].push_back(i); } if ( i > 0 ) { to_[i].push_back(i-1); to_edge_obj_[i].push_back(i-1); } } } else ForceAssert( 0 == 1 ); } template<class F> void digraphE<F>::Used( vec<Bool>& used ) const { used.resize_and_set( edges_.size( ), False ); for ( int i = 0; i < N( ); i++ ) { for ( int j = 0; j < to_edge_obj_[i].isize( ); j++ ) used[ to_edge_obj_[i][j] ] = True; } } template<class F> int digraphE<F>::UsedCount( ) const { vec<Bool> used; Used(used); return Sum(used); } template<class F> void digraphE<F>::JoinEdges( int x, const F& e ) { if ( from_[x].size( ) != 1 || to_[x].size( ) != 1 ) { cout << "Problem in JoinEdges.\n"; PRINT(x); cout << "edges in = " << printSeq( ToEdgeObj(x) ) << endl; cout << "edges out = " << printSeq( FromEdgeObj(x) ) << endl; } ForceAssert( from_[x].size( ) == 1 && to_[x].size( ) == 1 ); int v = to_[x][0], w = from_[x][0]; ForceAssert( x != v || x != w ); from_[x].clear( ), from_edge_obj_[x].clear( ); to_[x].clear( ), to_edge_obj_[x].clear( ); for ( int i = 0; i < from_[v].isize( ); i++ ) { if ( from_[v][i] == x ) { from_[v].erase( from_[v].begin( ) + i ); from_edge_obj_[v].erase( from_edge_obj_[v].begin( ) + i ); break; } } for ( int i = 0; i < to_[w].isize( ); i++ ) { if ( to_[w][i] == x ) 
{ to_[w].erase( to_[w].begin( ) + i ); to_edge_obj_[w].erase( to_edge_obj_[w].begin( ) + i ); break; } } AddEdge( v, w, e ); } template<class F> void digraphE<F>::RemoveUnneededVertices( ) { for ( int i = 0; i < N( ); i++ ) { if ( From(i).size( ) == 1 && To(i).size( ) == 1 && From(i)[0] != i ) { F p = EdgeObjectByIndexTo( i, 0 ); p.append( EdgeObjectByIndexFrom( i, 0 ) ); JoinEdges( i, p ); } } RemoveEdgelessVertices( ); } // Input is a set of vertices v. Each v must be located at the opening of // a bubble, with exactly two edges that lead to the same successor w: // _-_ // --> v w --> // -_- template<class E> void digraphE<E>::PopBubbles( const vec<int> & bubble_vs ) { vec<int> bubble_edges; bubble_edges.reserve( bubble_vs.size() ); for ( int i = 0; i < bubble_vs.isize(); i++ ) { int v = bubble_vs[i]; ForceAssertEq( from_[v].size(), 2u ); ForceAssertEq( from_[v][0], from_[v][1] ); // Choose one of the edges that make up this bubble, and delete it. // Arbitrarily, we choose the higher-indexed path. bubble_edges.push_back( from_edge_obj_[v][1] ); } DeleteEdges( bubble_edges ); // Combine edges. For bubbles v->w in which v had only one predecessor // and/or w had only one successor, this will combine the remaining edge in // the bubble with the edge leading to/from the bubble. RemoveUnneededVertices( ); // Clear out edges that have been removed from the graph. RemoveDeadEdgeObjects( ); } // Input is a set of vertices v. Each v must be located at the opening of // a bubble, with two or more edges that lead to the same successor w: // _-_ // - - // --> v ----- w --> // _ _ // -_- template<class E> void digraphE<E>::PopHyperBubbles( const vec<int> & bubble_vs ) { vec<int> bubble_edges; bubble_edges.reserve( bubble_vs.size() ); for ( int i = 0; i < bubble_vs.isize(); i++ ) { int v = bubble_vs[i]; ForceAssertGe( from_[v].size(), 2u ); ForceAssertEq( Min(from_[v]), Max(from_[v]) ); // Choose one of the edges that make up this bubble, and delete it. 
// Arbitrarily, we choose the higher-indexed path. for ( size_t ib = 1; ib < from_edge_obj_[v].size(); ib++ ) bubble_edges.push_back( from_edge_obj_[v][ib] ); } DeleteEdges( bubble_edges ); // Combine edges. For bubbles v->w in which v had only one predecessor // and/or w had only one successor, this will combine the remaining edge in // the bubble with the edge leading to/from the bubble. RemoveUnneededVertices( ); // Clear out edges that have been removed from the graph. RemoveDeadEdgeObjects( ); } template<class E> void digraphE<E>::RemoveEdgelessVertices( const vec<int>& to_remove ) { vec<Bool> remove( N( ), False ); for ( int i = 0; i < to_remove.isize( ); i++ ) remove[ to_remove[i] ] = True; vec<int> new_vertex_id( N( ), -1 ); int id = 0; for ( int i = 0; i < N( ); i++ ) { if ( remove[i] ) { ForceAssert( from_[i].empty( ) ); ForceAssert( to_[i].empty( ) ); } else { new_vertex_id[i] = id; ++id; } } EraseIf( from_, remove ), EraseIf( from_edge_obj_, remove ); EraseIf( to_, remove ), EraseIf( to_edge_obj_, remove ); for ( int i = 0; i < N( ); i++ ) { for ( int j = 0; j < from_[i].isize( ); j++ ) from_[i][j] = new_vertex_id[ from_[i][j] ]; for ( int j = 0; j < to_[i].isize( ); j++ ) to_[i][j] = new_vertex_id[ to_[i][j] ]; } } template<class E> void digraphE<E>::RemoveEdgelessVertices( ) { vec<int> to_remove; for ( int i = 0; i < N( ); i++ ) if ( from_[i].empty( ) && to_[i].empty( ) ) to_remove.push_back(i); RemoveEdgelessVertices(to_remove); } template<class V> void digraphV<V>::RemoveEdgelessVertices( ) { vec<int> new_vertex_id( N( ), -1 ); int id = 0; vec<Bool> remove( N( ), False ); for ( int i = 0; i < N( ); i++ ) { if ( from_[i].empty( ) && to_[i].empty( ) ) remove[i] = True; else { new_vertex_id[i] = id; ++id; } } EraseIf( from_, remove ), EraseIf( to_, remove ), EraseIf( verts_, remove ); for ( int i = 0; i < N( ); i++ ) { for ( int j = 0; j < from_[i].isize( ); j++ ) from_[i][j] = new_vertex_id[ from_[i][j] ]; for ( int j = 0; j < to_[i].isize( ); j++ ) 
to_[i][j] = new_vertex_id[ to_[i][j] ]; } } template<class E> void digraphE<E>::Reverse( ) { for ( int i = 0; i < N( ); i++ ) { swap( from_[i], to_[i] ); swap( from_edge_obj_[i], to_edge_obj_[i] ); } } template<class E> void digraphE<E>::ReverseComponent( int x ) { equiv_rel e( N( ) ); for ( int v = 0; v < N( ); v++ ) { for ( int i = 0; i < from_[v].isize( ); i++ ) { int w = from_[v][i]; e.Join( v, w ); } } vec<int> o; e.Orbit( x, o ); for ( int j = 0; j < o.isize( ); j++ ) { int i = o[j]; swap( from_[i], to_[i] ); swap( from_edge_obj_[i], to_edge_obj_[i] ); } } template<class E> void digraphE<E>::ReorderVertices( const vec<int>& new_order ) { ForceAssertEq( new_order.isize( ), N( ) ); vec<int> order_new( N( ) ); for ( int i = 0; i < N( ); i++ ) order_new[ new_order[i] ] = i; PermuteVec( from_, order_new ); PermuteVec( from_edge_obj_, order_new ); PermuteVec( to_, order_new ); PermuteVec( to_edge_obj_, order_new ); for ( int v = 0; v < N( ); v++ ) { for ( int j = 0; j < from_[v].isize( ); j++ ) from_[v][j] = order_new[ from_[v][j] ]; for ( int j = 0; j < to_[v].isize( ); j++ ) to_[v][j] = order_new[ to_[v][j] ]; SortSync( from_[v], from_edge_obj_[v] ); SortSync( to_[v], to_edge_obj_[v] ); } } template<class E> void digraphE<E>::ReorderComponents( const vec<int>& new_order ) { equiv_rel e( N( ) ); for ( int v = 0; v < N( ); v++ ) { for ( int i = 0; i < from_[v].isize( ); i++ ) { int w = from_[v][i]; e.Join( v, w ); } } vec<int> reps; for ( int v = 0; v < N( ); v++ ) if ( e.Representative(v) ) reps.push_back(v); ForceAssertEq( new_order.size( ), reps.size( ) ); vec<int> new_vertex_order; for ( int i = 0; i < reps.isize( ); i++ ) { int v = reps[ new_order[i] ]; vec<int> o; e.Orbit( v, o ); new_vertex_order.append(o); } ReorderVertices(new_vertex_order); } template<class E> void digraphE<E>::ComponentEdges( vec< vec<edge_t> >& edges ) const { vec<vec<int> > vertices; Components( vertices ); int n = vertices.isize( ); edges.resize( 0 ); edges.resize( n ); for ( int i = 
0; i < n; i++ ) { for ( int j = 0; j < vertices[i].isize( ); j++ ) edges[i].append( FromEdgeObj( vertices[i][j] ) ); UniqueSort( edges[i] ); } } template<class F> void digraphE<F>::Append( const digraphE<F>& D ) { int nedges = edges_.size( ); edges_.append( D.edges_ ); int nvertices = from_.size( ); from_.append( D.from_ ); to_.append( D.to_ ); from_edge_obj_.append( D.from_edge_obj_ ); to_edge_obj_.append( D.to_edge_obj_ ); for ( int i = nvertices; i < N( ); i++ ) { for ( int j = 0; j < from_[i].isize( ); j++ ) { from_[i][j] += nvertices; from_edge_obj_[i][j] += nedges; } for ( int j = 0; j < to_[i].isize( ); j++ ) { to_[i][j] += nvertices; to_edge_obj_[i][j] += nedges; } } } template<class F> void digraphE<F>::SplitEdge( int v, int j, const F& e1, const F& e2 ) { int n = N( ); int ne = edges_.size( ); edges_.push_back( e1, e2 ); int w = from_[v][j]; int we = from_edge_obj_[v][j]; int i = InputFromOutputTo( v, j ); from_[v].erase( from_[v].begin( ) + j ); from_edge_obj_[v].erase( from_edge_obj_[v].begin( ) + j ); to_[w].erase( to_[w].begin( ) + i ); to_edge_obj_[w].erase( to_edge_obj_[w].begin( ) + i ); from_[v].push_back(n), from_edge_obj_[v].push_back(ne); vec<int> nfrom, nto; vec<int> nfrom_edge_obj, nto_edge_obj; nfrom.push_back(w), nfrom_edge_obj.push_back(ne+1); nto.push_back(v), nto_edge_obj.push_back(ne); from_.push_back(nfrom), to_.push_back(nto); from_edge_obj_.push_back(nfrom_edge_obj); to_edge_obj_.push_back(nto_edge_obj); for ( int u = 0; u < to_[w].isize( ); u++ ) { if ( to_[w][u] == v && we == to_edge_obj_[w][u] ) { to_.erase( to_.begin( ) + u ); to_edge_obj_.erase( to_edge_obj_.begin( ) + u ); break; } } to_[w].push_back(n), to_edge_obj_[w].push_back(ne+1); } template<class F> void digraphE<F>::Glue( const EmbeddedSubPath<F>& a, const EmbeddedSubPath<F>& b, const vec<int>& EE, const vec<int>& FF, const digraphE<F>& c ) { // Sanity check. 
ForceAssertGe( a.NVertices( ), 2 ); ForceAssertGe( b.NVertices( ), 2 ); ForceAssert( !HasSharedEdge(a, b) ); ForceAssertEq( EE.isize( ), a.NVertices( ) ); ForceAssertEq( FF.isize( ), b.NVertices( ) ); vec<int> Esort = EE, Fsort = FF; Sort(Esort), Sort(Fsort); ForceAssert( Esort.UniqueOrdered( ) ); ForceAssert( Fsort.UniqueOrdered( ) ); ForceAssertEq( EE.front( ), 0 ); ForceAssertEq( EE.back( ), c.N( ) - 1 ); ForceAssertEq( FF.front( ), 0 ); ForceAssertEq( FF.back( ), c.N( ) - 1 ); // Delete edges appearing in a and b. for ( int i = 0; i < a.NVertices( ) - 1; i++ ) { int v = a.Vertex(i), w = a.Vertex(i+1); int e = a.EdgeObjectIndexAbs(i); int ef = EdgeObjectIndexToFromIndex( v, e ); int et = InputFromOutputTo( v, ef ); from_[v].erase( from_[v].begin( ) + ef ); from_edge_obj_[v].erase( from_edge_obj_[v].begin( ) + ef ); to_[w].erase( to_[w].begin( ) + et ); to_edge_obj_[w].erase( to_edge_obj_[w].begin( ) + et ); } for ( int i = 0; i < b.NVertices( ) - 1; i++ ) { int v = b.Vertex(i), w = b.Vertex(i+1); int e = b.EdgeObjectIndexAbs(i); int ef = EdgeObjectIndexToFromIndex( v, e ); int et = InputFromOutputTo( v, ef ); from_[v].erase( from_[v].begin( ) + ef ); from_edge_obj_[v].erase( from_edge_obj_[v].begin( ) + ef ); to_[w].erase( to_[w].begin( ) + et ); to_edge_obj_[w].erase( to_edge_obj_[w].begin( ) + et ); } // Attach c. int nvertices = N( ); Append(c); for ( int i = 0; i < a.NVertices( ); i++ ) TransferEdges( a.Vertex(i), EE[i] + nvertices ); for ( int i = 0; i < b.NVertices( ); i++ ) TransferEdges( b.Vertex(i), FF[i] + nvertices ); // If data implies that some vertices in c should be identified, do so. 
vec< vec<int> > sources( c.N( ) ); for ( int i = 0; i < a.NVertices( ); i++ ) sources[ EE[i] ].push_back( a.Vertex(i) ); for ( int i = 0; i < b.NVertices( ); i++ ) sources[ FF[i] ].push_back( b.Vertex(i) ); for ( int i = 0; i < c.N( ); i++ ) Sort( sources[i] ); for ( int i1 = 0; i1 < c.N( ); i1++ ) { for ( int i2 = i1 + 1; i2 < c.N( ); i2++ ) { if ( Meet( sources[i1], sources[i2] ) ) TransferEdges( i1 + nvertices, i2 + nvertices ); } } } template<class E> void digraphE<E>::TransferEdges( int v, int w, const Bool enter_only ) { ForceAssert( v != w ); // Change edges v --> v to edges w --> w. if ( !enter_only ) { vec<Bool> remove_from_v; remove_from_v.resize_and_set( from_[v].size( ), False ); for ( int i = 0; i < from_[v].isize( ); i++ ) { if ( from_[v][i] == v ) { from_[w].push_back(w); from_edge_obj_[w].push_back( from_edge_obj_[v][i] ); to_[w].push_back(w); to_edge_obj_[w].push_back( from_edge_obj_[v][i] ); remove_from_v[i] = True; int j = InputFromOutputTo( v, i ); to_[v].erase( to_[v].begin( ) + j ); to_edge_obj_[v].erase( to_edge_obj_[v].begin( ) + j ); } } EraseIf( from_[v], remove_from_v ); EraseIf( from_edge_obj_[v], remove_from_v ); SortSync( from_[w], from_edge_obj_[w] ); SortSync( to_[w], to_edge_obj_[w] ); } // Change edges u --> v to edges u --> w. for ( int i = 0; i < to_[v].isize( ); i++ ) { int u = to_[v][i]; int j = InputToOutputFrom( v, i ); from_[u][j] = w; SortSync( from_[u], from_edge_obj_[u] ); } // Change edges v --> x to edges w --> x. // if ( !enter_only ) { for ( int i = 0; i < from_[v].isize( ); i++ ) { int x = from_[v][i]; int j = InputFromOutputTo( v, i ); if ( !enter_only ) to_[x][j] = w; else to_[x][j] = v; SortSync( to_[x], to_edge_obj_[x] ); } } // Do the rest. 
if ( !enter_only ) { from_[w].append( from_[v] ); from_edge_obj_[w].append( from_edge_obj_[v] ); } SortSync( from_[w], from_edge_obj_[w] ); to_[w].append( to_[v] ); to_edge_obj_[w].append( to_edge_obj_[v] ); SortSync( to_[w], to_edge_obj_[w] ); to_[v].clear( ), to_edge_obj_[v].clear( ); if ( !enter_only ) { from_[v].clear( ), from_edge_obj_[v].clear( ); } } template<class E> void digraphE<E>::RemoveDuplicateEdges( ) { for ( int v = 0; v < N( ); v++ ) { vec<Bool> remove; remove.resize_and_set( from_[v].size( ), False ); for ( int j = 0; j < from_[v].isize( ); j++ ) { int k; for ( k = j + 1; k < from_[v].isize( ); k++ ) if ( from_[v][k] != from_[v][j] ) break; for ( int u1 = j; u1 < k; u1++ ) { if ( remove[u1] ) continue; for ( int u2 = u1 + 1; u2 < k; u2++ ) { if ( edges_[ from_edge_obj_[v][u1] ] == edges_[ from_edge_obj_[v][u2] ] ) { remove[u2] = True; } } } j = k - 1; } for ( int i = 0; i < remove.isize( ); i++ ) { if ( remove[i] ) { int w = from_[v][i]; int j = InputFromOutputTo( v, i ); to_[w].erase( to_[w].begin( ) + j ); to_edge_obj_[w].erase( to_edge_obj_[w].begin( ) + j ); } } EraseIf( from_[v], remove ); EraseIf( from_edge_obj_[v], remove ); } } template<class E> void digraphE<E>::DeleteEdgesAtVertex( int v ) { for ( int i = 0; i < from_[v].isize( ); i++ ) { int w = from_[v][i]; int j = InputFromOutputTo( v, i ); if ( v == w ) continue; to_[w].erase( to_[w].begin( ) + j ); to_edge_obj_[w].erase( to_edge_obj_[w].begin( ) + j ); } for ( int i = 0; i < to_[v].isize( ); i++ ) { int w = to_[v][i]; int j = InputToOutputFrom( v, i ); if ( v == w ) continue; from_[w].erase( from_[w].begin( ) + j ); from_edge_obj_[w].erase( from_edge_obj_[w].begin( ) + j ); } from_[v].clear( ), from_edge_obj_[v].clear( ); to_[v].clear( ), to_edge_obj_[v].clear( ); } template<class E> vec<int> digraphE<E>::RemoveDeadEdgeObjects( ) { vec<Bool> used; Used(used); int count = 0; vec<int> to_new_id( edges_.size( ), -1 ); for ( int i = 0; i < edges_.isize( ); i++ ) { if ( used[i] ) { if ( 
count != i ) edges_[count] = edges_[i]; to_new_id[i] = count; ++count; } } edges_.resize(count); for ( int v = 0; v < N( ); v++ ) { for ( int i = 0; i < from_[v].isize( ); i++ ) from_edge_obj_[v][i] = to_new_id[ from_edge_obj_[v][i] ]; for ( int i = 0; i < to_[v].isize( ); i++ ) to_edge_obj_[v][i] = to_new_id[ to_edge_obj_[v][i] ]; } return to_new_id; } template<class E> Bool digraphE<E>::TestValid( const Bool exit ) const { if ( !digraph(*this).TestValid( ) ) return False; if ( from_edge_obj_.size( ) != to_edge_obj_.size( ) ) DIGRAPH_INVALID( "sizes of from_edge_obj_ and to_edge_obj_ are different", exit ); if ( from_.size( ) != from_edge_obj_.size( ) ) DIGRAPH_INVALID( "sizes of from_ and from_edge_obj_ are different", exit ); for ( int v = 0; v < N( ); v++ ) { if ( from_[v].size( ) != from_edge_obj_[v].size( ) ) { DIGRAPH_INVALID( "sizes of from_[" << v << "] and " << "from_edge_obj_[" << v << "] are different", exit ); } } for ( int v = 0; v < N( ); v++ ) { if ( to_[v].size( ) != to_edge_obj_[v].size( ) ) { DIGRAPH_INVALID( "sizes of to_[" << v << "] and " << "to_edge_obj_[" << v << "] are different", exit ); } } for ( int v = 0; v < N( ); v++ ) { for ( int j = 0; j < from_[v].isize( ); j++ ) { int w = from_[v][j]; int ei = from_edge_obj_[v][j]; if ( ei < 0 || ei >= EdgeObjectCount( ) ) DIGRAPH_INVALID( "Illegal from_edge_obj value.", exit ); Bool found = False; for ( int r = 0; r < to_[w].isize( ); r++ ) if ( to_[w][r] == v && to_edge_obj_[w][r] == ei ) found = True; if ( !found ) { DIGRAPH_INVALID( "There is an edge from " << v << " to " << w << " in from_[" << v << "], but not in to_[" << w << "].", exit ); } } } for ( int v = 0; v < N( ); v++ ) { for ( int j = 0; j < to_[v].isize( ); j++ ) { int w = to_[v][j]; int ei = to_edge_obj_[v][j]; if ( ei < 0 || ei >= EdgeObjectCount( ) ) DIGRAPH_INVALID( "Illegal to_edge_obj value.", exit ); Bool found = False; for ( int r = 0; r < from_[w].isize( ); r++ ) { if ( from_[w][r] == v && from_edge_obj_[w][r] == ei ) 
found = True; } if ( !found ) { DIGRAPH_INVALID( "There is an edge from " << v << " to " << w << " in to_[" << v << "], but not in from_[" << w << "].", exit ); } } } return True; } template<class F> void digraphE<F>::Initialize( const vec< vec<int> >& from, const vec< vec<int> >& to, const vec<F>& edges, const vec< vec<int> >& to_edge_obj, const vec< vec<int> >& from_edge_obj, const Bool allow_unused_edges ) { digraph::Initialize( from, to ); edges_ = edges; to_edge_obj_ = to_edge_obj; from_edge_obj_ = from_edge_obj; int N = from.size( ); ForceAssertEq( N, to_edge_obj.isize( ) ); ForceAssertEq( N, from_edge_obj.isize( ) ); vec<int> used( edges.size( ), 0 ); for ( int i = 0; i < N; i++ ) { ForceAssertEq( to_edge_obj[i].size( ), to[i].size( ) ); ForceAssertEq( from_edge_obj[i].size( ), from[i].size( ) ); for ( int j = 0; j < to_edge_obj[i].isize( ); j++ ) { int o = to_edge_obj[i][j]; ForceAssertGe( o, 0 ); ForceAssertLt( o, edges.isize( ) ); ++used[o]; int w = i, v = to_[i][j]; int wf = BinPosition( from[v], w ); // The following assert won't do what we want if there are multiple // edges between two given vertices (in which case wf doesn't // make sense). // ForceAssertEq( o, from_edge_obj[v][wf] ); } } for ( int i = 0; i < used.isize( ); i++ ) { if ( used[i] > 1 || ( !allow_unused_edges && used[i] == 0 ) ) { cout << "Edge " << i << " is used " << used[i] << " times, whereas it should be used exactly once.\n"; } if (allow_unused_edges) ForceAssertLe( used[i], 1 ); else ForceAssertEq( used[i], 1 ); } } template<class F> digraphE<F>::digraphE( const vec< vec<int> >& from, const vec< vec<int> >& to, const vec<F>& edges, const vec< vec<int> >& to_edge_obj, const vec< vec<int> >& from_edge_obj, const Bool allow_unused_edges ) : digraph(from, to) // redundant with initialize? 
{ Initialize( from, to, edges, to_edge_obj, from_edge_obj, allow_unused_edges ); } template<class V> void digraphV<V>::Initialize( const vec< vec<int> >& from, const vec< vec<int> >& to, const vec<V>& verts ) { digraph::Initialize( from, to ); verts_ = verts; ForceAssertEq( N( ), verts.isize( ) ); } template<class V> digraphV<V>::digraphV( const vec< vec<int> >& from, const vec< vec<int> >& to, const vec<V>& verts ) : digraph(from, to) // redundant with initialize? { Initialize( from, to, verts ); } template<class V, class E> void digraphVE<V,E>::Initialize( const vec< vec<int> >& from, const vec< vec<int> >& to, const vec<V>& verts, const vec<E>& edges, const vec< vec<int> >& to_edge_obj, const vec< vec<int> >& from_edge_obj ) { digraphE<E>::Initialize( from, to, edges, to_edge_obj, from_edge_obj ); verts_ = verts; ForceAssertEq( from.size( ), verts.size( ) ); } template<class V, class E> digraphVE<V,E>::digraphVE( const vec< vec<int> >& from, const vec< vec<int> >& to, const vec<V>& verts, const vec<E>& edges, const vec< vec<int> >& to_edge_obj, const vec< vec<int> >& from_edge_obj ) : digraphE<E>( from, to, edges, to_edge_obj, from_edge_obj ) // redundant?? 
{ Initialize( from, to, verts, edges, to_edge_obj, from_edge_obj ); } template<class V, class E> digraphVE<V,E>::digraphVE( const digraphE<E>& G, const vec<V>& verts ) : digraphE<E>(G) { verts_ = verts; ForceAssertEq( G.N( ), verts.isize( ) ); } template<class E> Bool digraphE<E>::IsComplete( const vec<int>& vertices, const vec<int>& edges ) const { ForceAssert( vertices.UniqueOrdered( ) ); ForceAssert( edges.UniqueOrdered( ) ); for ( int u = 0; u < vertices.isize( ); u++ ) { int v = vertices[u]; for ( int j = 0; j < From(v).isize( ); j++ ) { int w = From(v)[j]; if ( !BinMember( vertices, w ) ) return False; int e = EdgeObjectIndexByIndexFrom( v, j ); if ( !BinMember( edges, e ) ) return False; } for ( int j = 0; j < To(v).isize( ); j++ ) { int w = To(v)[j]; if ( !BinMember( vertices, w ) ) return False; int e = EdgeObjectIndexByIndexTo( v, j ); if ( !BinMember( edges, e ) ) return False; } } return True; } template<class E> void digraphE<E>::DualComponentRelation( equiv_rel& e, const vec<Bool>& exclude ) const { e.Initialize( EdgeObjectCount( ) ); for ( int v = 0; v < N( ); v++ ) { for ( int j1 = 0; j1 < To(v).isize( ); j1++ ) { int e1 = EdgeObjectIndexByIndexTo( v, j1 ); if ( exclude.nonempty( ) && exclude[e1] ) continue; for ( int j2 = 0; j2 < From(v).isize( ); j2++ ) { int e2 = EdgeObjectIndexByIndexFrom( v, j2 ); if ( exclude.nonempty( ) && exclude[e2] ) continue; e.Join(e1, e2); } } } } template<class E> void digraphE<E>::Initialize( const digraphE& g, const equiv_rel& e ) { edges_ = g.edges_; vec<int> reps; e.OrbitRepsAlt(reps); int nreps = reps.size( ); vec<int> to_reps( g.N( ) ); for ( int i = 0; i < nreps; i++ ) { vec<int> o; e.Orbit( reps[i], o ); for ( int j = 0; j < o.isize( ); j++ ) to_reps[ o[j] ] = i; } from_.resize(nreps), to_.resize(nreps); from_edge_obj_.resize(nreps), to_edge_obj_.resize(nreps); int nedges = g.EdgeObjectCount( ); vec<int> to_left_vertex(nedges, -1), to_right_vertex(nedges, -1); for ( int w = 0; w < g.N( ); w++ ) { for ( int j = 
0; j < g.To(w).isize( ); j++ ) { int m = g.EdgeObjectIndexByIndexTo( w, j ); int v = g.To(w)[j]; to_left_vertex[m] = v, to_right_vertex[m] = w; } } for ( int m = 0; m < nedges; m++ ) { if ( to_left_vertex[m] < 0 || to_right_vertex[m] < 0 ) continue; int v = to_reps[ to_left_vertex[m] ]; int w = to_reps[ to_right_vertex[m] ]; from_[v].push_back(w), to_[w].push_back(v); from_edge_obj_[v].push_back(m), to_edge_obj_[w].push_back(m); } for ( int v = 0; v < N( ); v++ ) { SortSync( from_[v], from_edge_obj_[v] ); SortSync( to_[v], to_edge_obj_[v] ); } } template<class E> digraphE<E>::digraphE( const digraphE& g, const equiv_rel& e ) : edges_( g.edges_ ) { Initialize( g, e ); } template<class E> digraphE<E>::digraphE( const vec<digraphE>& g ) { Initialize(g); } template<class E> void digraphE<E>::Initialize( const vec<digraphE>& g ) { for ( int i = 0; i < g.isize( ); i++ ) Append( g[i] ); } template<class F> void digraphE<F>::Initialize( const vec<digraphE>& g, const vec< pair< pair<int,int>, pair<int,int> > >& joins ) { digraphE<F> G(g); equiv_rel e( G.N( ) ); vec<int> start( g.isize( ) ); start[0] = 0; for ( int i = 1; i < g.isize( ); i++ ) start[i] = start[i-1] + g[i-1].N( ); for ( int i = 0; i < joins.isize( ); i++ ) { int v = start[ joins[i].first.first ] + joins[i].first.second; int w = start[ joins[i].second.first ] + joins[i].second.second; e.Join( v, w ); } Initialize( G, e ); } template<class F> digraphE<F>::digraphE( const vec<digraphE>& g, const vec< pair< pair<int,int>, pair<int,int> > >& joins ) { Initialize( g, joins ); } template<class F> void digraphE<F>::Initialize( const digraph& g, const vec<F>& edges ){ int nedges = g.N(); ForceAssertEq( nedges, edges.isize() ); equiv_rel e( 2*nedges ); for ( int v = 0; v < nedges; v++ ){ for ( size_t j = 0; j < g.From(v).size( ); j++ ){ int w = g.From(v)[j]; e.Join( 2*v + 1, 2*w ); } } vec<int> reps; e.OrbitRepsAlt(reps); int N = reps.size( ); vec< vec<int> > from(N), to(N); vec< vec<int> > from_edge_obj(N), 
to_edge_obj(N); for ( int i = 0; i < edges.isize( ); i++ ){ int x = BinPosition( reps, e.ClassId( 2*i ) ); int y = BinPosition( reps, e.ClassId( 2*i + 1 ) ); from[x].push_back(y), to[y].push_back(x); from_edge_obj[x].push_back(i), to_edge_obj[y].push_back(i); } for ( int i = 0; i < N; i++ ){ SortSync( from[i], from_edge_obj[i] ); SortSync( to[i], to_edge_obj[i] ); } Initialize( from, to, edges, to_edge_obj, from_edge_obj ); }
// Constructor form of the "edges from digraph vertices" initializer.
template<class F> digraphE<F>::digraphE( const digraph& g, const vec<F>& edges ){ Initialize( g, edges ); }
// Constructor: same conversion, labeling edge v with the integer v itself.
template<class E> digraphE<E>::digraphE( const digraph& g ){ vec<int> edges( g.N(), vec<int>::IDENTITY ); Initialize( g, edges ); }
// ThisClose: True iff there is a directed path from v to w whose total edge
// length is <= d.  Dijkstra-style search keeping 'unprocessed'/'processed'
// sets of (vertex, best distance) pairs, pruning any extension beyond d.
template<class F> Bool digraphE<F>::ThisClose( int v, int w, F d ) const { if ( d < 0 ) return False; if ( v == w ) return True; set< pair<int,F> > unprocessed, processed; unprocessed.insert( make_pair( v, 0 ) ); while( !unprocessed.empty( ) ) { int x = unprocessed.begin( )->first; F dx = unprocessed.begin( )->second; typename set< pair<int,F> >::iterator u = processed.lower_bound( make_pair( x, 0 ) ); unprocessed.erase( unprocessed.begin( ) ); if ( u != processed.end( ) && u->first == x ) { if ( u->second <= dx ) continue; processed.erase(u); } processed.insert( make_pair( x, dx ) ); for ( int j = 0; j < From(x).isize( ); j++ ) { int y = From(x)[j]; F dy = dx + EdgeObjectByIndexFrom( x, j ); if ( dy > d ) continue; if ( y == w ) return True; typename set< pair<int,F> >::iterator p = processed.lower_bound( make_pair( y, 0 ) ); if ( p != processed.end( ) && p->first == y ) { if ( p->second <= dy ) continue; processed.erase(p); } typename set< pair<int,F> >::iterator u = unprocessed.lower_bound( make_pair( y, 0 ) ); if ( u != unprocessed.end( ) && u->first == y ) { if ( u->second <= dy ) continue; unprocessed.erase(u); } unprocessed.insert( make_pair( y, dy ) ); } } return False; }
// ToLeft: fill to_left so that to_left[e] is the source vertex of edge e.
template<class E> void digraphE<E>::ToLeft( vec<int>& to_left ) const { to_left.resize( EdgeObjectCount( ) ); for ( int i = 0; i < 
N( ); i++ ) { for ( int j = 0; j < From(i).isize( ); j++ ) { int e = EdgeObjectIndexByIndexFrom( i, j ); to_left[e] = i; } } } template<class E> void digraphE<E>::ToRight( vec<int>& to_right ) const { to_right.resize( EdgeObjectCount( ) ); for ( int i = 0; i < N( ); i++ ) { for ( int j = 0; j < To(i).isize( ); j++ ) { int e = EdgeObjectIndexByIndexTo( i, j ); to_right[e] = i; } } } template<class F> void digraphE<F>::GetSuccessors( const vec<int>& v, vec< pair<int,F> >& from_v ) { set< pair<int,F> > check, fromv; for ( int i = 0; i < v.isize( ); i++ ) check.insert( make_pair( v[i], 0 ) ); while( !check.empty( ) ) { int x = check.begin( )->first; F dx = check.begin( )->second; typename set< pair<int,F> >::iterator u = fromv.lower_bound( make_pair( x, 0 ) ); check.erase( check.begin( ) ); if ( u != fromv.end( ) && u->first == x ) { if ( u->second <= x ) continue; fromv.erase(u); } fromv.insert( make_pair( x, dx ) ); for ( int i = 0; i < From(x).isize( ); i++ ) { int y = From(x)[i]; F dy = dx + EdgeObjectByIndexFrom( x, i ); typename set< pair<int,F> >::iterator a = check.lower_bound( make_pair( y, 0 ) ); if ( a != check.end( ) && a->first == y ) { if ( a->second <= dy ) continue; check.erase(a); } typename set< pair<int,F> >::iterator b = fromv.lower_bound( make_pair( y, 0 ) ); if ( b != fromv.end( ) && b->first == y ) { if ( b->second <= dy ) continue; fromv.erase(b); } check.insert( make_pair( y, dy ) ); } } from_v.clear( ); for ( typename set< pair<int,F> >::iterator i = fromv.begin( ); i != fromv.end( ); ++i ) { from_v.push_back(*i); } } template<class F> void PrintEdge( const int v, const int w, const int ei, const vec<double>& lengths, const vec<Bool>* dashed, const vec<String>* edge_color, const int tiny_top, const typename digraphE<F>::edge_label_info eli, ostream& out ) { float wd = 0.1; // this value not used String color, label; Bool bold = False; Bool is_dashed = False; if ( dashed != NULL ) is_dashed = (*dashed)[ei]; double len = lengths[ei]; if ( len < 
tiny_top ) { color = "gray"; if ( v == w ) label = ToString( len, 0 ); wd = 1.0; } else if ( len >= tiny_top && len < 1000.0 ) { color = "black"; wd = 2.0; } else if ( len >= 1000.0 && len < 10000.0 ) { color = "red"; wd = 4.0; label = ToString( len/1000.0, 1 ) + " kb"; } else { color = "magenta"; bold = True; wd = 8.0; label = ToString( len/1000.0, 0 ) + " kb"; } if ( edge_color != NULL && (*edge_color)[ei] != "" ) color = (*edge_color)[ei]; out << v << " -> " << w << " [minlen=" << wd << ",color=" << color; if ( color == "brown" ) out << ",penwidth=4"; if (is_dashed) out << ",style=dashed"; else if (bold) out << ",style=bold"; if ( eli.edge_id_names != NULL ) { if ( label == "" ) label = (*eli.edge_id_names)[ei]; else { label = (*eli.edge_id_names)[ei] + " (" + label + ")"; } } else if ( eli.label_edges ) { if ( label == "" ) label = ( eli.edge_labels_base_alpha ? BaseAlpha(ei) : ToString(ei) ); else { label = ( eli.edge_labels_base_alpha ? BaseAlpha(ei) : ToString(ei) ) + " (" + label + ")"; } } if ( eli.label_edges_extra ) label += " " + (*eli.label_edges_extra)[ei]; if ( label != "" ) out << ",label=\"" << label << "\""; }
// PrintEdge2: digraphEX variant of PrintEdge — identical DOT emission logic
// but takes digraphEX<F>::edge_label_info.  Like PrintEdge it leaves the
// attribute list open for the caller to finish with "];".
template<class F> void PrintEdge2( const int v, const int w, const int ei, const vec<double>& lengths, const vec<Bool>* dashed, const vec<String>* edge_color, const int tiny_top, const typename digraphEX<F>::edge_label_info eli, ostream& out ) { float wd = 0.1; // this value not used
String color, label; Bool bold = False; Bool is_dashed = False; if ( dashed != NULL ) is_dashed = (*dashed)[ei]; double len = lengths[ei]; if ( len < tiny_top ) { color = "gray"; if ( v == w ) label = ToString( len, 0 ); wd = 1.0; } else if ( len >= tiny_top && len < 1000.0 ) { color = "black"; wd = 2.0; } else if ( len >= 1000.0 && len < 10000.0 ) { color = "red"; wd = 4.0; label = ToString( len/1000.0, 1 ) + " kb"; } else { color = "magenta"; bold = True; wd = 8.0; label = ToString( len/1000.0, 0 ) + " kb"; } if ( edge_color != NULL && (*edge_color)[ei] != "" 
) color = (*edge_color)[ei]; out << v << " -> " << w << " [minlen=" << wd << ",color=" << color; if ( color == "brown" ) out << ",penwidth=4"; if (is_dashed) out << ",style=dashed"; else if (bold) out << ",style=bold"; if ( eli.edge_id_names != NULL ) { if ( label == "" ) label = (*eli.edge_id_names)[ei]; else { label = (*eli.edge_id_names)[ei] + " (" + label + ")"; } } else if ( eli.label_edges ) { if ( label == "" ) label = ( eli.edge_labels_base_alpha ? BaseAlpha(ei) : ToString(ei) ); else { label = ( eli.edge_labels_base_alpha ? BaseAlpha(ei) : ToString(ei) ) + " (" + label + ")"; } } if ( eli.label_edges_extra ) label += " " + (*eli.label_edges_extra)[ei]; if ( label != "" ) out << ",label=\"" << label << "\""; }
// FindLeftMostVertex: among the component vertices o (ignoring vertices all of
// whose incident edges are invisible), assign tentative x-positions by
// propagating edge lengths across placed/unplaced pairs, then report the
// vertex with the minimum position in leftv.
template<class E> void FindLeftMostVertex( const digraphE<E>& G, const vec<double>& lengths, const vec<int>& o, const vec<Bool>* invisible, int& leftv ) { // Restrict attention to visible vertices.
vec<int> oo; for ( int i1 = 0; i1 < o.isize( ); i1++ ) { int v = o[i1]; if ( invisible != NULL ) { Bool owned = False; for ( int j = 0; j < G.From(v).isize( ); j++ ) { int e = G.EdgeObjectIndexByIndexFrom( v, j ); if ( !(*invisible)[e] ) owned = True; } for ( int j = 0; j < G.To(v).isize( ); j++ ) { int e = G.EdgeObjectIndexByIndexTo( v, j ); if ( !(*invisible)[e] ) owned = True; } if ( !owned ) continue; } oo.push_back(v); } Sort(oo); vec<float> pos( oo.size( ) ); vec<Bool> placed( oo.size( ), False ); pos[0] = 0.0, placed[0] = True; // Note that the following block of code is very slow on large components.
// Performance may be OK now.
while( Sum(placed) < oo.isize( ) ) { Bool progress = False; for ( int i1 = 0; i1 < oo.isize( ); i1++ ) { int v = oo[i1]; for ( int j = 0; j < G.From(v).isize( ); j++ ) { int w = G.From(v)[j]; int i2 = BinPosition( oo, w ); if ( i2 < 0 ) continue; if ( !( placed[i1] ^ placed[i2] ) ) continue; progress = True; edge_t e = G.EdgeObjectIndexByIndexFrom( v, j ); if ( placed[i1] ) pos[i2] = pos[i1] + lengths[e]; else pos[i1] = pos[i2] - lengths[e]; placed[i1] = placed[i2] = True; } } if ( !progress ) break; } float left = Min(pos); int leftj = 0; for ( leftj = 0; leftj < pos.isize( ); leftj++ ) if ( pos[leftj] == left ) break; leftv = oo[leftj]; }
// FindLeftMostVertex (digraphEX overload): same position-propagation algorithm
// as the digraphE version, using the digraphEX accessors IFrom/ITo.
template<class E> void FindLeftMostVertex( const digraphEX<E>& G, const vec<double>& lengths, const vec<int>& o, const vec<Bool>* invisible, int& leftv ) { // Restrict attention to visible vertices.
vec<int> oo; for ( int i1 = 0; i1 < o.isize( ); i1++ ) { int v = o[i1]; if ( invisible != NULL ) { Bool owned = False; for ( int j = 0; j < (int) G.From(v).size( ); j++ ) { int e = G.IFrom( v, j ); if ( !(*invisible)[e] ) owned = True; } for ( int j = 0; j < (int) G.To(v).size( ); j++ ) { int e = G.ITo( v, j ); if ( !(*invisible)[e] ) owned = True; } if ( !owned ) continue; } oo.push_back(v); } Sort(oo); vec<float> pos( oo.size( ) ); vec<Bool> placed( oo.size( ), False ); pos[0] = 0.0, placed[0] = True; // Note that the following block of code is very slow on large components.
// Performance may be OK now.
while( Sum(placed) < oo.isize( ) ) { Bool progress = False; for ( int i1 = 0; i1 < oo.isize( ); i1++ ) { int v = oo[i1]; for ( int j = 0; j < (int) G.From(v).size( ); j++ ) { int w = G.From(v)[j]; int i2 = BinPosition( oo, w ); if ( i2 < 0 ) continue; if ( !( placed[i1] ^ placed[i2] ) ) continue; progress = True; edge_t e = G.IFrom( v, j ); if ( placed[i1] ) pos[i2] = pos[i1] + lengths[e]; else pos[i1] = pos[i2] - lengths[e]; placed[i1] = placed[i2] = True; } } if ( !progress ) break; } float left = Min(pos); int leftj = 0; for ( leftj = 0; leftj < pos.isize( ); leftj++ ) if ( pos[leftj] == left ) break; leftv = oo[leftj]; }
// LabelTransitionVertices: if vertex v touches both visible and invisible
// edges, emit a DOT statement coloring v red.
template<class E> void LabelTransitionVertices( const digraphE<E>& G, const int v, const vec<Bool>* invisible, ostream& out ) { int vis_count = 0, invis_count = 0; for ( int j = 0; j < G.From(v).isize( ); j++ ) { int ei = G.EdgeObjectIndexByIndexFrom( v, j ); if ( (*invisible)[ei] ) invis_count++; else vis_count++; } for ( int j = 0; j < G.To(v).isize( ); j++ ) { int ei = G.EdgeObjectIndexByIndexTo( v, j ); if ( (*invisible)[ei] ) invis_count++; else vis_count++; } if ( vis_count > 0 && invis_count > 0 ) out << v << " [color=red];\n"; }
// LabelTransitionVertices (digraphEX overload): same red-marking rule using
// the digraphEX accessors IFrom/ITo.
template<class E> void LabelTransitionVertices( const digraphEX<E>& G, const int v, const vec<Bool>* invisible, ostream& out ) { int vis_count = 0, invis_count = 0; for ( int j = 0; j < (int) G.From(v).size( ); j++ ) { int ei = G.IFrom( v, j ); if ( (*invisible)[ei] ) invis_count++; else vis_count++; } for ( int j = 0; j < (int) G.To(v).size( ); j++ ) { int ei = G.ITo( v, j ); if ( (*invisible)[ei] ) invis_count++; else vis_count++; } if ( vis_count > 0 && invis_count > 0 ) out << v << " [color=red];\n"; }
// CreateContigLabels: build per-component labels — contig_labels0 holds the
// plain names (from label_contigs_extra if given, else "contig <i>"), and
// contig_labels holds the full DOT taillabel attribute strings.
template<class E> void CreateContigLabels( const vec<vec<int>>& components, const vec<String>* label_contigs_extra, vec<String>& contig_labels0, vec<String>& contig_labels ) { vec<int> label_distance; if ( label_contigs_extra ) contig_labels0 = *label_contigs_extra; else { contig_labels0.resize( 
components.size( ) ); for (size_t ii=0; ii<components.size( ); ii++) contig_labels0[ii] = "contig " + ToString( ii ); } label_distance.resize( contig_labels0.size( ), 0 ); for (int ii=0; ii<(int)contig_labels0.size( ); ii++) label_distance[ii] = 1 + (int)( contig_labels0[ii].size( ) / 2 ); for ( int i = 0; i < contig_labels0.isize( ); i++ ) { contig_labels.push_back( ",taillabel=\"" + contig_labels0[i] + "\",labelangle=180," + "weight=10000," + "labeldistance=" + ToString(label_distance[i]) + ",labelfontsize=18," + "labelfontname=\"Times-Bold\"" ); } }
// DotHeader: write the opening of a DOT file — "digraph G {", default node
// style (plaintext labels vs. points), edge style scaled by 'scale', and
// layout directives (left-to-right rank direction, optional layout engine).
template<class E> void DotHeader( const Bool label_contigs, const Bool label_vertices, const String layout, const double fontsize, const double scale, ostream& out ) { out << "digraph G {\n\n"; if (label_vertices) { out << "node [width=" << scale * 0.1 << ",height=" << scale * 0.1 << ",fontsize=12,shape=plaintext];\n"; } else { out << "node [width=" << scale * 0.1 << ",height=" << scale * 0.1 << ",fontsize=10,shape=point];\n"; } out << "edge [fontsize=" << fontsize << ",penwidth=" << scale * 1.0 << ",arrowsize=" << scale * 1.0 << ",fontname=Arial];\n"; if (label_contigs) out << "margin=1.0;\n"; out << "rankdir=LR;\n"; out << "labeljust=l;\n"; out << "margin=0;\n"; if ( layout != "" ) out << "layout=" << layout << ";\n"; }
// PrettyDOT: render this graph as a DOT file, one cluster per connected
// component, with edges colored/labeled by length and optional per-edge
// dashing, colors, pen widths, and visibility filtering.
template<class F> void digraphE<F>::PrettyDOT( ostream& out, const vec<double>& lengths, const edge_label_info eli, Bool label_contigs, Bool label_vertices, const vec<int>* componentsToPrint, const vec<String> *label_contigs_extra, const vec<int> *verticesToPrint, const vec<Bool>* dashed, const vec<Bool>* invisible, const vec<String>* edge_color, const vec<int>* pen_widths, const String layout, const double tiny_top, const double fontsize, const double scale ) const { // Define components and those that are selected.
vec< vec<int> > components; if ( invisible == NULL ) Components(components); else { vec<int> to_left, to_right; ToLeft(to_left), ToRight(to_right); vec<Bool> invis( N( ), True ); for ( int e = 0; e < EdgeObjectCount( ); e++ ) { if ( !(*invisible)[e] ) invis[ to_left[e] ] = invis[ to_right[e] ] = False; } Components( components, &invis ); } vec<int> select; if (componentsToPrint) select = *componentsToPrint; else { select.reserve( components.size( ) ); for (int ii=0; ii<(int)components.size( ); ii++) select.push_back( ii ); } // Set up output and contig labels.
DotHeader<F>( label_contigs, label_vertices, layout, fontsize, scale, out ); vec<String> contig_labels0, contig_labels; if (label_contigs) { CreateContigLabels<F>( components, label_contigs_extra, contig_labels0, contig_labels ); } // Define vertices to skip.
vec<bool> skip_vtx; if (verticesToPrint) { skip_vtx.resize( this->N( ), true ); for (size_t ii=0; ii<verticesToPrint->size( ); ii++) skip_vtx[ (*verticesToPrint)[ii] ] = false; } // Print the contigs.  We put each contig in its own cluster (the
// subgraph's name MUST start with "cluster" for this to have any effect).
for ( int sel_id = select.isize( ) - 1; sel_id >= 0; sel_id-- ) { int i = select[sel_id]; vec<int> &o = components[i]; if ( invisible != NULL ) { int vis_count = 0; for ( int vi = 0; vi < o.isize( ); vi++ ) { int v = o[vi]; if ( verticesToPrint && skip_vtx[v] ) continue; for ( int j = 0; j < From(v).isize( ); j++ ) { int ei = EdgeObjectIndexByIndexFrom( v, j ); if ( !(*invisible)[ei] ) vis_count++; } } if ( vis_count == 0 ) continue; } out << "\nsubgraph cluster" << i << " {\n"; out << "color=white;\n"; if ( label_contigs && label_contigs_extra ) { out << "label=\"" << contig_labels0[i] << "\"," << "fontsize=18," << "fontname=\"Times-Bold\"\n"; } // Find "leftmost" vertex in graph.
Sort(o); int leftv; FindLeftMostVertex( *this, lengths, o, invisible, leftv ); // Print component.
for ( int vi = 0; vi < o.isize( ); vi++ ) { int v = o[vi]; if ( verticesToPrint && skip_vtx[v] ) continue; if (label_vertices) { out << v << " [label=" << "\"" << v << "\"" << ",fontcolor=black];\n"; } // If some edges touching a vertex are invisible, and some are
// visible, make vertex red.  Note incompatibility with
// label_vertices.
else if ( invisible != NULL ) { LabelTransitionVertices( *this, v, invisible, out ); } for ( int j = 0; j < From(v).isize( ); j++ ) { int ei = EdgeObjectIndexByIndexFrom( v, j ); if ( invisible != NULL && (*invisible)[ei] ) continue; int w = From(v)[j]; PrintEdge<F>( v, w, ei, lengths, dashed, edge_color, tiny_top, eli, out ); if ( label_contigs && v == leftv && j == 0 && !label_contigs_extra ) { out << contig_labels[i]; } if ( pen_widths != NULL && (*pen_widths)[ei] > 0 ) out << ",penwidth=" << (*pen_widths)[ei]; out << "];\n"; } } out << "}\n"; } out << "\n}" << endl; out << "#done" << endl; }
// PrettyDOT (digraphEX): same DOT rendering as the digraphE version, using
// the digraphEX accessors (IFrom, ToLeft(e)/ToRight(e), E()) and PrintEdge2.
template<class F> void digraphEX<F>::PrettyDOT( ostream& out, const vec<double>& lengths, const edge_label_info eli, Bool label_contigs, Bool label_vertices, const vec<int>* componentsToPrint, const vec<String> *label_contigs_extra, const vec<int> *verticesToPrint, const vec<Bool>* dashed, const vec<Bool>* invisible, const vec<String>* edge_color, const vec<int>* pen_widths, const String layout, const double tiny_top, const double fontsize, const double scale ) const { // Define components and those that are selected.
vec< vec<int> > components; if ( invisible == NULL ) Components(components); else { vec<Bool> invis( N( ), True ); for ( int e = 0; e < E( ); e++ ) { if ( !(*invisible)[e] ) invis[ ToLeft(e) ] = invis[ ToRight(e) ] = False; } Components( components, &invis ); } vec<int> select; if (componentsToPrint) select = *componentsToPrint; else { select.reserve( components.size( ) ); for (int ii=0; ii<(int)components.size( ); ii++) select.push_back( ii ); } // Set up output and contig labels.
DotHeader<F>( label_contigs, label_vertices, layout, fontsize, scale, out ); vec<String> contig_labels0, contig_labels; if (label_contigs) { CreateContigLabels<F>( components, label_contigs_extra, contig_labels0, contig_labels ); } // Define vertices to skip.
vec<bool> skip_vtx; if (verticesToPrint) { skip_vtx.resize( this->N( ), true ); for (size_t ii=0; ii<verticesToPrint->size( ); ii++) skip_vtx[ (*verticesToPrint)[ii] ] = false; } // Print the contigs.  We put each contig in its own cluster (the
// subgraph's name MUST start with "cluster" for this to have any effect).
for ( int sel_id = select.isize( ) - 1; sel_id >= 0; sel_id-- ) { int i = select[sel_id]; vec<int> &o = components[i]; if ( invisible != NULL ) { int vis_count = 0; for ( int vi = 0; vi < o.isize( ); vi++ ) { int v = o[vi]; if ( verticesToPrint && skip_vtx[v] ) continue; for ( int j = 0; j < (int) From(v).size( ); j++ ) { int ei = IFrom( v, j ); if ( !(*invisible)[ei] ) vis_count++; } } if ( vis_count == 0 ) continue; } out << "\nsubgraph cluster" << i << " {\n"; out << "color=white;\n"; if ( label_contigs && label_contigs_extra ) { out << "label=\"" << contig_labels0[i] << "\"," << "fontsize=18," << "fontname=\"Times-Bold\"\n"; } // Find "leftmost" vertex in graph.
Sort(o); int leftv; FindLeftMostVertex( *this, lengths, o, invisible, leftv ); // Print component.
for ( int vi = 0; vi < o.isize( ); vi++ ) { int v = o[vi]; if ( verticesToPrint && skip_vtx[v] ) continue; if (label_vertices) { out << v << " [label=" << "\"" << v << "\"" << ",fontcolor=black];\n"; } // If some edges touching a vertex are invisible, and some are
// visible, make vertex red.  Note incompatibility with
// label_vertices.
else if ( invisible != NULL ) { LabelTransitionVertices( *this, v, invisible, out ); } for ( int j = 0; j < (int) From(v).size( ); j++ ) { int ei = IFrom( v, j ); if ( invisible != NULL && (*invisible)[ei] ) continue; int w = From(v)[j]; PrintEdge2<F>( v, w, ei, lengths, dashed, edge_color, tiny_top, eli, out ); if ( label_contigs && v == leftv && j == 0 && !label_contigs_extra ) { out << contig_labels[i]; } if ( pen_widths != NULL && (*pen_widths)[ei] > 0 ) out << ",penwidth=" << (*pen_widths)[ei]; out << "];\n"; } } out << "}\n"; } out << "\n}" << endl; out << "#done" << endl; }
// Method: DumpGraphML
// Output the digraph structure in a textual format that can be easily
// read without reference to our code base.
// Edge labels are the BaseAlpha encodings of the edge indices.
template<class E> void digraphE<E>::DumpGraphML( const String& graphMLFileName ) const { vec< vec< String > > edgeLabels( N() ); for ( int v = 0; v < N( ); v++ ) { for ( int j = 0; j < From(v).isize( ); j++ ) { int w = From(v)[j]; edgeLabels[ v ].push_back( BaseAlpha( EdgeObjectIndexByIndexFrom( v, j ) ) ); } } Ofstream( grml, graphMLFileName ); WriteGraphML( grml, edgeLabels ); }
// ComponentsE: return the connected components of the graph, each expressed
// as the list of edge indices it contains (components joined via From edges).
template<class E> void digraphE<E>::ComponentsE( vec< vec<int> >& comp ) const { comp.clear( ); equiv_rel e( N( ) ); for ( int v = 0; v < N( ); v++ ) { for ( int j = 0; j < From(v).isize( ); j++ ) e.Join( v, From(v)[j] ); } for ( int x = 0; x < N( ); x++ ) { if ( e.Representative(x) ) { vec<int> o; e.Orbit( x, o ); Sort(o); vec<int> C; for ( int i = 0; i < o.isize( ); i++ ) { int v = o[i]; for ( int j = 0; j < From(v).isize( ); j++ ) C.push_back( EdgeObjectIndexByIndexFrom( v, j ) ); } comp.push_back(C); } } }
// LongestPath: using DistancesToEnd (member pointer 'len' gives each edge's
// length), start from the vertex with the greatest distance-to-end and walk
// greedily along edges that realize that distance, recording edge indices.
template<class E> void LongestPath( const digraphE<E>& G, int (E::*len)( ) const, vec<int>& a_longest_path ) { vec<int> D; const int infinity = 2000000000; DistancesToEnd( G, len, infinity, True, D ); int M = 0, v = 0; for ( int x = 0; x < G.N( ); x++ ) if ( D[x] > M ) { M = D[x], v = x; } a_longest_path.clear( ); while( G.From(v).nonempty( ) ) { for ( int j = 0; j < 
G.From(v).isize( ); j++ ) { int w = G.From(v)[j]; if ( D[w] == D[v] - ((G.EdgeObjectByIndexFrom( v, j )).*len)( ) ) { a_longest_path.push_back( G.EdgeObjectIndexByIndexFrom( v, j ) ); v = w; break; } } } }
// DistancesToEndArr: compute D[v] = maximum total edge length of a path from
// v to a sink (fw=True) or from a source to v (fw=False), capped at max_dist;
// edgeLens supplies each edge's length by index.
template<class E> void DistancesToEndArr( const digraphE<E>& G, vec<int> const& edgeLens, const int max_dist, const Bool fw, vec<int>& D ) { // Let D(v) be the maximum length of a path starting at v, to be computed.
// Define initial values for D(v) to be 'undefined', except for sinks,
// which are zero.
D.resize_and_set( G.N( ), -1 ); for ( int v = 0; v < G.N( ); v++ ) { if ( fw && G.Sink(v) ) D[v] = 0; if ( !fw && G.Source(v) ) D[v] = 0; } // Initialize vertices to process.
vec<Bool> to_process( G.N( ), False ); vec<int> to_processx; for ( int v = 0; v < G.N( ); v++ ) { if ( (fw && G.Sink(v)) || (!fw && G.Source(v)) ) { to_process[v] = True, to_processx.push_back(v); } } // Now compute D.  Uncomputed values are set to 'infinity'.
while( to_processx.nonempty( ) ) { int v = to_processx.back( ); to_processx.pop_back( ); to_process[v] = False; for ( int j = 0; j < (fw ? G.To(v) : G.From(v) ).isize( ); j++ ) { int w = ( fw ? G.To(v) : G.From(v) )[j]; if ( D[w] >= max_dist ) continue; int edgeId = ( fw ? G.EdgeObjectIndexByIndexTo(v, j) : G.EdgeObjectIndexByIndexFrom(v, j) ); int Dw_new = edgeLens[edgeId] + D[v]; if ( Dw_new > D[w] ) { D[w] = Dw_new; if ( !to_process[w] ) { to_process[w] = True; to_processx.push_back(w); } } } } for ( int v = 0; v < G.N( ); v++ ) if ( D[v] < 0 ) D[v] = max_dist; }
// RemoveHangingEnds: delete edges that dangle off the graph — at each vertex,
// an outgoing (pass 1) or incoming (pass 2) branch is "hanging" if its
// distance-to-end is <= max_del and the best sibling branch is at least
// min_ratio times longer.
template<class E> void RemoveHangingEnds( digraphE<E>& G, int (E::*len)( ) const, const int max_del, const double min_ratio ) { // Track hanging ends.
vec<Bool> hanging( G.EdgeObjectCount( ), False ); // Define the maximum length that we care about.
const int max_dist = int( ceil( double(max_del) * min_ratio ) ); // Go through two passes (forward and reverse).
for ( int pass = 1; pass <= 2; pass++ ) { // Compute distances to end.
vec<int> D; DistancesToEnd( G, len, max_dist, pass == 1, D ); // Identify hanging ends.
for ( int v = 0; v < G.N( ); v++ ) { const vec<int>& V = ( pass == 1 ? G.From(v) : G.To(v) ); vec<int> d( V.size( ) ); vec<int> id( V.size( ), vec<int>::IDENTITY ); for ( int j = 0; j < V.isize( ); j++ ) { d[j] = ((pass == 1 ? G.EdgeObjectByIndexFrom(v,j) : G.EdgeObjectByIndexTo(v,j)) .*len)( ) + D[ V[j] ]; } ReverseSortSync( d, id ); for ( int j = 1; j < d.isize( ); j++ ) { if ( d[j] <= max_del && d[0] >= d[j] * min_ratio ) { hanging[ ( pass == 1 ? G.EdgeObjectIndexByIndexFrom( v, id[j] ) : G.EdgeObjectIndexByIndexTo( v, id[j] ) ) ] = True; } } } } // Remove hanging ends.
vec<int> to_delete; for ( int i = 0; i < G.EdgeObjectCount( ); i++ ) if ( hanging[i] ) to_delete.push_back(i); G.DeleteEdges(to_delete); }
// Remove short hanging ends.  Look for
//
//                x
//                |
//                e
//                |
//        u --c--> v --d--> w
//
// where x is a source or sink, e is short (and can go either way), whereas
// c and d are long.  Works for T = HyperKmerPath and T = HyperFastavector.
template<class T> void RemoveHangingEnds2( T& h,const int max_del, const double min_ratio ) { for ( int x = 0; x < h.N( ); x++ ) { // Check that basic assumptions are satisfied, including length(e) <= 5kb.
int v, c, d, e; if ( h.Source(x) && h.From(x).size( ) == 1 ) { v = h.From(x)[0]; e = h.EdgeObjectIndexByIndexFrom( x, 0 ); } else if ( h.Sink(x) && h.To(x).size( ) == 1 ) { v = h.To(x)[0]; e = h.EdgeObjectIndexByIndexTo( x, 0 ); } else continue; if ( h.EdgeLengthKmers(e) > max_del ) continue; if ( h.Source(x) ) { if ( !( h.From(v).size( ) == 1 && h.To(v).size( ) == 2 ) ) continue; d = h.EdgeObjectIndexByIndexFrom( v, 0 ); c = h.EdgeObjectIndexByIndexTo( v, 0 ); if ( c == e ) c = h.EdgeObjectIndexByIndexTo( v, 1 ); } else { if ( !( h.From(v).size( ) == 2 && h.To(v).size( ) == 1 ) ) continue; c = h.EdgeObjectIndexByIndexTo( v, 0 ); d = h.EdgeObjectIndexByIndexFrom( v, 0 ); if ( d == e ) d = h.EdgeObjectIndexByIndexFrom( v, 1 ); } // We require that there is an edge "competing with e", that is at least
// 20 times longer.
// NOTE(review): these 'static' locals make this function non-reentrant and
// not thread-safe — confirm callers are single-threaded before relying on it.
static vec<int> v_only(1), to_v, from_v; v_only[0] = v; int max_competitor = 0; if ( h.Source(x) ) { h.digraph::GetPredecessors( v_only, to_v ); for ( int j = 0; j < to_v.isize( ); j++ ) { int z = to_v[j]; for ( int i = 0; i < h.To(z).isize( ); i++ ) { int e = h.EdgeObjectIndexByIndexTo( z, i ); max_competitor = Max( max_competitor, h.EdgeLengthKmers(e) ); } } } else { h.digraph::GetSuccessors( v_only, from_v ); for ( int j = 0; j < from_v.isize( ); j++ ) { int z = from_v[j]; for ( int i = 0; i < h.From(z).isize( ); i++ ) { int e = h.EdgeObjectIndexByIndexFrom( z, i ); max_competitor = Max( max_competitor, h.EdgeLengthKmers(e) ); } } } if ( min_ratio * h.EdgeLengthKmers(e) > max_competitor ) continue; // Edit the graph.
if ( h.Source(x) ) h.DeleteEdgeFrom( x, 0 ); else h.DeleteEdgeTo( x, 0 ); } }
// Find the indices of all edges e that form self-loops, i.e., e goes from v -> v.
// SelfLoops: return the indices of used edges whose source equals their sink.
template<class E> vec<int> digraphE<E>::SelfLoops( ) const { vec<int> to_left, to_right; ToLeft( to_left ); ToRight( to_right ); vec<Bool> used; Used( used ); vec<int> self_loops; for ( int i = 0; i < EdgeObjectCount(); i++ ) if ( to_left[i] == to_right[i] && used[i] ) self_loops.push_back( i ); return self_loops; }
// LoopSubgraph: return (sorted) the indices of all edges lying inside a
// strongly connected component, i.e. edges that can be part of a cycle.
template<class E> void digraphE<E>::LoopSubgraph( vec<int>& loop_edges ) const { loop_edges.clear( ); vec< vec<int> > SCC; StronglyConnectedComponents(SCC); for ( int i = 0; i < SCC.isize( ); i++ ) { const vec<int>& V = SCC[i]; for ( int r = 0; r < V.isize( ); r++ ) { int v = V[r]; for ( int j = 0; j < From(v).isize( ); j++ ) { if ( BinMember( V, From(v)[j] ) ) { loop_edges.push_back( EdgeObjectIndexByIndexFrom( v, j ) ); } } } } Sort(loop_edges); }
// SplayVertex: detach every edge incident on v, giving each one its own brand
// new endpoint vertex (v is left with no incident edges).
template<class E> void digraphE<E>::SplayVertex( const int v ) { int n = N( ); AddVertices( To(v).size( ) ); for ( int j = To(v).isize( ) - 1; j >= 0; j-- ) GiveEdgeNewToVx( EdgeObjectIndexByIndexTo( v, j ), v, n + j ); n = N( ); AddVertices( From(v).size( ) ); for ( int j = From(v).isize( ) - 1; j >= 0; j-- ) { GiveEdgeNewFromVx( EdgeObjectIndexByIndexFrom( v, j ), v, n + j ); } }
// LiberateEdge: delete edge e (which runs v -> w), then splay both of its
// former endpoints.
template<class E> void digraphE<E>::LiberateEdge( const int e, const int v, const int w ) { int j = EdgeObjectIndexToFromIndex( v, e ); DeleteEdgeFrom( v, j ); SplayVertex(v), SplayVertex(w); }
// GiveEdgeNewFromVx: reattach edge edge_id so that its source becomes
// new_from_v instead of old_from_v, keeping adjacency lists sorted in sync.
template<class E> void digraphE<E>::GiveEdgeNewFromVx ( int edge_id, int old_from_v, int new_from_v ) { int i = Position( from_edge_obj_[old_from_v], edge_id ); ForceAssert( i != -1 ); int w = from_[old_from_v][i]; int j = Position( to_edge_obj_[w],edge_id ); ForceAssert( j != -1 ); to_[w][j] = new_from_v; from_[old_from_v].erase( from_[old_from_v].begin() + i ); from_edge_obj_[old_from_v].erase( from_edge_obj_[old_from_v].begin() + i ); from_[new_from_v].push_back(w); from_edge_obj_[new_from_v].push_back(edge_id); SortSync( to_[w], to_edge_obj_[w] ); SortSync( from_[new_from_v], from_edge_obj_[new_from_v] ); } template<class E> void 
digraphE<E>::GiveEdgeNewToVx ( int edge_id, int old_to_w, int new_to_w ) { int j = Position( to_edge_obj_[old_to_w], edge_id ); ForceAssert( j != -1 ); int v = to_[old_to_w][j]; int i = Position( from_edge_obj_[v],edge_id ); ForceAssert( i != -1 ); from_[v][i] = new_to_w; to_[old_to_w].erase( to_[old_to_w].begin() + j ); to_edge_obj_[old_to_w].erase( to_edge_obj_[old_to_w].begin() + j ); to_[new_to_w].push_back(v); to_edge_obj_[new_to_w].push_back(edge_id); SortSync( from_[v], from_edge_obj_[v] ); SortSync( to_[new_to_w], to_edge_obj_[new_to_w] ); }
// AddEdge: insert a new edge v -> w carrying object e, keeping adjacency
// lists sorted; returns the new edge's index.
template<class F> int digraphE<F>::AddEdge( int v, int w, const F& e ) { int n = EdgeObjectCount( ); edges_.push_back(e); int i = upper_bound( from_[v].begin(), from_[v].end(), w ) - from_[v].begin(); from_[v].insert( from_[v].begin()+i, w ); from_edge_obj_[v].insert( from_edge_obj_[v].begin()+i, n ); int j = upper_bound( to_[w].begin(), to_[w].end(), v ) - to_[w].begin(); to_[w].insert( to_[w].begin()+j, v ); to_edge_obj_[w].insert( to_edge_obj_[w].begin()+j, n ); return n; }
// EdgePaths: enumerate all edge-index paths from v to w (left/right map each
// edge to its endpoints).  Returns False when any of the optional limits
// (max_copies per edge, max_paths, max_iterations) is exceeded.
template<class E> Bool digraphE<E>::EdgePaths( const vec<int>& left, const vec<int>& right, const int v, const int w, vec< vec<int> >& paths, const int max_copies, const int max_paths, const int max_iterations ) const { // Pretest to determine if the computation will explode.  This only works if
// max_copies is not set.
if ( max_copies < 0 && ( max_paths >= 0 || max_iterations >= 0 ) ) { vec<int> subs; int path_count = 0; for ( int i = 0; i < From(v).isize( ); i++ ) { int e = EdgeObjectIndexByIndexFrom( v, i ); subs.push_back(e); } int iterations = 0; while( subs.nonempty( ) ) { if ( max_iterations > 0 && ++iterations > max_iterations ) return False; int p = subs.back( ); subs.pop_back( ); int x = right[p]; if ( x == w ) { if ( max_paths >= 0 && ++path_count > max_paths ) return False; } else { for ( int j = 0; j < From(x).isize( ); j++ ) { int e = EdgeObjectIndexByIndexFrom( x, j ); subs.push_back(e); } } } } // Now do the computation for real.
vec< vec<int> > subs; paths.clear( ); for ( int i = 0; i < From(v).isize( ); i++ ) { int e = EdgeObjectIndexByIndexFrom( v, i ); vec<int> one; one.push_back(e); subs.push_back(one); } int iterations = 0; while( subs.nonempty( ) ) { if ( max_iterations > 0 && ++iterations > max_iterations ) return False; vec<int> p = subs.back( ); subs.resize( subs.isize( ) - 1 ); int x = right[ p.back( ) ]; if ( x == w ) { paths.push_back(p); if ( max_paths >= 0 && paths.isize( ) > max_paths ) return False; } else { for ( int j = 0; j < From(x).isize( ); j++ ) { int e = EdgeObjectIndexByIndexFrom( x, j ); vec<int> pp(p); pp.push_back(e); if ( max_copies >= 0 ) { vec<int> pps(pp); Sort(pps); Bool fail = False; for ( int r = 0; r < pps.isize( ); r++ ) { int s = pps.NextDiff(r); if ( s - r > max_copies ) { fail = True; break; } r = s - 1; } if (fail) continue; } subs.push_back(pp); } } } return True; }
// EdgePaths (convenience overload): computes ToLeft/ToRight itself and
// delegates to the main overload.
template<class E> Bool digraphE<E>::EdgePaths( const int v, const int w, vec< vec<int> >& paths, const int max_copies, const int max_paths, const int max_iterations ) const { vec<int> left, right; ToLeft(left), ToRight(right); return EdgePaths( left, right, v, w, paths, max_copies, max_paths, max_iterations ); }
// DeleteEdgeTo: delete the j-th incoming edge of w from both endpoint lists.
template<class E> void digraphE<E>::DeleteEdgeTo( int w, int j ) { int v = to_[w][j]; int i = InputToOutputFrom( w, j ); to_[w].erase( to_[w].begin( ) + j ); to_edge_obj_[w].erase( to_edge_obj_[w].begin( ) + j ); from_[v].erase( from_[v].begin( ) + i ); from_edge_obj_[v].erase( from_edge_obj_[v].begin( ) + i ); }
// DeleteEdgeFrom: delete the j-th outgoing edge of v from both endpoint lists.
template<class E> void digraphE<E>::DeleteEdgeFrom( int v, int j ) { int w = from_[v][j]; int i = InputFromOutputTo( v, j ); from_[v].erase( from_[v].begin( ) + j ); from_edge_obj_[v].erase( from_edge_obj_[v].begin( ) + j ); to_[w].erase( to_[w].begin( ) + i ); to_edge_obj_[w].erase( to_edge_obj_[w].begin( ) + i ); }
// DeleteEdgesTo: delete the incoming edges of w listed (ascending) in js;
// iterates backwards so earlier deletions do not shift later indices.
template<class E> void digraphE<E>::DeleteEdgesTo( int w, const vec<int>& js ) { for ( int l = js.isize( ) - 1; l >= 0; l-- ) DeleteEdgeTo( w, js[l] ); }
// DeleteEdgesFrom: delete the outgoing edges of v listed (ascending) in js;
// iterates backwards so earlier deletions do not shift later indices.
template<class E> void digraphE<E>::DeleteEdgesFrom( int v, const vec<int>& js ) { for ( int l = js.isize( ) - 1; l >= 0; l-- ) DeleteEdgeFrom( v, js[l] ); }
// EdgesBetween: indices of all edges running directly from v to w.
template<class E> vec<int> digraphE<E>::EdgesBetween( const int v, const int w ) const { vec<int> b; for ( int i = 0; i < From(v).isize( ); i++ ) { if ( From(v)[i] == w ) b.push_back( EdgeObjectIndexByIndexFrom( v, i ) ); } return b; }
// EdgesBetween (set version): sorted indices of all edges whose endpoints
// both lie in the (sorted) vertex list v.
template<class E> vec<int> digraphE<E>::EdgesBetween( const vec<int>& v ) const { vec<int> b; for ( int j = 0; j < v.isize( ); j++ ) { for ( int i = 0; i < From(v[j]).isize( ); i++ ) { if ( BinMember( v, From(v[j])[i] ) ) b.push_back( EdgeObjectIndexByIndexFrom( v[j], i ) ); } } Sort(b); return b; }
// EdgeObjectsBetween: the edge objects (not indices) running from v to w.
template<class F> vec<F> digraphE<F>::EdgeObjectsBetween( const int v, const int w ) const { vec<F> b; for ( int i = 0; i < From(v).isize( ); i++ ) { if ( From(v)[i] == w ) b.push_back( EdgeObjectByIndexFrom( v, i ) ); } return b; }
// InputToOutputFrom: translate the i-th incoming slot of w into the matching
// outgoing slot index at the edge's source vertex; asserts if not found.
template<class E> int digraphE<E>::InputToOutputFrom( int w, int i ) const { int v = to_[w][i]; int ei = to_edge_obj_[w][i]; for ( int j = 0; j < from_[v].isize( ); j++ ) if ( from_edge_obj_[v][j] == ei ) return j; ForceAssert( 0 == 1 ); return -1; }
// InputFromOutputTo: translate the i-th outgoing slot of w into the matching
// incoming slot index at the edge's sink vertex; asserts if not found.
template<class E> int digraphE<E>::InputFromOutputTo( int w, int i ) const { int v = from_[w][i]; int ei = from_edge_obj_[w][i]; for ( int j = 0; j < to_[v].isize( ); j++ ) if ( to_edge_obj_[v][j] == ei ) return j; ForceAssert( 0 == 1 ); return -1; }
// ChangeEdgeObjectFrom: replace the object of v's i-th outgoing edge with e.
// The old object is retained in edges_; both endpoints point at the new copy.
template<class F> void digraphE<F>::ChangeEdgeObjectFrom( int v, int i, const F& e ) { int ne = edges_.size( ); edges_.push_back(e); int w = From(v)[i]; int j = InputFromOutputTo( v, i ); from_edge_obj_[v][i] = ne; to_edge_obj_[w][j] = ne; }
// MinEdge: minimum edge object among all edges from v to w; asserts that at
// least one such edge exists.
template<class F> F digraphE<F>::MinEdge( int v, int w ) { F m = 0; Bool first = True; for ( int j = 0; j < From(v).isize( ); j++ ) { if ( From(v)[j] != w ) continue; if (first) m = EdgeObjectByIndexFrom( v, j ); else m = Min( m, EdgeObjectByIndexFrom( v, j ) ); first = False; } ForceAssert( !first ); return m; }
// Return the maximum edge object over all edges from v to w.
// Asserts that at least one such edge exists.
template<class F> F digraphE<F>::MaxEdge( int v, int w )
{    F M = 0;
     Bool first = True;
     for ( int j = 0; j < From(v).isize( ); j++ )
     {    if ( From(v)[j] != w ) continue;
          if (first) M = EdgeObjectByIndexFrom( v, j );
          else M = Max( M, EdgeObjectByIndexFrom( v, j ) );
          first = False;    }
     ForceAssert( !first );
     return M;    }

// Append nadd isolated vertices to the graph.
template<class F> void digraphE<F>::AddVertices( int nadd )
{    int nvert = N( );
     from_.resize( nvert + nadd );
     to_.resize( nvert + nadd );
     from_edge_obj_.resize( nvert + nadd );
     to_edge_obj_.resize( nvert + nadd );    }

// Detach the given edges (by id) from the graph.  The list is unique-sorted
// into a local copy first if needed, because membership is tested by binary
// search.  Edge objects themselves stay in edges_.
template<class F> void digraphE<F>::DeleteEdges( const vec<int>& to_delete )
{    vec<int> to_delete_local;
     if ( !to_delete.UniqueOrdered( ) )
     {    to_delete_local = to_delete;
          UniqueSort(to_delete_local);    }
     const vec<int>& tod
          = ( to_delete_local.nonempty( ) ? to_delete_local: to_delete );
     for ( int v = 0; v < N( ); v++ )
     {    for ( int j = From(v).isize( ) - 1; j >= 0; j-- )
          {    int e = EdgeObjectIndexByIndexFrom( v, j );
               if ( BinMember( tod, e ) ) DeleteEdgeFrom( v, j );    }    }    }

// Parallel edge deletion: to_delete is a per-edge flag vector.  Each thread
// owns a disjoint set of vertices and edits only that vertex's own from_/to_
// lists, so the two mirrors are updated independently without locking.
template<class F> void digraphE<F>::DeleteEdgesParallel( const vec<Bool>& to_delete )
{    ForceAssertEq(to_delete.isize(), EdgeObjectCount());
     #pragma omp parallel for
     for ( int v = 0; v < N( ); v++ )
     {    for ( int j = From(v).isize( ) - 1; j >= 0; j-- )
          {    int e = from_edge_obj_[v][j];
               if (to_delete[e])
               {    from_[v].erase( from_[v].begin( ) + j );
                    from_edge_obj_[v].erase( from_edge_obj_[v].begin( ) + j );    }    }
          for ( int j = To(v).isize( ) - 1; j >= 0; j-- )
          {    int e = to_edge_obj_[v][j];
               if (to_delete[e])
               {    to_[v].erase( to_[v].begin( ) + j );
                    to_edge_obj_[v].erase( to_edge_obj_[v].begin( ) + j );    }    }    }    }

// As DeleteEdges(to_delete), but with a precomputed edge -> source-vertex map
// (to_left) so only vertices actually touching a deleted edge are scanned.
template<class F> void digraphE<F>::DeleteEdges( const vec<int>& to_delete,
     const vec<int>& to_left )
{    vec<int> to_delete_local;
     if ( !to_delete.UniqueOrdered( ) )
     {    to_delete_local = to_delete;
          UniqueSort(to_delete_local);    }
     const vec<int>& tod = ( to_delete_local.nonempty( ) ?
          to_delete_local: to_delete );
     vec<int> vs;
     for ( int i = 0; i < to_delete.isize( ); i++ )
          vs.push_back( to_left[ to_delete[i] ] );
     UniqueSort(vs);
     for ( int i = 0; i < vs.isize( ); i++ )
     {    int v = vs[i];
          for ( int j = From(v).isize( ) - 1; j >= 0; j-- )
          {    int e = EdgeObjectIndexByIndexFrom( v, j );
               if ( BinMember( tod, e ) ) DeleteEdgeFrom( v, j );    }    }    }

// Id of the edge object attached to the j-th incoming edge of v.
template<class F> int digraphE<F>::EdgeObjectIndexByIndexTo( int v, int j ) const
{    CheckGoodVertex(v);
     AssertGe( j, 0 );
     AssertLt( j, to_edge_obj_[v].isize( ) );
     return to_edge_obj_[v][j];    }

// Position of edge id e within v's outgoing list, or -1 if absent.
template<class F> int digraphE<F>::EdgeObjectIndexToFromIndex( int v, int e ) const
{    AssertGe( v, 0 );
     AssertLt( v, from_edge_obj_.isize( ) );
     for ( int i = 0; i < from_edge_obj_[v].isize( ); i++ )
          if ( from_edge_obj_[v][i] == e ) return i;
     return -1;    }

// Position of edge id e within v's incoming list, or -1 if absent.
template<class F> int digraphE<F>::EdgeObjectIndexToToIndex( int v, int e ) const
{    AssertGe( v, 0 );
     AssertLt( v, to_edge_obj_.isize( ) );
     for ( int i = 0; i < to_edge_obj_[v].isize( ); i++ )
          if ( to_edge_obj_[v][i] == e ) return i;
     return -1;    }

template<class F> bool operator!=( const digraphE<F>& g1, const digraphE<F>& g2 )
{    return !(g1==g2);    }

// True iff the two graphs have identical topology and, position by position,
// equal edge objects -- regardless of where those objects sit inside edges_.
template<class F> bool EqualExceptEdgeObjectOrder( const digraphE<F>& g1,
     const digraphE<F>& g2 )
{    if ( static_cast<digraph const&>(g1) != static_cast<digraph const&>(g2) )
          return false;
     // digraphs are the same, now check edge objects
     typedef vec<int> V;
     typedef V::const_iterator VI;
     typedef vec<V> VV;
     typedef VV::const_iterator VVI;
     VV const& vv1 = g1.FromEdgeObj();
     VV const& vv2 = g2.FromEdgeObj();
     if ( vv1.size() != vv2.size() ) return false;
     VVI oE(vv1.end());
     for ( VVI o1(vv1.begin()), o2(vv2.begin()); o1 != oE; ++o1, ++o2 )
     {    if ( o1->size() != o2->size() ) return false;
          VI iE(o1->end());
          for ( VI i1(o1->begin()), i2(o2->begin()); i1 != iE; ++i1, ++i2 )
               if ( !(g1.EdgeObject(*i1) == g2.EdgeObject(*i2)) ) return false;    }
     return true;    }

// Strict equality: equal up to edge-object order AND identical edges_ vectors.
template<class E> bool operator==( const digraphE<E>& g1, const digraphE<E>& g2 )
{    if ( !EqualExceptEdgeObjectOrder( g1, g2 ) ) return false;
     return g1.Edges( ) == g2.Edges( );    }

// Print a field-by-field diagnosis of how g1 and g2 differ.
// NOTE(review): output goes to cout, not to the 'out' parameter, which is
// unused here -- looks unintentional; confirm before relying on 'out'.
template<class E> void Compare( ostream& out, const digraphE<E>& g1,
     const digraphE<E>& g2 )
{    if ( g1.N( ) != g2.N( ) )
          cout << "first graph has " << g1.N( ) << " vertices but "
               << "second graph has " << g2.N( ) << "\n";
     if ( g1.From( ) != g2.From( ) ) cout << "from_ not the same\n";
     if ( g1.To( ) != g2.To( ) ) cout << "to_ not the same\n";
     if ( g1.Edges( ) != g2.Edges( ) ) cout << "edges_ not the same\n";
     if ( g1.ToEdgeObj( ) != g2.ToEdgeObj( ) )
          cout << "to_edge_obj_ not the same\n";
     if ( g1.FromEdgeObj( ) != g2.FromEdgeObj( ) )
          cout << "from_edge_obj_ not the same\n";
     if ( g1 != g2 ) cout << "DIGRAPHS ARE NOT EQUAL\n";
     return;    }

// Empty the graph: drop all vertices, adjacency, and edge objects.
template<class E> void digraphE<E>::Clear( )
{    from_.clear( ), to_.clear( );
     from_edge_obj_.clear( ), to_edge_obj_.clear( );
     edges_.clear( );    }

// Bounds-checked access to an edge object by id (const and mutable flavors).
template<class E> const E& digraphE<E>::EdgeObject( int i ) const
{    AssertGe( i, 0 );
     AssertLt( i, edges_.isize( ) );
     return edges_[i];    }

template<class E> E& digraphE<E>::EdgeObjectMutable( int i )
{    AssertGe( i, 0 );
     AssertLt( i, edges_.isize( ) );
     return edges_[i];    }

// Bounds-checked access to vertex objects (digraphV and digraphVE flavors).
template<class V> const V& digraphV<V>::Vert( int v ) const
{    AssertGe( v, 0 );
     AssertLt( v, N( ) );
     return verts_[v];    }

template<class V> V& digraphV<V>::VertMutable( int v )
{    AssertGe( v, 0 );
     AssertLt( v, N( ) );
     return verts_[v];    }

template<class V, class E> const V& digraphVE<V,E>::Vert( int v ) const
{    AssertGe( v, 0 );
     AssertLt( v, N( ) );
     return verts_[v];    }

template<class V, class E> V& digraphVE<V,E>::VertMutable( int v )
{    AssertGe( v, 0 );
     AssertLt( v, N( ) );
     return verts_[v];    }

// Remove vertex v: delete its edges, erase its slots, then renumber every
// remaining adjacency entry >= v down by one.
template<class V> void digraphV<V>::DeleteVertex( const int v )
{    int n = N( );
     AssertGe( v, 0 );
     AssertLt( v, n );
     DeleteEdgesAtVertex(v);
     verts_.erase( verts_.begin( ) + v );
     from_.erase( from_.begin( ) + v );
     to_.erase( to_.begin( ) + v );
     for ( int x = 0; x < n - 1; x++ )
     {    for ( int j = 0; j < From(x).isize( ); j++ )
               if ( From(x)[j] >= v )
                    FromMutable(x)[j]--;
          for ( int j = 0; j < To(x).isize( ); j++ )
               if ( To(x)[j] >= v )
                    ToMutable(x)[j]--;    }    }

// Delete several vertices, highest index first so earlier indices stay valid;
// presumably v is sorted ascending -- TODO confirm at call sites.
template<class V> void digraphV<V>::DeleteVertices( const vec<int>& v )
{    for ( int m = v.isize( ) - 1; m >= 0; m-- )
          DeleteVertex( v[m] );    }

// Append an isolated vertex carrying object v; returns its new index.
template<class V> int digraphV<V>::AddVertex( const V& v )
{    verts_.push_back(v);
     from_.resize( from_.size( ) + 1 );
     to_.resize( to_.size( ) + 1 );
     return verts_.size() - 1;    }

// Append an isolated vertex to a digraphVE (grows all four adjacency arrays).
template<class V, class E> void digraphVE<V,E>::AddVertex( const V& v )
{    verts_.push_back(v);
     this->FromMutable( ).resize( this->From( ).size( ) + 1 );
     this->ToMutable( ).resize( this->To( ).size( ) + 1 );
     this->FromEdgeObjMutable( ).resize( this->FromEdgeObj( ).size( ) + 1 );
     this->ToEdgeObjMutable( ).resize( this->ToEdgeObj( ).size( ) + 1 );    }

// Remove the given vertices and their edges, keeping verts_ in sync with the
// base-graph renumbering performed by RemoveEdgelessVertices.
template<class V, class E> void digraphVE<V,E>::RemoveVertices( const vec<int>& to_remove )
{    vec<Bool> to_delete(verts_.size(),False );
     for( auto entry: to_remove){
          to_delete[entry]=True;
          digraphE<E>::DeleteEdgesAtVertex(entry);    }
     digraphE<E>::RemoveEdgelessVertices(to_remove);
     EraseIf(verts_,to_delete);    }

// All edges lying on some v -> ... -> w walk: edges whose endpoints are both
// in the intersection of v's successors and w's predecessors.
template<class E> vec<int> digraphE<E>::EdgesSomewhereBetween( const int v,
     const int w ) const
{    vec<int> answer, after_v, before_w, both;
     GetSuccessors1( v, after_v ), GetPredecessors1( w, before_w );
     Intersection( after_v, before_w, both );
     for ( int l = 0; l < both.isize( ); l++ )
     {    int s = both[l];
          for ( int j = 0; j < From(s).isize( ); j++ )
          {    int t = From(s)[j];
               if ( BinMember( both, t ) )
                    answer.append( EdgesBetween( s, t ) );    }    }
     UniqueSort(answer);
     return answer;    }

// Binary (de)serialization: base digraph first, then the edge-index mirrors
// and the edge objects, in matching order on both sides.
template<class E> void digraphE<E>::writeBinary( BinaryWriter& writer ) const
{    digraph::writeBinary(writer);
     writer.write(from_edge_obj_);
     writer.write(to_edge_obj_);
     writer.write(edges_);    }

template<class E> void digraphE<E>::readBinary( BinaryReader& reader )
{    digraph::readBinary(reader);
     reader.read(&from_edge_obj_);
     reader.read(&to_edge_obj_);
     reader.read(&edges_);    }

// digraphEX additionally persists its cached to_left_/to_right_ maps.
template<class F> void digraphEX<F>::writeBinary( BinaryWriter& writer ) const
{    digraphX::writeBinary(writer);
     writer.write(from_edge_obj_);
     writer.write(to_edge_obj_);
     writer.write(edges_);
     writer.write(to_left_);
     writer.write(to_right_);    }

template<class F> void digraphEX<F>::readBinary( BinaryReader& reader )
{    digraphX::readBinary(reader);
     reader.read(&from_edge_obj_);
     reader.read(&to_edge_obj_);
     reader.read(&edges_);
     reader.read(&to_left_);
     reader.read(&to_right_);    }

template<class V> void digraphV<V>::writeBinary( BinaryWriter& writer ) const
{    digraph::writeBinary(writer);
     writer.write(verts_);    }

template<class V> void digraphV<V>::readBinary( BinaryReader& reader )
{    digraph::readBinary(reader);
     reader.read( &verts_ );    }

template<class V, class E> void digraphVE<V,E>::writeBinary( BinaryWriter& writer ) const
{    digraphE<E>::writeBinary(writer);
     writer.write(verts_);    }

template<class V, class E> void digraphVE<V,E>::readBinary( BinaryReader& reader )
{    digraphE<E>::readBinary(reader);
     reader.read( &verts_ );    }

// Verify internal consistency of an embedded subpath: vertex list a_, local
// edge indices e_, and cached global edge ids esafe_ must all agree with the
// underlying graph D_.
template<class E> void EmbeddedSubPath<E>::TestValid( ) const
{    ForceAssertEq( e_.isize( ), a_.isize( ) - 1 );
     for ( int u = 0; u < a_.isize( ) - 1; u++ )
     {    const vec<int>& fr = D_->From( a_[u] );
          ForceAssertGe( e_[u], 0 );
          ForceAssertLt( e_[u], fr.isize( ) );
          ForceAssertEq( fr[ e_[u] ], a_[u+1] );
          ForceAssertEq( D_->EdgeObjectIndexByIndexFrom( a_[u], e_[u] ),
               esafe_[u] );    }    }

// Re-derive local edge indices from the stable global ids esafe_, after graph
// edits may have shifted positions within adjacency lists.
template<class E> void EmbeddedSubPath<E>::Repair( )
{    for ( int u = 0; u < e_.isize( ); u++ )
     {    if ( D_->EdgeObjectIndexByIndexFrom( a_[u], e_[u] ) != esafe_[u] )
               e_[u] = D_->EdgeObjectIndexToFromIndex( a_[u], esafe_[u] );    }    }

// For each vertex v, D[v] = max total length (via edge-length member 'len')
// over the explored vertex-simple paths from v (forward if fw, else
// backward); complete[v] records whether exploration finished within
// max_paths simultaneous paths.
// NOTE(review): parameter max_dist is unused in this body -- confirm whether
// a distance cutoff was intended here.
template<class E> void DistancesToEnd3( const digraphE<E>& G,
     int (E::*len)( ) const, const int max_dist, const Bool fw, vec<int>& D,
     vec<Bool>& complete, const int max_paths )
{    D.resize( G.N( ), 0 );
     complete.resize( G.N( ) );
     #pragma omp parallel for
     for ( int v = 0; v < D.isize( ); v++ )
     {    vec< pair< vec<int>, int > > paths( { make_pair( vec<int>({v}), 0 ) } );
          while( paths.isize( ) <= max_paths )
          {    vec< pair< vec<int>, int > > paths2;
               for ( const auto& p : paths )
               {    int x = p.first.back( );
                    vec< pair<int,int> > ext;
                    // Candidate extensions: unvisited neighbors paired with
                    // the length of the connecting edge.
                    for ( int j = 0;
                         j < ( fw ? G.From(x).isize( ) : G.To(x).isize( ) );
                         j++ )
                    {    int y = ( fw ? G.From(x)[j] : G.To(x)[j] );
                         if ( Member( p.first, y ) ) continue;
                         int e = ( fw ? G.EdgeObjectIndexByIndexFrom( x, j )
                              : G.EdgeObjectIndexByIndexTo( x, j ) );
                         int l = (G.EdgeObject(e).*len)( );
                         ext.push( y, l );    }
                    ReverseSort(ext);
                    // After the descending sort, keep one extension per
                    // distinct neighbor (the one with the largest length).
                    for ( int i = 0; i < ext.isize( ); i++ )
                    {    int j;
                         for ( j = i + 1; j < ext.isize( ); j++ )
                              if ( ext[j].first != ext[i].first ) break;
                         auto q(p);
                         q.first.push_back( ext[i].first );
                         q.second += ext[i].second;
                         paths2.push_back(q);
                         i = j - 1;    }
                    // Dead end: keep the path as-is.
                    if ( ext.empty( ) ) paths2.push_back(p);    }
               // Fixed point reached: nothing extended this round.
               if ( paths2 == paths ) break;
               paths = paths2;    }
          complete[v] = ( paths.isize( ) <= max_paths );
          for ( int i = 0; i < paths.isize( ); i++ )
               D[v] = Max( D[v], paths[i].second );    }    }

// Delete "hanging ends": edges that dead-end quickly while a sibling branch
// from the same vertex reaches at least min_ratio times further, checked in
// both the forward and reverse directions.
template<class E> void RemoveHangingEnds3( digraphE<E>& G,
     int (E::*len)( ) const, const int max_del, const double min_ratio,
     const int max_paths )
{
     // Track hanging ends.

     vec<Bool> hanging( G.EdgeObjectCount( ), False );

     // Define the maximum length that we care about.

     const int max_dist = int( ceil( double(max_del) * min_ratio ) );

     // Go through two passes (forward and reverse).

     for ( int pass = 1; pass <= 2; pass++ )
     {
          // Compute distances to end.

          vec<int> D;
          vec<Bool> complete;
          DistancesToEnd3( G, len, max_dist, pass == 1, D, complete, max_paths );

          // Identify hanging ends.

          #pragma omp parallel for
          for ( int v = 0; v < G.N( ); v++ )
          {    const vec<int>& V = ( pass == 1 ? G.From(v) : G.To(v) );
               vec<int> d( V.size( ) ), id( V.size( ), vec<int>::IDENTITY );
               vec<Bool> c( V.size( ) );
               // Reach of each branch = its own edge length + distance beyond.
               for ( int j = 0; j < V.isize( ); j++ )
               {    d[j] = ((pass == 1 ? G.EdgeObjectByIndexFrom(v,j)
                         : G.EdgeObjectByIndexTo(v,j)) .*len)( ) + D[ V[j] ];
                    c[j] = complete[ V[j] ];    }
               ReverseSortSync( d, c, id );
               // A branch hangs if it is short, fully explored, and dominated
               // by the longest branch from the same vertex.
               for ( int j = 1; j < d.isize( ); j++ )
               {    if ( d[j] <= max_del && d[0] >= d[j] * min_ratio && c[j] )
                    {    hanging[ ( pass == 1
                              ? G.EdgeObjectIndexByIndexFrom( v, id[j] )
                              : G.EdgeObjectIndexByIndexTo( v, id[j] ) ) ]
                              = True;    }    }    }    }

     // Remove hanging ends.

     vec<int> to_delete;
     for ( int i = 0; i < G.EdgeObjectCount( ); i++ )
          if ( hanging[i] ) to_delete.push_back(i);
     G.DeleteEdges(to_delete);    }

// Order-sensitive integer fingerprint of the graph's adjacency structure.
template<class E> int64_t digraphE<E>::CheckSum( ) const
{    int64_t x = 0;
     for ( int v = 0; v < N( ); v++ )
     {    for ( int j = 0; j < From(v).isize( ); j++ )
               x += ( v + 1 ) * (j + 1 ) * ( From(v)[j] + 1 );
          for ( int j = 0; j < To(v).isize( ); j++ )
               x += ( v + 1 ) * (j + 1 ) * ( To(v)[j] + 1 );
          for ( int j = 0; j < FromEdgeObj(v).isize( ); j++ )
               x += ( v + 1 ) * (j + 1 ) * ( FromEdgeObj(v)[j] + 1 );
          for ( int j = 0; j < ToEdgeObj(v).isize( ); j++ )
               x += ( v + 1 ) * (j + 1 ) * ( ToEdgeObj(v)[j] + 1 );    }
     return x;    }

// Bounds-checked edge-object access for the compact digraphEX.
template<class E> const E& digraphEX<E>::EdgeObject( int i ) const
{    AssertGe( i, 0 );
     AssertLt( i, (int) edges_.size( ) );
     return edges_[i];    }

// Build the compact digraphEX from a digraphE: copy adjacency and edge data,
// then fill the cached to_left_/to_right_ maps (edge id -> source/target).
template<class F> digraphEX<F>::digraphEX( const digraphE<F>& G )
{    from_.resize( G.N( ) );
     to_.resize( G.N( ) );
     from_edge_obj_.resize( G.N( ) );
     to_edge_obj_.resize( G.N( ) );
     for ( int i = 0; i < G.N( ); i++ )
     {    from_[i].resize( G.From(i).size( ) );
          for ( int j = 0; j < G.From(i).isize( ); j++ )
               from_[i][j] = G.From(i)[j];
          to_[i].resize( G.To(i).size( ) );
          for ( int j = 0; j < G.To(i).isize( ); j++ )
               to_[i][j] = G.To(i)[j];
          from_edge_obj_[i].resize( G.From(i).size( ) );
          for ( int j = 0; j < G.From(i).isize( ); j++ )
               from_edge_obj_[i][j] = G.IFrom( i, j );
          to_edge_obj_[i].resize( G.To(i).size( ) );
          for ( int j = 0; j < G.To(i).isize( ); j++ )
               to_edge_obj_[i][j] = G.ITo( i, j );    }
     edges_.resize( G.EdgeObjectCount( ) );
     for ( int e = 0; e < G.EdgeObjectCount( ); e++ )
          edges_[e] = G.EdgeObject(e);
     to_left_.resize( E( ) ), to_right_.resize( E( ) );
     for ( int v = 0; v < N( ); v++ )
     {    for ( int j = 0; j < (int) From(v).size( ); j++ )
               to_left_[ IFrom( v, j ) ] = v;
          for ( int j = 0; j < (int) To(v).size( ); j++ )
               to_right_[ ITo( v, j ) ] = v;    }    }

// Inverse conversion: expand this digraphEX back into a digraphE.
template<class F> digraphE<F> digraphEX<F>::AsDigraphE() const
{    vec< vec<int> > from(N()), to(N());
     vec< vec<int> > to_edge_obj(N()), from_edge_obj(N());
     vec<F> edges(E());
     for ( int i = 0; i < N( ); i++ )
     {    from[i].resize( From(i).size( ) );
          for ( size_t j = 0; j < From(i).size( ); j++ )
               from[i][j] = From(i)[j];
          to[i].resize( To(i).size( ) );
          for ( size_t j = 0; j < To(i).size( ); j++ )
               to[i][j] = To(i)[j];
          from_edge_obj[i].resize( From(i).size( ) );
          for ( size_t j = 0; j < From(i).size( ); j++ )
               from_edge_obj[i][j] = IFrom( i, j );
          to_edge_obj[i].resize( To(i).size( ) );
          for ( size_t j = 0; j < To(i).size( ); j++ )
               to_edge_obj[i][j] = ITo( i, j );    }
     for ( int e = 0; e < E(); e++ ) edges[e] = EdgeObject(e);
     return digraphE<F>(from, to, edges, to_edge_obj, from_edge_obj, true);    }

#endif
// ===== begin file: mv_tools.h =====
#ifndef __MV_TOOLS_H
#define __MV_TOOLS_H

namespace blue_sky
  {
  namespace bos_helper
    {
    namespace helper
      {
      // Compile-time trap: is_int<T>::value == 1 for the integer types below.
      // Used to reject integer element types in the inner products, where an
      // integer accumulator would silently truncate or overflow.
      template <typename T> struct is_int { enum { value = 0}; };
      template <> struct is_int <int> { enum { value = 1}; };
      template <> struct is_int <unsigned int> { enum { value = 1}; };
      template <> struct is_int <long> { enum { value = 1}; };
      template <> struct is_int <unsigned long> { enum { value = 1}; };
      }

    /**
     * Inner product of two same-length vectors (4-way unrolled main loop).
     * Element type must be floating point (enforced by the static asserts).
     * The trailing int parameter is obsolete and ignored.
     */
    template <class vector_v1_t, class vector_v2_t>
    inline typename vector_v1_t::value_type
    mv_vector_inner_product (const vector_v1_t &v1, const vector_v2_t &v2, int /* obsolete */ = 0)
    {
      BOOST_STATIC_ASSERT (helper::is_int <typename vector_v1_t::value_type>::value == 0);
      BOOST_STATIC_ASSERT (helper::is_int <typename vector_v2_t::value_type>::value == 0);

      typename vector_v1_t::value_type sum = 0;
      size_t i = 0;
      size_t n = v1.size ();
      size_t n2 = n - (n % 4);  // largest multiple of 4 <= n (unrolled part)

      BS_ASSERT (v1.size () == v2.size ());

#ifdef MV_VECTOR_INNER_PRODUCT_PARALLEL
#pragma omp parallel for reduction (+: sum)
#endif //MV_VECTOR_INNER_PRODUCT_PARALLEL
      for (i = 0; i < n2; i += 4)
        {
          sum += v1[i + 0] * v2[i + 0];
          sum += v1[i + 1] * v2[i + 1];
          sum += v1[i + 2] * v2[i + 2];
          sum += v1[i + 3] * v2[i + 3];
        }

      // FIX: start the remainder loop explicitly at n2.  The previous code
      // used "for (; i < n; ++i)"; under OpenMP the iteration variable of a
      // "parallel for" is implicitly private, so the outer i was still 0
      // after the loop and elements 0..n-1 were accumulated a second time.
      for (i = n2; i < n; ++i)
        {
          sum += v1[i] * v2[i];
        }

      return sum;
    }

#ifdef _MPI
    /**
     * MPI overload: local dot product over this rank's part, followed by a
     * global MPI_Allreduce (sum) over MPI_COMM_WORLD.
     */
    template <class T>
    inline T
    mv_vector_inner_product (const mpi_vector <T> &v1, const mpi_vector <T> &v2, int /* obsolete */ = 0)
    {
      const typename mpi_vector <T>::vector_t &v1_val = v1.get_local_part ();
      const typename mpi_vector <T>::vector_t &v2_val = v2.get_local_part ();

      BS_ASSERT (v1.size () == v2.size ());
      BS_ASSERT (v1.size ());
      BS_ASSERT (v1_val.size () == v2_val.size ());
      BS_ASSERT (v1_val.size ());

      double local_res, res;
      local_res = res = 0.0;
      for (int i = 0, n_local = (int)v1_val.size (); i < n_local; i++)
        local_res += v1_val[i] * v2_val[i];

      MPI_Allreduce (&local_res, &res, 1, mpi_type_t<T>::value, MPI_SUM, MPI_COMM_WORLD);
      return res;
    }
#endif
  } // namespace bos_helper

  /**
   * Small dense-vector kernels used by the linear solvers, parameterized on
   * the strategy's floating-point type.
   */
  template <class strategy_t>
  struct mv_tools
  {
    typedef typename strategy_t::item_t fp_type;
    typedef typename strategy_t::item_array_t item_array_t;

    /**
     * Raw-pointer inner product of v1 and v2 (length n).  With
     * MV_VECTOR_INNER_PRODUCT_PARALLEL defined, each thread sums a contiguous
     * slice and the partial sums are combined atomically.
     */
    static inline fp_type
    mv_vector_inner_product2 (const fp_type *v1, const fp_type *v2, const int n)
    {
      int i, istart = 0, iend = n;
      fp_type sum = 0; // double

#ifdef MV_VECTOR_INNER_PRODUCT_PARALLEL
      int thread_num, n_threads;
      fp_type total_sum = 0; // double

#pragma omp parallel private (i, sum, thread_num, n_threads, istart, iend)
      {
        sum = 0;
        thread_num = omp_get_thread_num ();
        // FIX: use the actual team size.  omp_get_max_threads() can exceed
        // the number of threads executing this region, which would leave the
        // tail of the vectors unprocessed.
        n_threads = omp_get_num_threads ();
        istart = thread_num * n / n_threads;
        iend = (thread_num + 1) * n / n_threads;
#endif //MV_VECTOR_INNER_PRODUCT_PARALLEL

        for (i = istart; i < iend; ++i)
          sum += v1[i] * v2[i];

#ifdef MV_VECTOR_INNER_PRODUCT_PARALLEL
#pragma omp atomic
        total_sum += sum;
      } //end parallel
      sum = total_sum;
#endif //MV_VECTOR_INNER_PRODUCT_PARALLEL

      return sum;
    }

    // r = a + cf1 * b + cf2 * c
    static inline void
    mv_lin_comb_1 (const int n, const fp_type cf1, const fp_type cf2,
                   const fp_type *a, const fp_type *b, const fp_type *c, fp_type *r)
    {
      int i;
#ifdef OTHER_NON_IMPORTANT_PARALLEL
#pragma omp parallel for
#endif //OTHER_NON_IMPORTANT_PARALLEL
      for (i = 0; i < n; ++i)
        r[i] = a[i] + cf1 * b[i] + cf2 * c[i];
    }

    // r = a + cf * b
    static inline void
    mv_lin_comb_2 (const int n, const fp_type cf,
                   const fp_type *a, const fp_type *b, fp_type *r)
    {
      int i;
#ifdef OTHER_NON_IMPORTANT_PARALLEL
#pragma omp parallel for
#endif //OTHER_NON_IMPORTANT_PARALLEL
      for (i = 0; i < n; ++i)
        r[i] = a[i] + cf * b[i];
    }

    // r = a (plain element-wise copy of n items).
    static inline void
    mv_set (const int n, const fp_type *a, fp_type *r)
    {
      int i;
      for (i = 0; i < n; ++i)
        r[i] = a[i];
    }

    /**
     * GMRES-style solution update: back-substitute the upper-triangular
     * Hessenberg system h (leading dimension m + 1) into s, then accumulate
     * x += s[j] * v_j for the first k + 1 Krylov basis vectors stored
     * contiguously in v.
     */
    static inline void
    mv_update_solution (const int n, const int k, const int m,
                        fp_type *h, fp_type *x, fp_type *s, fp_type *v)
    {
      int i, j;
      fp_type *cur_h; // double
      fp_type d; // double

      // Backsolve:
      for (i = k; i >= 0; --i)
        {
          cur_h = h + (m + 1) * i;
          d = s[i] / cur_h[i];
          for (j = i - 1; j >= 0; --j)
            s[j] -= cur_h[j] * d;
          s[i] = d;
        }

      for (j = 0; j <= k; ++j)
        {
          // x += alpha * phat
          mv_lin_comb_2 (n, s[j], x, v + n * j, x);
        }
    }

    // Debug print of v[0..n-1] to stdout, one "index -- value" line each.
    static inline void
    mv_vector_print (const fp_type *v, const int n)
    {
      int i;
      for (i = 0; i < n; ++i)
        printf ("%d \t-- %lf\n", i, v[i]);
    }

    // Debug dump of v[0..n-1] to the named file (UFA_SOLVER builds only).
    static inline void
    mv_vector_print_file (const fp_type *v, const int n, const char *name)
    {
#ifndef UFA_SOLVER
      // TODO: IMPL
#else
      FILE *f;
      int i;

      f = fopen (name, "w");
      if (!f)  // FIX: don't write through a failed fopen
        return;
      for (i = 0; i < n; ++i)
        fprintf (f, "%d \t-- %30.20lf\n", i, v[i]);
      fclose (f);
#endif
    }
  }; // mv_tools

  } // namespace blue_sky

#endif //__MV_TOOLS_H
// ===== begin file: Searching.202003021000.profile_para_top_m_search.h =====
// // Created by Zhen Peng on 11/11/19. // #ifndef BATCH_SEARCHING_SEARCHING_H #define BATCH_SEARCHING_SEARCHING_H #include <vector> #include <boost/dynamic_bitset.hpp> //#include <boost/sort/sort.hpp> #include <iostream> #include <fstream> #include <unordered_map> #include <immintrin.h> #include <cstring> #include <unordered_set> #include <set> #include <cfloat> //#include <omp.h> #include "../include/definitions.h" //#include "../include/efanna2e/neighbor.h" #include "../include/utils.h" #include "../include/Candidate.h" #include "../include/parallelization.h" #include "../include/bitvector.h" namespace PANNS { class Searching { //private: public: idi num_v_ = 0; edgei num_e_ = 0; idi num_queries_ = 0; int dimension_ = 0; idi width_ = 0; // NSG largest degree idi ep_ = 0; // Start point // std::vector<dataf> data_load_; // std::vector<dataf> queries_load_; // std::vector< std::vector<dataf> > data_load_; // std::vector< std::vector<dataf> > queries_load_; // std::vector<distf> norms_; dataf *data_load_ = nullptr; dataf *queries_load_ = nullptr; // dataf *norms_; // std::vector< std::vector<idi> > nsg_graph_; // idi *nsg_graph_indices_; // idi *nsg_graph_out_edges_; // std::vector< std::vector<idi> > edge_list_; char *opt_nsg_graph_ = nullptr; uint64_t data_bytes_; uint64_t neighbor_bytes_; uint64_t vertex_bytes_; // For multithreads int num_threads_ = 1; dataf compute_norm( const dataf *data) const; // idi vertex_id); // const std::vector<PANNS::dataf> &data); // size_t loc_start, // idi dimension) dataf compute_distance_with_norm( const dataf *v_data, const dataf *q_data, // idi vertex_id, // idi query_id, // const std::vector<dataf> &d_data, // const std::vector<dataf> &q_data, // PANNS::idi d_start, // PANNS::idi q_start, dataf vertex_norm) const; // idi dimension) static idi insert_into_queue( std::vector<Candidate> &c_queue, idi c_queue_top, Candidate cand); static idi add_into_queue( std::vector<PANNS::Candidate> &queue, idi &queue_top, const idi 
queue_size, const PANNS::Candidate &cand); static idi add_into_queue( std::vector<PANNS::Candidate> &queue, const idi queue_start, idi &queue_top, const idi queue_size, const PANNS::Candidate &cand); static void add_into_queue_at( const Candidate &cand, std::vector<Candidate> &queue, const idi insert_index, // The insertion location, independent with queue_start const idi queue_start, idi &queue_top, // The number of elements in queue, independent with queue_start const idi queue_size); // The maximum capacity of queue, independent with queue_start. static void insert_one_element_at( // const T &cand, // T *queue_base, const Candidate &cand, std::vector<Candidate> &queue_base, const idi insert_index, const idi queue_start, const idi queue_size); // idi insert_into_queue_nsg( // std::vector< Candidate > &c_queue, // idi c_queue_top, // Candidate cand); static idi merge_two_queues_into_1st_queue_seq_fixed( std::vector<Candidate> &queue1, const idi queue1_start, const idi queue1_size, std::vector<Candidate> &queue2, const idi queue2_start, const idi queue2_size); static void merge_two_queues_into_1st_queue_seq_incr( std::vector<Candidate> &queue1, const idi queue1_start, idi &queue1_size, // The number of element in queue1, independent with queue1_start. const idi queue1_length, // The maximum capacity of queue1, independent with queue1_start. 
std::vector<Candidate> &queue2, const idi queue2_start, const idi queue2_size); idi merge_all_queues_para_list( std::vector< std::vector<Candidate> > &local_queues_list, std::vector<idi> &local_queues_ends, std::vector<Candidate> &set_L, const idi L); idi merge_all_queues_para_array( // std::vector< std::vector<Candidate> > &local_queues_list, std::vector<Candidate> &local_queues_array, std::vector<idi> &local_queues_ends, const idi local_queue_length, std::vector<Candidate> &set_L, const idi L); public: // For Profiling // L3CacheMissRate cache_miss_kernel; uint64_t count_distance_computation_ = 0; distf dist_min_ = 0; distf dist_max_ = 0; // L3CacheMissRate profile_miss_rate; ~Searching() { free(data_load_); data_load_ = nullptr; // free(queries_load_); // _mm_free(data_load_); free(queries_load_); queries_load_ = nullptr; // free(norms_); // free(nsg_graph_indices_); // free(nsg_graph_out_edges_); free(opt_nsg_graph_); opt_nsg_graph_ = nullptr; } void load_data_load(char *filename); void load_queries_load(char *filename); void load_nsg_graph(char *filename); // void build_opt_graph(); void prepare_init_ids( std::vector<unsigned> &init_ids, unsigned L) const; // void prepare_candidate_queue_list( // const float *query_load, // std::vector<std::vector<efanna2e::Neighbor> > &retset_list, // std::vector<boost::dynamic_bitset<> > &is_visited_list, // const std::vector<unsigned> &init_ids, // const boost::dynamic_bitset<> &flags, // unsigned batch_start, // unsigned batch_size, // unsigned L); // void search_in_batch( //// const float *query_load, // size_t K, // size_t L, // unsigned batch_start, // unsigned batch_size, // std::vector< std::vector<Candidate> > &set_L_list, // std::vector< boost::dynamic_bitset<> > &is_visited_list, // const std::vector<idi> &init_ids, // const boost::dynamic_bitset<> &is_visited, // std::vector<std::vector<idi> > &set_K_list); void search_in_sequential( idi query_id, idi K, idi L, std::vector<Candidate> &set_L, // 
boost::dynamic_bitset<> &is_visited, // boost::dynamic_bitset<> is_visited, // std::vector<idi> &init_ids, const std::vector<idi> &init_ids, std::vector<idi> &set_K); void search_in_sequential_BitVector( const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K); void search_in_sequential_prune_neighbors( const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K); // idi get_out_degree(idi v_id) const // { // if (v_id < num_v_ - 1) { // return nsg_graph_indices_[v_id + 1] - nsg_graph_indices_[v_id]; // } else { // return num_e_ - nsg_graph_indices_[v_id]; // } // } void search_with_top_m( idi M, idi query_id, idi K, idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K); // std::vector< std::vector<idi> > &top_m_list); void search_with_top_m_myths_M( const PANNS::idi M, const PANNS::idi query_id, const PANNS::idi K, const PANNS::idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K); void search_with_top_m_to_get_distance_range( const PANNS::idi M, const PANNS::idi query_id, // const PANNS::idi K, const PANNS::idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids); void search_with_top_m_profile_bit_CAS( const PANNS::idi M, const PANNS::idi query_id, const PANNS::idi K, const PANNS::idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K); void search_with_top_m_profile_prune_neighbors( const PANNS::idi M, const PANNS::idi query_id, const PANNS::idi K, const PANNS::idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K); // void search_with_top_m_no_local_arrays( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // 
std::vector<idi> &set_K, // boost::dynamic_bitset<> &is_visited); void search_with_top_m_in_batch( PANNS::idi M, PANNS::idi batch_start, PANNS::idi batch_size, PANNS::idi K, PANNS::idi L, std::vector< std::vector<Candidate> > &set_L_list, const std::vector<idi> &init_ids, std::vector< std::vector<idi> > &set_K_list); void para_search_with_top_m_critical_area( idi M, idi query_id, idi K, idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K); void para_search_with_top_m_critical_area_no_omp( idi M, idi query_id, idi K, idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K); void para_search_with_top_m_critical_area_yes_omp( idi M, idi query_id, idi K, idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K); void para_search_with_top_m_visited_array( const PANNS::idi M, const PANNS::idi query_id, const PANNS::idi K, const PANNS::idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, std::vector<uint8_t> &is_visited); void para_search_with_top_m_merge_queues( const idi M, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K); void para_search_with_top_m_queues_seq_merge( const PANNS::idi M, const PANNS::idi query_id, const PANNS::idi K, const PANNS::idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K); void para_search_with_top_m_merge_queues_no_CAS( const idi M, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, std::vector< std::vector<Candidate> > &local_queues_list, std::vector<idi> &local_queues_ends, // std::vector<uint8_t> &is_visited); boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_merge_queues_in_array( void 
para_search_with_top_m_merge_queues_new_threshold( const idi M, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue // std::vector< std::vector<Candidate> > &local_queues_list, std::vector<Candidate> &local_queues_array, std::vector<idi> &local_queues_ends, // Sizes of local queue // BitVector &is_visited); // std::vector<uint8_t> &is_visited); boost::dynamic_bitset<> &is_visited); void para_search_with_top_m_merge_queues_by_sort( const idi M, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue // std::vector<Candidate> &local_queues_array, std::vector<idi> &local_queues_ends, // Sizes of local queue std::vector<idi> &dest_offsets, const std::vector<idi> &offsets_load_set_L, // Offsets for store into set_L. 
BitVector &is_visited); void para_search_with_top_m_merge_queues_myths( const idi M, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue // std::vector< std::vector<Candidate> > &local_queues_list, std::vector<Candidate> &local_queues_array, std::vector<idi> &local_queues_ends, // Sizes of local queue BitVector &is_visited); // std::vector<uint8_t> &is_visited); // boost::dynamic_bitset<> &is_visited); // void para_prepare_init_ids( // std::vector<unsigned> &init_ids, // unsigned L) const; void para_search_with_top_m_in_batch_embarassing_para( const PANNS::idi M, const PANNS::idi batch_start, const PANNS::idi batch_size, const PANNS::idi K, const PANNS::idi L, std::vector< std::vector<Candidate> > &set_L_list, const std::vector<idi> &init_ids, std::vector< std::vector<idi> > &set_K_list, std::vector< boost::dynamic_bitset<> > &is_visited_list); void load_true_NN( const char *filename, std::vector< std::vector<idi> > &true_nn_list); void get_recall_for_all_queries( const std::vector< std::vector<idi> > &true_nn_list, const std::vector<std::vector<unsigned>> &set_K_list, std::unordered_map<unsigned, double> &recalls) const; }; // Class Searching /** * Input the data from the file. * @param filename */ inline void Searching::load_data_load(char *filename) { auto old_d = dimension_; DiskIO::load_data( filename, data_load_, num_v_, dimension_); if (old_d) { if (old_d != dimension_) { std::cerr << "Error: data dimension " << dimension_ << " is not equal to query dimension " << old_d << "." << std::endl; exit(EXIT_FAILURE); } } } /** * Input queries from the file. 
 * @param filename path of the query file (same binary layout as the data file)
 */
inline void Searching::load_queries_load(char *filename)
{
    auto old_d = dimension_;
    DiskIO::load_data(
            filename,
            queries_load_,
            num_queries_,
            dimension_);
    // If dimension_ was already set by a previous load, the query dimension
    // must agree with it; otherwise the two files are incompatible.
    if (old_d) {
        if (old_d != dimension_) {
            std::cerr << "Error: query dimension " << dimension_
                      << " is not equal to data dimension " << old_d << "." << std::endl;
            exit(EXIT_FAILURE);
        }
    }
}

/**
 * Input the NSG graph from the file and build the cache-friendly
 * "optimized" layout opt_nsg_graph_: for every vertex, one contiguous
 * record of [norm | vector data | degree | neighbor IDs].
 * Reference: https://github.com/ZJULearning/nsg/blob/master/src/index_nsg.cpp
 * NOTE(review): load_data_load() must have been called first — this
 * function reads data_load_ and frees it at the end.
 * @param filename path of the NSG index file
 */
inline void Searching::load_nsg_graph(char *filename)
{
    std::ifstream fin(filename);
    if (!fin.is_open()) {
        std::cerr << "Error: cannot read file " << filename << " ." << std::endl;
        exit(EXIT_FAILURE);
    }
    // File header: graph width (max degree) and the enterpoint ID.
    fin.read(reinterpret_cast<char *>(&width_), sizeof(unsigned));
    fin.read(reinterpret_cast<char *>(&ep_), sizeof(unsigned));

    // Per-vertex record sizes: (norm + vector) and (degree + neighbor list).
    data_bytes_ = (1 + dimension_) * sizeof(dataf);
    neighbor_bytes_ = (1 + width_) * sizeof(idi);
    vertex_bytes_ = data_bytes_ + neighbor_bytes_;
    opt_nsg_graph_ = (char *) malloc(num_v_ * vertex_bytes_);
    if (!opt_nsg_graph_) {
        std::cerr << "Error: no enough memory for opt_nsg_graph_."
                  << std::endl;
        exit(EXIT_FAILURE);
    }
    idi v_id = 0;
    num_e_ = 0;
    char *base_location = opt_nsg_graph_;
    while (true) {
        idi degree;
        fin.read(reinterpret_cast<char *>(&degree), sizeof(unsigned));
        if (fin.eof()) {
            break;
        }
        num_e_ += degree;
        // Norm and data: precompute the squared norm so that distance
        // queries only need a dot product (see compute_distance_with_norm).
        distf norm = compute_norm(data_load_ + v_id * dimension_);
        std::memcpy(base_location, &norm, sizeof(distf)); // Norm
        memcpy(base_location + sizeof(distf), data_load_ + v_id * dimension_, dimension_ * sizeof(dataf)); // Data
        base_location += data_bytes_;
        // Neighbors: degree followed by the raw neighbor IDs from the file.
        memcpy(base_location, &degree, sizeof(idi)); // Number of neighbors
        fin.read(base_location + sizeof(idi), degree * sizeof(unsigned)); // Neighbors
        base_location += neighbor_bytes_;
        ++v_id;
    }
    if (v_id != num_v_) {
        std::cerr << "Error: NSG data has " << v_id
                  << " vertices, but origin data has " << num_v_ << " vertices."
                  << std::endl;
        exit(EXIT_FAILURE);
    }
    // The raw vectors now live inside opt_nsg_graph_; release the originals.
    free(data_load_);
    data_load_ = nullptr;
}

/**
 * Load those true top-K neighbors (ground truth) of queries.
 * File layout: [query_num][K] header, then per query K pairs of (id, dist);
 * the distances are read but discarded.
 * @param filename ground-truth file path
 * @param[out] true_nn_list resized to t_query_num x t_K, filled with IDs
 */
inline void Searching::load_true_NN(
        const char *filename,
        std::vector< std::vector<idi> > &true_nn_list)
{
    std::ifstream fin(filename);
    if (!fin.is_open()) {
        fprintf(stderr, "Error: cannot open file %s\n", filename);
        exit(EXIT_FAILURE);
    }
    idi t_query_num;
    idi t_K;
    fin.read(reinterpret_cast<char *>(&t_query_num), sizeof(t_query_num));
    fin.read(reinterpret_cast<char *>(&t_K), sizeof(t_K));
    // The ground truth must cover at least all loaded queries.
    if (t_query_num < num_queries_) {
        fprintf(stderr, "Error: t_query_num %u is smaller than num_queries_ %u\n", t_query_num, num_queries_);
        exit(EXIT_FAILURE);
    }
    // get_recall_for_all_queries() assumes at least 100 true neighbors.
    if (t_K < 100) {
        fprintf(stderr, "Error: t_K %u is smaller than 100.\n", t_K);
        exit(EXIT_FAILURE);
    }
    true_nn_list.resize(t_query_num);
    for (idi q_i = 0; q_i < t_query_num; ++q_i) {
        true_nn_list[q_i].resize(t_K);
    }

    for (unsigned q_i = 0; q_i < t_query_num; ++q_i) {
        for (unsigned n_i = 0; n_i < t_K; ++n_i) {
            unsigned id;
            float dist;
            fin.read(reinterpret_cast<char *>(&id), sizeof(id));
            fin.read(reinterpret_cast<char *>(&dist), sizeof(dist)); // distance is unused
            true_nn_list[q_i][n_i] = id;
        }
    }

    fin.close();
}

/**
 * Compute recall@{1,5,10,20,50,100} over all queries by comparing the
 * returned candidates (set_K_list) against the ground truth.
 * NOTE(review): assumes every set_K_list[q_i] holds at least 100 entries
 * and true_nn_list covers all num_queries_ queries — confirm at call sites.
 * @param true_nn_list ground-truth neighbor IDs per query
 * @param set_K_list returned top candidates per query
 * @param[out] recalls map from cutoff k to recall@k
 */
inline void Searching::get_recall_for_all_queries(
        const std::vector< std::vector<idi> > &true_nn_list,
        const std::vector<std::vector<unsigned>> &set_K_list,
        std::unordered_map<unsigned, double> &recalls) const
{
    if (true_nn_list[0].size() < 100) {
        fprintf(stderr, "Error: Number of true nearest neighbors of a query is smaller than 100.\n");
        exit(EXIT_FAILURE);
    }
    recalls[1] = 0.0;
    recalls[5] = 0.0;
    recalls[10] = 0.0;
    recalls[20] = 0.0;
    recalls[50] = 0.0;
    recalls[100] = 0.0;
    for (unsigned q_i = 0; q_i < num_queries_; ++q_i) {
        // For each of the 100 true neighbors, look for it among the first
        // 100 returned candidates and credit every cutoff it falls within.
        for (unsigned top_i = 0; top_i < 100; ++top_i) {
            unsigned true_id = true_nn_list[q_i][top_i];
            for (unsigned n_i = 0; n_i < 100; ++n_i) {
                if (set_K_list[q_i][n_i] == true_id) {
                    if (n_i < 1) recalls[1] += 1;
                    if (n_i < 5) recalls[5] += 1;
                    if (n_i < 10) recalls[10] += 1;
                    if (n_i < 20) recalls[20] += 1;
                    if (n_i < 50) recalls[50] += 1;
                    if (n_i < 100) recalls[100] += 1;
                }
            }
        }
    }
    recalls[1] /= 1.0 * num_queries_;
    recalls[5] /= 5.0 * num_queries_;
    recalls[10] /= 10.0 * num_queries_;
    recalls[20] /= 20.0 * num_queries_;
    recalls[50] /= 50.0 * num_queries_;
    recalls[100] /= 100.0 * num_queries_;
}

/**
 * Sequential best-first NSG search for one query.
 * Maintains a sorted candidate queue set_L of length L; repeatedly expands
 * the first unchecked candidate and inserts its unvisited neighbors, then
 * copies the top K IDs into set_K.
 * @param query_id index of the query vector
 * @param K number of results to report
 * @param L search-queue length (L >= K)
 * @param set_L working candidate queue (capacity >= L + 1)
 * @param init_ids L initial candidate IDs (see prepare_init_ids)
 * @param[out] set_K top-K result IDs
 */
inline void Searching::search_in_sequential(
        const idi query_id,
        const idi K,
        const idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
    boost::dynamic_bitset<> is_visited(num_v_);

    for (idi v_i = 0; v_i < L; ++v_i) {
        is_visited[init_ids[v_i]] = true;
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;
    // Warm the cache with the initial candidates' records.
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++; // record starts with the precomputed norm
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);
    idi k = 0; // Index of every queue's first unchecked candidate.
    while (k < L) {
        Candidate &top_cand = set_L[k];
        unsigned nk = L; // lowest insertion index produced by this expansion
        if (!top_cand.is_checked_) {
            top_cand.is_checked_ = true;
            idi v_id = top_cand.id_; // Vertex ID.
            _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            // Traverse v_id's all neighbors, pushing them into the queue
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                // Compute the distance
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                // Skip neighbors that cannot enter the queue.
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
                Candidate cand(nb_id, dist, false);
                // Insert into the queue
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        // Jump back to the lowest new insertion, otherwise advance.
        if (nk <= k) {
            k = nk;
        } else {
            ++k;
        }
    }

    for (size_t k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
}

/**
 * Variant of search_in_sequential() using the project's BitVector with
 * atomic bit operations for the visited set; the init loops and the final
 * copy are parallelized with OpenMP, the main expansion loop is sequential.
 * NOTE(review): unlike search_in_sequential(), this variant does NOT
 * increment count_distance_computation_ — presumably intentional, confirm.
 */
inline void Searching::search_in_sequential_BitVector(
        const idi query_id,
        const idi K,
        const idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
    BitVector is_visited(num_v_);

#pragma omp parallel for
    for (idi v_i = 0; v_i < L; ++v_i) {
        is_visited.atomic_set_bit(init_ids[v_i]);
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;
#pragma omp parallel for
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
#pragma omp parallel for
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);
    idi k = 0; // Index of every queue's first unchecked candidate.
    while (k < L) {
        Candidate &top_cand = set_L[k];
        unsigned nk = L;
        if (!top_cand.is_checked_) {
            top_cand.is_checked_ = true;
            idi v_id = top_cand.id_; // Vertex ID.
            _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            // Traverse v_id's all neighbors, pushing them into the queue
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                {// Self-defined BitVector: atomic test-then-set of visited flag.
                    if (is_visited.atomic_is_bit_set(nb_id)) {
                        continue;
                    }
                    is_visited.atomic_set_bit(nb_id);
                }
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                // Compute the distance
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
                Candidate cand(nb_id, dist, false);
                // Insert into the queue
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        if (nk <= k) {
            k = nk;
        } else {
            ++k;
        }
    }

#pragma omp parallel for
    for (size_t k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
}

/**
 * Same algorithm as search_in_sequential() (visited set is a
 * boost::dynamic_bitset, distance computations are counted).
 * NOTE(review): despite the name, no neighbor pruning is performed here —
 * the body is currently identical to search_in_sequential(); confirm intent.
 */
inline void Searching::search_in_sequential_prune_neighbors(
        const idi query_id,
        const idi K,
        const idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
    boost::dynamic_bitset<> is_visited(num_v_);

    for (idi v_i = 0; v_i < L; ++v_i) {
        is_visited[init_ids[v_i]] = true;
    }
    const dataf *query_data = queries_load_ + query_id * dimension_;
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);
    idi k = 0; // Index of every queue's first unchecked candidate.
    while (k < L) {
        Candidate &top_cand = set_L[k];
        unsigned nk = L;
        if (!top_cand.is_checked_) {
            top_cand.is_checked_ = true;
            idi v_id = top_cand.id_; // Vertex ID.
            _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            // Traverse v_id's all neighbors, pushing them into the queue
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                // Compute the distance
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
                Candidate cand(nb_id, dist, false);
                // Insert into the queue
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        if (nk <= k) {
            k = nk;
        } else {
            ++k;
        }
    }

    for (size_t k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
}

/**
 * Prepare init_ids and flags, as they are constant for all queries.
 * @param[out] init_ids receives L distinct starting vertex IDs
 * @param L search-queue length (number of IDs to produce)
 */
inline void Searching::prepare_init_ids(
        std::vector<unsigned int> &init_ids,
        unsigned L) const
{
    boost::dynamic_bitset<> is_selected(num_v_);
    // Start from the enterpoint's neighbor list in the optimized layout.
    idi *out_edges = (idi *) (opt_nsg_graph_ + ep_ * vertex_bytes_ + data_bytes_);
    idi out_degree = *out_edges++;
    idi init_ids_end = 0;
    for (idi e_i = 0; e_i < out_degree && init_ids_end < L; ++e_i) {
        idi v_id = out_edges[e_i];
        if(is_selected[v_id]) {
            continue;
        }
        is_selected[v_id] = true;
        init_ids[init_ids_end++] = v_id;
    }

    // If ep_'s neighbors are not enough, add other deterministic vertices
    // (IDs following ep_, wrapping around num_v_).
    idi tmp_id = ep_ + 1; // use tmp_id to replace rand().
    while (init_ids_end < L) {
        tmp_id %= num_v_;
        idi v_id = tmp_id++;
        if (is_selected[v_id]) {
            continue;
        }
        is_selected[v_id] = true;
        init_ids[init_ids_end++] = v_id;
    }
}

// TODO: re-code in AVX-512
/**
 * Squared L2 norm of a dimension_-long float vector, using AVX (two
 * 8-lane FMA-less accumulations per 16-element step).
 * NOTE(review): the vector is processed up to dimension_ rounded up to a
 * multiple of 8 — assumes the buffer is padded accordingly; confirm.
 * @param data pointer to the vector (no alignment required: loadu)
 * @return sum of squares of the elements
 */
inline dataf Searching::compute_norm(
        const dataf *data) const
{
    dataf result = 0;
#define AVX_L2NORM(addr, dest, tmp) \
    tmp = _mm256_loadu_ps(addr); \
    tmp = _mm256_mul_ps(tmp, tmp); \
    dest = _mm256_add_ps(dest, tmp);

    __m256 sum;
    __m256 l0, l1;
    unsigned D = (dimension_ + 7) & ~7U; // dimension_ rounded up to 8
    unsigned DR = D % 16;                // remainder block (0 or 8 floats)
    unsigned DD = D - DR;                // part handled 16 floats at a time
    const float *l = data;
    const float *e_l = l + DD;
    float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0};
    sum = _mm256_load_ps(unpack);

    // Handle the trailing 8-float block first, then the bulk in 16s.
    if (DR) { AVX_L2NORM(e_l, sum, l0); }
    for (unsigned i = 0; i < DD; i += 16, l += 16) {
        AVX_L2NORM(l, sum, l0);
        AVX_L2NORM(l + 8, sum, l1);
    }
    _mm256_store_ps(unpack, sum);
    // Horizontal reduction of the 8 accumulator lanes.
    result = unpack[0] + unpack[1] + unpack[2] + unpack[3]
             + unpack[4] + unpack[5] + unpack[6] + unpack[7];

    return result;
}

/**
 * Distance surrogate between a stored vector and a query:
 * returns -2 * dot(v, q) + ||v||^2, i.e. squared L2 distance minus the
 * query's (constant) norm — order-equivalent to true L2 for one query.
 * NOTE(review): same padded-dimension assumption as compute_norm().
 * @param v_data stored vector (norm already stripped off by the caller)
 * @param q_data query vector
 * @param vertex_norm precomputed ||v||^2 from the optimized layout
 */
inline dataf Searching::compute_distance_with_norm(
        const dataf *v_data,
        const dataf *q_data,
        dataf vertex_norm) const
{
    float result = 0;
#define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \
    tmp1 = _mm256_loadu_ps(addr1);\
    tmp2 = _mm256_loadu_ps(addr2);\
    tmp1 = _mm256_mul_ps(tmp1, tmp2); \
    dest = _mm256_add_ps(dest, tmp1);

    __m256 sum;
    __m256 l0, l1;
    __m256 r0, r1;
    unsigned D = (dimension_ + 7) & ~7U;
    unsigned DR = D % 16;
    unsigned DD = D - DR;
    const float *l = v_data;
    const float *r = q_data;
    const float *e_l = l + DD;
    const float *e_r = r + DD;
    float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0};
    sum = _mm256_load_ps(unpack);

    if (DR) { AVX_DOT(e_l, e_r, sum, l0, r0); }
    for (unsigned i = 0; i < DD; i += 16, l += 16, r += 16) {
        AVX_DOT(l, r, sum, l0, r0);
        AVX_DOT(l + 8, r + 8, sum, l1, r1);
    }
    _mm256_store_ps(unpack, sum);
    result = unpack[0] + unpack[1] + unpack[2] + unpack[3]
             + unpack[4] + unpack[5] + unpack[6] + unpack[7];

    result = -2 * result + vertex_norm;

    return result;
}

//// DEPRECATED.
// The difference from insert_into_queue is that add_into_queue will increase the queue size by 1.
//inline idi Searching::add_into_queue( // std::vector<PANNS::Candidate> &queue, // idi &queue_top, // const idi queue_size, // const PANNS::Candidate &cand) //{ // assert(queue_size > 1); // if (0 == queue_top) { // queue[queue_top++] = cand; // return 0; // } else if (1 == queue_top) { // if (queue[0] < cand) { // queue[queue_top++] = cand; // return 1; // } else { // queue[++queue_top] = queue[0]; // queue[0] = cand; // return 0; // } // } // // if (queue[queue_top - 1] < cand) { // if (queue_top < queue_size) { // queue[queue_top++] = cand; // } // return queue_top; // } // // idi r = insert_into_queue( // queue, // queue_top - 1, // cand); //// {//test //// printf("r: %u" //// "queue_top: %u " //// "queue_size: %u\n", //// r, //// queue_top, //// queue_size); //// } // return r; // //// ///////////////////////////////////////////////////////////// //// // Find the insert location //// auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_top, cand); //// idi insert_loc = it_loc - queue.begin(); //// if (insert_loc == queue_size) { //// return queue_size; //// } //// //// // Insert ////// if (queue_top == queue_size) { ////// // If full already ////// --queue_top; ////// } //// memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1), //// reinterpret_cast<char *>(queue.data() + insert_loc), //// (queue_top - insert_loc) * sizeof(Candidate)); ////// for (idi q_i = queue_top; q_i > insert_loc; --q_i) { ////// queue.at(q_i) = queue.at(q_i - 1); ////// } //// queue[insert_loc] = cand; //// ++queue_top; //// return insert_loc; //} // The difference from insert_into_queue is that add_into_queue will increase the queue size by 1. 
inline idi Searching::add_into_queue( std::vector<PANNS::Candidate> &queue, idi &queue_top, const idi queue_size, const PANNS::Candidate &cand) { if (0 == queue_top) { queue[queue_top++] = cand; return 0; } // Find the insert location auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_top, cand); idi insert_loc = it_loc - queue.begin(); if (insert_loc == queue_size) { return queue_size; } // Insert if (queue_top == queue_size) { // If full already --queue_top; } memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1), reinterpret_cast<char *>(queue.data() + insert_loc), (queue_top - insert_loc) * sizeof(Candidate)); // for (idi q_i = queue_top; q_i > insert_loc; --q_i) { // queue.at(q_i) = queue.at(q_i - 1); // } queue[insert_loc] = cand; ++queue_top; return insert_loc; } // add_into_queue with a queue_start inline idi Searching::add_into_queue( std::vector<PANNS::Candidate> &queue, const idi queue_start, idi &queue_top, // The insertion location starting from queue_start const idi queue_size, // The maximum capacity of queue, independent with queue_start. 
const PANNS::Candidate &cand) { if (0 == queue_top) { queue[queue_start + queue_top++] = cand; return 0; } idi queue_end = queue_start + queue_top; // Find the insert location auto it_loc = std::lower_bound(queue.begin() + queue_start, queue.begin() + queue_end, cand); // auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_top, cand); idi insert_loc = it_loc - queue.begin(); if (insert_loc == queue_size + queue_start) { // if (insert_loc == queue_size) { return queue_size + queue_start; // return queue_size; } // Insert if (queue_top == queue_size) { // If full already --queue_top; --queue_end; } memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1), reinterpret_cast<char *>(queue.data() + insert_loc), (queue_end - insert_loc) * sizeof(Candidate)); // (queue_top - insert_loc) * sizeof(Candidate)); // for (idi q_i = queue_top; q_i > insert_loc; --q_i) { // queue.at(q_i) = queue.at(q_i - 1); // } queue[insert_loc] = cand; ++queue_top; return insert_loc; } inline void Searching::add_into_queue_at( const Candidate &cand, std::vector<Candidate> &queue, const idi insert_index, // The insertion location, independent with queue_start const idi queue_start, idi &queue_size, // The number of elements in queue, independent with queue_start const idi queue_length) // The maximum capacity of queue, independent with queue_start. 
{ const idi dest_index = queue_start + insert_index; if (queue_size == queue_length) { --queue_size; } memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1), reinterpret_cast<char *>(queue.data() + dest_index), (queue_size - insert_index) * sizeof(Candidate)); queue[dest_index] = cand; ++queue_size; } inline void Searching::insert_one_element_at( // const T &cand, // T *queue_base, const Candidate &cand, std::vector<Candidate> &queue, const idi insert_index, const idi queue_start, const idi queue_size) { const idi dest_index = queue_start + insert_index; memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1), reinterpret_cast<char *>(queue.data() + dest_index), (queue_size - insert_index - 1) * sizeof(Candidate)); queue[dest_index] = cand; // memmove(reinterpret_cast<char *>(queue_base + dest_index + 1), // reinterpret_cast<char *>(queue_base + dest_index), // (queue_size - insert_index - 1) * sizeof(T)); // for (idi q_i = queue_size - 1; q_i > insert_index; --q_i) { // queue_base.at(q_i + queue_start) = queue_base.at(q_i - 1 + queue_start); // } // queue_base[dest_index] = cand; } /** * PANNS version of InsertIntoPool(): binary-search to find the insert place and then move. 
* @param[out] c_queue * @param c_queue_top * @param cand * @return */ inline idi Searching::insert_into_queue( std::vector<PANNS::Candidate> &c_queue, PANNS::idi c_queue_top, PANNS::Candidate cand) { if (c_queue[0].distance_ > cand.distance_) { // If the first memmove(reinterpret_cast<char *>(c_queue.data() + 1), reinterpret_cast<char *>(c_queue.data()), c_queue_top * sizeof(Candidate)); c_queue[0] = cand; return 0; } else if (c_queue[c_queue_top - 1].distance_ == cand.distance_) { // If the last if (c_queue[c_queue_top - 1].id_ > cand.id_) { // Use ID as the second metrics for ordering c_queue[c_queue_top - 1] = cand; return c_queue_top - 1; } else { return c_queue_top; } } idi left = 0; idi right = c_queue_top; while (left < right) { idi mid = (right - left) / 2 + left; if (c_queue[mid].distance_ > cand.distance_) { right = mid; } else { left = mid + 1; } } // If the distance is the same if (0 != left && c_queue[left - 1].distance_ != cand.distance_) { ; } else { while (0 != left && c_queue[left - 1].distance_ == cand.distance_ && c_queue[left - 1].id_ > cand.id_) { // Use ID as the second metrics for ordering --left; } } // Insert to left memmove(reinterpret_cast<char *>(c_queue.data() + left + 1), reinterpret_cast<char *>(c_queue.data() + left), (c_queue_top - left) * sizeof(Candidate)); c_queue[left] = cand; return left; } //inline void Searching::cand_pushes_ngbrs_into_queue( // idi cand_id, // const dataf *query_data, // idi L, // idi &new_k, // boost::dynamic_bitset<> &is_visited, // std::vector<Candidate> &set_L) //{ // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // 
continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist >= set_L[L-1].distance_) { // continue; // } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } //} //inline void Searching::search_in_sequential( // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) const //{ // boost::dynamic_bitset<> is_visited(num_v_); // // for (idi v_i = 0; v_i < L; ++v_i) { // is_visited[init_ids[v_i]] = true; // } // const dataf *query_data = queries_load_ + query_id * dimension_; // // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // idi k = 0; // Index of every queue's first unchecked candidate. // while (k < L) { // Candidate &top_cand = set_L[k]; // unsigned nk = L; // if (!top_cand.is_checked_) { // top_cand.is_checked_ = true; // idi v_id = top_cand.id_; // Vertex ID. 
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // // Traverse v_id's all neighbors, pushing them into the queue // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // // Compute the distance // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } // Candidate cand(nb_id, dist, false); // // Insert into the queue // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // if (nk <= k) { // k = nk; // } else { // ++k; // } // } // // for (size_t k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} // Deprecated: cannot use std::set, because its element is constant. //inline void Searching::search_in_sequential( // const idi query_id, // const idi K, // const idi L, //// std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) const //{ // std::set<Candidate> set_L; // boost::dynamic_bitset<> is_visited(num_v_); // // for (idi v_i = 0; v_i < L; ++v_i) { // is_visited[init_ids[v_i]] = true; // } // const dataf *query_data = queries_load_ + query_id * dimension_; // // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. 
// for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // distf dist = compute_distance_with_norm(v_data, query_data, norm); //// set_L[i] = Candidate(v_id, dist, false); // False means not checked. // set_L.emplace(v_id, dist, false); // } //// std::sort(set_L.begin(), set_L.begin() + L); // idi k = 0; // Index of every queue's first unchecked candidate. // while (k < L) { //// Candidate &top_cand = set_L[k]; // std::set<Candidate>::iterator top_cand = std::next(set_L.begin(), k); // unsigned nk = L; // if (!top_cand->is_checked_) { // top_cand->is_checked_ = true; // idi v_id = top_cand.id_; // Vertex ID. // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // // Traverse v_id's all neighbors, pushing them into the queue // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // // Compute the distance // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } // Candidate cand(nb_id, dist, false); // // Insert into the queue // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // if (nk <= k) { // k = nk; // } else { // ++k; // } // } // // for (size_t k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} /* Function: * queue1_size is fixed. 
 * (Sequentially merges the sorted range queue2[queue2_start, +queue2_size)
 * into the sorted, FIXED-capacity range queue1[queue1_start, +queue1_size);
 * elements that would fall past the end of queue1 are discarded.
 * Returns the lowest index (relative to queue1_start) at which anything
 * was inserted, i.e. L if nothing from queue2 made it in.)
 */
inline idi Searching::merge_two_queues_into_1st_queue_seq_fixed(
        std::vector<Candidate> &queue1,
        const idi queue1_start,
        const idi queue1_size,
        std::vector<Candidate> &queue2,
        const idi queue2_start,
        const idi queue2_size)
{
    assert(queue1_size && queue2_size);
    // Record the lowest insert location: binary-search for where queue2's
    // smallest element belongs inside queue1.
    auto it_loc = std::lower_bound(
            queue1.begin() + queue1_start,
            queue1.begin() + queue1_start + queue1_size,
            queue2[queue2_start]);
    idi insert_index = it_loc - (queue1.begin() + queue1_start);
    if (insert_index == queue1_size) {
        // Even queue2's best element is worse than everything in queue1.
        return insert_index;
    } else if (insert_index == queue1_size - 1) {
        // Only the last slot is affected; overwrite it directly.
        queue1[queue1_start + insert_index] = queue2[queue2_start];
        return insert_index;
    }
    // Insert the 1st of queue2 (shifts the tail of queue1 right by one,
    // dropping queue1's former last element).
    insert_one_element_at(
            queue2[queue2_start],
            queue1,
            insert_index,
            queue1_start,
            queue1_size);
    if (queue2_size == 1) {
        return insert_index;
    }
    // Merge the remainder of queue2 in place.
    // q_i_1 / q_i_2: read cursors into queue1 / queue2 (absolute indices);
    // insert_i: next write position (relative to queue1_start).
    idi q_i_1 = insert_index + 1 + queue1_start;
    idi q_i_2 = queue2_start + 1;
    const idi q_i_1_bound = queue1_start + queue1_size;
    const idi q_i_2_bound = queue2_start + queue2_size;
    for (idi insert_i = insert_index + 1; insert_i < queue1_size; ++insert_i) {
        if (q_i_1 >= q_i_1_bound || q_i_2 >= q_i_2_bound) {
            // queue1 or queue2 finished its traverse; the rest is already in place.
            break;
        } else if (queue1[q_i_1] < queue2[q_i_2]) {
            ++q_i_1;
        } else {
            // Insert queue2[q_i_2] into queue1; ++q_i_1 accounts for the
            // one-slot right-shift the insertion caused.
            insert_one_element_at(
                    queue2[q_i_2++],
                    queue1,
                    insert_i,
                    queue1_start,
                    queue1_size);
            ++q_i_1;
        }
    }
    return insert_index;
}

/* Function:
 * queue1_size should be updated.
 * queue1_length should be provided.
 * (Same merge as the _fixed variant above, but queue1 may GROW up to
 * queue1_length; queue1_size is passed by reference and updated by
 * add_into_queue_at as elements are added.)
 */
inline void Searching::merge_two_queues_into_1st_queue_seq_incr(
        std::vector<Candidate> &queue1,
        const idi queue1_start,
        idi &queue1_size, // The number of elements in queue1, independent of queue1_start.
        const idi queue1_length, // The maximum capacity of queue1, independent of queue1_start.
        std::vector<Candidate> &queue2,
        const idi queue2_start,
        const idi queue2_size)
{
    assert(queue1_size && queue2_size);
    // Record the lowest insert location.
    auto it_loc = std::lower_bound(
            queue1.begin() + queue1_start,
            queue1.begin() + queue1_start + queue1_size,
            queue2[queue2_start]);
    idi insert_index = it_loc - (queue1.begin() + queue1_start);
    // Insert the 1st of queue2; may increase queue1_size (up to queue1_length).
    add_into_queue_at(
            queue2[queue2_start],
            queue1,
            insert_index,
            queue1_start,
            queue1_size,
            queue1_length);
    if (queue2_size == 1) {
        return;
    }
    // Merge the remainder of queue2.
    idi q_i_1 = insert_index + 1 + queue1_start;
    idi q_i_2 = queue2_start + 1;
    idi q_i_1_bound = queue1_start + queue1_size; // When queue1_size is updated, so should be q_i_1_bound.
    const idi q_i_2_bound = queue2_start + queue2_size;
    idi insert_i;
    for (insert_i = insert_index + 1; insert_i < queue1_length; ++insert_i) {
        if (q_i_1 >= q_i_1_bound) {
            // queue1 exhausted: bulk-append the rest of queue2 until either
            // queue1's capacity or queue2 runs out, updating queue1_size.
            queue1_size += std::min(queue1_length - insert_i, q_i_2_bound - q_i_2);
            while (insert_i < queue1_length && q_i_2 < q_i_2_bound) {
                queue1[queue1_start + insert_i++] = queue2[q_i_2++];
            }
            break;
        } else if (q_i_2 >= q_i_2_bound) {
            // queue2 exhausted; done.
            break;
        }
//        } else
        if (queue1[q_i_1] < queue2[q_i_2]) {
            ++q_i_1;
        } else {
            add_into_queue_at(
                    queue2[q_i_2++],
                    queue1,
                    insert_i,
                    queue1_start,
                    queue1_size,
                    queue1_length);
            ++q_i_1;
            q_i_1_bound = queue1_start + queue1_size;
        }
    }
} // end merge_two_queues_into_1st_queue_seq_incr

/* Function:
 * Parallel pairwise (tournament-tree) merge of per-thread candidate queues
 * (one std::vector per thread), then a sequential prefix-style merge for the
 * non-power-of-two remainder, finally merging the last queue into set_L.
 * Returns the lowest index in set_L that was changed (L if none).
 * Side effect: resets every entry of local_queues_ends to 0.
 */
inline idi Searching::merge_all_queues_para_list(
        std::vector< std::vector<Candidate> > &local_queues_list,
        std::vector<idi> &local_queues_ends,
        std::vector<Candidate> &set_L,
        const idi L)
{
    // size = largest power of two <= num_threads_.
    int size = 1 << (static_cast<idi>(log2(num_threads_)));
    idi log2size = static_cast<idi>(log2(size));
    for (idi d = 0; d < log2size; ++d) {
        uint32_t by = 1 << (d + 1);
        // Each round merges queue bi into queue ai; pairs are disjoint, so
        // iterations are independent.
#pragma omp parallel for
        for (int i = 0; i < size; i += by) {
            idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1
            idi bi = i + (1 << d) - 1; // i + 2^d - 1
            if (0 == local_queues_ends[bi]) {
                continue;
            }
            if (local_queues_ends[ai] == 0) {
                // ai is empty: just steal bi's storage.
                local_queues_list[ai].swap(local_queues_list[bi]);
                std::swap(local_queues_ends[ai], local_queues_ends[bi]);
                continue;
            }
            // Merge ai and bi into tmp_queue, then clamp/pad to length L.
            idi tmp_length = local_queues_ends[ai] + local_queues_ends[bi];
            std::vector<Candidate> tmp_queue(tmp_length);
            std::merge(
                    local_queues_list[ai].begin(),
                    local_queues_list[ai].begin() + local_queues_ends[ai],
                    local_queues_list[bi].begin(),
                    local_queues_list[bi].begin() + local_queues_ends[bi],
                    tmp_queue.begin());
            if (tmp_length > L) {
                tmp_queue.resize(L);
                tmp_length = L;
            } else if (tmp_length < L) {
                tmp_queue.resize(L);
            }
            local_queues_list[ai].swap(tmp_queue);
            local_queues_ends[ai] = tmp_length;
        }
    }
    // Remain, prefix-sum-like merge: fold the queues beyond the power-of-two
    // prefix into their successors, sequentially.
    if (size != num_threads_) {
        for (int i = size; i < num_threads_; ++i) {
            idi ai = i;
            idi bi = i - 1;
            if (0 == local_queues_ends[bi]) {
                continue;
            }
            if (local_queues_ends[ai] == 0) {
                local_queues_list[ai].swap(local_queues_list[bi]);
                std::swap(local_queues_ends[ai], local_queues_ends[bi]);
                continue;
            }
            idi tmp_length = local_queues_ends[ai] + local_queues_ends[bi];
            std::vector<Candidate> tmp_queue(tmp_length);
            std::merge(
                    local_queues_list[ai].begin(),
                    local_queues_list[ai].begin() + local_queues_ends[ai],
                    local_queues_list[bi].begin(),
                    local_queues_list[bi].begin() + local_queues_ends[bi],
                    tmp_queue.begin());
            if (tmp_length > L) {
                tmp_queue.resize(L);
                tmp_length = L;
            } else if (tmp_length < L) {
                tmp_queue.resize(L);
            }
            local_queues_list[ai].swap(tmp_queue);
            local_queues_ends[ai] = tmp_length;
        }
    }
    // Merge the final accumulated queue into set_L.
    idi r = L;
    if (local_queues_ends[num_threads_ - 1]) {
        r = merge_two_queues_into_1st_queue_seq_fixed(
                set_L,
                0,
                L,
                local_queues_list[num_threads_ - 1],
                0,
                local_queues_ends[num_threads_ - 1]);
    }
    // Reset local_queues_ends
    std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    return r;
}

/* Function:
 * Use large local_queues_array as a concatenation of all queues
 * (queue t occupies [t * local_queue_length, (t+1) * local_queue_length)).
 * Same tournament merge as merge_all_queues_para_list, but queues live in
 * one flat array and pair-merges use the in-place _incr merge.
 */
inline idi Searching::merge_all_queues_para_array(
        std::vector<Candidate> &local_queues_array,
        std::vector<idi> &local_queues_ends,
        const idi local_queue_length,
        std::vector<Candidate> &set_L,
        const idi L)
{
    int size = 1 << (static_cast<idi>(log2(num_threads_)));
    idi log2size =
static_cast<idi>(log2(size));
    for (idi d = 0; d < log2size; ++d) {
        uint32_t by = 1 << (d + 1);
        // Merge queue bi into queue ai; disjoint pairs per iteration.
#pragma omp parallel for
        for (int i = 0; i < size; i += by) {
            idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1
            idi a_start = ai * local_queue_length;
            idi bi = i + (1 << d) - 1; // i + 2^d - 1
            idi b_start = bi * local_queue_length;
            if (0 == local_queues_ends[bi]) {
                continue;
            }
            if (local_queues_ends[ai] == 0) {
                // ai empty: copy bi's contents into ai's slot of the flat array.
                std::copy(local_queues_array.begin() + b_start,
                        local_queues_array.begin() + b_start + local_queues_ends[bi],
                        local_queues_array.begin() + a_start); // Copy bi to ai
                local_queues_ends[ai] = local_queues_ends[bi];
                local_queues_ends[bi] = 0;
                continue;
            }
            // In-place growing merge of bi's range into ai's range;
            // updates local_queues_ends[ai].
            merge_two_queues_into_1st_queue_seq_incr(
                    local_queues_array,
                    a_start,
                    local_queues_ends[ai],
                    local_queue_length,
                    local_queues_array,
                    b_start,
                    local_queues_ends[bi]);
        }
    }
    // Remain, prefix-sum-like merge for the non-power-of-two remainder.
    if (size != num_threads_) {
        for (int i = size; i < num_threads_; ++i) {
            idi ai = i;
            idi a_start = ai * local_queue_length;
            idi bi = i - 1;
            idi b_start = bi * local_queue_length;
            if (0 == local_queues_ends[bi]) {
                continue;
            }
            if (local_queues_ends[ai] == 0) {
                std::copy(local_queues_array.begin() + b_start,
                        local_queues_array.begin() + b_start + local_queues_ends[bi],
                        local_queues_array.begin() + a_start); // Copy bi to ai
                local_queues_ends[ai] = local_queues_ends[bi];
                local_queues_ends[bi] = 0;
                continue;
            }
            merge_two_queues_into_1st_queue_seq_incr(
                    local_queues_array,
                    a_start,
                    local_queues_ends[ai],
                    local_queue_length,
                    local_queues_array,
                    b_start,
                    local_queues_ends[bi]);
        }
    }
    // Merge into set_L
    idi r = L;
    if (local_queues_ends[num_threads_ - 1]) {
        r = merge_two_queues_into_1st_queue_seq_fixed(
                set_L,
                0,
                L,
                local_queues_array,
                (num_threads_ - 1) * local_queue_length,
                local_queues_ends[num_threads_ - 1]);
    }
    // Reset local_queues_ends
    std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
    return r;
}

/* Sequential best-first search expanding the top-M unchecked candidates per
 * iteration. Fills set_K with the ids of the K best candidates found. */
//void Searching::search_with_top_m(
inline void Searching::search_with_top_m(
        const PANNS::idi M,
        const
PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
    boost::dynamic_bitset<> is_visited(num_v_);

    // Mark the initial candidates as visited.
    {
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = true;
        }
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        // Vertex record layout: [norm][vector data][degree][out-edges].
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);

    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;

        unsigned nk = L; // Lowest position in set_L changed this iteration.

        // Select M candidates
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }

        // Push M candidates' neighbors into the queue.
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L-1].distance_) {
                    // Worse than the current worst; skip.
                    continue;
                }
                Candidate cand(nb_id, dist, false);
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates

        // Resume from the lowest position that changed, or just past the
        // last expanded candidate.
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
    }

    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
}

/* Variant of search_with_top_m used only to record the observed distance
 * range (dist_min_, dist_max_) over the whole search; no set_K output and
 * no distance-computation counting. */
//void Searching::search_with_top_m(
inline void Searching::search_with_top_m_to_get_distance_range(
        const PANNS::idi M,
        const PANNS::idi query_id,
//        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids)
//        std::vector<idi> &set_K)
{
    dist_max_ = -FLT_MAX;
    dist_min_ = FLT_MAX;
    boost::dynamic_bitset<> is_visited(num_v_);

    {
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = true;
        }
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);

    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;

        unsigned nk = L;

        // Select M candidates
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }

        // Push M candidates' neighbors into the queue.
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
                Candidate cand(nb_id, dist, false);
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates

        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
        {// For histogram: refresh dist_min_/dist_max_ over the whole queue
         // after every iteration.
            for (idi i_l = 0; i_l < L; ++i_l) {
                distf dist = set_L[i_l].distance_;
                {// For distance range
                    if (dist > dist_max_) {
                        dist_max_ = dist;
                    }
                    if (dist < dist_min_) {
                        dist_min_ = dist;
                    }
                }
            }
        }
    }
}

/* Instrumented (debug-only) variant of search_with_top_m: counts, per
 * iteration, how many neighbors were examined vs. inserted and where the
 * insertions landed (thirds of the queue), printing stats to stdout.
 * NOTE(review): deliberately calls exit(1) after query_id == 3 — this is a
 * one-shot profiling harness, not production code. */
//void Searching::search_with_top_m(
inline void Searching::search_with_top_m_myths_M(
        const PANNS::idi M,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
    const idi loc_range = L / 3; // Queue-third bucket width for insert-location stats.
    boost::dynamic_bitset<> is_visited(num_v_);

    {
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = true;
        }
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);

    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        std::vector<idi> range_count(3, 0); // Inserts per third of the queue.
        idi zero_inserted_count = 0; // Expanded candidates that inserted nothing.
        ++tmp_count;

        unsigned nk = L;

        // Select M candidates
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }
        {
            if (0 == top_m_candidates_end) {
                break;
            }
        }

        uint64_t count_neighbors = 0;
        uint64_t count_inserted = 0;
        std::vector<idi> locs_to_count(M);

        // Push M candidates' neighbors into the queue.
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            count_neighbors += out_degree;
            idi num_inserted = 0;
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
                ++num_inserted;
                Candidate cand(nb_id, dist, false);
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
                {
                    ++range_count[r / loc_range];
                }
            }
            {
                if (0 == num_inserted) {
                    ++zero_inserted_count;
                }
                locs_to_count[c_i] = num_inserted;
                count_inserted += num_inserted;
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates

        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
        {
            printf("query:%uiter: %u "
                    "#neighbors: %lu "
                    "#inserted: %lu "
                    "ratio: %.2f%%\n",
                    query_id, tmp_count, count_neighbors, count_inserted,
                    100.0 * count_inserted / count_neighbors);
        }
    }

    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
    if (query_id == 3) {
        exit(1);
    }
}

// Sequential Top-M algorithm for profiling purpose: byte array, CAS, and OpenMP
/* Parallel profiling variant using a self-defined atomic BitVector for the
 * visited set and OpenMP loops for init and neighbor expansion. */
//void Searching::search_with_top_m(
inline void Searching::search_with_top_m_profile_bit_CAS(
        const PANNS::idi M,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
//    std::vector<uint8_t> is_visited(num_v_, 0); // Byte array
//    boost::dynamic_bitset<> is_visited(num_v_); // Bit array
    BitVector is_visited(num_v_);

    {
#pragma omp parallel for
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited.atomic_set_bit(init_ids[c_i]);
        }
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
#pragma omp parallel for
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
#pragma omp parallel for
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);

    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;

        unsigned nk = L;

        // Select M candidates
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }

        // Push M candidates' neighbors into the queue.
#pragma omp parallel for
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                {// Self-defined BitVector: atomic test-then-set of the visited bit.
                    if (is_visited.atomic_is_bit_set(nb_id)) {
                        continue;
                    }
                    is_visited.atomic_set_bit(nb_id);
                }
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
                Candidate cand(nb_id, dist, false);
                // NOTE(review): insert_into_queue mutates the shared set_L
                // from multiple OpenMP threads with no synchronization visible
                // here — looks like a data race; confirm this profiling
                // variant's intent before reuse.
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates

        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
    }
#pragma omp parallel for
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
}

/* Profiling variant (sequential; parallel pragmas left commented out) that
 * experiments with pruning neighbors against per-thread-slot distance
 * thresholds collected during candidate selection.
 * NOTE(review): deliberately calls exit(1) after the first query — this is a
 * one-shot profiling harness, not production code. */
inline void Searching::search_with_top_m_profile_prune_neighbors(
        const PANNS::idi M,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
//    std::vector<uint8_t> is_visited(num_v_, 0); // Byte array
    boost::dynamic_bitset<> is_visited(num_v_); // Bit array
//    BitVector is_visited(num_v_);

    {
//#pragma omp parallel for
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = true;
        }
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);

    std::vector<Candidate> top_m_candidates(M); // Full Candidates (id + distance), not just ids.
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;
        // local_thresholds[t-1]: worst selected-candidate distance seen for
        // thread slot t (slot 0 uses set_L's tail instead).
        std::vector<distf> local_thresholds(num_threads_, -FLT_MAX);

        unsigned nk = L;

        // Select M candidates
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            {
                idi tid = top_m_candidates_end % num_threads_;
                if (tid != 0) {
                    if (local_thresholds[tid - 1] < set_L[c_i].distance_) {
                        local_thresholds[tid - 1] = set_L[c_i].distance_;
                    }
                }
            }
            top_m_candidates[top_m_candidates_end++] = set_L[c_i];
        }
        {//test
            printf("iter: %u ", tmp_count);
            for (int tid = 0; tid < num_threads_; ++tid) {
                if (tid == 0) {
                    printf(" [%d]:%f", tid, set_L[L - 1].distance_);
                } else {
                    printf(" [%d]:%f", tid, local_thresholds[tid - 1]);
                }
            }
            printf("\n");
        }

        int thread_chunk_size = (top_m_candidates_end + num_threads_ - 1) / num_threads_;
        // Push M candidates' neighbors into the queue.
//#pragma omp parallel for
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            int tid = c_i / thread_chunk_size; // Simulated owning thread slot.
            idi cand_id = top_m_candidates[c_i].id_;
            distf cand_dist = top_m_candidates[c_i].distance_; // NOTE(review): only used by a commented-out pruning rule.
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
                {
                    // Experimental prune: drop neighbors worse than the slot's
                    // selection threshold.
                    if (0 != tid
                        && -FLT_MAX != local_thresholds[tid - 1]
                        && dist > local_thresholds[tid - 1]) {
                        continue;
                    }
                }
                Candidate cand(nb_id, dist, false);
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates

        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
    }

#pragma omp parallel for
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
    {//test
        exit(1);
    }
}

///// Backup
// (A fully commented-out reference copy of search_with_top_m follows.)
// idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //// //// {//test //// for (idi k_i = 0; k_i < K; ++k_i) { //// printf("%u: %u: %u %f\n", //// query_id, //// k_i, set_L[k_i].id_, set_L[k_i].distance_); //// } //// exit(1); //// } //} // ////// DEPRECATED: the is_visited array cannot be 
shared among threads. //inline void Searching::search_with_top_m_no_local_arrays( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // boost::dynamic_bitset<> &is_visited) //// std::vector< std::vector<idi> > &top_m_list) //{ //// boost::dynamic_bitset<> is_visited(num_v_); // // { // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = true; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. 
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //// //// {//test //// for (idi k_i = 0; k_i < K; ++k_i) { //// printf("%u: %u: %u %f\n", //// query_id, //// k_i, set_L[k_i].id_, set_L[k_i].distance_); //// } //// exit(1); //// } //} inline void Searching::search_with_top_m_in_batch( const PANNS::idi M, const PANNS::idi batch_start, const PANNS::idi batch_size, const PANNS::idi K, const PANNS::idi L, std::vector< std::vector<Candidate> > &set_L_list, const std::vector<idi> &init_ids, std::vector< std::vector<idi> > &set_K_list) { std::vector< boost::dynamic_bitset<> > is_visited_list(batch_size, boost::dynamic_bitset<> (num_v_)); // Prepare the init_ids { //#pragma omp parallel for for (idi q_i = 0; q_i < batch_size; ++q_i) { auto &is_visited = is_visited_list[q_i]; 
            for (idi c_i = 0; c_i < L; ++c_i) {
                is_visited[init_ids[c_i]] = true;
            }
        }
    }

    // Initialize set_L_list: compute the distance from every init candidate to
    // each query in the batch, then sort each query's queue by distance.
    {
//#pragma omp parallel for
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            const dataf *query_data = queries_load_ + (q_i + batch_start) * dimension_;
            for (idi i = 0; i < L; i++) {
                idi v_id = init_ids[i];
                auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
                dataf norm = *v_data++; // First value of a vertex record is its precomputed norm.
//                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(v_data, query_data, norm);
                set_L_list[q_i][i] = Candidate(v_id, dist, false); // False means not checked.
            }
            std::sort(set_L_list[q_i].begin(), set_L_list[q_i].begin() + L);
        }
    }

    {
        std::vector<idi> joint_queue(M * batch_size); // Joint queue for all shared top-M candidates
        idi joint_queue_end = 0;
        boost::dynamic_bitset<> is_in_joint_queue(num_v_);
//        std::vector< std::vector<idi> > cands_query_ids(num_v_, std::vector<idi>(batch_size)); // If candidate cand_id is selected by query q_i, q_i should be in cands_query_ids[cand_id].
//        std::vector<idi> cands_query_ids_ends(num_v_, 0);
        // Maps a candidate id to the local ids of the queries that selected it
        // this round. (The constructor argument is an initial bucket count.)
        std::unordered_map< idi, std::vector<idi> > cands_query_ids(batch_size * M);
        std::vector<idi> ks(batch_size, 0); // Indices of every queue's first unchecked candidate.
        std::vector<idi> nks(batch_size, L); // Indices of highest candidate inserted
        std::vector<idi> last_ks(batch_size, L); // Indices of lowest candidate unchecked
        std::vector<idi> queries_not_finished(batch_size);
        idi queries_not_finished_end = batch_size;
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            queries_not_finished[q_i] = q_i;
        }
        bool is_finished = false;

        idi counter_for_debug = 0;
        // Each round: every unfinished query contributes up to M unchecked
        // candidates to the shared joint queue (deduplicated with
        // is_in_joint_queue); then each joint candidate's adjacency list is
        // read once and its neighbors are pushed into the queues of all
        // queries that selected that candidate.
        while (!is_finished) {
            ++counter_for_debug;
            // Build the new joint queue
            // Traverse every query's queue
            for (idi q_i = 0; q_i < queries_not_finished_end; ++q_i) {
                idi q_local_id = queries_not_finished[q_i];
//                last_ks[q_local_id] = L;
                auto &set_L = set_L_list[q_local_id];
                idi top_m_count = 0;
                for (idi c_i = ks[q_local_id]; c_i < L && top_m_count < M; ++c_i) {
                    if (set_L[c_i].is_checked_) {
                        continue;
                    }
                    set_L[c_i].is_checked_ = true;
                    last_ks[q_local_id] = c_i;
                    ++top_m_count;
                    idi cand_id = set_L[c_i].id_;
                    // Record which query selected cand_id
                    auto tmp_c = cands_query_ids.find(cand_id);
                    if (tmp_c != cands_query_ids.end()) {
                        tmp_c->second.push_back(q_local_id);
                    } else {
                        cands_query_ids.emplace(cand_id, std::vector<idi>());
                        cands_query_ids[cand_id].reserve(batch_size);
                        cands_query_ids[cand_id].push_back(q_local_id);
                    }
//                    cands_query_ids[cand_id][cands_query_ids_ends[cand_id]++] = q_local_id;
                    // Add candidate cand_id into the joint queue
                    if (is_in_joint_queue[cand_id]) {
                        continue;
                    }
                    is_in_joint_queue[cand_id] = true;
                    joint_queue[joint_queue_end++] = cand_id;
                }
            }
            queries_not_finished_end = 0; // Clear queries_not_finished

            // Traverse every shared candidate
            for (idi c_i = 0; c_i < joint_queue_end; ++c_i) {
                idi cand_id = joint_queue[c_i];
                is_in_joint_queue[cand_id] = false; // Reset is_in_joint_queue
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++; // Adjacency list: degree, then neighbor ids.
                const auto &query_local_ids = cands_query_ids[cand_id];
                // Push neighbors to every queue of the queries that selected cand_id.
                // Traverse cand_id's neighbors
//                idi &q_i_bound = cands_query_ids_ends[cand_id];
//                for (idi q_i = 0; q_i < q_i_bound; ++q_i) {
//                    idi q_local_id = query_local_ids[q_i];
                for (idi q_local_id : query_local_ids) {
                    dataf *query_data = queries_load_ + (q_local_id + batch_start) * dimension_;
                    auto &is_visited = is_visited_list[q_local_id];
                    auto &set_L = set_L_list[q_local_id];
//                    // Traverse cand_id's neighbors
                    for (idi e_i = 0; e_i < out_degree; ++e_i) {
                        idi nb_id = out_edges[e_i];
                        if (is_visited[nb_id]) {
                            continue;
                        }
                        is_visited[nb_id] = true;
                        auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                        dataf norm = *nb_data++;
//                        ++count_distance_computation_;
                        distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                        // Prune: farther than the current L-th best for this query.
                        if (dist > set_L[L-1].distance_) {
                            continue;
                        }
//                        if (dist >= set_L[L-1].distance_) {
//                            continue;
//                        }
                        Candidate new_cand(nb_id, dist, false);
                        idi insert_loc = insert_into_queue(set_L, L, new_cand);
                        if (insert_loc < nks[q_local_id]) {
                            nks[q_local_id] = insert_loc;
                        }
                    }
                }
                cands_query_ids.erase(cand_id);
//                q_i_bound = 0; // Clear cands_query_ids[cand_id]
            }
            joint_queue_end = 0; // Clear joint_queue

            // Advance each query's scan position; a query stays active while
            // its first unchecked index is still inside the top L.
            for (idi q_local_id = 0; q_local_id < batch_size; ++q_local_id) {
                if (nks[q_local_id] <= last_ks[q_local_id]) {
                    ks[q_local_id] = nks[q_local_id];
                } else {
                    ks[q_local_id] = last_ks[q_local_id] + 1;
                }
                nks[q_local_id] = L;
                last_ks[q_local_id] = L;
                if (ks[q_local_id] < L) {
                    queries_not_finished[queries_not_finished_end++] = q_local_id;
                }
            }
            if (!queries_not_finished_end) {
                is_finished = true;
            }
        }
    }

    // Collect the top-K ids for every query in the batch.
    {
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            for (idi c_i = 0; c_i < K && c_i < L; ++c_i) {
                set_K_list[q_i + batch_start][c_i] = set_L_list[q_i][c_i].id_;
            }
        }
    }
////
//    {//test
//        for (idi q_i = 0; q_i < batch_size; ++q_i) {
//            printf("query: %u\n", q_i + batch_start);
//            for (idi c_i = 0; c_i < K; ++c_i) {
//                printf("%u: %u %f\n", c_i, set_L_list[q_i][c_i].id_, set_L_list[q_i][c_i].distance_);
//            }
//        }
//    }
}

inline void
Searching::para_search_with_top_m_critical_area(
        const PANNS::idi M,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
//        std::vector< std::vector<idi> > &top_m_list)
{
    // Top-M best-first search: repeatedly pick up to M unchecked candidates
    // from the size-L queue set_L, expand their neighbors, and insert any
    // neighbor closer than the current L-th distance back into set_L.
    // The OpenMP pragmas below are commented out, so this variant currently
    // runs sequentially; is_visited is still claimed via CAS so the loop
    // stays correct if the pragmas are re-enabled.
    std::vector<uint8_t> is_visited(num_v_, 0);
//    boost::dynamic_bitset<> is_visited(num_v_);

    // Mark the initial candidates as visited.
    {
//#pragma omp parallel for
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
        }
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++; // First value of a vertex record is its precomputed norm.
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);

    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;

        // nk: smallest (best) queue position where a new candidate was
        // inserted this round; scanning resumes from there next round.
        unsigned nk = L;
//        int nk = L;

        // Select M candidates
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }

        // Push M candidates' neighbors into the queue.
        // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT.
//#pragma omp parallel for
//#pragma omp parallel for reduction(min : nk)
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++; // Adjacency list: degree, then neighbor ids.
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
//                if (is_visited[nb_id]) {
//                    continue;
//                }
//                is_visited[nb_id] = 1;
                // Atomically claim nb_id so each neighbor is expanded at most once.
                if (!AtomicOps::CAS(is_visited.data() + nb_id,
                        static_cast<uint8_t>(0),
                        static_cast<uint8_t>(1))) {
                    continue;
                }
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                // Prune: farther than the current L-th best.
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
//                if (dist >= set_L[L-1].distance_) {
//                    continue;
//                }
                Candidate cand(nb_id, dist, false);
                idi r;
//#pragma omp critical
                {
                    r = insert_into_queue(set_L, L, cand);
                    if (r < nk) {
                        nk = r;
                    }
                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates

        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
    }

//#pragma omp parallel for
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
}

inline void
Searching::para_search_with_top_m_critical_area_no_omp(
        const PANNS::idi M,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
//        std::vector< std::vector<idi> > &top_m_list)
{
    // Explicitly sequential twin of para_search_with_top_m_critical_area
    // (all OpenMP pragmas commented out), kept as a baseline for comparison.
    std::vector<uint8_t> is_visited(num_v_, 0);
//    boost::dynamic_bitset<> is_visited(num_v_);

    {
//#pragma omp parallel for
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
        }
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id =
                init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++; // First value of a vertex record is its precomputed norm.
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);

    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;

        // nk: smallest (best) insert position this round; scan resumes there.
        unsigned nk = L;
//        int nk = L;

        // Select M candidates
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }

        // Push M candidates' neighbors into the queue.
        // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT.
//#pragma omp parallel for
//#pragma omp parallel for reduction(min : nk)
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++; // Adjacency list: degree, then neighbor ids.
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
//                if (is_visited[nb_id]) {
//                    continue;
//                }
//                is_visited[nb_id] = 1;
                // Atomically claim nb_id so each neighbor is expanded at most once.
                if (!AtomicOps::CAS(is_visited.data() + nb_id,
                        static_cast<uint8_t>(0),
                        static_cast<uint8_t>(1))) {
                    continue;
                }
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                // Prune: farther than the current L-th best.
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
//                if (dist >= set_L[L-1].distance_) {
//                    continue;
//                }
                Candidate cand(nb_id, dist, false);
                idi r;
//#pragma omp critical
                {
                    r = insert_into_queue(set_L, L, cand);
                    if (r < nk) {
                        nk = r;
                    }
                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates

        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
    }

//#pragma omp parallel for
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
}

inline void
Searching::para_search_with_top_m_critical_area_yes_omp(
        const PANNS::idi M,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
//        std::vector< std::vector<idi> > &top_m_list)
{
    // Same search as the _no_omp twin; in this variant only the is_visited
    // initialization loop below carries an active OpenMP pragma — the other
    // pragmas further down remain commented out.
    std::vector<uint8_t> is_visited(num_v_, 0);
//    boost::dynamic_bitset<> is_visited(num_v_);

    {
#pragma omp parallel for
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
        }
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id =
                init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++; // First value of a vertex record is its precomputed norm.
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);

    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;

        // nk: smallest (best) insert position this round; scan resumes there.
        unsigned nk = L;
//        int nk = L;

        // Select M candidates
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }

        // Push M candidates' neighbors into the queue.
        // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT.
//#pragma omp parallel for
//#pragma omp parallel for reduction(min : nk)
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++; // Adjacency list: degree, then neighbor ids.
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
//                if (is_visited[nb_id]) {
//                    continue;
//                }
//                is_visited[nb_id] = 1;
                // Atomically claim nb_id so each neighbor is expanded at most once.
                if (!AtomicOps::CAS(is_visited.data() + nb_id,
                        static_cast<uint8_t>(0),
                        static_cast<uint8_t>(1))) {
                    continue;
                }
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                // Prune: farther than the current L-th best.
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
//                if (dist >= set_L[L-1].distance_) {
//                    continue;
//                }
                Candidate cand(nb_id, dist, false);
                idi r;
//#pragma omp critical
                {
                    r = insert_into_queue(set_L, L, cand);
                    if (r < nk) {
                        nk = r;
                    }
                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates

        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
    }

//#pragma omp parallel for
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
}

inline void
Searching::para_search_with_top_m_visited_array(
        const PANNS::idi M,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
        std::vector<uint8_t> &is_visited)
//        std::vector< std::vector<idi> > &top_m_list)
{
    // Same search, but is_visited is caller-provided so the allocation can be
    // reused across queries. NOTE(review): assumes the caller resets
    // is_visited between queries — confirm at call sites.
//    uint64_t count_visited = 0;

//    std::vector<uint8_t> is_visited(num_v_, 0);
//    boost::dynamic_bitset<> is_visited(num_v_);

    {
//#pragma omp parallel for
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
//            ++count_visited;
        }
    }

    const dataf *query_data =
            queries_load_ + query_id * dimension_;
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++; // First value of a vertex record is its precomputed norm.
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);

    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;

        // nk: smallest (best) insert position this round; scan resumes there.
        unsigned nk = L;
//        int nk = L;

        // Select M candidates
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }

        // Push M candidates' neighbors into the queue.
        // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT.
//#pragma omp parallel for
//#pragma omp parallel for reduction(min : nk)
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++; // Adjacency list: degree, then neighbor ids.
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
//                if (is_visited[nb_id]) {
//                    continue;
//                }
//                is_visited[nb_id] = 1;
                // Atomically claim nb_id so each neighbor is expanded at most once.
                if (!AtomicOps::CAS(is_visited.data() + nb_id,
                        static_cast<uint8_t>(0),
                        static_cast<uint8_t>(1))) {
                    continue;
                }
//                ++count_visited;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                // Prune: farther than the current L-th best.
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
//                if (dist >= set_L[L-1].distance_) {
//                    continue;
//                }
                Candidate cand(nb_id, dist, false);
                idi r;
//#pragma omp critical
                {
                    r = insert_into_queue(set_L, L, cand);
                    if (r < nk) {
                        nk = r;
                    }
                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates

        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
    }

//#pragma omp parallel for
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }

//    {
//        printf("query_id: %u "
//               "count_visited: %lu %f%%\n",
//               query_id,
//               count_visited,
//               100.0 * count_visited / num_v_);
//    }
}

inline void
Searching::para_search_with_top_m_merge_queues(
        const idi M,
        const idi query_id,
        const idi K,
        const idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
    // Parallel top-M search with one local queue per thread: neighbors found
    // during the parallel expansion go into the expanding thread's local
    // queue, which is merged back into set_L after each round.
//    {//test
//        printf("query_id: %u\n", query_id);
//    }
//    const idi local_queue_length = ((M - 1) / num_threads_ + 1) * width_;
    const idi local_queue_length = L;
    std::vector< std::vector<Candidate> > local_queues_list(num_threads_,
            std::vector<Candidate>(local_queue_length));
    std::vector<idi> local_queues_ends(num_threads_, 0);
    std::vector<uint8_t> is_visited(num_v_, 0);
//    boost::dynamic_bitset<> is_visited(num_v_);

    // Mark the initial candidates as visited.
    {
#pragma omp parallel for
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
        }
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
#pragma omp parallel for
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
#pragma omp parallel for
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++; // First value of a vertex record is its precomputed norm.
//        ++count_distance_computation;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);

    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;
//        {//test
//            printf("tmp_count: %d\n", tmp_count);
//        }

        // Select M candidates
        idi last_k = L;
        // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }

        // Push M candidates' neighbors into the queue.
#pragma omp parallel for
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            int tid = omp_get_thread_num();
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++; // Adjacency list: degree, then neighbor ids.
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
//                if (is_visited[nb_id]) {
//                    continue;
//                }
//                is_visited[nb_id] = 1;
                // Atomically claim nb_id so each neighbor is expanded at most once.
                if (!AtomicOps::CAS(is_visited.data() + nb_id,
                        static_cast<uint8_t>(0),
                        static_cast<uint8_t>(1))) {
                    continue;
                }
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
//                ++count_distance_computation;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                // Prune against the current L-th best; set_L is only read here,
                // all inserts go to the per-thread local queues.
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
//                if (dist >= set_L[L-1].distance_) {
//                    continue;
//                }
                Candidate cand(nb_id, dist, false);
                // Add to the local queue.
                add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand);
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates

        // nk: best position produced by the merge below; scan resumes there.
        idi nk = L;
//        // Merge. Parallel merging in every two queues.
//        {
//            for (int tid = 0; tid < num_threads_; ++tid) {
//                if (0 == local_queues_ends[tid]) continue;
//                idi r = merge_two_queues_into_1st_queue_para(
//                        set_L,
//                        0,
//                        L,
//                        local_queues_list[tid],
//                        0,
//                        local_queues_ends[tid]);
////                idi r = merge_two_queues_into_1st_queue_seq(
////                        set_L,
////                        0,
////                        L,
////                        local_queues_list[tid],
////                        0,
////                        local_queues_ends[tid]);
//                local_queues_ends[tid] = 0; // Reset the local queue
//                if (r < nk) {
//                    nk = r;
//                }
//            }
//        }
//        {// text
//            if (query_id == 4 &&
//                tmp_count == 5) {
//                // Print local queues
//                for (int t_i = 0; t_i < num_threads_; ++t_i) {
////                    idi start_i = t_i * local_queue_length;
//                    for (idi q_i = 0; q_i < local_queues_ends[t_i]; ++q_i) {
//                        printf("t[%u][%u]: "
//                               "id: %u "
//                               "dist: %f\n",
//                               t_i, q_i,
//                               local_queues_list[t_i][q_i].id_,
//                               local_queues_list[t_i][q_i].distance_);
//                    }
//                }
//                printf("----------\n");
//                for (idi i = 0; i < L; ++i) {
//                    printf("set_L[%u]: "
//                           "id: %u "
//                           "dist: %f\n",
//                           i,
//                           set_L[i].id_,
//                           set_L[i].distance_);
//                }
//                printf("----------\n");
//            }
//        }
        // Merge. Merge all queues in parallel.
        // Merge every thread-local queue back into set_L; r is the best
        // (smallest) position where a merged candidate landed.
        {
            if (num_threads_ > 1) {
                idi r = merge_all_queues_para_list(
                        local_queues_list,
                        local_queues_ends,
                        set_L,
                        L);
                if (r < nk) {
                    nk = r;
                }
            } else {
                // Single-thread fast path: one sequential fixed-size merge.
                if (local_queues_ends[0]) {
                    idi r = merge_two_queues_into_1st_queue_seq_fixed(
                            set_L,
                            0,
                            L,
                            local_queues_list[0],
                            0,
                            local_queues_ends[0]);
                    local_queues_ends[0] = 0;
                    if (r < nk) {
                        nk = r;
                    }
                }
            }
        }
//        {//test
//            if (query_id == 4) {
//                for (idi i = 0; i < L; ++i) {
//                    printf("tmp_count: %u "
//                           "set_L[%u]: "
//                           "id: %u "
//                           "dist: %f\n",
//                           tmp_count,
//                           i,
//                           set_L[i].id_,
//                           set_L[i].distance_);
//                }
//            }
//
//        }

        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
    }

#pragma omp parallel for
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }

//    {
//        exit(1);
//    }
//    {//test
//
////        if (query_id == 4) {
//        for (idi i = 0; i < L; ++i) {
//            printf("set_L[%u]: "
//                   "id: %u "
//                   "dist: %f\n",
//                   i,
//                   set_L[i].id_,
//                   set_L[i].distance_);
//        }
////        exit(1);
////        }
//    }
}

//// Using local queue and then sequential merge.
inline void
Searching::para_search_with_top_m_queues_seq_merge(
        const PANNS::idi M,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
//        std::vector< std::vector<idi> > &top_m_list)
{
    // Like para_search_with_top_m_merge_queues, but the per-thread local
    // queues are merged into set_L sequentially, one queue at a time.
//    const idi local_queue_length = ((L - 1) / num_threads_ + 1) * width_;
    const idi local_queue_length = L;
    std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length));
    std::vector<idi> local_queues_ends(num_threads_, 0);
    std::vector<uint8_t> is_visited(num_v_, 0);
//    boost::dynamic_bitset<> is_visited(num_v_);

    {
#pragma omp parallel for
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
        }
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
//    for (idi v_i = 0; v_i < L; ++v_i) {
//        idi v_id = init_ids[v_i];
//        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
//    }
    // Get the distances of all candidates, store in the set set_L.
#pragma omp parallel for
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++; // First value of a vertex record is its precomputed norm.
//        ++count_distance_computation;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);

    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;
//        {
//            printf("tmp_count: %u "
//                   "k: %u\n",
//                   tmp_count,
//                   k);
//        }

//        unsigned nk = L;
//        int nk = L;

        // Select M candidates
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }

        // Push M candidates' neighbors into the queue.
#pragma omp parallel for for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { int tid = omp_get_thread_num(); idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; if (!AtomicOps::CAS(is_visited.data() + nb_id, static_cast<uint8_t>(0), static_cast<uint8_t>(1))) { continue; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // ++count_distance_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L-1].distance_) { continue; } // if (dist >= set_L[L-1].distance_) { // continue; // } Candidate cand(nb_id, dist, false); // idi r; //#pragma omp critical // { // r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // Add to the local queue. 
add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand); } } top_m_candidates_end = 0; // Clear top_m_candidates idi nk = L; // Merge { for (int tid = 0; tid < num_threads_; ++tid) { if (0 == local_queues_ends[tid]) continue; idi r = merge_two_queues_into_1st_queue_seq_fixed( set_L, 0, L, local_queues_list[tid], 0, local_queues_ends[tid]); // L + 1); local_queues_ends[tid] = 0; // Reset the local queue if (r < nk) { nk = r; } } } if (nk <= last_k) { k = nk; } else { k = last_k + 1; } } #pragma omp parallel for for (idi k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i].id_; } // // {//test // for (idi k_i = 0; k_i < K; ++k_i) { // printf("%u: %u: %u %f\n", // query_id, // k_i, set_L[k_i].id_, set_L[k_i].distance_); // } // exit(1); // } } inline void Searching::para_search_with_top_m_merge_queues_no_CAS( const idi M, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, std::vector< std::vector<Candidate> > &local_queues_list, std::vector<idi> &local_queues_ends, // std::vector<uint8_t> &is_visited) boost::dynamic_bitset<> &is_visited) { //// const idi local_queue_length = ((M - 1) / num_threads_ + 1) * width_; // const idi local_queue_length = L; // std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length)); // std::vector<idi> local_queues_ends(num_threads_, 0); //// std::vector<uint8_t> is_visited(num_v_, 0); // boost::dynamic_bitset<> is_visited(num_v_); { #pragma omp parallel for for (idi c_i = 0; c_i < L; ++c_i) { is_visited[init_ids[c_i]] = 1; } } const dataf *query_data = queries_load_ + query_id * dimension_; #pragma omp parallel for for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } // Get the distances of all candidates, store in the set set_L. 
#pragma omp parallel for for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; // ++count_distance_computation; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i] = Candidate(v_id, dist, false); // False means not checked. } std::sort(set_L.begin(), set_L.begin() + L); std::vector<idi> top_m_candidates(M); idi top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. idi tmp_count = 0; // for debug while (k < L) { ++tmp_count; // Select M candidates idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { if (set_L[c_i].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[c_i].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; } // Push M candidates' neighbors into the queue. 
#pragma omp parallel for for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { int tid = omp_get_thread_num(); idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // ++count_distance_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L-1].distance_) { continue; } // if (dist >= set_L[L-1].distance_) { // continue; // } Candidate cand(nb_id, dist, false); // Add to the local queue. add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand); } } top_m_candidates_end = 0; // Clear top_m_candidates idi nk = L; // // Merge. Parallel merging in every two queues. // { // for (int tid = 0; tid < num_threads_; ++tid) { // if (0 == local_queues_ends[tid]) continue; // idi r = merge_two_queues_into_1st_queue_para( // set_L, // 0, // L, // local_queues_list[tid], // 0, // local_queues_ends[tid]); //// idi r = merge_two_queues_into_1st_queue_seq( //// set_L, //// 0, //// L, //// local_queues_list[tid], //// 0, //// local_queues_ends[tid]); // local_queues_ends[tid] = 0; // Reset the local queue // if (r < nk) { // nk = r; // } // } // } // // Merge. Merge all queues in parallel. 
// { // if (num_threads_ > 1) { // idi r = merge_all_queues_para( // local_queues_list, // local_queues_ends, // set_L, // L); // if (r < nk) { // nk = r; // } // } else { // if (local_queues_ends[0]) { // idi r = merge_two_queues_into_1st_queue_seq( // set_L, // 0, // L, // local_queues_list[0], // 0, // local_queues_ends[0]); // local_queues_ends[0] = 0; // if (r < nk) { // nk = r; // } // } // } // } // Merge { for (int tid = 0; tid < num_threads_; ++tid) { if (0 == local_queues_ends[tid]) continue; idi r = merge_two_queues_into_1st_queue_seq_fixed( set_L, 0, L, local_queues_list[tid], 0, local_queues_ends[tid]); // L + 1); local_queues_ends[tid] = 0; // Reset the local queue if (r < nk) { nk = r; } } } if (nk <= last_k) { k = nk; } else { k = last_k + 1; } } #pragma omp parallel for for (idi k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i].id_; } {// Reset is_visited.reset(); // std::fill(is_visited.begin(), is_visited.end(), 0); std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); } } //inline void Searching::para_search_with_top_m_merge_queues_in_array( inline void Searching::para_search_with_top_m_merge_queues_new_threshold( const idi M, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue // std::vector< std::vector<Candidate> > &local_queues_list, std::vector<Candidate> &local_queues_array, std::vector<idi> &local_queues_ends, // Sizes of local queue // BitVector &is_visited) // std::vector<uint8_t> &is_visited) boost::dynamic_bitset<> &is_visited) { { #pragma omp parallel for for (idi c_i = 0; c_i < L; ++c_i) { is_visited[init_ids[c_i]] = 1; // is_visited.atomic_set_bit(init_ids[c_i]); } } const dataf *query_data = queries_load_ + query_id * dimension_; #pragma omp parallel for for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, 
_MM_HINT_T0); } uint64_t tmp_count_computation = 0; // Get the distances of all candidates, store in the set set_L. #pragma omp parallel for reduction(+ : tmp_count_computation) for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; // ++count_distance_computation; ++tmp_count_computation; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i] = Candidate(v_id, dist, false); // False means not checked. } count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; std::sort(set_L.begin(), set_L.begin() + L); // idi min_index = L - 1; // distf min_1st = set_L[min_index].distance_; std::vector<idi> top_m_candidates(M); idi top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. idi tmp_count = 0; // for debug while (k < L) { ++tmp_count; // {//test // printf("tmp_count: %d\n", tmp_count); // } // Select M candidates idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { if (set_L[c_i].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[c_i].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; } // Push M candidates' neighbors into the queue. 
#pragma omp parallel for reduction(+ : tmp_count_computation) for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { int tid = omp_get_thread_num(); const idi local_queue_start = tid * local_queue_length; idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } // { // __ATOMIC_SEQ_CST edition // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // } // {// Acquire and Release edition // if (__atomic_load_n(is_visited.data() + nb_id, __ATOMIC_ACQUIRE)) { // continue; // } // __atomic_store_n(is_visited.data() + nb_id, 1, __ATOMIC_RELEASE); // } // {// Self-defined BitVector // if (is_visited.atomic_is_bit_set(nb_id)) { // continue; // } // is_visited.atomic_set_bit(nb_id); // } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // ++count_distance_computation; ++tmp_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > min_1st) { // continue; // } else if (min_index > 0) { // // Inserted, so min_1st needs update // if (dist > set_L[min_index - 1].distance_) { // min_1st = dist; // if (min_index < L - 1) { // ++min_index; // } // } else { // min_1st = set_L[--min_index].distance_; // } //// min_1st = set_L[--min_index].distance_; // } if (dist > set_L[L-1].distance_) { continue; } Candidate cand(nb_id, dist, false); // Add to the local queue. 
add_into_queue(local_queues_array, local_queue_start, local_queues_ends[tid], local_queue_length, cand); } } top_m_candidates_end = 0; // Clear top_m_candidates count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; idi nk = L; // // Merge. Parallel merging in every two queues. // { // for (int tid = 0; tid < num_threads_; ++tid) { // if (0 == local_queues_ends[tid]) continue; // idi r = merge_two_queues_into_1st_queue_para( // set_L, // 0, // L, // local_queues_list[tid], // 0, // local_queues_ends[tid]); //// idi r = merge_two_queues_into_1st_queue_seq( //// set_L, //// 0, //// L, //// local_queues_list[tid], //// 0, //// local_queues_ends[tid]); // local_queues_ends[tid] = 0; // Reset the local queue // if (r < nk) { // nk = r; // } // } // } // Merge. Merge all queues in parallel. { if (num_threads_ > 1) { idi r = merge_all_queues_para_array( // local_queues_list, local_queues_array, local_queues_ends, local_queue_length, set_L, L); if (r < nk) { nk = r; } } else { if (local_queues_ends[0]) { idi r = merge_two_queues_into_1st_queue_seq_fixed( set_L, 0, L, // local_queues_list[0], local_queues_array, 0, local_queues_ends[0]); local_queues_ends[0] = 0; if (r < nk) { nk = r; } } } } // // Merge Sequentially // { // for (int tid = 0; tid < num_threads_; ++tid) { // if (0 == local_queues_ends[tid]) continue; // idi r = merge_two_queues_into_1st_queue_seq_fixed( // set_L, // 0, // L, //// local_queues_list[tid], //// 0, // local_queues_array, // tid * local_queue_length, // local_queues_ends[tid]); //// L + 1); // local_queues_ends[tid] = 0; // Reset the local queue // if (r < nk) { // nk = r; // } // } // } if (nk <= last_k) { k = nk; } else { k = last_k + 1; } } #pragma omp parallel for for (idi k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i].id_; } {// Reset is_visited.reset(); // std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.clear_all(); std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); } } inline 
void Searching::para_search_with_top_m_merge_queues_by_sort( const idi M, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue // std::vector<Candidate> &local_queues_array, std::vector<idi> &local_queues_ends, // Sizes of local queue std::vector<idi> &dest_offsets, const std::vector<idi> &offsets_load_set_L, // Offsets for reading from set_L. BitVector &is_visited) { { #pragma omp parallel for for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; is_visited.atomic_set_bit(init_ids[c_i]); } } const dataf *query_data = queries_load_ + query_id * dimension_; #pragma omp parallel for for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } // Get the distances of all candidates, store in the set set_L. #pragma omp parallel for for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; // ++count_distance_computation; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i] = Candidate(v_id, dist, false); // False means not checked. } std::sort(set_L.begin(), set_L.begin() + L); // boost::sort::block_indirect_sort(set_L.begin(), set_L.begin() + L, num_threads_); local_queues_ends[0] = L; std::vector<idi> top_m_candidates(M); idi top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. idi tmp_count = 0; // for debug // while(true) { while (k < L) { ++tmp_count; // {//test // printf("tmp_count: %d\n", tmp_count); // } // Select M candidates idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. 
// for (idi c_i = 0; c_i < L && top_m_candidates_end < M; ++c_i) { for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { if (set_L[c_i].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[c_i].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; } if (!top_m_candidates_end) { break; } // Push M candidates' neighbors into the queue. #pragma omp parallel for for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { int tid = omp_get_thread_num(); // const idi local_queue_start = tid * local_queue_length; idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } {// Self-defined BitVector if (is_visited.atomic_is_bit_set(nb_id)) { continue; } is_visited.atomic_set_bit(nb_id); } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // ++count_distance_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L-1].distance_) { continue; } Candidate cand(nb_id, dist, false); // Add to the local queue. 
if (0 == tid) { add_into_queue(set_L, 0, local_queues_ends[0], L, cand); } else { add_into_queue(set_L, offsets_load_set_L[tid], local_queues_ends[tid], local_queue_length, cand); } // add_into_queue(local_queues_array, local_queue_start, local_queues_ends[tid], local_queue_length, cand); } } top_m_candidates_end = 0; // Clear top_m_candidates // Sort { if (num_threads_ == 1) { continue; } std::copy(local_queues_ends.begin(), local_queues_ends.end(), dest_offsets.begin()); idi total_cands = PANNS::ParallelOps::prefix_sum_for_offsets(dest_offsets); // Shrink sparse array into a dense array. for (int i_t = 2; i_t < num_threads_; ++i_t) { memmove( set_L.data() + dest_offsets[i_t], set_L.data() + offsets_load_set_L[i_t], local_queues_ends[i_t] * sizeof(Candidate)); } // Sort the array. std::sort(set_L.begin(), set_L.begin() + total_cands); // boost::sort::block_indirect_sort(set_L.begin(), set_L.begin() + total_cands, num_threads_); // Reset std::fill(local_queues_ends.begin() + 1, local_queues_ends.end(), 0); } // idi nk = L; // // Merge. Merge all queues in parallel. 
// { // if (num_threads_ > 1) { // idi r = merge_all_queues_para_array( //// local_queues_list, // local_queues_array, // local_queues_ends, // local_queue_length, // set_L, // L); // if (r < nk) { // nk = r; // } // } else { // if (local_queues_ends[0]) { // idi r = merge_two_queues_into_1st_queue_seq_fixed( // set_L, // 0, // L, //// local_queues_list[0], // local_queues_array, // 0, // local_queues_ends[0]); // local_queues_ends[0] = 0; // if (r < nk) { // nk = r; // } // } // } // } // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } } #pragma omp parallel for for (idi k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i].id_; } {// Reset is_visited.clear_all(); std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); } } inline void Searching::para_search_with_top_m_merge_queues_myths( const idi M, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue // std::vector< std::vector<Candidate> > &local_queues_list, std::vector<Candidate> &local_queues_array, std::vector<idi> &local_queues_ends, // Sizes of local queue BitVector &is_visited) // std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited) { { #pragma omp parallel for for (idi c_i = 0; c_i < L; ++c_i) { is_visited.atomic_set_bit(init_ids[c_i]); } } const dataf *query_data = queries_load_ + query_id * dimension_; #pragma omp parallel for for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } // Get the distances of all candidates, store in the set set_L. 
#pragma omp parallel for for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; // ++count_distance_computation; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i] = Candidate(v_id, dist, false); // False means not checked. } std::sort(set_L.begin(), set_L.begin() + L); std::vector<idi> top_m_candidates(M); idi top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. idi tmp_count = 0; // for debug while (k < L) { ++tmp_count; // {//test // printf("tmp_count: %d\n", tmp_count); // } // Select M candidates idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { if (set_L[c_i].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[c_i].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; } // Push M candidates' neighbors into the queue. 
#pragma omp parallel for for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { int tid = omp_get_thread_num(); const idi local_queue_start = tid * local_queue_length; idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; {// Self-defined BitVector if (is_visited.atomic_is_bit_set(nb_id)) { continue; } is_visited.atomic_set_bit(nb_id); } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // ++count_distance_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L-1].distance_) { continue; } // if (dist >= set_L[L-1].distance_) { // continue; // } Candidate cand(nb_id, dist, false); // Add to the local queue. add_into_queue(local_queues_array, local_queue_start, local_queues_ends[tid], local_queue_length, cand); // add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand); } } top_m_candidates_end = 0; // Clear top_m_candidates // {// Print all sizes of local queues // printf("query%u:iter: %u", query_id, tmp_count); // for (int i_t = 0; i_t < num_threads_; ++i_t) { // printf(" [%u]: %u", i_t, local_queues_ends[i_t]); // } // printf("\n"); // } idi nk = L; // // Merge. Parallel merging in every two queues. 
// { // for (int tid = 0; tid < num_threads_; ++tid) { // if (0 == local_queues_ends[tid]) continue; // idi r = merge_two_queues_into_1st_queue_para( // set_L, // 0, // L, // local_queues_list[tid], // 0, // local_queues_ends[tid]); //// idi r = merge_two_queues_into_1st_queue_seq( //// set_L, //// 0, //// L, //// local_queues_list[tid], //// 0, //// local_queues_ends[tid]); // local_queues_ends[tid] = 0; // Reset the local queue // if (r < nk) { // nk = r; // } // } // } // {// text // if (query_id == 4 && // tmp_count == 5) { // // Print local queues // for (int t_i = 0; t_i < num_threads_; ++t_i) { // idi start_i = t_i * local_queue_length; // for (idi q_i = 0; q_i < local_queues_ends[t_i]; ++q_i) { // printf("t[%u][%u]: " // "id: %u " // "dist: %f\n", // t_i, q_i, // local_queues_array[q_i + start_i].id_, // local_queues_array[q_i + start_i].distance_); // } // } // printf("----------\n"); // for (idi i = 0; i < L; ++i) { // printf("set_L[%u]: " // "id: %u " // "dist: %f\n", // i, // set_L[i].id_, // set_L[i].distance_); // } // printf("----------\n"); // } // } // Merge. Merge all queues in parallel. 
{ if (num_threads_ > 1) { idi r = merge_all_queues_para_array( // local_queues_list, local_queues_array, local_queues_ends, local_queue_length, set_L, L); if (r < nk) { nk = r; } } else { if (local_queues_ends[0]) { idi r = merge_two_queues_into_1st_queue_seq_fixed( set_L, 0, L, // local_queues_list[0], local_queues_array, 0, local_queues_ends[0]); local_queues_ends[0] = 0; if (r < nk) { nk = r; } } } } // // Merge Sequentially // { // for (int tid = 0; tid < num_threads_; ++tid) { // if (0 == local_queues_ends[tid]) continue; // idi r = merge_two_queues_into_1st_queue_seq_fixed( // set_L, // 0, // L, //// local_queues_list[tid], //// 0, // local_queues_array, // tid * local_queue_length, // local_queues_ends[tid]); //// L + 1); // local_queues_ends[tid] = 0; // Reset the local queue // if (r < nk) { // nk = r; // } // } // } if (nk <= last_k) { k = nk; } else { k = last_k + 1; } } #pragma omp parallel for for (idi k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i].id_; } {// Reset // is_visited.reset(); // std::fill(is_visited.begin(), is_visited.end(), 0); is_visited.clear_all(); std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); } // { // exit(1); // } // { // if (query_id == 3) { // exit(1); // } // } } inline void Searching::para_search_with_top_m_in_batch_embarassing_para( const PANNS::idi M, const PANNS::idi batch_start, const PANNS::idi batch_size, const PANNS::idi K, const PANNS::idi L, std::vector< std::vector<Candidate> > &set_L_list, const std::vector<idi> &init_ids, std::vector< std::vector<idi> > &set_K_list, std::vector< boost::dynamic_bitset<> > &is_visited_list) { // std::vector< boost::dynamic_bitset<> > is_visited_list(batch_size, boost::dynamic_bitset<> (num_v_)); // std::vector< std::vector<bool> > is_visited_list(batch_size, std::vector<bool>(num_v_)); // Prepare the init_ids { //#pragma omp parallel for for (idi q_i = 0; q_i < batch_size; ++q_i) { auto &is_visited = is_visited_list[q_i]; for (idi c_i = 0; c_i < L; ++c_i) { 
is_visited[init_ids[c_i]] = true; } } } // Initialize set_L_list { //#pragma omp parallel for for (idi q_i = 0; q_i < batch_size; ++q_i) { const dataf *query_data = queries_load_ + (q_i + batch_start) * dimension_; for (idi i = 0; i < L; i++) { idi v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; // ++count_distance_computation_; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L_list[q_i][i] = Candidate(v_id, dist, false); // False means not checked. } std::sort(set_L_list[q_i].begin(), set_L_list[q_i].begin() + L); } } { std::vector<idi> joint_queue(M * batch_size); // Joint queue for all shared top-M candidates idi joint_queue_end = 0; boost::dynamic_bitset<> is_in_joint_queue(num_v_); // std::vector< std::vector<idi> > cands_query_ids(num_v_, std::vector<idi>(batch_size)); // If candidate cand_id is selected by query q_i, q_i should be in cands_query_ids[cand_id]. // std::vector<idi> cands_query_ids_ends(num_v_, 0); std::unordered_map< idi, std::vector<idi> > cands_query_ids(batch_size * M); std::vector<idi> ks(batch_size, 0); // Indices of every queue's first unchecked candidate. 
// Per-query bookkeeping for the batched synchronous search:
//   nks[q]     — index of the highest (best) position where a new candidate was
//                inserted into query q's queue this round.
//   last_ks[q] — index of the lowest (worst) candidate that was checked this round.
std::vector<idi> nks(batch_size, L); // Indices of highest candidate inserted
std::vector<idi> last_ks(batch_size, L); // Indices of lowest candidate unchecked
// Work list of queries whose search has not yet converged.
std::vector<idi> queries_not_finished(batch_size);
idi queries_not_finished_end = batch_size;
for (idi q_i = 0; q_i < batch_size; ++q_i) {
    queries_not_finished[q_i] = q_i;
}
bool is_finished = false;

idi counter_for_debug = 0;

// Main batch loop: each round gathers the distinct top-M candidates across all
// unfinished queries into one joint queue, so a candidate shared by several
// queries has its adjacency list fetched only once.
while (!is_finished) {
    ++counter_for_debug;
    // Build the new joint queue
    // Traverse every query's queue
    for(idi q_i = 0; q_i < queries_not_finished_end; ++q_i) {
        idi q_local_id = queries_not_finished[q_i];
//            last_ks[q_local_id] = L;
        auto &set_L = set_L_list[q_local_id];
        idi top_m_count = 0;
        // Mark up to M unchecked candidates of this query as checked.
        for (idi c_i = ks[q_local_id]; c_i < L && top_m_count < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            set_L[c_i].is_checked_ = true;
            last_ks[q_local_id] = c_i;
            ++top_m_count;
            idi cand_id = set_L[c_i].id_;
            // Record which query selected cand_id
            auto tmp_c = cands_query_ids.find(cand_id);
            if (tmp_c != cands_query_ids.end()) {
                tmp_c->second.push_back(q_local_id);
            } else {
                cands_query_ids.emplace(cand_id, std::vector<idi>());
                cands_query_ids[cand_id].reserve(batch_size);
                cands_query_ids[cand_id].push_back(q_local_id);
            }
//                cands_query_ids[cand_id][cands_query_ids_ends[cand_id]++] = q_local_id;
            // Add candidate cand_id into the joint queue
            // (the is_in_joint_queue bitset keeps joint_queue duplicate-free).
            if (is_in_joint_queue[cand_id]) {
                continue;
            }
            is_in_joint_queue[cand_id] = true;
            joint_queue[joint_queue_end++] = cand_id;
        }
    }
    queries_not_finished_end = 0; // Clear queries_not_finished

    // Traverse every shared candidate
    for (idi c_i = 0; c_i < joint_queue_end; ++c_i) {
        idi cand_id = joint_queue[c_i];
        is_in_joint_queue[cand_id] = false; // Reset is_in_joint_queue
        // Adjacency list lives right after the vertex data: [out_degree, neighbor ids...].
        idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
        idi out_degree = *out_edges++;
        const auto &query_local_ids = cands_query_ids[cand_id];
        // Push neighbors to every queue of the queries that selected cand_id.
// Traverse cand_id's neighbors // idi &q_i_bound = cands_query_ids_ends[cand_id]; // for (idi q_i = 0; q_i < q_i_bound; ++q_i) { // idi q_local_id = query_local_ids[q_i]; for (idi q_local_id : query_local_ids) { dataf *query_data = queries_load_ + (q_local_id + batch_start) * dimension_; auto &is_visited = is_visited_list[q_local_id]; auto &set_L = set_L_list[q_local_id]; // // Traverse cand_id's neighbors for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; if (is_visited[nb_id]) { continue; } is_visited[nb_id] = true; auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // ++count_distance_computation_; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L-1].distance_) { continue; } // if (dist >= set_L[L-1].distance_) { // continue; // } Candidate new_cand(nb_id, dist, false); idi insert_loc = insert_into_queue(set_L, L, new_cand); if (insert_loc < nks[q_local_id]) { nks[q_local_id] = insert_loc; } } } cands_query_ids.erase(cand_id); // q_i_bound = 0; // Clear cands_query_ids[cand_id] } joint_queue_end = 0; // Clear joint_queue for (idi q_local_id = 0; q_local_id < batch_size; ++q_local_id) { if (nks[q_local_id] <= last_ks[q_local_id]) { ks[q_local_id] = nks[q_local_id]; } else { ks[q_local_id] = last_ks[q_local_id] + 1; } nks[q_local_id] = L; last_ks[q_local_id] = L; if (ks[q_local_id] < L) { queries_not_finished[queries_not_finished_end++] = q_local_id; } } if (!queries_not_finished_end) { is_finished = true; } } } { for (idi q_i = 0; q_i < batch_size; ++q_i) { for (idi c_i = 0; c_i < K && c_i < L; ++c_i) { set_K_list[q_i + batch_start][c_i] = set_L_list[q_i][c_i].id_; } } } //// // {//test // for (idi q_i = 0; q_i < batch_size; ++q_i) { // printf("query: %u\n", q_i + batch_start); // for (idi c_i = 0; c_i < K; ++c_i) { // printf("%u: %u %f\n", c_i, set_L_list[q_i][c_i].id_, set_L_list[q_i][c_i].distance_); // } // } // } {// Reset is_visited_list for 
(idi q_i = 0; q_i < batch_size; ++q_i) { is_visited_list[q_i].reset(); } } } // DEPRECATED. No enough workload for OpenMP, and hard to implement efficiently. ///** // * Prepare init_ids and flags, as they are constant for all queries. // * @param[out] init_ids // * @param L // */ //inline void Searching::para_prepare_init_ids( // std::vector<unsigned int> &init_ids, // unsigned L) const //{ //// idi num_ngbrs = get_out_degree(ep_); //// edgei edge_start = nsg_graph_indices_[ep_]; //// // Store ep_'s neighbors as candidates //// idi tmp_l = 0; //// for (; tmp_l < L && tmp_l < num_ngbrs; tmp_l++) { //// init_ids[tmp_l] = nsg_graph_out_edges_[edge_start + tmp_l]; //// } //// std::unordered_set<idi> visited_ids; // std::vector<uint8_t> is_selected(num_v_, 0); //// boost::dynamic_bitset<> is_selected(num_v_); // idi *out_edges = (idi *) (opt_nsg_graph_ + ep_ * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // idi init_ids_end = 0; //// idi e_i_bound = out_degree <= L ? out_degree : L; //#pragma omp parallel for // for (idi e_i = 0; e_i < out_degree && init_ids_end < L; ++e_i) { //// for (idi e_i = 0; e_i < e_i_bound; ++e_i) { // idi v_id = out_edges[e_i]; //// if(is_selected[v_id]) { //// continue; //// } //// is_selected[v_id] = 1; // // if (!AtomicOps::CAS(is_selected.data() + v_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // //// init_ids[init_ids_end++] = v_id; // volatile idi old_v = init_ids_end; // volatile idi new_v = old_v + 1; // while (!AtomicOps::CAS(&init_ids_end, old_v, new_v)) { // old_v = init_ids_end; // new_v = old_v + 1; // } // init_ids[old_v] = v_id; // } // //// for (idi i = 0; i < tmp_l; ++i) { //// is_visited[init_ids[i]] = true; //// } // // // If ep_'s neighbors are not enough, add other random vertices // idi tmp_id = ep_ + 1; // use tmp_id to replace rand(). 
// while (init_ids_end < L) { // tmp_id %= num_v_; // idi v_id = tmp_id++; // if (is_selected[v_id]) { // continue; // } //// if (visited_ids.find(id) != visited_ids.end()) { //// continue; //// } // is_selected[v_id] = 1; //// visited_ids.insert(id); // init_ids[init_ids_end++] = v_id; //// tmp_l++; // } //} } // namespace PANNS #endif //BATCH_SEARCHING_SEARCHING_H
androidfde_fmt_plug.c
/* androidfde.c
 *
 * hashkill - a hash cracking tool
 * Copyright (C) 2010 Milen Rangelov <gat3way@gat3way.eu>
 *
 * Modified for JtR and made stuff more generic
 * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* Standard John the Ripper plugin stanza: expose the format either as an
 * extern declaration, a registration call, or the full implementation. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_fde;
#elif FMT_REGISTERS_H
john_register_one(&fmt_fde);
#else

#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "os.h"
#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>
#include <string.h> /* NOTE(review): duplicate include of <string.h>; harmless but could be dropped */
#ifdef _OPENMP
static int omp_t = 1; /* thread multiplier applied to keys-per-crypt in init() */
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1
#endif
#endif
#include "arch.h"
#include "johnswap.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "memory.h"
#include "pbkdf2_hmac_sha1.h"
#include "aes.h"
#include "sha2.h"
#include "memdbg.h"

#define FORMAT_TAG "$fde$"
#define TAG_LENGTH (sizeof(FORMAT_TAG)-1)
#define FORMAT_LABEL "fde"
#define FORMAT_NAME "Android FDE"
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME " SHA256/AES"
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 SHA256/AES 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define PLAINTEXT_LENGTH 64
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define SALT_ALIGN sizeof(int)
#define SALT_SIZE sizeof(struct custom_salt)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif

/* Self-test vector: $fde$<saltlen>$<salt-hex>$<keysize>$<encrypted master key hex>$<1536 bytes of disk data, hex> */
static struct fmt_tests fde_tests[] = {
	{"$fde$16$04b36d4290b56e0fcca9778b74719ab8$16$b45f0f051f13f84872d1ef1abe0ada59$0f61d28f7466c0435040cc845a67e6734500de15df3ba6f48d2534ca2a7b8f910d7547357e8f1ec7364bab41383f5df9b5fb43fcd4a1e06189ce3c6ba77ec908b066e73a508e201c941fb409e9abdc051c3c052a735b01e56be61efa635e82cbceab18db1ba645b93f7befb83155852f0004a7c7d6800e9fa5f0d3c133dd2496f92110c3cdcfb16dcf57df8de830969e18514a34d4917de14597da19f9f7dc81eca2d7d461c91e0a8aeac06bafe89866d24f2b4991b4295b6277d0ff4ad97f1fa58e20f8a24e2062f84c318eb36cfbb4671117bc3522afcf7737353589cae0dce0d7c3341f457af654543758f3f005bd4d68fa2b35777cb2ea5f8f69c4debcfb1d8b2a601320e4f8621dc6e99434007388bdc0ceebc722f9ed44cbce3914bf144db332276e719f6b48108cde55916d861d19dc8c03ac76a2dad322457073111e441488228f13649073aa3aadfab51dadf89a0827acba284154a9e18d926facef43852a0733660a1fbcca8e81d2f41efd9f645a61f9395b75fc7ad446885d304808d511f2ba2e7c6138588c4292aee4ef6f2537bb00c7b015cee4a91d2defa87b67abc1315e71f0489e271673b36412377219e93aba6af3cfd504bf3f6bc24f2b6148536339d91ddd2f013314544650c1c11e7317028a7014909d0c850f78692e476c4f57da586fe26786504130aba22ba5261b989aeb47483d8cb9d5052120a4e5690b5b0cd009aadaadc351db7b6a230ebc1fa771651cb64d78daa56b7a6c6808db3b688afee9b7edaa617d8cb16ac7290465987bd443ea41ce38aa14e0c88874fb2707394b83679de82134efe351b4d021c63b2992a8314b2e93908906400628a7f753c9a4d85e917a207561b7840ce121800fab4026508d1b00fe8e7e756573743e11380f76f6bb7c0e528cb98875e6ad88bff51236601e6942964e37ffe0316b1a1f7bc0d84334fa024bf03c261bd06a07c01f099ad23fb9a1d8c98447463b8988cb33f3e1fb7d7a7c547f9a6d51cf7b75649d3c8cb5bf93be79eba1a961659b5fe928a1c7e80aca857825c6bc11493cb230e66126ef7b7284abe0823b5735bb1dfe844029f175c63442ca774784b775ecf02e48d029ac0f236813be91aca66905640666b89bd08118e3c18c75764bc49d00d1fe53ee92ccaa487852c613cba91f637b6de06dcaa1953a7cfb5333df573273a67f0157b63fbbf48c48f16c423caefaf29cdb5d34b19ac0f57b972b9e5ff1bc5cf25bdcdf8d29fb75865c4501458f19bfd64c844fd52a27feec97dc31ba922aea75706404d853071707d0c6001c59664676be6426ca5c7efbfc09ffa9acac91441f9175fd3148fb046c31a49d7c7ad10bf3c4b413dd148666b72b5a533f600cb02d7623270e5d1ad33355dd318d06aa8b3d7517cb7d5be40d222a026380cfbf5b79014e7631d677b07bcd805d9ea7103cf1d057bf883b29fb99b064c4e3cb4271596a74895c1c3f7c7c49d2be54b1435af4440ecd019dde11cee14a320712c9275bef339a15d3a18d9f38918d7af0a50a35199980429d74d4cc2a16dea619619a7c19827f4f78d3ebaf13340abf6717cec6bff8399b067fb17f11cdb1f9909c51253f7466ee769546d1d96319bcc1b04a6b1f8d8068f96b959d507c9004d75717792733fadb7a94a2d5db514a61cbd90eef89d1ace5a3138120168d62f1ebef5efbbd4e7f7e987834db81fe8c4877f3edcc71c61e96b20ca26c5a91e28fa11e484c1dcbfd5a0461065fe52f042ee9a09687d800c90a0a792f3dbe257965247f8eecd122b9b234b734454fa1477212a0295a347ae44463de4de405bf4fd91cde400b63d7fced6d7ccd20d79a4899139a79085f8742c3dfe7fbadca56c4e8aa95ce7841ad9675659349f6671d047efa0951feb9c61381f5f9e39182c1ec0a3ebd2ef5e036312c6ed6a0e59777813229ffdac771788e609c7d9f96848f63b428789c55e85c509068df8d5a0a7fc066be8c76205860d86d6c5bb7c2bc85a922a2ad86e6a791fe238420eedd1cf7ac770dd8316ca30c9577441a34873cdf0c5dc2103457a93fa0dd42da5eb2d6f82e9ff47b4bb6cd1d3fcba5645caace577a89c7bd70ff432f8dae113a7877a41a41043dac4c0d21860ad8198a1b9640d979322a20d4b90caa77a5d2b31c5bd06e", "strongpassword"},
	{NULL}
};

static char (*saved_key)[PLAINTEXT_LENGTH + 1]; /* candidate passwords, one per index */
static int *cracked;                            /* per-index crack flags, set by crypt_all */
static int max_cracked;                         /* size of cracked[] (max_keys_per_crypt) */

/* Parsed salt/ciphertext blob for one hash. */
static struct custom_salt {
	int loaded;
	unsigned char *cipherbuf;
	int keysize;
	int iterations; // NOTE, not used. Hard coded to 2000 for FDE from droid <= 4.3 (PBKDF2-sha1)
	int saltlen;
	unsigned char data[512 * 3]; /* first 3 disk sectors (encrypted filesystem header) */
	unsigned char salt[16];
	unsigned char mkey[64];      /* encrypted master key */
	unsigned char iv[16];
} *cur_salt;

/* Allocate per-candidate buffers; under OpenMP, scale keys-per-crypt by
 * thread count (and OMP_SCALE) as is conventional for JtR formats. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	max_cracked = self->params.max_keys_per_crypt;
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	cracked = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*cracked));
}

/* Release buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(cracked);
	MEM_FREE(saved_key);
}

/* Validate a "$fde$..." ciphertext line: tag, decimal salt length (<= 16),
 * hex salt, decimal key size (<= 64), hex key, and 1536 bytes of hex data.
 * Returns 1 if the line is well-formed, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr;
	int saltlen, keysize, extra;
	char *p;
	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += TAG_LENGTH;
	if ((p = strtokm(ctcopy, "$")) == NULL)
		goto err;
	if (!isdec(p))
		goto err;
	saltlen = atoi(p);
	if (saltlen > 16) /* saltlen */
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* salt */
		goto err;
	if (hexlenl(p, &extra) != saltlen * 2 || extra)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* keysize */
		goto err;
	if (!isdec(p))
		goto err;
	keysize = atoi(p);
	if (keysize > 64)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* key */
		goto err;
	if (hexlenl(p, &extra) != keysize * 2 || extra)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* data */
		goto err;
	if (hexlenl(p, &extra) != 512 * 3 * 2 || extra)
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}

/* Parse an already-validated ciphertext into a static custom_salt.
 * Hex fields are decoded with the atoi16[] nibble table. */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	// int res;
	int i;
	static struct custom_salt cs;
	memset(&cs, 0, sizeof(cs));
	ctcopy += TAG_LENGTH;
	p = strtokm(ctcopy, "$");
	cs.saltlen = atoi(p);
	p = strtokm(NULL, "$");
	for (i = 0; i < cs.saltlen; i++) {
		cs.salt[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	p = strtokm(NULL, "$");
	cs.keysize = atoi(p);
	p = strtokm(NULL, "$");
	for (i = 0; i < cs.keysize; i++) {
		cs.mkey[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	p = strtokm(NULL, "$");
	for (i = 0; i < 512 * 3; i++) {
		cs.data[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	MEM_FREE(keeptr);
	return (void *)&cs;
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

// Not reference implementation - this is modified for use by androidfde!
/* AES-CBC decrypt 'size' bytes of 'src' into 'dst' using an ESSIV-style IV:
 * the IV is AES-256-CBC(SHA256(key), sector-number) as computed below. */
static void AES_cbc_essiv(unsigned char *src, unsigned char *dst, unsigned char *key, int startsector,int size)
{
	AES_KEY aeskey;
	unsigned char essiv[16];
	unsigned char essivhash[32];
	SHA256_CTX ctx;
	unsigned char sectorbuf[16];
	unsigned char zeroiv[16];
	SHA256_Init(&ctx);
	SHA256_Update(&ctx, key, cur_salt->keysize);
	SHA256_Final(essivhash, &ctx);
	memset(sectorbuf,0,16);
	memset(zeroiv,0,16);
	memset(essiv,0,16);
	/* little-endian sector number in the first 4 bytes of the IV block;
	 * NOTE(review): assumes little-endian host byte layout of 'startsector' */
	memcpy(sectorbuf,&startsector,4);
	AES_set_encrypt_key(essivhash, 256, &aeskey);
	AES_cbc_encrypt(sectorbuf, essiv, 16, &aeskey, zeroiv, AES_ENCRYPT);
	AES_set_decrypt_key(key, cur_salt->keysize*8, &aeskey);
	AES_cbc_encrypt(src, dst, size, &aeskey, essiv, AES_DECRYPT);
}

// cracked[index] = hash_plugin_check_hash(saved_key[index]);
/* Test one candidate (or one SIMD group of candidates starting at 'index'):
 * derive keysize+16 bytes via PBKDF2-SHA1 (2000 iterations), use the tail 16
 * bytes as IV to AES-CBC-decrypt the master key, then ESSIV-decrypt sector 0
 * (FAT check) and sector 2 (ext2/3/4 superblock sanity check).
 * NOTE(review): could be declared static; it is only used in this file. */
void hash_plugin_check_hash(int index)
{
	unsigned char keycandidate2[255];
	unsigned char decrypted1[512]; // FAT
	unsigned char decrypted2[512]; // ext3/4
	AES_KEY aeskey;
	uint16_t v2,v3,v4;
	uint32_t v1,v5;
	int j = 0;
#ifdef SIMD_COEF_32
	unsigned char *keycandidate, Keycandidate[SSE_GROUP_SZ_SHA1][255];
	int lens[SSE_GROUP_SZ_SHA1], i;
	unsigned char *pin[SSE_GROUP_SZ_SHA1];
	union {
		uint32_t *pout[SSE_GROUP_SZ_SHA1];
		unsigned char *poutc;
	} x;
	for (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) {
		lens[i] = strlen(saved_key[index+i]);
		pin[i] = (unsigned char*)saved_key[index+i];
		x.pout[i] = (uint32_t*)(Keycandidate[i]);
	}
	pbkdf2_sha1_sse((const unsigned char **)pin, lens, cur_salt->salt, 16, 2000, &(x.poutc), cur_salt->keysize + 16, 0);
#else
	unsigned char keycandidate[255];
	char *password = saved_key[index];
	pbkdf2_sha1((const uint8_t*)password, strlen(password), (const uint8_t*)(cur_salt->salt), 16, 2000, keycandidate, cur_salt->keysize + 16, 0);
#endif
	j = 0;
	/* In the SIMD build the body below runs once per lane; the opening brace
	 * here is closed by the matching #ifdef block at the end. */
#ifdef SIMD_COEF_32
	for (; j < SSE_GROUP_SZ_SHA1; ++j) {
	keycandidate = Keycandidate[j];
#endif
	AES_set_decrypt_key(keycandidate, cur_salt->keysize*8, &aeskey);
	AES_cbc_encrypt(cur_salt->mkey, keycandidate2, 16, &aeskey, keycandidate+16, AES_DECRYPT);
	/* sector 0 (offset 0) and sector 2 (offset 1024) of the partition header */
	AES_cbc_essiv(cur_salt->data, decrypted1, keycandidate2,0,32);
	AES_cbc_essiv(cur_salt->data + 1024, decrypted2, keycandidate2,2,128);
	// Check for FAT
	if ((memcmp(decrypted1+3,"MSDOS5.0",8)==0))
		cracked[index+j] = 1;
	else { // Check for extfs
		memcpy(&v1,decrypted2+72,4);
		memcpy(&v2,decrypted2+0x3a,2);
		memcpy(&v3,decrypted2+0x3c,2);
		memcpy(&v4,decrypted2+0x4c,2);
		memcpy(&v5,decrypted2+0x48,4);
#if !ARCH_LITTLE_ENDIAN
		v1 = JOHNSWAP(v1);
		v2 = JOHNSWAP(v2);
		v3 = JOHNSWAP(v3);
		v4 = JOHNSWAP(v4);
		v5 = JOHNSWAP(v5);
#endif
		/* plausibility ranges for superblock fields */
		if ((v1<5)&&(v2<4)&&(v3<5)&&(v4<2)&&(v5<5))
			cracked[index+j] = 1;
	}
#ifdef SIMD_COEF_32
	}
#endif
}

/* Run all pending candidates against the current salt; parallelized over
 * SIMD-group-sized chunks with OpenMP. Returns the candidate count. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
	memset(cracked, 0, sizeof(cracked[0])*max_cracked);
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
	{
		hash_plugin_check_hash(index);
	}
	return count;
}

static int cmp_all(void *binary, int count)
{
	int index;
	for (index = 0; index < count; index++)
		if (cracked[index])
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Store a candidate password, truncated to PLAINTEXT_LENGTH. */
static void fde_set_key(char *key, int index)
{
	int saved_len = strlen(key);
	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

struct fmt_main fmt_fde = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
		{ NULL },
		{ FORMAT_TAG },
		fde_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		fde_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
GB_unaryop__abs_uint8_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com  See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__abs_uint8_uint64
// op(A') function: GB_tran__abs_uint8_uint64

// C type:   uint8_t
// A type:   uint64_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = aij
// (abs of an unsigned value is the identity, so the op reduces to the cast)

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT8 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise: Cx [p] = (uint8_t) Ax [p] for all p < anz, parallelized
// statically over 'nthreads' OpenMP threads.
GrB_Info GB_unop__abs_uint8_uint64
(
    uint8_t *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body is supplied by the textually-included template, which
// consumes the GB_* macros defined above.
GrB_Info GB_tran__abs_uint8_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
matrix.c
#include <math.h>
#include <stdlib.h>
#include <float.h>
#include "matrix.h"

/* Allocate a w x h float buffer for 'mat'; data is NULL if either
 * dimension is non-positive.
 * NOTE(review): malloc result is not checked — TODO confirm callers cope. */
void MatrixAllocate(Matrix *mat, int w, int h)
{
	mat->width = w;
	mat->height = h;
	if (w > 0 && h > 0)
		mat->data = (float*)malloc(w * h * sizeof(float));
	else
		mat->data = 0;
}

/* Read element (x, y); no bounds checking. */
float get(Matrix *mat, int x, int y)
{
	return mat->data[y * mat->width + x];
}

/* Write element (x, y); silently ignores out-of-range coordinates.
 * NOTE(review): 'inline' without 'static' relies on the compiler emitting
 * a definition (C99 inline semantics) — verify link behavior. */
inline void set(Matrix *mat, int x, int y, float v)
{
	if (x < 0 || y < 0 || x >= mat->width || y >= mat->height)
		return;
	else
		mat->data[y * mat->width + x] = v;
}

/* Allocate 'mat' and fill it from 8-bit pixel data, normalized to [0,1]. */
void LoadToMatrix(Matrix *mat, int width, int height, unsigned char *to)
{
	MatrixAllocate(mat, width, height);
	for (int i = 0; i < mat->width * mat->height; i++)
		mat->data[i] = (float)to[i] / 255.0f;
}

/* De-interleave 'buffer' into one normalized float plane per channel, then
 * pad each plane to even width/height by replicating the last row/column.
 * On return *width/*height are updated to the (possibly padded) sizes.
 * Caller owns the returned planes. */
float** channel_split(unsigned char* buffer, int *width, int *height, int num_channels)
{
	int i, num_frames = *width * *height;
	float **chan_buffers = (float**)malloc(num_channels * sizeof(float*));
	for (i = 0; i < num_channels; i++)
		chan_buffers[i] = (float*)malloc(*width * *height * sizeof(float));
	int samples = num_frames * num_channels;
	/* interleaved -> planar, normalizing bytes to [0,1] */
	for (i = 0; i < samples; i++)
		chan_buffers[(i % num_channels)][i / num_channels] = buffer[i] / 255.0f;
	float **chan_buffersFinal = (float**)malloc(num_channels * sizeof(float*));
	int inc1 = 0, inc2 = 0;
	if (*width % 2)
		inc1 = 1;
	if (*height % 2)
		inc2 = 1;
	for (i = 0; i < num_channels; i++)
		chan_buffersFinal[i] = (float*)calloc((*width + inc1) * (*height + inc2), sizeof(float));
	/* copy into the padded planes, replicating the last column/row as needed */
	for (int c = 0; c < num_channels; c++)
		for (int j = 0; j < *width + inc1; j++)
			for (i = 0; i < *height + inc2; i++) {
				if (j == *width && i == *height)
					chan_buffersFinal[c][i * (*width + inc1) + j] = chan_buffers[c][(i - 1) * *width + *width - 1];
				else if (j == *width && i != *height)
					chan_buffersFinal[c][i * (*width + inc1) + j] = chan_buffers[c][i * *width + *width - 1];
				else if (j != *width && i == *height)
					chan_buffersFinal[c][i * (*width + inc1) + j] = chan_buffers[c][(*height - 1) * *width + j];
				else
					chan_buffersFinal[c][i * (*width + inc1) + j] = chan_buffers[c][i * *width + j];
			}
	for (int c = 0; c < num_channels; c++)
		free(chan_buffers[c]);
	free(chan_buffers);
	if (*width % 2)
		*width = *width + 1;
	if (*height % 2)
		*height = *height + 1;
	return chan_buffersFinal;
}

/* Re-interleave float planes back into one 8-bit buffer, scaling by 255
 * and clamping to [0,255]. Caller owns the returned buffer. */
unsigned char* channel_join(float** chan_buffers, int num_frames, int num_channels)
{
	unsigned char *buffer = (unsigned char*)malloc(num_frames * num_channels * sizeof(unsigned char));
	for (int i = 0; i < num_frames * num_channels; i++) {
		float temp = chan_buffers[i % num_channels][i / num_channels] * 255.0f;
		if (temp > 255.0f)
			buffer[i] = 255;
		else if (temp < 0.0f)
			buffer[i] = 0;
		else
			buffer[i] = (unsigned char)temp;
	}
	return buffer;
}

#ifndef M_PI
#define M_PI 3.141592653589793
#endif

/* Fill 'kernel' (N x N, row-major) with a normalized 2-D Gaussian of the
 * given sigma, mirroring MATLAB's fspecial('gaussian'). */
void fspecial_gaussian(double *kernel, const int N, const double sigma)
{
	double mean = (double)(N - 1) / 2.0;
	double sum = 0.0; // For accumulating the kernel values
	for (int x = 0; x < N; ++x)
		for (int y = 0; y < N; ++y) {
			kernel[x * N + y] = exp(-0.5 * (pow((x - mean) / sigma, 2.0) + pow((y - mean) / sigma, 2.0))) / (2.0 * M_PI * sigma * sigma);
			sum += kernel[x * N + y];
		}
	/* normalize so the kernel sums to 1 */
	for (int x = 0; x < N*N; ++x)
		kernel[x] /= sum;
}

/*
Full: dstWidth = srcWidth + kernelWidth - 1 dstHeight = srcHeight + kernelHeight - 1
Same: dstWidth = srcWidth dstHeight = srcHeight
Valid: dstWidth = srcWidth - kernelWidth + 1; dstHeight = srcHeight - kernelHeight + 1;
Performance: Very low
Numerical accuracy: Very high
*/
/* 2-D convolution with MATLAB-style shape selection:
 * shape 0 = 'full', 1 = 'same', 2 = 'valid'. dst->data must already be
 * large enough for the selected shape; dst width/height are set here. */
void conv2(Matrix *src, Matrix *dst, Matrix *kernel, int shape)
{
	int src_cols = src->width;
	int src_rows = src->height;
	int kernel_cols = kernel->width;
	int kernel_rows = kernel->height;
	int edge_rows = 0, edge_cols = 0;
	int i, j, kernel_i, kernel_j, src_i, src_j;
	float *p_src = NULL;
	float *p_dst = NULL;
	float *p_kernel = NULL;
	float *p_dst_line_i = NULL;
	float *ptr_src_line_i = NULL;
	float *ptr_kernel_line_i = NULL;
	float sum;
	p_src = src->data;
	p_dst = dst->data;
	p_kernel = kernel->data;
	switch (shape) {
	case 0: /* full */
		dst->height = src_rows + kernel_rows - 1;
		dst->width = src_cols + kernel_cols - 1;
		edge_rows = kernel_rows - 1;
		edge_cols = kernel_cols - 1;
		break;
	case 1: /* same */
		dst->height = src_rows;
		dst->width = src_cols;
		edge_rows = (kernel_rows - 1) / 2;
		edge_cols = (kernel_cols - 1) / 2;
		break;
	case 2: /* valid */
		dst->height = src_rows - kernel_rows + 1;
		dst->width = src_cols - kernel_cols + 1;
		edge_rows = edge_cols = 0;
		break;
	}
	for (i = 0; i < dst->height; i++) {
		p_dst_line_i = (float*)(p_dst + dst->width * i);
		for (j = 0; j < dst->width; j++) {
			sum = 0.0f;
			/* start at the flipped-kernel position clipped to the image */
			kernel_i = kernel_rows - 1 - max(0, edge_rows - i);
			src_i = max(0, i - edge_rows);
			for (; (kernel_i >= 0) && (src_i < src_rows); kernel_i--, src_i++) {
				kernel_j = kernel_cols - 1 - max(0, edge_cols - j);
				src_j = max(0, j - edge_cols);
				ptr_src_line_i = (float*)(p_src + src_cols * src_i);
				ptr_kernel_line_i = (float*)(p_kernel + kernel_cols * kernel_i);
				ptr_src_line_i += src_j;
				ptr_kernel_line_i += kernel_j;
				for (; kernel_j >= 0 && src_j < src_cols; kernel_j--, src_j++)
					sum += *ptr_src_line_i++ * *ptr_kernel_line_i--;
			}
			p_dst_line_i[j] = sum;
		}
	}
}

/*
Matlab conv2 with 'same'
Performance: Middle - Low
Numerical accuracy: Very high
*/
/* Integer 'same'-shape 2-D convolution with a flipped kernel, using a
 * sliding input pointer; out-of-bound taps are treated as zero. */
void convolve2D(int32_t* in, int32_t* out, int width, int height, short* kernel, int kernelSizeX, int kernelSizeY)
{
	int i, j, m, n;
	int32_t *inPtr, *inPtr2, *outPtr;
	short *kPtr;
	int32_t acc;
	int kCenterX, kCenterY;
	int rowMin, rowMax; // to check boundary of input array
	int colMin, colMax;
	// find center position of kernel (half of kernel size)
	kCenterX = kernelSizeX >> 1;
	kCenterY = kernelSizeY >> 1;
	// init working pointers
	inPtr = inPtr2 = &in[width * kCenterY + kCenterX]; // note that it is shifted (kCenterX, kCenterY),
	outPtr = out;
	kPtr = kernel;
	// start convolution
	for (i = 0; i < height; ++i) // number of rows
	{
		// compute the range of convolution, the current row of kernel should be between these
		rowMax = i + kCenterY;
		rowMin = i - height + kCenterY;
		for (j = 0; j < width; ++j) // number of columns
		{
			// compute the range of convolution, the current column of kernel should be between these
			colMax = j + kCenterX;
			colMin = j - width + kCenterX;
			acc = 0; // set to 0 before accumulate (Clear dst value)
			// flip the kernel and traverse all the kernel values
			// multiply each kernel value with underlying input data
			for (m = 0; m < kernelSizeY; ++m) // kernel rows
			{
				// check if the index is out of bound of input array
				if (m <= rowMax && m > rowMin) {
					for (n = 0; n < kernelSizeX; ++n) {
						// check the boundary of array
						if (n <= colMax && n > colMin)
							acc += *(inPtr - n) * *kPtr;
						++kPtr; // next kernel
					}
				}
				else
					kPtr += kernelSizeX; // out of bound, move to next row of kernel
				inPtr -= width; // move input data 1 raw up
			}
			*outPtr = acc;
			kPtr = kernel; // reset kernel to (0,0)
			inPtr = ++inPtr2; // next input
			++outPtr; // next output
		}
	}
}

/* Variant of convolve2D that evaluates only every 'stride'-th row/column,
 * starting at 'offset', writing results contiguously to 'out'. */
void convolve2DStridedOffset(int32_t* in, int32_t* out, int width, int height, short* kernel, int kernelSizeX, int kernelSizeY, int stride, int offset)
{
	int i, j, m, n;
	int32_t *inPtr, *inPtr2, *outPtr;
	short *kPtr;
	int32_t acc;
	int kCenterX, kCenterY;
	int rowMin, rowMax; // to check boundary of input array
	int colMin, colMax;
	// find center position of kernel (half of kernel size)
	kCenterX = kernelSizeX >> 1;
	kCenterY = kernelSizeY >> 1;
	// init working pointers
	inPtr = inPtr2 = &in[width * kCenterY + kCenterX + width + offset]; // note that it is shifted (kCenterX, kCenterY),
	outPtr = out;
	kPtr = kernel;
	// start convolution
	for (i = offset; i < height; i += stride) // number of rows
	{
		// compute the range of convolution, the current row of kernel should be between these
		rowMax = i + kCenterY;
		rowMin = i - height + kCenterY;
		for (j = offset; j < width; j += stride) // number of columns
		{
			// compute the range of convolution, the current column of kernel should be between these
			colMax = j + kCenterX;
			colMin = j - width + kCenterX;
			acc = 0; // set to 0 before accumulate (Clear dst value)
			// flip the kernel and traverse all the kernel values
			// multiply each kernel value with underlying input data
			for (m = 0; m < kernelSizeY; ++m) // kernel rows
			{
				// check if the index is out of bound of input array
				if (m <= rowMax && m > rowMin) {
					for (n = 0; n < kernelSizeX; ++n) {
						// check the boundary of array
						if (n <= colMax && n > colMin)
							acc += *(inPtr - n) * *kPtr;
						++kPtr; // next kernel
					}
				}
				else
					kPtr += kernelSizeX; // out of bound, move to next row of kernel
				inPtr -= width; // move input data 1 raw up
			}
			*outPtr = acc;
			kPtr = kernel; // reset kernel to (0,0)
			inPtr2 += stride;
			inPtr = inPtr2; // next input
			++outPtr; // next output
		}
		inPtr2 += (width * (stride - 1));
		inPtr = inPtr2; // next input
	}
}

/* convolve2D specialized (unrolled constants) for a 3x3 kernel. */
void convolve2D3x3(int32_t* in, int32_t* out, const int width, int const height, short* kernel)
{
	int i, j, m, n;
	int32_t *inPtr, *inPtr2, *outPtr;
	short *kPtr;
	int32_t acc;
	int rowMin, rowMax;
	int colMin, colMax;
	inPtr = inPtr2 = &in[width + 1];
	outPtr = out;
	kPtr = kernel;
	for (i = 0; i < height; ++i) {
		rowMax = i + 1;
		rowMin = i - height + 1;
		for (j = 0; j < width; ++j) {
			colMax = j + 1;
			colMin = j - width + 1;
			acc = 0;
			for (m = 0; m < 3; ++m) {
				if (m <= rowMax && m > rowMin) {
					for (n = 0; n < 3; ++n) {
						if (n <= colMax && n > colMin)
							acc += *(inPtr - n) * *kPtr;
						++kPtr;
					}
				}
				else
					kPtr += 3;
				inPtr -= width;
			}
			*outPtr = acc;
			kPtr = kernel;
			inPtr = ++inPtr2;
			++outPtr;
		}
	}
}

/* Same as convolve2D3x3 but accumulates into 'out' (+=) instead of
 * overwriting it. */
void convolve2D3x3Acc(int32_t* in, int32_t* out, const int width, int const height, short* kernel)
{
	int i, j, m, n;
	int32_t *inPtr, *inPtr2, *outPtr;
	short *kPtr;
	int32_t acc;
	int rowMin, rowMax;
	int colMin, colMax;
	inPtr = inPtr2 = &in[width + 1];
	outPtr = out;
	kPtr = kernel;
	for (i = 0; i < height; ++i) {
		rowMax = i + 1;
		rowMin = i - height + 1;
		for (j = 0; j < width; ++j) {
			colMax = j + 1;
			colMin = j - width + 1;
			acc = 0;
			for (m = 0; m < 3; ++m) {
				if (m <= rowMax && m > rowMin) {
					for (n = 0; n < 3; ++n) {
						if (n <= colMax && n > colMin)
							acc += *(inPtr - n) * *kPtr;
						++kPtr;
					}
				}
				else
					kPtr += 3;
				inPtr -= width;
			}
			*outPtr += acc;
			kPtr = kernel;
			inPtr = ++inPtr2;
			++outPtr;
		}
	}
}

/*
Optimized 3x3 convolution
Performance: Very high
Numerical accuracy: Low
*/
#include <emmintrin.h>
/* SSE 3x3 convolution accumulating into 'out' (+=): scalar borders, a
 * 16-wide unrolled middle, and a 4-wide remainder loop.
 * NOTE(review): despite the OMP in the name there is no OpenMP pragma here
 * (the AVX variant below has one) — confirm whether that is intentional. */
void conv2DSSEOMP3x3(float* in, float* out, const int width, const int height, float* kernel)
{
	int begin = 1 + (width - 1) / 4 * 4;
	if (width % 4 != 0)
		begin -= 4;
	const int start = begin;
	int range = (start - 1) / 16 * 16 + 1;
	int y, x, j, i;
	for (y = 0; y < height; y++) {
		// left section from 0 column to 1
		for (x = 0; x < 1; x++)
			for (j = -1; j <= 1; j++)
				for (i = -1; i <= 1; i++)
					if (x + i > -1 && x + i<width && y + j>-1 && y + j < height)
						out[x + y * width] += kernel[(1 - i) + (1 - j)*3] * in[(x + i) + (y + j)*width];
		/* middle, 16 floats per iteration (4 SSE vectors) */
		for (x = 1; x < range; x += 16) {
			__m128 out_vec = _mm_loadu_ps(out + x + y * width);
			__m128 out_vec1 = _mm_loadu_ps(out + x + 4 + y * width);
			__m128 out_vec2 = _mm_loadu_ps(out + x + 8 + y * width);
			__m128 out_vec3 = _mm_loadu_ps(out + x + 12 + y * width);
			for (j = -1; j <= 1; j++) {
				if (y + j < 0 || y + j >= height)
					continue;
				for (i = -1; i <= 1; i++) {
					__m128 ker_vec = _mm_load1_ps(kernel + (1 - i) + (1 - j)*3);
					__m128 in_vec = _mm_loadu_ps(in + x + i + (y + j)*width);
					__m128 in_vec1 = _mm_loadu_ps(in + x + 4 + i + (y + j)*width);
					__m128 in_vec2 = _mm_loadu_ps(in + x + 8 + i + (y + j)*width);
					__m128 in_vec3 = _mm_loadu_ps(in + x + 12 + i + (y + j)*width);
					out_vec = _mm_add_ps(out_vec, _mm_mul_ps(ker_vec, in_vec));
					out_vec1 = _mm_add_ps(out_vec1, _mm_mul_ps(ker_vec, in_vec1));
					out_vec2 = _mm_add_ps(out_vec2, _mm_mul_ps(ker_vec, in_vec2));
					out_vec3 = _mm_add_ps(out_vec3, _mm_mul_ps(ker_vec, in_vec3));
				}
			}
			_mm_storeu_ps(out + x + y * width, out_vec);
			_mm_storeu_ps(out + x + 4 + y * width, out_vec1);
			_mm_storeu_ps(out + x + 8 + y * width, out_vec2);
			_mm_storeu_ps(out + x + 12 + y * width, out_vec3);
		}
		/* remainder, 4 floats per iteration */
		for (x = range; x < start; x += 4) {
			__m128 out_vec = _mm_loadu_ps(out + x + y * width);
			for (j = -1; j <= 1; j++) {
				if (y + j < 0 || y + j >= height)
					continue;
				for (i = -1; i <= 1; i++) {
					__m128 in_vec = _mm_loadu_ps(in + x + i + (y + j)*width);
					__m128 ker_vec = _mm_load1_ps(kernel + (1 - i) + (1 - j)*3);
					out_vec = _mm_add_ps(out_vec, _mm_mul_ps(ker_vec, in_vec));
				}
			}
			_mm_storeu_ps(out + x + y * width, out_vec);
		}
		// right section from the starting to the end of the matrix
		for (x = start; x < width; x++)
			for (j = -1; j <= 1; j++)
				for (i = -1; i <= 1; i++)
					if (x + i > -1 && x + i<width && y + j>-1 && y + j < height)
						out[x + y * width] += kernel[(1 - i) + (1 - j) * 3] * in[(x + i) + (y + j)*width];
	}
}

/* SSE convolution for arbitrary odd kernel sizes, accumulating into 'out';
 * same border/middle/remainder structure as conv2DSSEOMP3x3.
 * NOTE(review): 'stride' and 'kern_size' are computed but never used. */
void conv2DSSEFast(float* in, float* out, const int width, const int height, float* kernel, const int kernel_x, const int kernel_y)
{
	int stride = 2;
	// the x coordinate of the kernel's center
	const int kern_cent_X = (kernel_x - 1) / 2;
	// the y coordinate of the kernel's center
	const int kern_cent_Y = (kernel_y - 1) / 2;
	const int kern_size = kern_cent_Y * kern_cent_X;
	int offset = 0;
	if (kernel_x > 5 && kernel_x < 15)
		offset = 1;
	else if (kernel_x >= 15)
		offset = 2;
	int begin = kern_cent_X + (width - kern_cent_X) / 4 * 4 - (4 * offset);
	if (width % 4 != 0)
		begin -= 4;
	const int start = begin;
	int y;
	for (y = 0; y < height; y++) {
		// left section from 0 column to kern_cent_X
		for (int x = 0; x < kern_cent_X; x++) {
			for (int j = -kern_cent_Y; j <= kern_cent_Y; j++) {
				for (int i = -kern_cent_X; i <= kern_cent_X; i++) {
					if (x + i > -1 && x + i<width && y + j>-1 && y + j < height)
						out[x + y * width] += kernel[(kern_cent_X - i) + (kern_cent_Y - j)*kernel_x] * in[(x + i) + (y + j)*width];
				}
			}
		}
		for (int x = kern_cent_X; x < (start - kern_cent_X) / 16 * 16 + kern_cent_X; x += 16) {
			__m128 out_vec = _mm_loadu_ps(out + x + y * width);
			__m128 out_vec1 = _mm_loadu_ps(out + x + 4 + y * width);
			__m128 out_vec2 = _mm_loadu_ps(out + x + 8 + y * width);
			__m128 out_vec3 = _mm_loadu_ps(out + x + 12 + y * width);
			for (int j = -kern_cent_Y; j <= kern_cent_Y; j++) {
				if (y + j < 0 || y + j >= height)
					continue;
				for (int i = -kern_cent_X; i <= kern_cent_X; i++) {
					__m128 ker_vec = _mm_load1_ps(kernel + (kern_cent_X - i) + (kern_cent_Y - j)*kernel_x);
					__m128 in_vec = _mm_loadu_ps(in + x + i + (y + j)*width);
					__m128 in_vec1 = _mm_loadu_ps(in + x + 4 + i + (y + j)*width);
					__m128 in_vec2 = _mm_loadu_ps(in + x + 8 + i + (y + j)*width);
					__m128 in_vec3 = _mm_loadu_ps(in + x + 12 + i + (y + j)*width);
					out_vec = _mm_add_ps(out_vec, _mm_mul_ps(ker_vec, in_vec));
					out_vec1 = _mm_add_ps(out_vec1, _mm_mul_ps(ker_vec, in_vec1));
					out_vec2 = _mm_add_ps(out_vec2, _mm_mul_ps(ker_vec, in_vec2));
					out_vec3 = _mm_add_ps(out_vec3, _mm_mul_ps(ker_vec, in_vec3));
				}
			}
			_mm_storeu_ps(out + x + y * width, out_vec);
			_mm_storeu_ps(out + x + 4 + y * width, out_vec1);
			_mm_storeu_ps(out + x + 8 + y * width, out_vec2);
			_mm_storeu_ps(out + x + 12 + y * width, out_vec3);
		}
		for (int x = (start - kern_cent_X) / 16 * 16 + kern_cent_X; x < start; x += 4) {
			__m128 out_vec = _mm_loadu_ps(out + x + y * width);
			for (int j = -kern_cent_Y; j <= kern_cent_Y; j++) {
				if (y + j < 0 || y + j >= height) {
					continue;
				}
				for (int i = -kern_cent_X; i <= kern_cent_X; i++) {
					__m128 in_vec = _mm_loadu_ps(in + x + i + (y + j)*width);
					__m128 ker_vec = _mm_load1_ps(kernel + (kern_cent_X - i) + (kern_cent_Y - j)*kernel_x);
					out_vec = _mm_add_ps(out_vec, _mm_mul_ps(ker_vec, in_vec));
				}
			}
			_mm_storeu_ps(out + x + y * width, out_vec);
		}
		// right section from the starting to the end of the matrix
		for (int x = start; x < width; x++) {
			for (int j = -kern_cent_Y; j <= kern_cent_Y; j++) {
				for (int i = -kern_cent_X; i <= kern_cent_X; i++) {
					if (x + i > -1 && x + i<width && y + j>-1 && y + j < height)
						out[x + y * width] += kernel[(kern_cent_X - i) + (kern_cent_Y - j)*kernel_x] * in[(x + i) + (y + j)*width];
				}
			}
		}
	}
}

#include <immintrin.h>
/* AVX 3x3 convolution accumulating into 'out': OpenMP over rows, scalar
 * borders, 32-wide unrolled middle, 8-wide remainder. */
void conv2DAVXOMP3x3(float* in, float* out, const int width, const int height, float* kernel)
{
	int begin = 1 + (width - 1) / 8 * 8;
	if (width % 8 != 0)
		begin -= 8;
	const int start = begin;
	int range = (start - 1) / 32 * 32 + 1;
	int y, x, j, i;
#pragma omp parallel for firstprivate(in, out, kernel)
	for (y = 0; y < height; y++) {
		// left section from 0 column to 1
		for (x = 0; x < 1; x++)
			for (j = -1; j <= 1; j++)
				for (i = -1; i <= 1; i++)
					if (x + i > -1 && x + i<width && y + j>-1 && y + j < height)
						out[x + y * width] += kernel[(1 - i) + (1 - j) * 3] * in[(x + i) + (y + j)*width];
		/* middle, 32 floats per iteration (4 AVX vectors) */
		for (x = 1; x < range; x += 32) {
			__m256 out_vec = _mm256_loadu_ps(out + x + y * width);
			__m256 out_vec1 = _mm256_loadu_ps(out + x + 8 + y * width);
			__m256 out_vec2 = _mm256_loadu_ps(out + x + 16 + y * width);
			__m256 out_vec3 = _mm256_loadu_ps(out + x + 24 + y * width);
			for (j = -1; j <= 1; j++) {
				if (y + j < 0 || y + j >= height)
					continue;
				for (i = -1; i <= 1; i++) {
					__m256 ker_vec = _mm256_broadcast_ss(kernel + (1 - i) + (1 - j) * 3);
					__m256 in_vec = _mm256_loadu_ps(in + x + i + (y + j)*width);
					__m256 in_vec1 = _mm256_loadu_ps(in + x + 8 + i + (y + j)*width);
					__m256 in_vec2 = _mm256_loadu_ps(in + x + 16 + i + (y + j)*width);
					__m256 in_vec3 = _mm256_loadu_ps(in + x + 24 + i + (y + j)*width);
					out_vec = _mm256_add_ps(out_vec, _mm256_mul_ps(ker_vec, in_vec));
					out_vec1 = _mm256_add_ps(out_vec1, _mm256_mul_ps(ker_vec, in_vec1));
					out_vec2 = _mm256_add_ps(out_vec2, _mm256_mul_ps(ker_vec, in_vec2));
					out_vec3 = _mm256_add_ps(out_vec3, _mm256_mul_ps(ker_vec, in_vec3));
				}
			}
			_mm256_storeu_ps(out + x + y * width, out_vec);
			_mm256_storeu_ps(out + x + 8 + y * width, out_vec1);
			_mm256_storeu_ps(out + x + 16 + y * width, out_vec2);
			_mm256_storeu_ps(out + x + 24 + y * width, out_vec3);
		}
		/* remainder, 8 floats per iteration */
		for (x = range; x < start; x += 8) {
			__m256 out_vec = _mm256_loadu_ps(out + x + y * width);
			for (j = -1; j <= 1; j++) {
				if (y + j < 0 || y + j >= height)
					continue;
				for (i = -1; i <= 1; i++) {
					__m256 in_vec = _mm256_loadu_ps(in + x + i + (y + j)*width);
					__m256 ker_vec = _mm256_broadcast_ss(kernel + (1 - i) + (1 - j) * 3);
					out_vec = _mm256_add_ps(out_vec, _mm256_mul_ps(ker_vec, in_vec));
				}
			}
			_mm256_storeu_ps(out + x + y * width, out_vec);
		}
		// right section from the starting to the end of the matrix
		for (x = start; x < width; x++)
			for (j = -1; j <= 1; j++)
				for (i = -1; i <= 1; i++)
					if (x + i > -1 && x + i<width && y + j>-1 && y + j < height)
						out[x + y * width] += kernel[(1 - i) + (1 - j) * 3] * in[(x + i) + (y + j)*width];
	}
}

/* AVX convolution for arbitrary odd kernel sizes (definition continues
 * beyond this chunk of the file). */
void conv2DAVXFast(float* in, float* out, const int width, const int height, float* kernel, const int kernel_x, const int kernel_y)
{
	// the x coordinate of the kernel's center
	const int kern_cent_X = (kernel_x - 1) / 2;
	// the y coordinate of the kernel's center
	const int kern_cent_Y = (kernel_y - 1) / 2;
	const int kern_size = kern_cent_Y * kern_cent_X;
	int offset = 0;
	if (kernel_x > 9 && kernel_x < 35)
		offset = 1;
	else if (kernel_x >= 35)
		offset = 2;
	int begin = kern_cent_X + (width - kern_cent_X) / 8 * 8 - (8 * offset);
	if (width % 8 != 0)
		begin -= 8;
	const int start = begin;
	int y;
#pragma omp parallel for firstprivate(in, out, kernel)
	for (y = 0; y < height; y++) {
		// left section from 0 column to kern_cent_X
		for (int x = 0; x < kern_cent_X; x++) {
			for (int j = -kern_cent_Y; j <= kern_cent_Y; j++) {
				for (int i = -kern_cent_X; i <= kern_cent_X; i++) {
					if (x + i > -1 && x + i<width && y + j>-1 && y + j < height)
						out[x + y * width] += kernel[(kern_cent_X - i) + (kern_cent_Y - j)*kernel_x] * in[(x + i) + (y + j)*width];
				}
			}
		}
		for (int x = kern_cent_X; x < (start - kern_cent_X) / 32 * 32 + kern_cent_X; x += 32) {
			__m256 out_vec = _mm256_loadu_ps(out + x + y * width);
			__m256 out_vec1 = _mm256_loadu_ps(out + x + 8 + y * width);
			__m256 out_vec2 = _mm256_loadu_ps(out + x + 16 + y * width);
			__m256 out_vec3 = _mm256_loadu_ps(out + x + 24 + y * width);
			for (int j = -kern_cent_Y; j <= kern_cent_Y; j++) {
				if (y + j < 0 || y + j >= height)
					continue;
				for (int i = -kern_cent_X; i <= kern_cent_X; i++) {
					__m256 ker_vec = _mm256_broadcast_ss(kernel + (kern_cent_X - i) + (kern_cent_Y - j)*kernel_x);
					__m256 in_vec = _mm256_loadu_ps(in + x + i + (y + j)*width);
					__m256 in_vec1 = _mm256_loadu_ps(in + x + 8 + i + (y + j)*width);
					__m256 in_vec2 = _mm256_loadu_ps(in + x + 16 + i + (y + j)*width);
					__m256 in_vec3 = _mm256_loadu_ps(in
+ x + 24 + i + (y + j)*width); out_vec = _mm256_add_ps(out_vec, _mm256_mul_ps(ker_vec, in_vec)); out_vec1 = _mm256_add_ps(out_vec1, _mm256_mul_ps(ker_vec, in_vec1)); out_vec2 = _mm256_add_ps(out_vec2, _mm256_mul_ps(ker_vec, in_vec2)); out_vec3 = _mm256_add_ps(out_vec3, _mm256_mul_ps(ker_vec, in_vec3)); } } _mm256_storeu_ps(out + x + y * width, out_vec); _mm256_storeu_ps(out + x + 8 + y * width, out_vec1); _mm256_storeu_ps(out + x + 16 + y * width, out_vec2); _mm256_storeu_ps(out + x + 24 + y * width, out_vec3); } for (int x = (start - kern_cent_X) / 32 * 32 + kern_cent_X; x < start; x += 8) { __m256 out_vec = _mm256_loadu_ps(out + x + y * width); for (int j = -kern_cent_Y; j <= kern_cent_Y; j++) { if (y + j < 0 || y + j >= height) { continue; } for (int i = -kern_cent_X; i <= kern_cent_X; i++) { __m256 in_vec = _mm256_loadu_ps(in + x + i + (y + j)*width); __m256 ker_vec = _mm256_broadcast_ss(kernel + (kern_cent_X - i) + (kern_cent_Y - j)*kernel_x); out_vec = _mm256_add_ps(out_vec, _mm256_mul_ps(ker_vec, in_vec)); } } _mm256_storeu_ps(out + x + y * width, out_vec); } // right section from the starting to the end of the matrix for (int x = start; x < width; x++) { for (int j = -kern_cent_Y; j <= kern_cent_Y; j++) { for (int i = -kern_cent_X; i <= kern_cent_X; i++) { if (x + i > -1 && x + i<width && y + j>-1 && y + j < height) out[x + y * width] += kernel[(kern_cent_X - i) + (kern_cent_Y - j)*kernel_x] * in[(x + i) + (y + j)*width]; } } } } } void maxpooling2D(float *map, float *y, int w, int h, int k_w, int k_h, int s_w, int s_h) { int out_row = (w - k_h) / s_h + 1; int out_col = (h - k_w) / s_w + 1; for (int i = 0; i < out_col; i++) for (int j = 0; j < out_row; j++) { int start_x = j * s_w; int start_y = i * s_h; float maxVal = -FLT_MAX; for (int ii = 0; ii < k_w; ii++) for (int jj = 0; jj < k_h; jj++) { if (map[(start_y + jj) * w + (start_x + ii)] > maxVal) maxVal = map[(start_y + jj) * w + (start_x + ii)]; } y[i * out_row + j] = maxVal; } } void 
maxpooling2DLayer(float *x, float *y, int c, int w, int h, int poolSize, int stride, int outW, int outH) { for (int s = 0; s < c; s++) maxpooling2D(&x[s * w * h], &y[s * outW * outH], w, h, poolSize, poolSize, stride, stride); } void avgpooling2D(int32_t *map, int32_t *y, int w, int h, int k_w, int k_h, int s_w, int s_h) { int out_row = (w - k_h) / s_h + 1; int out_col = (h - k_w) / s_w + 1; int32_t scale = sdiv(1, k_w * k_h); for (int i = 0; i < out_col; i++) for (int j = 0; j < out_row; j++) { int start_x = j * s_w; int start_y = i * s_h; int32_t maxVal = 0; for (int ii = 0; ii < k_w; ii++) for (int jj = 0; jj < k_h; jj++) { maxVal += map[(start_y + jj) * w + (start_x + ii)]; } //printf("%1.6f\n", sround11(maxVal * scale) / 2047.0f); y[i * out_row + j] = sround11(maxVal * scale); } }
row_wise_v2.c
#include<stdio.h> #include<string.h> #include <stdlib.h> #include<mpi.h> #include<omp.h> #include<time.h> //macros #define ALPHABET_LENGTH 4 #define max(x,y) ((x)>(y)?(x):(y)) //global variables char *string_A; char *string_B; char *unique_chars_C; //unique alphabets int c_len; int *P_Matrix; int *DP_Results; //to store the DP values int *dp_prev_row; //function prototypes int get_index_of_character(char *str,char x, int len); void print_matrix(int **x, int row, int col); void calc_P_matrix_v2(int *P, char *b, int len_b, char *c, int len_c, int myrank, int chunk_size); int lcs_yang_v2(int *DP,int *prev_row, int *P, char *A, char *B, char *C, int m, int n, int u, int myrank, int chunk_size); int lcs(int **DP, char *A, char *B, int m, int n); int get_index_of_character(char *str,char x, int len) { for(int i=0;i<len;i++) { if(str[i]== x) { return i; } } return -1;//not found the character x in str } void print_matrix(int **x, int row, int col) { for(int i=0;i<row;i++) { for(int j=0;j<col;j++) { printf("%d ",x[i][j]); } printf("\n"); } } void calc_P_matrix_v2(int *P, char *b, int len_b, char *c, int len_c, int myrank, int chunk_size) { char receive_array_for_scatter_c[chunk_size]; int receive_array_for_scatter_p[chunk_size*(len_b+1)]; //Scatter the char array chunks by sending each process a particular chunk MPI_Scatter(c, chunk_size, MPI_CHAR,&receive_array_for_scatter_c,chunk_size,MPI_CHAR, 0, MPI_COMM_WORLD); //Scatter the char array chunks by sending each process a particular chunk MPI_Scatter(P, chunk_size*(len_b+1),MPI_INT ,&receive_array_for_scatter_p,chunk_size*(len_b+1),MPI_INT, 0, MPI_COMM_WORLD); // Broadcast the whole b array to everybody MPI_Bcast(b, len_b, MPI_CHAR, 0, MPI_COMM_WORLD); #pragma omp parallel for for(int i=0;i<chunk_size;i++) { for(int j=1;j<len_b+1;j++) { if(b[j-1]==receive_array_for_scatter_c[i]) { receive_array_for_scatter_p[(i*(len_b+1))+j] = j; } else { receive_array_for_scatter_p[(i*(len_b+1))+j] = 
receive_array_for_scatter_p[(i*(len_b+1))+j-1]; } } } //now gather all the calculated values of P matrix in process 0 MPI_Gather(receive_array_for_scatter_p, chunk_size*(len_b+1),MPI_INT , P, chunk_size*(len_b+1), MPI_INT, 0, MPI_COMM_WORLD); } int lcs_yang_v2(int *DP, int *prev_row, int *P, char *A, char *B, char *C, int m, int n, int u, int myrank, int chunk_size) { MPI_Bcast(P, (u*(n+1)),MPI_INT , 0, MPI_COMM_WORLD); for(int i=1;i<m+1;i++) { int c_i = get_index_of_character(C,A[i-1],u); int dp_i_receive[chunk_size]; MPI_Scatter(DP, chunk_size, MPI_INT ,&dp_i_receive,chunk_size,MPI_INT, 0, MPI_COMM_WORLD); int start_id = (myrank * chunk_size); int end_id = (myrank * chunk_size) + chunk_size; // printf("rank %d, start: %d, end: %d\n",myrank, start_id, end_id); int t,s; #pragma omp parallel for private(t,s) schedule(static) for(int j=start_id;j<end_id;j++) { if(j==start_id && myrank==0)j=j+1; t= (0-P[(c_i*(n+1))+j])<0; s= (0 - (prev_row[j] - (t*prev_row[P[(c_i*(n+1))+j]-1]) )); dp_i_receive[j-start_id] = ((t^1)||(s^0))*(prev_row[j]) + (!((t^1)||(s^0)))*(prev_row[P[(c_i*(n+1))+j]-1] + 1); } //now gather all the calculated values of P matrix in process 0 MPI_Allgather(dp_i_receive, chunk_size,MPI_INT ,DP, chunk_size, MPI_INT, MPI_COMM_WORLD); #pragma omp parallel for schedule(static) for(int j=0;j<(n+1);j++){ prev_row[j] = DP[j]; } } return DP[n]; } int lcs(int **DP, char *A, char *B, int m, int n) { // printf("%s %d \n%s %d\n",A,m,B,n ); //print_matrix(DP,m+1,n+1); for(int i=1;i<(m+1);i++) { for(int j=1;j<(n+1);j++) { // if(i==0 || j==0) // { // DP[i][j]=0; // } if(A[i-1] == B[j-1]) { DP[i][j] = DP[i-1][j-1] + 1; } else { DP[i][j] = max(DP[i-1][j],DP[i][j-1]); } } } return DP[m][n]; } int main(int argc, char *argv[]) { if(argc <= 1){ printf("Error: No input file specified! 
Please specify the input file, and run again!\n"); return 0; } int my_rank; int num_procs; int chunk_size_p,chunk_size_dp;//chunk_size for P matrix and DP matrix int res; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); //grab this process's rank MPI_Comm_size(MPI_COMM_WORLD, &num_procs); //grab the total num of processes FILE *fp; int len_a,len_b; double start_time,stop_time,start_time_yang,stop_time_yang; if(my_rank == 0)printf("\nYour input file: %s \n",argv[1]); fp = fopen(argv[1], "r"); fscanf(fp, "%d %d %d", &len_a, &len_b, &c_len); string_A = (char *)malloc((len_a+1) * sizeof(char *)); string_B = (char *)malloc((len_b+1) * sizeof(char *)); unique_chars_C = (char *)malloc((c_len+1) * sizeof(char *)); fscanf(fp, "%s %s %s", string_A,string_B,unique_chars_C); chunk_size_p = (c_len/num_procs); chunk_size_dp = ((len_b+1)/num_procs); if(my_rank==0) { printf("chunk_p: %d chunk_dp: %d procs: %d\n",chunk_size_p,chunk_size_dp,num_procs); } DP_Results = (int *)malloc((len_b+1) * sizeof(int)); dp_prev_row = (int *)malloc((len_b+1) * sizeof(int)); P_Matrix = (int *)malloc((c_len*(len_b+1)) * sizeof(int)); if(my_rank ==0) { // start_time = MPI_Wtime(); // printf("lcs is: %d\n",lcs(DP_Results,string_A,string_B,len_a,len_b)); // stop_time = MPI_Wtime(); // printf("time taken by normal algorithm is: %lf\n",stop_time-start_time); } /* for(int k=0;k<len_a+1;k++) { for(int l=0;l<len_b+1;l++) { DP_Results[k][l]=0; } } */ start_time_yang = MPI_Wtime(); calc_P_matrix_v2(P_Matrix,string_B,len_b,unique_chars_C,c_len, my_rank, chunk_size_p); res = lcs_yang_v2(DP_Results, dp_prev_row, P_Matrix,string_A,string_B,unique_chars_C,len_a,len_b,c_len,my_rank, chunk_size_dp); stop_time_yang = MPI_Wtime(); if(my_rank == 0) { printf("lcs_yang_v2 is: %d\n",res); printf("time taken for lcs_yang_v2 is: %lf\n",stop_time_yang-start_time_yang); } //deallocate pointers free(P_Matrix); free(DP_Results); // Shutdown MPI (important - don't forget!) MPI_Finalize(); return 0; }
flops_AVX.h
/* flops_AVX.h - AVX Benchmarks * * Author : Alexander J. Yee * Date Created : 10/21/2011 * Last Modified : 01/25/2012 * * * * And of course... The typical copyright stuff... * * Redistribution of this program in both source or binary, regardless of * form, with or without modification is permitted as long as the following * conditions are met: * 1. This copyright notice is maintained either inline in the source * or distributed with the binary. * 2. A list of all contributing authors along with their contributions * is included either inline in the source or distributed with the * binary. * 3. The following disclaimer is maintained either inline in the * source or distributed with the binary. * * Disclaimer: * This software is provided "as is", without any guarantee made to its * suitability or fitness for any particular use. It may contain bugs so use * of this program is at your own risk. I take no responsibility for any * damage that may unintentionally be caused through its use. */ #ifndef _AVX_h #define _AVX_h #include <immintrin.h> #include "flops.h" //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// double test_dp_add_AVX_internal(double x, double y, size_t iterations){ register __m256d r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, rA, rB; r0 = _mm256_set1_pd(x); r1 = _mm256_set1_pd(y); r8 = _mm256_set1_pd(-0.0); r9 = _mm256_set1_pd(0.5); r2 = _mm256_xor_pd(r0, r8); r3 = _mm256_or_pd(r0, r8); r4 = _mm256_andnot_pd(r8, r0); r5 = _mm256_mul_pd(r1, r9); r6 = _mm256_add_pd(r1, r9); r7 = _mm256_sub_pd(r1, r9); r8 = _mm256_add_pd(r0, _mm256_set1_pd(2.3)); r9 = _mm256_sub_pd(r1, _mm256_set1_pd(2.3)); uint64 iMASK = 0x800fffffffffffffull; __m256d MASK = _mm256_set1_pd(*(double*)&iMASK); __m256d vONE = 
_mm256_set1_pd(1.0); rA = _mm256_set1_pd(0.1); rB = _mm256_set1_pd(0.1001); // wclk start = wclk_now(); size_t c = 0; while (c < iterations){ size_t i = 0; while (i < 1000){ r0 = _mm256_add_pd(r0, rA); r1 = _mm256_add_pd(r1, rA); r2 = _mm256_add_pd(r2, rA); r3 = _mm256_add_pd(r3, rA); r4 = _mm256_add_pd(r4, rA); r5 = _mm256_add_pd(r5, rA); r6 = _mm256_add_pd(r6, rA); r7 = _mm256_add_pd(r7, rA); r8 = _mm256_add_pd(r8, rA); r9 = _mm256_add_pd(r9, rA); r0 = _mm256_sub_pd(r0, rB); r1 = _mm256_sub_pd(r1, rB); r2 = _mm256_sub_pd(r2, rB); r3 = _mm256_sub_pd(r3, rB); r4 = _mm256_sub_pd(r4, rB); r5 = _mm256_sub_pd(r5, rB); r6 = _mm256_sub_pd(r6, rB); r7 = _mm256_sub_pd(r7, rB); r8 = _mm256_sub_pd(r8, rB); r9 = _mm256_sub_pd(r9, rB); //r8 = _mm256_add_pd(r0, r1); //r9 = _mm256_add_pd(r2, r3); //rA = _mm256_add_pd(r4, r5); //rB = _mm256_add_pd(r6, r7); //r0 = _mm256_sub_pd(r0, r4); //r1 = _mm256_sub_pd(r1, r5); //r2 = _mm256_sub_pd(r2, r6); //r3 = _mm256_sub_pd(r3, r7); //r4 = _mm256_add_pd(r4, r8); //r5 = _mm256_add_pd(r5, r9); //r6 = _mm256_add_pd(r6, rA); //r7 = _mm256_add_pd(r7, rB); i++; } //print(r0); //print(r1); //print(r2); //print(r3); //print(r4); //print(r5); //print(r6); //print(r7); //cout << endl; r0 = _mm256_and_pd(r0, MASK); r1 = _mm256_and_pd(r1, MASK); r2 = _mm256_and_pd(r2, MASK); r3 = _mm256_and_pd(r3, MASK); r4 = _mm256_and_pd(r4, MASK); r5 = _mm256_and_pd(r5, MASK); r6 = _mm256_and_pd(r6, MASK); r7 = _mm256_and_pd(r7, MASK); r8 = _mm256_and_pd(r8, MASK); r9 = _mm256_and_pd(r9, MASK); r0 = _mm256_or_pd(r0, vONE); r1 = _mm256_or_pd(r1, vONE); r2 = _mm256_or_pd(r2, vONE); r3 = _mm256_or_pd(r3, vONE); r4 = _mm256_or_pd(r4, vONE); r5 = _mm256_or_pd(r5, vONE); r6 = _mm256_or_pd(r6, vONE); r7 = _mm256_or_pd(r7, vONE); r8 = _mm256_or_pd(r8, vONE); r9 = _mm256_or_pd(r9, vONE); c++; } // wclk end = wclk_now(); // double secs = wclk_secs_since(start); // uint64 ops = 12 * 1000 * c * 4; // cout << "Seconds = " << secs << endl; // cout << "FP Ops = " << ops << endl; 
// cout << "FLOPs = " << ops / secs << endl; r0 = _mm256_add_pd(r0, r1); r2 = _mm256_add_pd(r2, r3); r4 = _mm256_add_pd(r4, r5); r6 = _mm256_add_pd(r6, r7); r8 = _mm256_add_pd(r8, r9); r0 = _mm256_add_pd(r0, r2); r4 = _mm256_add_pd(r4, r6); r0 = _mm256_add_pd(r0, r4); r0 = _mm256_add_pd(r0, r8); double out = 0; __m256d tmp = r0; out += ((double*)&tmp)[0]; out += ((double*)&tmp)[1]; out += ((double*)&tmp)[2]; out += ((double*)&tmp)[3]; return out; } void test_dp_add_AVX(int tds, size_t iterations){ printf("Testing AVX Add:\n"); double *sum = (double*)malloc(tds * sizeof(double)); wclk start = wclk_now(); #pragma omp parallel num_threads(tds) { double ret = test_dp_add_AVX_internal(1.1, 2.1, iterations); sum[omp_get_thread_num()] = ret; } double secs = wclk_secs_since(start); uint64 ops = 20 * 1000 * iterations * tds * 4; printf("Seconds = %g\n", secs); printf("FP Ops = %llu\n", (unsigned long long)ops); printf("FLOPs = %g\n", ops / secs); double out = 0; int c = 0; while (c < tds){ out += sum[c++]; } printf("sum = %g\n\n", out); free(sum); } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// double test_dp_mul_AVX_internal(double x, double y, size_t iterations){ register __m256d r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, rA, rB; r0 = _mm256_set1_pd(x); r1 = _mm256_set1_pd(y); r8 = _mm256_set1_pd(-0.0); r2 = _mm256_xor_pd(r0, r8); r3 = _mm256_or_pd(r0, r8); r4 = _mm256_andnot_pd(r8, r0); r5 = _mm256_mul_pd(r1, _mm256_set1_pd(0.37796447300922722721)); r6 = _mm256_mul_pd(r1, _mm256_set1_pd(0.24253562503633297352)); r7 = _mm256_mul_pd(r1, _mm256_set1_pd(4.1231056256176605498)); r8 = _mm256_add_pd(r0, _mm256_set1_pd(2.3)); r9 = _mm256_sub_pd(r1, _mm256_set1_pd(2.3)); // r8 = 
_mm256_set1_pd(1.4142135623730950488); // r9 = _mm256_set1_pd(1.7320508075688772935); // rA = _mm256_set1_pd(0.57735026918962576451); // rB = _mm256_set1_pd(0.70710678118654752440); rA = _mm256_set1_pd(1.4142135623730950488); rB = _mm256_set1_pd(0.70710678118654752440); uint64 iMASK = 0x800fffffffffffffull; __m256d MASK = _mm256_set1_pd(*(double*)&iMASK); __m256d vONE = _mm256_set1_pd(1.0); size_t c = 0; while (c < iterations){ size_t i = 0; while (i < 1000){ r0 = _mm256_mul_pd(r0, rA); r1 = _mm256_mul_pd(r1, rA); r2 = _mm256_mul_pd(r2, rA); r3 = _mm256_mul_pd(r3, rA); r4 = _mm256_mul_pd(r4, rA); r5 = _mm256_mul_pd(r5, rA); r6 = _mm256_mul_pd(r6, rA); r7 = _mm256_mul_pd(r7, rA); r8 = _mm256_mul_pd(r8, rA); r9 = _mm256_mul_pd(r9, rA); r0 = _mm256_mul_pd(r0, rB); r1 = _mm256_mul_pd(r1, rB); r2 = _mm256_mul_pd(r2, rB); r3 = _mm256_mul_pd(r3, rB); r4 = _mm256_mul_pd(r4, rB); r5 = _mm256_mul_pd(r5, rB); r6 = _mm256_mul_pd(r6, rB); r7 = _mm256_mul_pd(r7, rB); r8 = _mm256_mul_pd(r8, rB); r9 = _mm256_mul_pd(r9, rB); i++; } //print(r0); //print(r1); //print(r2); //print(r3); //print(r4); //print(r5); //print(r6); //print(r7); //cout << endl; r0 = _mm256_and_pd(r0, MASK); r1 = _mm256_and_pd(r1, MASK); r2 = _mm256_and_pd(r2, MASK); r3 = _mm256_and_pd(r3, MASK); r4 = _mm256_and_pd(r4, MASK); r5 = _mm256_and_pd(r5, MASK); r6 = _mm256_and_pd(r6, MASK); r7 = _mm256_and_pd(r7, MASK); r8 = _mm256_and_pd(r8, MASK); r9 = _mm256_and_pd(r9, MASK); r0 = _mm256_or_pd(r0, vONE); r1 = _mm256_or_pd(r1, vONE); r2 = _mm256_or_pd(r2, vONE); r3 = _mm256_or_pd(r3, vONE); r4 = _mm256_or_pd(r4, vONE); r5 = _mm256_or_pd(r5, vONE); r6 = _mm256_or_pd(r6, vONE); r7 = _mm256_or_pd(r7, vONE); r8 = _mm256_or_pd(r8, vONE); r9 = _mm256_or_pd(r9, vONE); c++; } // wclk end = wclk_now(); // double secs = wclk_secs_since(start); // uint64 ops = 12 * 1000 * c * 2; // cout << "Seconds = " << secs << endl; // cout << "FP Ops = " << ops << endl; // cout << "FLOPs = " << ops / secs << endl; r0 = _mm256_add_pd(r0, 
r1); r2 = _mm256_add_pd(r2, r3); r4 = _mm256_add_pd(r4, r5); r6 = _mm256_add_pd(r6, r7); r8 = _mm256_add_pd(r8, r9); r0 = _mm256_add_pd(r0, r2); r4 = _mm256_add_pd(r4, r6); r0 = _mm256_add_pd(r0, r4); r0 = _mm256_add_pd(r0, r8); double out = 0; __m256d tmp = r0; out += ((double*)&tmp)[0]; out += ((double*)&tmp)[1]; out += ((double*)&tmp)[2]; out += ((double*)&tmp)[3]; return out; } void test_dp_mul_AVX(int tds, size_t iterations){ printf("Testing AVX Mul:\n"); double *sum = (double*)malloc(tds * sizeof(double)); wclk start = wclk_now(); #pragma omp parallel num_threads(tds) { double ret = test_dp_mul_AVX_internal(1.1, 2.1, iterations); sum[omp_get_thread_num()] = ret; } double secs = wclk_secs_since(start); uint64 ops = 20 * 1000 * iterations * tds * 4; printf("Seconds = %g\n", secs); printf("FP Ops = %llu\n", (unsigned long long)ops); printf("FLOPs = %g\n", ops / secs); double out = 0; int c = 0; while (c < tds){ out += sum[c++]; } printf("sum = %g\n\n", out); free(sum); } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// double test_dp_mac_AVX_internal(double x, double y, size_t iterations){ register __m256d r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, rA, rB, rC, rD, rE, rF; r0 = _mm256_set1_pd(x); r1 = _mm256_set1_pd(y); r8 = _mm256_set1_pd(-0.0); r2 = _mm256_xor_pd(r0, r8); r3 = _mm256_or_pd(r0, r8); r4 = _mm256_andnot_pd(r8, r0); r5 = _mm256_mul_pd(r1, _mm256_set1_pd(0.37796447300922722721)); r6 = _mm256_mul_pd(r1, _mm256_set1_pd(0.24253562503633297352)); r7 = _mm256_mul_pd(r1, _mm256_set1_pd(4.1231056256176605498)); r8 = _mm256_add_pd(r0, _mm256_set1_pd(0.37796447300922722721)); r9 = _mm256_add_pd(r1, _mm256_set1_pd(0.24253562503633297352)); rA = _mm256_sub_pd(r0, 
_mm256_set1_pd(4.1231056256176605498)); rB = _mm256_sub_pd(r1, _mm256_set1_pd(4.1231056256176605498)); rC = _mm256_set1_pd(1.4142135623730950488); rD = _mm256_set1_pd(1.7320508075688772935); rE = _mm256_set1_pd(0.57735026918962576451); rF = _mm256_set1_pd(0.70710678118654752440); uint64 iMASK = 0x800fffffffffffffull; __m256d MASK = _mm256_set1_pd(*(double*)&iMASK); __m256d vONE = _mm256_set1_pd(1.0); size_t c = 0; while (c < iterations){ size_t i = 0; while (i < 1000){ r0 = _mm256_mul_pd(r0, rC); r1 = _mm256_add_pd(r1, rD); r2 = _mm256_mul_pd(r2, rE); r3 = _mm256_sub_pd(r3, rF); r4 = _mm256_mul_pd(r4, rC); r5 = _mm256_add_pd(r5, rD); r6 = _mm256_mul_pd(r6, rE); r7 = _mm256_sub_pd(r7, rF); r8 = _mm256_mul_pd(r8, rC); r9 = _mm256_add_pd(r9, rD); rA = _mm256_mul_pd(rA, rE); rB = _mm256_sub_pd(rB, rF); r0 = _mm256_add_pd(r0, rF); r1 = _mm256_mul_pd(r1, rE); r2 = _mm256_sub_pd(r2, rD); r3 = _mm256_mul_pd(r3, rC); r4 = _mm256_add_pd(r4, rF); r5 = _mm256_mul_pd(r5, rE); r6 = _mm256_sub_pd(r6, rD); r7 = _mm256_mul_pd(r7, rC); r8 = _mm256_add_pd(r8, rF); r9 = _mm256_mul_pd(r9, rE); rA = _mm256_sub_pd(rA, rD); rB = _mm256_mul_pd(rB, rC); r0 = _mm256_mul_pd(r0, rC); r1 = _mm256_add_pd(r1, rD); r2 = _mm256_mul_pd(r2, rE); r3 = _mm256_sub_pd(r3, rF); r4 = _mm256_mul_pd(r4, rC); r5 = _mm256_add_pd(r5, rD); r6 = _mm256_mul_pd(r6, rE); r7 = _mm256_sub_pd(r7, rF); r8 = _mm256_mul_pd(r8, rC); r9 = _mm256_add_pd(r9, rD); rA = _mm256_mul_pd(rA, rE); rB = _mm256_sub_pd(rB, rF); r0 = _mm256_add_pd(r0, rF); r1 = _mm256_mul_pd(r1, rE); r2 = _mm256_sub_pd(r2, rD); r3 = _mm256_mul_pd(r3, rC); r4 = _mm256_add_pd(r4, rF); r5 = _mm256_mul_pd(r5, rE); r6 = _mm256_sub_pd(r6, rD); r7 = _mm256_mul_pd(r7, rC); r8 = _mm256_add_pd(r8, rF); r9 = _mm256_mul_pd(r9, rE); rA = _mm256_sub_pd(rA, rD); rB = _mm256_mul_pd(rB, rC); i++; } //print(r0); //print(r1); //print(r2); //print(r3); //print(r4); //print(r5); //print(r6); //print(r7); //print(r8); //print(r9); //print(rA); //print(rB); //cout << endl; r0 
= _mm256_and_pd(r0, MASK); r1 = _mm256_and_pd(r1, MASK); r2 = _mm256_and_pd(r2, MASK); r3 = _mm256_and_pd(r3, MASK); r4 = _mm256_and_pd(r4, MASK); r5 = _mm256_and_pd(r5, MASK); r6 = _mm256_and_pd(r6, MASK); r7 = _mm256_and_pd(r7, MASK); r8 = _mm256_and_pd(r8, MASK); r9 = _mm256_and_pd(r9, MASK); rA = _mm256_and_pd(rA, MASK); rB = _mm256_and_pd(rB, MASK); r0 = _mm256_or_pd(r0, vONE); r1 = _mm256_or_pd(r1, vONE); r2 = _mm256_or_pd(r2, vONE); r3 = _mm256_or_pd(r3, vONE); r4 = _mm256_or_pd(r4, vONE); r5 = _mm256_or_pd(r5, vONE); r6 = _mm256_or_pd(r6, vONE); r7 = _mm256_or_pd(r7, vONE); r8 = _mm256_or_pd(r8, vONE); r9 = _mm256_or_pd(r9, vONE); rA = _mm256_or_pd(rA, vONE); rB = _mm256_or_pd(rB, vONE); c++; } // wclk end = wclk_now(); // double secs = wclk_secs_since(start); // uint64 ops = 12 * 1000 * c * 2; // cout << "Seconds = " << secs << endl; // cout << "FP Ops = " << ops << endl; // cout << "FLOPs = " << ops / secs << endl; r0 = _mm256_add_pd(r0, r1); r2 = _mm256_add_pd(r2, r3); r4 = _mm256_add_pd(r4, r5); r6 = _mm256_add_pd(r6, r7); r8 = _mm256_add_pd(r8, r9); rA = _mm256_add_pd(rA, rB); r0 = _mm256_add_pd(r0, r2); r4 = _mm256_add_pd(r4, r6); r8 = _mm256_add_pd(r8, rA); r0 = _mm256_add_pd(r0, r4); r0 = _mm256_add_pd(r0, r8); double out = 0; __m256d tmp = r0; out += ((double*)&tmp)[0]; out += ((double*)&tmp)[1]; out += ((double*)&tmp)[2]; out += ((double*)&tmp)[3]; return out; } void test_dp_mac_AVX(int tds, size_t iterations){ printf("Testing AVX Mul + Add:\n"); double *sum = (double*)malloc(tds * sizeof(double)); wclk start = wclk_now(); #pragma omp parallel num_threads(tds) { double ret = test_dp_mac_AVX_internal(1.1, 2.1, iterations); sum[omp_get_thread_num()] = ret; } double secs = wclk_secs_since(start); uint64 ops = 48 * 1000 * iterations * tds * 4; printf("Seconds = %g\n", secs); printf("FP Ops = %llu\n", (unsigned long long)ops); printf("FLOPs = %g\n", ops / secs); double out = 0; int c = 0; while (c < tds){ out += sum[c++]; } printf("sum = %g\n\n", out); 
free(sum); } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// #endif
distribute_simd_misc_messages.c
//
// clang -verify regression test: parser/semantic diagnostics for
// '#pragma omp distribute simd' and its clauses, under OpenMP 4.5 and 5.0.
// NOTE: the expected-*@+N annotations are line-position-sensitive relative to
// the directives they precede — do not insert or remove lines inside the
// function bodies below.
//
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized

// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized

void xxx(int argc) {
  int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp distribute simd
  for (int i = 0; i < 10; ++i)
    argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute simd'}}
#pragma omp distribute simd

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute simd'}}
#pragma omp distribute simd foo

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute simd'}}
#pragma omp distribute simd safelen(4)

void test_no_clause() {
  int i;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+2 {{statement after '#pragma omp distribute simd' must be a for loop}}
#pragma omp distribute simd
  ++i;
}

void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;

  int x[24];

#pragma omp target
#pragma omp teams
#pragma omp distribute simd
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }

  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}

void test_invalid_clause() {
  int i;
#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}}
#pragma omp distribute simd foo bar
  for (i = 0; i < 16; ++i)
    ;
}

void test_non_identifiers() {
  int i, x;

#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}}
#pragma omp distribute simd;
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}}
#pragma omp distribute simd private(x);
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}}
#pragma omp distribute simd, private(x);
  for (i = 0; i < 16; ++i)
    ;
}

extern int foo();

void test_safelen() {
  int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected '('}}
#pragma omp distribute simd safelen
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute simd safelen(
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute simd safelen()
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute simd safelen(,
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute simd safelen(, )
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp distribute simd safelen 4)
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute simd safelen(4
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute simd safelen(4,
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute simd safelen(4, )
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// xxpected-error@+1 {{expected expression}}
#pragma omp distribute simd safelen(4)
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute simd safelen(4 4)
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute simd safelen(4, , 4)
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
#pragma omp distribute simd safelen(4)
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute simd safelen(4, 8)
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute simd safelen(2.5)
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute simd safelen(foo())
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp distribute simd safelen(-5)
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp distribute simd safelen(0)
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp distribute simd safelen(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}

void test_simdlen() {
  int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected '('}}
#pragma omp distribute simd simdlen
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute simd simdlen(
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute simd simdlen()
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute simd simdlen(,
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute simd simdlen(, )
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp distribute simd simdlen 4)
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute simd simdlen(4
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute simd simdlen(4,
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute simd simdlen(4, )
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
#pragma omp distribute simd simdlen(4)
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute simd simdlen(4 4)
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute simd simdlen(4, , 4)
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
#pragma omp distribute simd simdlen(4)
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp distribute simd simdlen(4, 8)
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute simd simdlen(2.5)
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp distribute simd simdlen(foo())
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp distribute simd simdlen(-5)
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp distribute simd simdlen(0)
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp distribute simd simdlen(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}

void test_safelen_simdlen() {
  int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp distribute simd simdlen(6) safelen(5)
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp distribute simd safelen(5) simdlen(6)
  for (i = 0; i < 16; ++i)
    ;
}

void test_collapse() {
  int i;
#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected '('}}
#pragma omp distribute simd collapse
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute simd collapse(
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}}
#pragma omp distribute simd collapse()
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute simd collapse(,
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp distribute simd collapse(, )
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp distribute simd collapse 4)
  for (i = 0; i < 16; ++i)
    ;

#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute simd collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}}

#pragma omp target
#pragma omp teams
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp distribute simd collapse(4,
  for (i =
0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // xxpected-error@+1 {{expected expression}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4, , 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams #pragma omp distribute simd collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp 
target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute simd collapse(2.5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute simd collapse(foo()) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute simd collapse(-5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute simd collapse(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute simd collapse(5 - 5) for (i = 0; i < 16; ++i) ; // expected-note@+3 2 {{defined as reduction}} #pragma omp target #pragma omp teams #pragma omp distribute simd collapse(2) reduction(+ : i) for (i = 0; i < 16; ++i) // expected-error {{loop iteration variable in the associated loop of 'omp distribute simd' directive may not be reduction, predetermined as lastprivate}} // expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}} for (int j = 0; j < 16; ++j) // expected-error@+2 2 {{reduction variable must be shared}} // expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}} #pragma omp for reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; #pragma omp target #pragma omp teams for (i = 0; i < 16; ++i) for (int j = 0; j < 16; ++j) #pragma omp distribute simd reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; } void test_linear() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} 
expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd linear( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd linear(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp distribute simd linear(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd linear() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd linear(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd linear(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp distribute simd linear(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp distribute simd linear(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp distribute simd linear(x, y, z) for (i = 0; i < 16; ++i) ; } void test_aligned() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd aligned( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // 
expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd aligned(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp distribute simd aligned(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd aligned() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd aligned(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd aligned(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp distribute simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp distribute simd aligned(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp distribute simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; int *x, y, z[25]; // expected-note 4 {{'y' defined here}} #pragma omp target #pragma omp teams #pragma omp distribute simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd aligned(z) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd aligned(x :) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // 
expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd aligned(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd aligned(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd aligned(x : 2 * 2) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd aligned(x : 1, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd aligned(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute simd aligned(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+2 {{defined as aligned}} // expected-error@+1 {{a variable cannot appear in more than one aligned clause}} #pragma omp distribute simd aligned(x) aligned(z, x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+3 {{defined as aligned}} // expected-error@+2 {{a variable cannot appear in more than one aligned clause}} // expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute simd aligned(x, y, z) aligned(y, z) for (i = 0; i < 16; ++i) ; } void test_private() { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd private( for (i = 
0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp distribute simd private(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 2 {{expected expression}} #pragma omp distribute simd private(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd private() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd private(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd private(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp target #pragma omp teams #pragma omp distribute simd private(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd private(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd private(x, y, z) for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_firstprivate() { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp distribute simd firstprivate( for (i = 0; i < 16; ++i) ; } void test_lastprivate() { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp distribute simd lastprivate( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp distribute simd lastprivate(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // 
expected-error@+1 2 {{expected expression}} #pragma omp distribute simd lastprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd lastprivate() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd lastprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd lastprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp target #pragma omp teams #pragma omp distribute simd lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd lastprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd lastprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_reduction() { int i, x, y; #pragma omp target #pragma omp teams // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{expected identifier}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp distribute simd reduction( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected identifier}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp distribute simd reduction() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp distribute simd reduction(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected identifier}} #pragma omp distribute simd reduction( : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 
{{expected identifier}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp distribute simd reduction(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{expected expression}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp distribute simd reduction(+ for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // // expected-error@+1 {{expected expression}} #pragma omp distribute simd reduction(+: for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd reduction(+ :) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd reduction(+ :, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd reduction(+ : x, + : y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected identifier}} #pragma omp distribute simd reduction(% : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(+ : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(* : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(- : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(& : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(| : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(^ : x) for (i = 0; i < 16; ++i) ; #pragma omp target 
#pragma omp teams #pragma omp distribute simd reduction(&& : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(|| : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(max : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(min : x) for (i = 0; i < 16; ++i) ; struct X { int x; }; struct X X; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd reduction(+ : X.x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd reduction(+ : x + x) for (i = 0; i < 16; ++i) ; } void test_loop_messages() { float a[100], b[100], c[100]; #pragma omp target #pragma omp teams // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp distribute simd for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } #pragma omp target #pragma omp teams // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp distribute simd for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } } void linear_modifiers(int argc) { int k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(k) for (k = 0; k < argc; ++k) ++k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(val(k)) for (k = 0; k < argc; ++k) ++k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(uval(k)) // expected-error {{expected 'val' modifier}} for (k = 0; k < argc; ++k) ++k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(ref(k)) // expected-error {{expected 'val' modifier}} for (k = 0; k < argc; ++k) ++k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(foo(k)) // expected-error {{expected 'val' modifier}} for (k = 0; k < argc; ++k) ++k; } 
void test_nontemporal() { int i; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd nontemporal( for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd nontemporal(, for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-error@+1 2 {{expected expression}} #pragma omp distribute simd nontemporal(, ) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-error@+1 {{expected expression}} #pragma omp distribute simd nontemporal() for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-error@+1 {{expected expression}} #pragma omp distribute simd nontemporal(int) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} omp50-error@+1 {{expected variable name}} #pragma omp distribute simd nontemporal(0) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp distribute simd nontemporal(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp distribute simd nontemporal(x, y) for (i = 0; i < 16; ++i) ; 
// expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp distribute simd nontemporal(x, y, z) for (i = 0; i < 16; ++i) ; int x, y; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd nontemporal(x :) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} #pragma omp distribute simd nontemporal(x :, ) for (i = 0; i < 16; ++i) ; // omp50-note@+2 {{defined as nontemporal}} // omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}} #pragma omp distribute simd nontemporal(x) nontemporal(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} #pragma omp distribute simd private(x) nontemporal(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} #pragma omp distribute simd nontemporal(x) private(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} #pragma omp distribute simd nontemporal(x, y : 0) for (i = 0; i < 16; ++i) 
; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} #pragma omp distribute simd nontemporal(x) lastprivate(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp distribute simd'}} #pragma omp distribute simd lastprivate(x) nontemporal(x) for (i = 0; i < 16; ++i) ; }
/* GB_binop__pair_int8.c */
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): generated kernel family for the PAIR operator on int8: the
// binary op below is z = 1, so the values of A and B are never read (both are
// "pattern only"); only the sparsity pattern matters.

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__pair_int8)
// A.*B function (eWiseMult):       GB ((none))
// A.*B function (eWiseMult):       GB ((none))
// A.*B function (eWiseMult):       GB ((none))
// A.*B function (eWiseMult):       GB ((none))
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__pair_int8)
// C+=b function (dense accum):     GB (_Cdense_accumb__pair_int8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__pair_int8)
// C=scalar+B                       GB ((none))
// C=scalar+B'                      GB ((none))
// C=A+scalar                       GB ((none))
// C=A'+scalar                      GB ((none))

// C type:   int8_t
// A type:   int8_t
// A pattern? 1
// B type:   int8_t
// B pattern? 1

// BinaryOp: cij = 1

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
// (expands to an empty statement: A values are ignored for PAIR)
#define GB_GETA(aij,Ax,pA,A_iso) \
    ;

// true if values of A are not used
#define GB_A_IS_PATTERN \
    1 \

// bij = Bx [pB]
// (expands to an empty statement: B values are ignored for PAIR)
#define GB_GETB(bij,Bx,pB,B_iso) \
    ;

// true if values of B are not used
#define GB_B_IS_PATTERN \
    1 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = 1 ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_PAIR || GxB_NO_INT8 || GxB_NO_PAIR_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__pair_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__pair_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__pair_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable duplicate return, kept as emitted by the generator
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pair_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int8_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion: unpack the typed alpha/beta scalars
        alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant. For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < 
bnz ; p++) { if (!GBB (Bb, p)) continue ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
composite.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE % % C O O MM MM P P O O SS I T E % % C O O M M M PPPP O O SSS I T EEE % % C O O M M P O O SS I T E % % CCCC OOO M M P OOO SSSSS IIIII T EEEEE % % % % % % MagickCore Image Composite Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/accelerate.h" #include "magick/artifact.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/draw.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/memory_.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantum.h" #include "magick/resample.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p o s i t e I m a g e C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompositeImageChannel() returns the second image composited onto the first % at the specified offset, using the specified composite method. % % The format of the CompositeImageChannel method is: % % MagickBooleanType CompositeImage(Image *image, % const CompositeOperator compose,Image *source_image, % const ssize_t x_offset,const ssize_t y_offset) % MagickBooleanType CompositeImageChannel(Image *image, % const ChannelType channel,const CompositeOperator compose, % Image *source_image,const ssize_t x_offset,const ssize_t y_offset) % % A description of each parameter follows: % % o image: the canvas image, modified by he composition % % o channel: the channel. 
%
%    o compose: This operator affects how the composite is applied to
%      the image.  The operators and how they are utilized are listed here
%      http://www.w3.org/TR/SVG12/#compositing.
%
%    o source_image: the composite (source) image.
%
%    o x_offset: the column offset of the composited image.
%
%    o y_offset: the row offset of the composited image.
%
%  Extra Controls from Image meta-data in 'source_image' (artifacts)
%
%    o "compose:args"
%        A string containing extra numerical arguments for specific compose
%        methods, generally expressed as a 'geometry' or a comma separated
%        list of numbers.
%
%        Compose methods needing such arguments include "BlendCompositeOp"
%        and "DisplaceCompositeOp".
%
%    o "compose:outside-overlay"
%        Modify how the composition is to affect areas not directly covered
%        by the 'source_image' at the offset given.  Normally this is
%        dependent on the 'compose' method, especially Duff-Porter methods.
%
%        If set to "false" then disable all normal handling of pixels not
%        covered by the source_image.  Typically used for repeated tiling
%        of the source_image by the calling API.
%
%        Previous to IM v6.5.3-3 this was called "modify-outside-overlay"
%
*/

/*
** Programmers notes on SVG specification.
**
** A Composition is defined by...
**    Color Function :  f(Sc,Dc)  where Sc and Dc are the normalized colors
**    Blending areas :  X = 1     for area of overlap, ie: f(Sc,Dc)
**                      Y = 1     for source preserved
**                      Z = 1     for canvas preserved
**
** Conversion to transparency (then optimized)
**    Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa)
**    Da'  = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa)
**
** Where...
**    Sca = Sc*Sa     normalized Source color multiplied by Source alpha
**    Dca = Dc*Da     normalized Dest color multiplied by Dest alpha
**    Dc' = Dca'/Da'  the desired color value for this channel.
**
** Da' appears in the following formulae as 'gamma'; it is the resulting
** alpha value.
**
**
** Most functions use a blending mode of over (X=1,Y=1,Z=1)
** this results in the following optimizations...
** gamma = Sa+Da-Sa*Da; ** gamma = 1 - QuantiumScale*alpha * QuantiumScale*beta; ** opacity = QuantiumScale*alpha*beta; // over blend, optimized 1-Gamma ** ** The above SVG definitions also definate that Mathematical Composition ** methods should use a 'Over' blending mode for Alpha Channel. ** It however was not applied for composition modes of 'Plus', 'Minus', ** the modulus versions of 'Add' and 'Subtract'. ** ** ** Mathematical operator changes to be applied from IM v6.7... ** ** 1/ Modulus modes 'Add' and 'Subtract' are obsoleted and renamed ** 'ModulusAdd' and 'ModulusSubtract' for clarity. ** ** 2/ All mathematical compositions work as per the SVG specification ** with regard to blending. This now includes 'ModulusAdd' and ** 'ModulusSubtract'. ** ** 3/ When the special channel flag 'sync' (syncronize channel updates) ** is turned off (enabled by default) then mathematical compositions are ** only performed on the channels specified, and are applied ** independantally of each other. In other words the mathematics is ** performed as 'pure' mathematical operations, rather than as image ** operations. */ static inline MagickRealType Atop(const MagickRealType p, const MagickRealType Sa,const MagickRealType q, const MagickRealType magick_unused(Da)) { magick_unreferenced(Da); return(p*Sa+q*(1.0-Sa)); /* Da optimized out, Da/gamma => 1.0 */ } static inline void CompositeAtop(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ composite->opacity=q->opacity; /* optimized Da = 1.0-Gamma */ composite->red=Atop(p->red,Sa,q->red,1.0); composite->green=Atop(p->green,Sa,q->green,1.0); composite->blue=Atop(p->blue,Sa,q->blue,1.0); if (q->colorspace == CMYKColorspace) composite->index=Atop(p->index,Sa,q->index,1.0); } /* What is this Composition method for? Can't find any specification! 
WARNING this is not doing correct 'over' blend handling (Anthony Thyssen). */ static inline void CompositeBumpmap(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType intensity; intensity=MagickPixelIntensity(p); composite->red=QuantumScale*intensity*q->red; composite->green=QuantumScale*intensity*q->green; composite->blue=QuantumScale*intensity*q->blue; composite->opacity=(MagickRealType) QuantumScale*intensity*p->opacity; if (q->colorspace == CMYKColorspace) composite->index=QuantumScale*intensity*q->index; } static inline void CompositeClear(const MagickPixelPacket *q, MagickPixelPacket *composite) { composite->opacity=(MagickRealType) TransparentOpacity; composite->red=0.0; composite->green=0.0; composite->blue=0.0; if (q->colorspace == CMYKColorspace) composite->index=0.0; } static MagickRealType ColorBurn(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { #if 0 /* Oct 2004 SVG specification. */ if (Sca*Da + Dca*Sa <= Sa*Da) return(Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Sa*(Sca*Da+Dca*Sa-Sa*Da)/Sca + Sca*(1.0-Da) + Dca*(1.0-Sa)); #else /* March 2009 SVG specification. */ if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca-Da) < MagickEpsilon)) return(Sa*Da+Dca*(1.0-Sa)); if (Sca < MagickEpsilon) return(Dca*(1.0-Sa)); return(Sa*Da-Sa*MagickMin(Da,(Da-Dca)*Sa/Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); #endif } static inline void CompositeColorBurn(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*ColorBurn(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*ColorBurn(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*ColorBurn(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*ColorBurn(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static MagickRealType ColorDodge(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { /* Oct 2004 SVG specification. */ if ((Sca*Da+Dca*Sa) >= Sa*Da) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Dca*Sa*Sa/(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); #if 0 /* New specification, March 2009 SVG specification. This specification was also wrong of non-overlap cases. */ if ((fabs(Sca-Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon)) return(Sca*(1.0-Da)); if (fabs(Sca-Sa) < MagickEpsilon) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Sa*MagickMin(Da,Dca*Sa/(Sa-Sca))); #endif #if 0 /* Working from first principles using the original formula: f(Sc,Dc) = Dc/(1-Sc) This works correctly! Looks like the 2004 model was right but just required a extra condition for correct handling. */ if ((fabs(Sca-Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon)) return(Sca*(1.0-Da)+Dca*(1.0-Sa)); if (fabs(Sca-Sa) < MagickEpsilon) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Dca*Sa*Sa/(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); #endif } static inline void CompositeColorDodge(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*ColorDodge(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*ColorDodge(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*ColorDodge(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*ColorDodge(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static inline MagickRealType Darken(const MagickRealType p, const MagickRealType alpha,const MagickRealType q,const MagickRealType beta) { if (p < q) return(MagickOver_(p,alpha,q,beta)); /* src-over */ return(MagickOver_(q,beta,p,alpha)); /* dst-over */ } static inline void CompositeDarken(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { /* Darken is equivalent to a 'Minimum' method OR a greyscale version of a binary 'Or' OR the 'Intersection' of pixel sets. */ double gamma; if ( (channel & SyncChannels) != 0 ) { composite->opacity=QuantumScale*p->opacity*q->opacity; /* Over Blend */ gamma=1.0-QuantumScale*composite->opacity; gamma=PerceptibleReciprocal(gamma); composite->red=gamma*Darken(p->red,p->opacity,q->red,q->opacity); composite->green=gamma*Darken(p->green,p->opacity,q->green,q->opacity); composite->blue=gamma*Darken(p->blue,p->opacity,q->blue,q->opacity); if (q->colorspace == CMYKColorspace) composite->index=gamma*Darken(p->index,p->opacity,q->index,q->opacity); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=MagickMax(p->opacity,q->opacity); if ( (channel & RedChannel) != 0 ) composite->red=MagickMin(p->red,q->red); if ( (channel & GreenChannel) != 0 ) composite->green=MagickMin(p->green,q->green); if ( (channel & BlueChannel) != 0 ) composite->blue=MagickMin(p->blue,q->blue); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=MagickMin(p->index,q->index); } } static 
inline void CompositeDarkenIntensity(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { /* Select the pixel based on the intensity level. If 'Sync' flag select whole pixel based on alpha weighted intensity. Otherwise use intensity only, but restrict copy according to channel. */ if ( (channel & SyncChannels) != 0 ) { MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; Da=1.0-QuantumScale*q->opacity; *composite = (Sa*MagickPixelIntensity(p) < Da*MagickPixelIntensity(q)) ? *p : *q; } else { int from_p = (MagickPixelIntensity(p) < MagickPixelIntensity(q)); if ( (channel & AlphaChannel) != 0 ) composite->opacity = from_p ? p->opacity : q->opacity; if ( (channel & RedChannel) != 0 ) composite->red = from_p ? p->red : q->red; if ( (channel & GreenChannel) != 0 ) composite->green = from_p ? p->green : q->green; if ( (channel & BlueChannel) != 0 ) composite->blue = from_p ? p->blue : q->blue; if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index = from_p ? p->index : q->index; } } static inline MagickRealType Difference(const MagickRealType p, const MagickRealType Sa,const MagickRealType q,const MagickRealType Da) { /* Optimized by Multipling by QuantumRange (taken from gamma). */ return(Sa*p+Da*q-Sa*Da*2.0*MagickMin(p,q)); } static inline void CompositeDifference(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { double gamma; MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); /* Values are not normalized as an optimization. 
*/ composite->red=gamma*Difference(p->red,Sa,q->red,Da); composite->green=gamma*Difference(p->green,Sa,q->green,Da); composite->blue=gamma*Difference(p->blue,Sa,q->blue,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Difference(p->index,Sa,q->index,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange-fabs((double) (p->opacity-q->opacity)); if ( (channel & RedChannel) != 0 ) composite->red=fabs((double) (p->red-q->red)); if ( (channel & GreenChannel) != 0 ) composite->green=fabs((double) (p->green-q->green)); if ( (channel & BlueChannel) != 0 ) composite->blue=fabs((double) (p->blue-q->blue)); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=fabs((double) (p->index-q->index)); } } static MagickRealType Divide(const MagickRealType Sca,const MagickRealType Sa, const MagickRealType Dca,const MagickRealType Da) { /* Divide Source by Destination f(Sc,Dc) = Sc / Dc But with appropriate handling for special case of Dc == 0 specifically so that f(Black,Black)=Black and f(non-Black,Black)=White. It is however also important to correctly do 'over' alpha blending which is why the formula becomes so complex. */ if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon)) return(Sca*(1.0-Da)+Dca*(1.0-Sa)); if (fabs(Dca) < MagickEpsilon) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeDivide(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*Divide(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*Divide(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*Divide(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Divide(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange*(1.0-Divide(Sa,1.0,Da,1.0)); if ( (channel & RedChannel) != 0 ) composite->red=QuantumRange* Divide(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0); if ( (channel & GreenChannel) != 0 ) composite->green=QuantumRange* Divide(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0); if ( (channel & BlueChannel) != 0 ) composite->blue=QuantumRange* Divide(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=QuantumRange* Divide(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0); } } static MagickRealType Exclusion(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { return(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeExclusion(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { MagickRealType gamma, Sa, Da; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*Exclusion(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*Exclusion(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*Exclusion(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Exclusion(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } else { /* handle channels as separate grayscale channels */ if ((channel & AlphaChannel) != 0) composite->opacity=QuantumRange*(1.0-Exclusion(Sa,1.0,Da,1.0)); if ((channel & RedChannel) != 0) composite->red=QuantumRange*Exclusion(QuantumScale*p->red,1.0, QuantumScale*q->red,1.0); if ((channel & GreenChannel) != 0) composite->green=QuantumRange*Exclusion(QuantumScale*p->green,1.0, QuantumScale*q->green,1.0); if ((channel & BlueChannel) != 0) composite->blue=QuantumRange*Exclusion(QuantumScale*p->blue,1.0, QuantumScale*q->blue,1.0); if (((channel & IndexChannel) != 0) && (q->colorspace == CMYKColorspace)) composite->index=QuantumRange*Exclusion(QuantumScale*p->index,1.0, QuantumScale*q->index,1.0); } } static MagickRealType HardLight(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { if ((2.0*Sca) < Sa) return(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeHardLight(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*HardLight(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*HardLight(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*HardLight(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*HardLight(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static MagickRealType HardMix(const MagickRealType Sca, const MagickRealType Dca) { if ((Sca+Dca) < QuantumRange) return(0.0); else return(1.0); } static inline void CompositeHardMix(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red=gamma*HardMix(p->red*Sa,q->red*Da); composite->green=gamma*HardMix(p->green*Sa,q->green*Da); composite->blue=gamma*HardMix(p->blue*Sa,q->blue*Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*HardMix(p->index*Sa,q->index*Da); } static void HCLComposite(const double hue,const double chroma,const double luma, MagickRealType *red,MagickRealType *green,MagickRealType *blue) { double b, c, g, h, m, r, x; /* Convert HCL to RGB colorspace. 
*/ assert(red != (MagickRealType *) NULL); assert(green != (MagickRealType *) NULL); assert(blue != (MagickRealType *) NULL); h=6.0*hue; c=chroma; x=c*(1.0-fabs(fmod(h,2.0)-1.0)); r=0.0; g=0.0; b=0.0; if ((0.0 <= h) && (h < 1.0)) { r=c; g=x; } else if ((1.0 <= h) && (h < 2.0)) { r=x; g=c; } else if ((2.0 <= h) && (h < 3.0)) { g=c; b=x; } else if ((3.0 <= h) && (h < 4.0)) { g=x; b=c; } else if ((4.0 <= h) && (h < 5.0)) { r=x; b=c; } else if ((5.0 <= h) && (h < 6.0)) { r=c; b=x; } m=luma-(0.298839*r+0.586811*g+0.114350*b); *red=QuantumRange*(r+m); *green=QuantumRange*(g+m); *blue=QuantumRange*(b+m); } static void CompositeHCL(const MagickRealType red,const MagickRealType green, const MagickRealType blue,double *hue,double *chroma,double *luma) { double b, c, g, h, max, r; /* Convert RGB to HCL colorspace. */ assert(hue != (double *) NULL); assert(chroma != (double *) NULL); assert(luma != (double *) NULL); r=(double) red; g=(double) green; b=(double) blue; max=MagickMax(r,MagickMax(g,b)); c=max-(double) MagickMin(r,MagickMin(g,b)); h=0.0; if (c == 0) h=0.0; else if (red == (MagickRealType) max) h=fmod((g-b)/c+6.0,6.0); else if (green == (MagickRealType) max) h=((b-r)/c)+2.0; else if (blue == (MagickRealType) max) h=((r-g)/c)+4.0; *hue=(h/6.0); *chroma=QuantumScale*c; *luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b); } static inline MagickRealType In(const MagickRealType p,const MagickRealType Sa, const MagickRealType magick_unused(q),const MagickRealType Da) { magick_unreferenced(q); return(Sa*p*Da); } static inline void CompositeIn(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { double gamma; MagickRealType Sa, Da; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=Sa*Da; composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); composite->red=gamma*In(p->red,Sa,q->red,Da); composite->green=gamma*In(p->green,Sa,q->green,Da); 
composite->blue=gamma*In(p->blue,Sa,q->blue,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*In(p->index,Sa,q->index,Da); } static inline MagickRealType Lighten(const MagickRealType p, const MagickRealType alpha,const MagickRealType q,const MagickRealType beta) { if (p > q) return(MagickOver_(p,alpha,q,beta)); /* src-over */ return(MagickOver_(q,beta,p,alpha)); /* dst-over */ } static inline void CompositeLighten(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { /* Lighten is also equvalent to a 'Maximum' method OR a greyscale version of a binary 'And' OR the 'Union' of pixel sets. */ double gamma; if ( (channel & SyncChannels) != 0 ) { composite->opacity=QuantumScale*p->opacity*q->opacity; /* Over Blend */ gamma=1.0-QuantumScale*composite->opacity; gamma=PerceptibleReciprocal(gamma); composite->red=gamma*Lighten(p->red,p->opacity,q->red,q->opacity); composite->green=gamma*Lighten(p->green,p->opacity,q->green,q->opacity); composite->blue=gamma*Lighten(p->blue,p->opacity,q->blue,q->opacity); if (q->colorspace == CMYKColorspace) composite->index=gamma*Lighten(p->index,p->opacity,q->index,q->opacity); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=MagickMin(p->opacity,q->opacity); if ( (channel & RedChannel) != 0 ) composite->red=MagickMax(p->red,q->red); if ( (channel & GreenChannel) != 0 ) composite->green=MagickMax(p->green,q->green); if ( (channel & BlueChannel) != 0 ) composite->blue=MagickMax(p->blue,q->blue); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=MagickMax(p->index,q->index); } } static inline void CompositeLightenIntensity(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { /* Select the pixel based on the intensity level. If 'Sync' flag select whole pixel based on alpha weighted intensity. 
Otherwise use Intenisty only, but restrict copy according to channel. */ if ( (channel & SyncChannels) != 0 ) { MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; Da=1.0-QuantumScale*q->opacity; *composite = (Sa*MagickPixelIntensity(p) > Da*MagickPixelIntensity(q)) ? *p : *q; } else { int from_p = (MagickPixelIntensity(p) > MagickPixelIntensity(q)); if ( (channel & AlphaChannel) != 0 ) composite->opacity = from_p ? p->opacity : q->opacity; if ( (channel & RedChannel) != 0 ) composite->red = from_p ? p->red : q->red; if ( (channel & GreenChannel) != 0 ) composite->green = from_p ? p->green : q->green; if ( (channel & BlueChannel) != 0 ) composite->blue = from_p ? p->blue : q->blue; if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index = from_p ? p->index : q->index; } } #if 0 static inline MagickRealType LinearDodge(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { /* LinearDodge: simplifies to a trivial formula f(Sc,Dc) = Sc + Dc Dca' = Sca + Dca */ return(Sca+Dca); } #endif static inline void CompositeLinearDodge(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { double gamma; MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); composite->red=gamma*(p->red*Sa+q->red*Da); composite->green=gamma*(p->green*Sa+q->green*Da); composite->blue=gamma*(p->blue*Sa+q->blue*Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*(p->index*Sa+q->index*Da); } static inline MagickRealType LinearBurn(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { /* LinearBurn: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: 
      f(Sc,Dc) = Sc + Dc - 1
  */
  return(Sca+Dca-Sa*Da);
}

static inline void CompositeLinearBurn(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*LinearBurn(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*LinearBurn(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*LinearBurn(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*LinearBurn(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

static inline MagickRealType LinearLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
#if 0
  /*
    Previous formula, was only valid for fully-opaque images.
  */
  return(Dca+2*Sca-1.0);
#else
  /*
    LinearLight: as defined by Adobe Photoshop, according to
    http://www.simplefilter.de/en/basics/mixmods.html is:
      f(Sc,Dc) = Dc + 2*Sc - 1
  */
  return((Sca-Sa)*Da+Sca+Dca);
#endif
}

static inline void CompositeLinearLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
    MagickEpsilon : gamma);
  composite->red=gamma*LinearLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*LinearLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*LinearLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*LinearLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

static inline MagickRealType Mathematics(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da,
  const GeometryInfo *geometry_info)
{
  /*
    'Mathematics' a free form user control mathematical composition is
    defined as...

      f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D

    Where the arguments A,B,C,D are (currently) passed to composite as a
    comma separated 'geometry' string in "compose:args" image artifact.

      A = a->rho,   B = a->sigma,   C = a->xi,   D = a->psi

    Applying the SVG transparency formula (see above), we get...

      Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa)

      Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) +
        Dca*(1.0-Sa)
  */
  return(geometry_info->rho*Sca*Dca+geometry_info->sigma*Sca*Da+
    geometry_info->xi*Dca*Sa+geometry_info->psi*Sa*Da+Sca*(1.0-Da)+
    Dca*(1.0-Sa));
}

static inline void CompositeMathematics(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  const GeometryInfo *args, MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* ??? - AT */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
      MagickEpsilon : gamma);
    composite->red=gamma*Mathematics(QuantumScale*p->red*Sa,Sa,QuantumScale*
      q->red*Da,Da,args);
    composite->green=gamma*Mathematics(QuantumScale*p->green*Sa,Sa,QuantumScale*
      q->green*Da,Da,args);
    composite->blue=gamma*Mathematics(QuantumScale*p->blue*Sa,Sa,QuantumScale*
      q->blue*Da,Da,args);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Mathematics(QuantumScale*p->index*Sa,Sa,
        QuantumScale*q->index*Da,Da,args);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange*(1.0-Mathematics(Sa,1.0,Da,1.0,args));
    if ( (channel & RedChannel) != 0 )
      composite->red=QuantumRange*
        Mathematics(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0,args);
    if ( (channel & GreenChannel) != 0 )
      composite->green=QuantumRange*
        Mathematics(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0,args);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=QuantumRange*
        Mathematics(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0,args);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=QuantumRange*
        Mathematics(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0,args);
  }
}

static inline void CompositePlus(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  if ( (channel & SyncChannels) != 0 ) {
    /*
      NOTE: "Plus" does not use 'over' alpha-blending but uses a
      special 'plus' form of alpha-blending.  It is the ONLY mathematical
      operator to do this.  This is what makes it different to the
      otherwise equivalent "LinearDodge" composition method.

      Note however that color channels are still affected by the alpha
      channel as a result of the blending, making it just as useless for
      independent channel maths, just like all other mathematical
      composition methods.

      As such the removal of the 'sync' flag, is still a useful convention.
      The MagickPixelCompositePlus() function is defined in
      "composite-private.h" so it can also be used for Image Blending.
    */
    MagickPixelCompositePlus(p,p->opacity,q,q->opacity,composite);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=p->opacity+q->opacity-QuantumRange;
    if ( (channel & RedChannel) != 0 )
      composite->red=p->red+q->red;
    if ( (channel & GreenChannel) != 0 )
      composite->green=p->green+q->green;
    if ( (channel & BlueChannel) != 0 )
      composite->blue=p->blue+q->blue;
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=p->index+q->index;
  }
}

static inline MagickRealType Minus(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,
  const MagickRealType magick_unused(Da))
{
  /*
    Minus Source from Destination

      f(Sc,Dc) = Sc - Dc

    The return value is the SVG alpha-blended form of that formula:
    Sa*Da*(Sc-Dc) + Sca*(1-Da) + Dca*(1-Sa) simplifies to Sca+Dca-2*Dca*Sa.
  */
  magick_unreferenced(Da);

  return(Sca+Dca-2*Dca*Sa);
}

static inline void CompositeMinus(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=PerceptibleReciprocal(gamma);
    composite->red=gamma*Minus(p->red*Sa,Sa,q->red*Da,Da);
    composite->green=gamma*Minus(p->green*Sa,Sa,q->green*Da,Da);
    composite->blue=gamma*Minus(p->blue*Sa,Sa,q->blue*Da,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Minus(p->index*Sa,Sa,q->index*Da,Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange*(1.0-(Sa-Da));
    if ( (channel & RedChannel) != 0 )
      composite->red=p->red-q->red;
    if ( (channel & GreenChannel) != 0 )
      composite->green=p->green-q->green;
    if ( (channel & BlueChannel) != 0 )
      composite->blue=p->blue-q->blue;
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=p->index-q->index;
  }
}

/* Add the two channel values, wrapping the sum back into the
   [0,QuantumRange] interval (modulus arithmetic), then alpha blend. */
static inline MagickRealType ModulusAdd(const MagickRealType p,
  const MagickRealType Sa,const MagickRealType q,const MagickRealType Da)
{
  MagickRealType
    pixel;

  pixel=p+q;
  while (pixel > QuantumRange)
    pixel-=QuantumRange;
  while (pixel < 0.0)
    pixel+=QuantumRange;
  return(pixel*Sa*Da+p*Sa*(1.0-Da)+q*Da*(1.0-Sa));
}

static inline void CompositeModulusAdd(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  if ( (channel & SyncChannels) != 0 ) {
    double
      gamma;

    MagickRealType
      Sa,
      Da;

    Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
    Da=1.0-QuantumScale*q->opacity;
    gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=PerceptibleReciprocal(gamma);
    composite->red=ModulusAdd(p->red,Sa,q->red,Da);
    composite->green=ModulusAdd(p->green,Sa,q->green,Da);
    composite->blue=ModulusAdd(p->blue,Sa,q->blue,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=ModulusAdd(p->index,Sa,q->index,Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange-ModulusAdd(QuantumRange-p->opacity,
        1.0,QuantumRange-q->opacity,1.0);
    if ( (channel & RedChannel) != 0 )
      composite->red=ModulusAdd(p->red,1.0,q->red,1.0);
    if ( (channel & GreenChannel) != 0 )
      composite->green=ModulusAdd(p->green,1.0,q->green,1.0);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=ModulusAdd(p->blue,1.0,q->blue,1.0);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=ModulusAdd(p->index,1.0,q->index,1.0);
  }
}

/* Subtract with wrap-around into [0,QuantumRange], then alpha blend. */
static inline MagickRealType ModulusSubtract(const MagickRealType p,
  const MagickRealType Sa,const MagickRealType q,const MagickRealType Da)
{
  MagickRealType
    pixel;

  pixel=p-q;
  while (pixel > QuantumRange)
    pixel-=QuantumRange;
  while (pixel < 0.0)
    pixel+=QuantumRange;
  return(pixel*Sa*Da+p*Sa*(1.0-Da)+q*Da*(1.0-Sa));
}

static inline void CompositeModulusSubtract(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  if ( (channel & SyncChannels) != 0 ) {
    double
      gamma;

    MagickRealType
      Da,
      Sa;

    Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
    Da=1.0-QuantumScale*q->opacity;
    gamma = RoundToUnity(Sa+Da-Sa*Da);
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=PerceptibleReciprocal(gamma);
    composite->red=ModulusSubtract(p->red,Sa,q->red,Da);
    composite->green=ModulusSubtract(p->green,Sa,q->green,Da);
    composite->blue=ModulusSubtract(p->blue,Sa,q->blue,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=ModulusSubtract(p->index,Sa,q->index,Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange-ModulusSubtract(QuantumRange-p->opacity,
        1.0,QuantumRange-q->opacity,1.0);
    if ( (channel & RedChannel) != 0 )
      composite->red=ModulusSubtract(p->red,1.0,q->red,1.0);
    if ( (channel & GreenChannel) != 0 )
      composite->green=ModulusSubtract(p->green,1.0,q->green,1.0);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=ModulusSubtract(p->blue,1.0,q->blue,1.0);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=ModulusSubtract(p->index,1.0,q->index,1.0);
  }
}

/* Multiply: f(Sc,Dc) = Sc*Dc, expressed on premultiplied channels. */
static inline MagickRealType Multiply(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  return(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa));
}

static inline void CompositeMultiply(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
    composite->red=gamma*Multiply(QuantumScale*p->red*Sa,Sa,QuantumScale*
      q->red*Da,Da);
    composite->green=gamma*Multiply(QuantumScale*p->green*Sa,Sa,QuantumScale*
      q->green*Da,Da);
    composite->blue=gamma*Multiply(QuantumScale*p->blue*Sa,Sa,QuantumScale*
      q->blue*Da,Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Multiply(QuantumScale*p->index*Sa,Sa,QuantumScale*
        q->index*Da,Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange*(1.0-Sa*Da);
    if ( (channel & RedChannel) != 0 )
      composite->red=QuantumScale*p->red*q->red;
    if ( (channel & GreenChannel) != 0 )
      composite->green=QuantumScale*p->green*q->green;
    if ( (channel & BlueChannel) != 0 )
      composite->blue=QuantumScale*p->blue*q->blue;
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=QuantumScale*p->index*q->index;
  }
}

/* Porter-Duff 'Out': keep source only where the destination is absent. */
static inline MagickRealType Out(const MagickRealType p,
  const MagickRealType Sa,const MagickRealType magick_unused(q),
  const MagickRealType Da)
{
  magick_unreferenced(q);

  return(Sa*p*(1.0-Da));
}

static inline void CompositeOut(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=Sa*(1.0-Da);  /* resulting alpha of the 'Out' operator */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=PerceptibleReciprocal(gamma);
  composite->red=gamma*Out(p->red,Sa,q->red,Da);
  composite->green=gamma*Out(p->green,Sa,q->green,Da);
  composite->blue=gamma*Out(p->blue,Sa,q->blue,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*Out(p->index,Sa,q->index,Da);
}

static MagickRealType PegtopLight(const MagickRealType Sca,const
  MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    PegTop: A Soft-Light alternative: A continuous version of the
    Softlight function, producing very similar results.

      f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc

    See http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm.
  */
  if (fabs(Da) < MagickEpsilon)
    return(Sca);
  return(Dca*Dca*(Sa-2.0*Sca)/Da+Sca*(2.0*Dca+1.0-Da)+Dca*(1.0-Sa));
}

static inline void CompositePegtopLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*PegtopLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*PegtopLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*PegtopLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*PegtopLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

static MagickRealType PinLight(const MagickRealType Sca,
  const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da)
{
  /*
    PinLight: A Photoshop 7 composition method
    http://www.simplefilter.de/en/basics/mixmods.html

      f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ?
        2*Sc : Dc
  */
  if (Dca*Sa < Da*(2*Sca-Sa))
    return(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa));
  if ((Dca*Sa) > (2*Sca*Da))
    return(Sca*Da+Sca+Dca*(1.0-Sa));
  return(Sca*(1.0-Da)+Dca);
}

static inline void CompositePinLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*PinLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*PinLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*PinLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*PinLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

static inline MagickRealType Screen(const MagickRealType Sca,
  const MagickRealType Dca)
{
  /*
    Screen: A negated multiply

      f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc)
  */
  return(Sca+Dca-Sca*Dca);
}

static inline void CompositeScreen(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 ) {
    gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
    composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
    Sa*=(MagickRealType) QuantumScale;
    Da*=(MagickRealType) QuantumScale; /* optimization */
    gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
      MagickEpsilon : gamma);
    composite->red=gamma*Screen(p->red*Sa,q->red*Da);
    composite->green=gamma*Screen(p->green*Sa,q->green*Da);
    composite->blue=gamma*Screen(p->blue*Sa,q->blue*Da);
    if (q->colorspace == CMYKColorspace)
      composite->index=gamma*Screen(p->index*Sa,q->index*Da);
  }
  else { /* handle channels as separate grayscale channels */
    if ( (channel & AlphaChannel) != 0 )
      composite->opacity=QuantumRange*(1.0-Screen(Sa,Da));
    if ( (channel & RedChannel) != 0 )
      composite->red=QuantumRange*Screen(QuantumScale*p->red,
        QuantumScale*q->red);
    if ( (channel & GreenChannel) != 0 )
      composite->green=QuantumRange*Screen(QuantumScale*p->green,
        QuantumScale*q->green);
    if ( (channel & BlueChannel) != 0 )
      composite->blue=QuantumRange*Screen(QuantumScale*p->blue,
        QuantumScale*q->blue);
    if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
      composite->index=QuantumRange*Screen(QuantumScale*p->index,
        QuantumScale*q->index);
  }
}

static MagickRealType SoftLight(const MagickRealType Sca,
  const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da)
{
#if 0
  /*
    Oct 2004 SVG specification -- was found to be incorrect
    See http://lists.w3.org/Archives/Public/www-svg/2009Feb/0014.html.
  */
  if (2.0*Sca < Sa)
    return(Dca*(Sa-(1.0-Dca/Da)*(2.0*Sca-Sa))+Sca*(1.0-Da)+Dca*(1.0-Sa));
  if (8.0*Dca <= Da)
    return(Dca*(Sa-(1.0-Dca/Da)*(2.0*Sca-Sa)*(3.0-8.0*Dca/Da))+
      Sca*(1.0-Da)+Dca*(1.0-Sa));
  return((Dca*Sa+(pow(Dca/Da,0.5)*Da-Dca)*(2.0*Sca-Sa))+Sca*(1.0-Da)+
    Dca*(1.0-Sa));
#else
  MagickRealType
    alpha,
    beta;

  /*
    New specification: March 2009 SVG specification.
  */
  alpha=Dca/Da;
  if ((2.0*Sca) < Sa)
    return(Dca*(Sa+(2.0*Sca-Sa)*(1.0-alpha))+Sca*(1.0-Da)+Dca*(1.0-Sa));
  if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da))
    {
      beta=Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*alpha*(4.0*alpha+1.0)*(alpha-1.0)+7.0*
        alpha)+Sca*(1.0-Da)+Dca*(1.0-Sa);
      return(beta);
    }
  beta=Dca*Sa+Da*(2.0*Sca-Sa)*(pow(alpha,0.5)-alpha)+Sca*(1.0-Da)+Dca*(1.0-Sa);
  return(beta);
#endif
}

static inline void CompositeSoftLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*SoftLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*SoftLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*SoftLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*SoftLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

/*
  Deprecated
  Multiply difference by amount, if difference larger than threshold???
  What use this is is completely unknown.
  The Opacity calculation appears to be inverted -- Anthony Thyssen
*/
static inline MagickRealType Threshold(const MagickRealType p,
  const MagickRealType q,const MagickRealType threshold,
  const MagickRealType amount)
{
  MagickRealType
    delta;

  delta=p-q;
  if ((MagickRealType) fabs((double) (2.0*delta)) < threshold)
    return(q);
  return(q+delta*amount);
}

static inline void CompositeThreshold(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const MagickRealType threshold,
  const MagickRealType amount,MagickPixelPacket *composite)
{
  composite->red=Threshold(p->red,q->red,threshold,amount);
  composite->green=Threshold(p->green,q->green,threshold,amount);
  composite->blue=Threshold(p->blue,q->blue,threshold,amount);
  composite->opacity=QuantumRange-Threshold(p->opacity,q->opacity,
    threshold,amount);
  if (q->colorspace == CMYKColorspace)
    composite->index=Threshold(p->index,q->index,threshold,amount);
}

static MagickRealType VividLight(const MagickRealType Sca,
  const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da)
{
  /*
    VividLight: A Photoshop 7 composition method.  See
    http://www.simplefilter.de/en/basics/mixmods.html.

      f(Sc,Dc) = (2*Sc < 1) ? 1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc))
  */
  if ((fabs(Sa) < MagickEpsilon) || (fabs(Sca-Sa) < MagickEpsilon))
    return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
  if ((2*Sca) <= Sa)
    return(Sa*(Da+Sa*(Dca-Da)/(2.0*Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
  return(Dca*Sa*Sa/(2.0*(Sa-Sca))+Sca*(1.0-Da)+Dca*(1.0-Sa));
}

static inline void CompositeVividLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ?
    MagickEpsilon : gamma);
  composite->red=gamma*VividLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*VividLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*VividLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*VividLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

/* Porter-Duff 'Xor': keep each image only where the other is absent. */
static MagickRealType Xor(const MagickRealType Sca,const MagickRealType Sa,
  const MagickRealType Dca,const MagickRealType Da)
{
  return(Sca*(1.0-Da)+Dca*(1.0-Sa));
}

static inline void CompositeXor(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=Sa+Da-2*Sa*Da;  /* Xor blend mode X=0,Y=1,Z=1 */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=PerceptibleReciprocal(gamma);
  composite->red=gamma*Xor(p->red*Sa,Sa,q->red*Da,Da);
  composite->green=gamma*Xor(p->green*Sa,Sa,q->green*Da,Da);
  composite->blue=gamma*Xor(p->blue*Sa,Sa,q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*Xor(p->index*Sa,Sa,q->index*Da,Da);
}

/* Convenience wrapper: composite over all default channels. */
MagickExport MagickBooleanType CompositeImage(Image *image,
  const CompositeOperator compose,const Image *source_image,
  const ssize_t x_offset,const ssize_t y_offset)
{
  MagickBooleanType
    status;

  status=CompositeImageChannel(image,DefaultChannels,compose,source_image,
    x_offset,y_offset);
  return(status);
}

MagickExport MagickBooleanType CompositeImageChannel(Image *image,
  const ChannelType channel,const CompositeOperator compose,
  const Image *composite,const ssize_t x_offset,const ssize_t y_offset)
{
#define CompositeImageTag  "Composite/Image"

  CacheView
    *source_view,
    *image_view;

  const char
    *value;

  ExceptionInfo
    *exception;

  GeometryInfo
    geometry_info;

  Image
    *canvas_image,
    *source_image;

  MagickBooleanType
    clamp,
    clip_to_self,
    status;
MagickOffsetType progress; MagickPixelPacket zero; MagickRealType amount, canvas_dissolve, midpoint, percent_luma, percent_chroma, source_dissolve, threshold; MagickStatusType flags; ssize_t y; /* Prepare composite image. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(composite != (Image *) NULL); assert(composite->signature == MagickSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); exception=(&image->exception); source_image=CloneImage(composite,0,0,MagickTrue,exception); if (source_image == (const Image *) NULL) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace); (void) SetImageColorspace(source_image,image->colorspace); GetMagickPixelPacket(image,&zero); canvas_image=(Image *) NULL; amount=0.5; canvas_dissolve=1.0; clip_to_self=MagickTrue; percent_luma=100.0; percent_chroma=100.0; source_dissolve=1.0; threshold=0.05f; switch (compose) { case ClearCompositeOp: case SrcCompositeOp: case InCompositeOp: case SrcInCompositeOp: case OutCompositeOp: case SrcOutCompositeOp: case DstInCompositeOp: case DstAtopCompositeOp: { /* Modify canvas outside the overlaid region. 
*/ clip_to_self=MagickFalse; break; } case OverCompositeOp: { if (image->matte != MagickFalse) break; if (source_image->matte != MagickFalse) break; } case CopyCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset+(ssize_t) source_image->columns) >= (ssize_t) image->columns) break; if ((y_offset+(ssize_t) source_image->rows) >= (ssize_t) image->rows) break; status=MagickTrue; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(source_image,image,source_image->rows,1) #endif for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const IndexPacket *source_indexes; register const PixelPacket *p; register IndexPacket *indexes; register PixelPacket *q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns, 1,exception); q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset, source_image->columns,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } source_indexes=GetCacheViewVirtualIndexQueue(source_view); indexes=GetCacheViewAuthenticIndexQueue(image_view); (void) CopyMagickMemory(q,p,source_image->columns*sizeof(*p)); if ((indexes != (IndexPacket *) NULL) && (source_indexes != (const IndexPacket *) NULL)) (void) CopyMagickMemory(indexes,source_indexes, source_image->columns*sizeof(*indexes)); sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImage) #endif proceed=SetImageProgress(image,CompositeImageTag, (MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } 
source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); return(status); } case CopyOpacityCompositeOp: case ChangeMaskCompositeOp: { /* Modify canvas outside the overlaid region and require an alpha channel to exist, to add transparency. */ if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); clip_to_self=MagickFalse; break; } case BlurCompositeOp: { CacheView *canvas_view, *source_view; MagickPixelPacket pixel; MagickRealType angle_range, angle_start, height, width; ResampleFilter *resample_filter; SegmentInfo blur; /* Blur Image by resampling. Blur Image dictated by an overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,angle]]. */ canvas_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } /* Gather the maximum blur sigma values from user. */ SetGeometryInfo(&geometry_info); flags=NoValue; value=GetImageArtifact(source_image,"compose:args"); if (value != (char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & WidthValue) == 0) { (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"InvalidGeometry","'%s' '%s'","compose:args",value); source_image=DestroyImage(source_image); canvas_image=DestroyImage(canvas_image); return(MagickFalse); } /* Users input sigma now needs to be converted to the EWA ellipse size. The filter defaults to a sigma of 0.5 so to make this match the users input the ellipse size needs to be doubled. 
*/ width=height=geometry_info.rho*2.0; if ((flags & HeightValue) != 0 ) height=geometry_info.sigma*2.0; /* default the unrotated ellipse width and height axis vectors */ blur.x1=width; blur.x2=0.0; blur.y1=0.0; blur.y2=height; /* rotate vectors if a rotation angle is given */ if ((flags & XValue) != 0 ) { MagickRealType angle; angle=DegreesToRadians(geometry_info.xi); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } /* Otherwise lets set a angle range and calculate in the loop */ angle_start=0.0; angle_range=0.0; if ((flags & YValue) != 0 ) { angle_start=DegreesToRadians(geometry_info.xi); angle_range=DegreesToRadians(geometry_info.psi)-angle_start; } /* Set up a gaussian cylindrical filter for EWA Bluring. As the minimum ellipse radius of support*1.0 the EWA algorithm can only produce a minimum blur of 0.5 for Gaussian (support=2.0) This means that even 'No Blur' will be still a little blurry! The solution (as well as the problem of preventing any user expert filter settings, is to set our own user settings, then restore them afterwards. 
*/ resample_filter=AcquireResampleFilter(image,exception); SetResampleFilter(resample_filter,GaussianFilter,1.0); /* do the variable blurring of each pixel in image */ pixel=zero; source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const PixelPacket *magick_restrict p; register PixelPacket *magick_restrict r; register IndexPacket *magick_restrict canvas_indexes; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns, 1,exception); r=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL)) break; canvas_indexes=GetCacheViewAuthenticIndexQueue(canvas_view); for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p++; continue; } if (fabs(angle_range) > MagickEpsilon) { MagickRealType angle; angle=angle_start+angle_range*QuantumScale*GetPixelBlue(p); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } #if 0 if ( x == 10 && y == 60 ) { fprintf(stderr, "blur.x=%lf,%lf, blur.y=%lf,%lf\n", blur.x1, blur.x2, blur.y1, blur.y2); fprintf(stderr, "scaled by=%lf,%lf\n", QuantumScale*GetPixelRed(p), QuantumScale*GetPixelGreen(p)); } #endif ScaleResampleFilter(resample_filter, blur.x1*QuantumScale*GetPixelRed(p), blur.y1*QuantumScale*GetPixelGreen(p), blur.x2*QuantumScale*GetPixelRed(p), blur.y2*QuantumScale*GetPixelGreen(p)); (void) ResamplePixelColor(resample_filter,(double) x_offset+x,(double) y_offset+y,&pixel); SetPixelPacket(canvas_image,&pixel,r,canvas_indexes+x); p++; r++; } sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if (sync == MagickFalse) break; } 
resample_filter=DestroyResampleFilter(resample_filter); source_view=DestroyCacheView(source_view); canvas_view=DestroyCacheView(canvas_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DisplaceCompositeOp: case DistortCompositeOp: { CacheView *canvas_view, *source_view, *image_view; MagickPixelPacket pixel; MagickRealType horizontal_scale, vertical_scale; PointInfo center, offset; register IndexPacket *magick_restrict canvas_indexes; register PixelPacket *magick_restrict r; /* Displace/Distort based on overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,center.x,center.y]] */ canvas_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } SetGeometryInfo(&geometry_info); flags=NoValue; value=GetImageArtifact(source_image,"compose:args"); if (value != (char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & (WidthValue | HeightValue)) == 0 ) { if ((flags & AspectValue) == 0) { horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0; vertical_scale=(MagickRealType) (source_image->rows-1)/2.0; } else { horizontal_scale=(MagickRealType) (image->columns-1)/2.0; vertical_scale=(MagickRealType) (image->rows-1)/2.0; } } else { horizontal_scale=geometry_info.rho; vertical_scale=geometry_info.sigma; if ((flags & PercentValue) != 0) { if ((flags & AspectValue) == 0) { horizontal_scale*=(source_image->columns-1)/200.0; vertical_scale*=(source_image->rows-1)/200.0; } else { horizontal_scale*=(image->columns-1)/200.0; vertical_scale*=(image->rows-1)/200.0; } } if ((flags & HeightValue) == 0) vertical_scale=horizontal_scale; } /* Determine fixed center point for absolute distortion map Absolute distort == Displace offset relative to a fixed absolute point Select that point according to +X+Y user inputs. default = center of overlay image arg flag '!' 
= locations/percentage relative to background image */ center.x=(MagickRealType) x_offset; center.y=(MagickRealType) y_offset; if (compose == DistortCompositeOp) { if ((flags & XValue) == 0) if ((flags & AspectValue) != 0) center.x=((MagickRealType) image->columns-1)/2.0; else center.x=(MagickRealType) (x_offset+(source_image->columns-1)/ 2.0); else if ((flags & AspectValue) == 0) center.x=(MagickRealType) (x_offset+geometry_info.xi); else center.x=geometry_info.xi; if ((flags & YValue) == 0) if ((flags & AspectValue) != 0) center.y=((MagickRealType) image->rows-1)/2.0; else center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0); else if ((flags & AspectValue) != 0) center.y=geometry_info.psi; else center.y=(MagickRealType) (y_offset+geometry_info.psi); } /* Shift the pixel offset point as defined by the provided, displacement/distortion map. -- Like a lens... */ pixel=zero; image_view=AcquireVirtualCacheView(image,exception); source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const PixelPacket *magick_restrict p; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns, 1,exception); r=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns, 1,exception); if ((p == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL)) break; canvas_indexes=GetCacheViewAuthenticIndexQueue(canvas_view); for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p++; continue; } /* Displace the offset. */ offset.x=(double) ((horizontal_scale*(GetPixelRed(p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ? 
x : 0)); offset.y=(double) ((vertical_scale*(GetPixelGreen(p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ? y : 0)); (void) InterpolateMagickPixelPacket(image,image_view, UndefinedInterpolatePixel,(double) offset.x,(double) offset.y, &pixel,exception); /* Mask with the 'invalid pixel mask' in alpha channel. */ pixel.opacity=(MagickRealType) QuantumRange*(1.0-(1.0-QuantumScale* pixel.opacity)*(1.0-QuantumScale*GetPixelOpacity(p))); SetPixelPacket(canvas_image,&pixel,r,canvas_indexes+x); p++; r++; } sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if (sync == MagickFalse) break; } canvas_view=DestroyCacheView(canvas_view); source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DissolveCompositeOp: { /* Geometry arguments to dissolve factors. */ value=GetImageArtifact(source_image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0; if ((source_dissolve-MagickEpsilon) < 0.0) source_dissolve=0.0; if ((source_dissolve+MagickEpsilon) > 1.0) { canvas_dissolve=2.0-source_dissolve; source_dissolve=1.0; } if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; if ((canvas_dissolve-MagickEpsilon) < 0.0) canvas_dissolve=0.0; clip_to_self=MagickFalse; if ((canvas_dissolve+MagickEpsilon) > 1.0 ) { canvas_dissolve=1.0; clip_to_self=MagickTrue; } } break; } case BlendCompositeOp: { value=GetImageArtifact(source_image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0-source_dissolve; if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; clip_to_self=MagickFalse; if ((canvas_dissolve+MagickEpsilon) > 1.0) clip_to_self=MagickTrue; } break; } case 
MathematicsCompositeOp: { /* Just collect the values from "compose:args", setting. Unused values are set to zero automagically. Arguments are normally a comma separated list, so this probably should be changed to some 'general comma list' parser, (with a minimum number of values) */ SetGeometryInfo(&geometry_info); value=GetImageArtifact(source_image,"compose:args"); if (value != (char *) NULL) (void) ParseGeometry(value,&geometry_info); break; } case ModulateCompositeOp: { /* Determine the luma and chroma scale. */ value=GetImageArtifact(source_image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); percent_luma=geometry_info.rho; if ((flags & SigmaValue) != 0) percent_chroma=geometry_info.sigma; } break; } case ThresholdCompositeOp: { /* Determine the amount and threshold. This Composition method is deprecated */ value=GetImageArtifact(source_image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); amount=geometry_info.rho; threshold=geometry_info.sigma; if ((flags & SigmaValue) == 0) threshold=0.05f; } threshold*=QuantumRange; break; } default: break; } value=GetImageArtifact(source_image,"compose:outside-overlay"); if (value != (const char *) NULL) clip_to_self=IsMagickTrue(value) == MagickFalse ? MagickTrue : MagickFalse; clamp=MagickTrue; value=GetImageArtifact(source_image,"compose:clamp"); if (value != (const char *) NULL) clamp=IsMagickTrue(value); /* Composite image. 
*/ status=AccelerateCompositeImage(image,channel,compose,source_image, x_offset,y_offset,canvas_dissolve,source_dissolve,exception); if (status != MagickFalse) return(status); status=MagickTrue; progress=0; midpoint=((MagickRealType) QuantumRange+1.0)/2; GetMagickPixelPacket(source_image,&zero); source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(source_image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const PixelPacket *pixels; double luma, hue, chroma, sans; MagickPixelPacket composite, canvas, source; register const IndexPacket *magick_restrict source_indexes; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; if (clip_to_self != MagickFalse) { if (y < y_offset) continue; if ((y-y_offset) >= (ssize_t) source_image->rows) continue; } /* If pixels is NULL, y is outside overlay region. 
*/ pixels=(PixelPacket *) NULL; p=(PixelPacket *) NULL; if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows)) { p=GetCacheViewVirtualPixels(source_view,0,y-y_offset, source_image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } pixels=p; if (x_offset < 0) p-=x_offset; } q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); source_indexes=GetCacheViewVirtualIndexQueue(source_view); GetMagickPixelPacket(source_image,&source); GetMagickPixelPacket(image,&canvas); hue=0.0; chroma=0.0; luma=0.0; for (x=0; x < (ssize_t) image->columns; x++) { if (clip_to_self != MagickFalse) { if (x < x_offset) { q++; continue; } if ((x-x_offset) >= (ssize_t) source_image->columns) break; } canvas.red=(MagickRealType) GetPixelRed(q); canvas.green=(MagickRealType) GetPixelGreen(q); canvas.blue=(MagickRealType) GetPixelBlue(q); if (image->matte != MagickFalse) canvas.opacity=(MagickRealType) GetPixelOpacity(q); if (image->colorspace == CMYKColorspace) canvas.index=(MagickRealType) GetPixelIndex(indexes+x); if (image->colorspace == CMYKColorspace) { canvas.red=(MagickRealType) QuantumRange-canvas.red; canvas.green=(MagickRealType) QuantumRange-canvas.green; canvas.blue=(MagickRealType) QuantumRange-canvas.blue; canvas.index=(MagickRealType) QuantumRange-canvas.index; } /* Handle canvas modifications outside overlaid region. 
*/ composite=canvas; if ((pixels == (PixelPacket *) NULL) || (x < x_offset) || ((x-x_offset) >= (ssize_t) source_image->columns)) { switch (compose) { case DissolveCompositeOp: case BlendCompositeOp: { composite.opacity=(MagickRealType) (QuantumRange-canvas_dissolve* (QuantumRange-composite.opacity)); break; } case ClearCompositeOp: case SrcCompositeOp: { CompositeClear(&canvas,&composite); break; } case InCompositeOp: case SrcInCompositeOp: case OutCompositeOp: case SrcOutCompositeOp: case DstInCompositeOp: case DstAtopCompositeOp: case CopyOpacityCompositeOp: case ChangeMaskCompositeOp: { composite.opacity=(MagickRealType) TransparentOpacity; break; } default: { (void) GetOneVirtualMagickPixel(source_image,x-x_offset, y-y_offset,&composite,exception); break; } } if (image->colorspace == CMYKColorspace) { composite.red=(MagickRealType) QuantumRange-composite.red; composite.green=(MagickRealType) QuantumRange-composite.green; composite.blue=(MagickRealType) QuantumRange-composite.blue; composite.index=(MagickRealType) QuantumRange-composite.index; } SetPixelRed(q,clamp != MagickFalse ? ClampPixel(composite.red) : ClampToQuantum(composite.red)); SetPixelGreen(q,clamp != MagickFalse ? ClampPixel(composite.green) : ClampToQuantum(composite.green)); SetPixelBlue(q,clamp != MagickFalse ? ClampPixel(composite.blue) : ClampToQuantum(composite.blue)); if (image->matte != MagickFalse) SetPixelOpacity(q,clamp != MagickFalse ? ClampPixel(composite.opacity) : ClampToQuantum(composite.opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(indexes+x,clamp != MagickFalse ? ClampPixel(composite.index) : ClampToQuantum(composite.index)); q++; continue; } /* Handle normal overlay of source onto canvas. 
*/ source.red=(MagickRealType) GetPixelRed(p); source.green=(MagickRealType) GetPixelGreen(p); source.blue=(MagickRealType) GetPixelBlue(p); if (source_image->matte != MagickFalse) source.opacity=(MagickRealType) GetPixelOpacity(p); if (source_image->colorspace == CMYKColorspace) source.index=(MagickRealType) GetPixelIndex(source_indexes+ x-x_offset); if (source_image->colorspace == CMYKColorspace) { source.red=(MagickRealType) QuantumRange-source.red; source.green=(MagickRealType) QuantumRange-source.green; source.blue=(MagickRealType) QuantumRange-source.blue; source.index=(MagickRealType) QuantumRange-source.index; } switch (compose) { /* Duff-Porter Compositions */ case ClearCompositeOp: { CompositeClear(&canvas,&composite); break; } case SrcCompositeOp: case CopyCompositeOp: case ReplaceCompositeOp: { composite=source; break; } case NoCompositeOp: case DstCompositeOp: break; case OverCompositeOp: case SrcOverCompositeOp: { MagickPixelCompositeOver(&source,source.opacity,&canvas, canvas.opacity,&composite); break; } case DstOverCompositeOp: { MagickPixelCompositeOver(&canvas,canvas.opacity,&source, source.opacity,&composite); break; } case SrcInCompositeOp: case InCompositeOp: { CompositeIn(&source,&canvas,&composite); break; } case DstInCompositeOp: { CompositeIn(&canvas,&source,&composite); break; } case OutCompositeOp: case SrcOutCompositeOp: { CompositeOut(&source,&canvas,&composite); break; } case DstOutCompositeOp: { CompositeOut(&canvas,&source,&composite); break; } case AtopCompositeOp: case SrcAtopCompositeOp: { CompositeAtop(&source,&canvas,&composite); break; } case DstAtopCompositeOp: { CompositeAtop(&canvas,&source,&composite); break; } case XorCompositeOp: { CompositeXor(&source,&canvas,&composite); break; } /* Mathematical Compositions */ case PlusCompositeOp: { CompositePlus(&source,&canvas,channel,&composite); break; } case MinusDstCompositeOp: { CompositeMinus(&source,&canvas,channel,&composite); break; } case MinusSrcCompositeOp: { 
CompositeMinus(&canvas,&source,channel,&composite); break; } case ModulusAddCompositeOp: { CompositeModulusAdd(&source,&canvas,channel,&composite); break; } case ModulusSubtractCompositeOp: { CompositeModulusSubtract(&source,&canvas,channel,&composite); break; } case DifferenceCompositeOp: { CompositeDifference(&source,&canvas,channel,&composite); break; } case ExclusionCompositeOp: { CompositeExclusion(&source,&canvas,channel,&composite); break; } case MultiplyCompositeOp: { CompositeMultiply(&source,&canvas,channel,&composite); break; } case ScreenCompositeOp: { CompositeScreen(&source,&canvas,channel,&composite); break; } case DivideDstCompositeOp: { CompositeDivide(&source,&canvas,channel,&composite); break; } case DivideSrcCompositeOp: { CompositeDivide(&canvas,&source,channel,&composite); break; } case DarkenCompositeOp: { CompositeDarken(&source,&canvas,channel,&composite); break; } case LightenCompositeOp: { CompositeLighten(&source,&canvas,channel,&composite); break; } case DarkenIntensityCompositeOp: { CompositeDarkenIntensity(&source,&canvas,channel,&composite); break; } case LightenIntensityCompositeOp: { CompositeLightenIntensity(&source,&canvas,channel,&composite); break; } case MathematicsCompositeOp: { CompositeMathematics(&source,&canvas,channel,&geometry_info, &composite); break; } /* Lighting Compositions */ case ColorDodgeCompositeOp: { CompositeColorDodge(&source,&canvas,&composite); break; } case ColorBurnCompositeOp: { CompositeColorBurn(&source,&canvas,&composite); break; } case LinearDodgeCompositeOp: { CompositeLinearDodge(&source,&canvas,&composite); break; } case LinearBurnCompositeOp: { CompositeLinearBurn(&source,&canvas,&composite); break; } case HardLightCompositeOp: { CompositeHardLight(&source,&canvas,&composite); break; } case HardMixCompositeOp: { CompositeHardMix(&source,&canvas,&composite); break; } case OverlayCompositeOp: { /* Overlay = Reversed HardLight. 
*/ CompositeHardLight(&canvas,&source,&composite); break; } case SoftLightCompositeOp: { CompositeSoftLight(&source,&canvas,&composite); break; } case LinearLightCompositeOp: { CompositeLinearLight(&source,&canvas,&composite); break; } case PegtopLightCompositeOp: { CompositePegtopLight(&source,&canvas,&composite); break; } case VividLightCompositeOp: { CompositeVividLight(&source,&canvas,&composite); break; } case PinLightCompositeOp: { CompositePinLight(&source,&canvas,&composite); break; } /* Other Composition */ case ChangeMaskCompositeOp: { if ((composite.opacity > ((MagickRealType) QuantumRange/2.0)) || (IsMagickColorSimilar(&source,&canvas) != MagickFalse)) composite.opacity=(MagickRealType) TransparentOpacity; else composite.opacity=(MagickRealType) OpaqueOpacity; break; } case BumpmapCompositeOp: { if (source.opacity == TransparentOpacity) break; CompositeBumpmap(&source,&canvas,&composite); break; } case DissolveCompositeOp: { MagickPixelCompositeOver(&source,(MagickRealType) (QuantumRange- source_dissolve*(QuantumRange-source.opacity)),&canvas, (MagickRealType) (QuantumRange-canvas_dissolve*(QuantumRange- canvas.opacity)),&composite); break; } case BlendCompositeOp: { MagickPixelCompositeBlend(&source,source_dissolve,&canvas, canvas_dissolve,&composite); break; } case ThresholdCompositeOp: { CompositeThreshold(&source,&canvas,threshold,amount,&composite); break; } case ModulateCompositeOp: { ssize_t offset; if (source.opacity == TransparentOpacity) break; offset=(ssize_t) (MagickPixelIntensityToQuantum(&source)-midpoint); if (offset == 0) break; CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue, &chroma,&luma); luma+=(0.01*percent_luma*offset)/midpoint; chroma*=0.01*percent_chroma; HCLComposite(hue,chroma,luma,&composite.red,&composite.green, &composite.blue); break; } case HueCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite=source; break; } 
CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue, &chroma,&luma); CompositeHCL(source.red,source.green,source.blue,&hue,&sans,&sans); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < canvas.opacity) composite.opacity=source.opacity; break; } case SaturateCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite=source; break; } CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue, &chroma,&luma); CompositeHCL(source.red,source.green,source.blue,&sans,&chroma, &sans); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < canvas.opacity) composite.opacity=source.opacity; break; } case LuminizeCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite=source; break; } CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue, &chroma,&luma); CompositeHCL(source.red,source.green,source.blue,&sans,&sans, &luma); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < canvas.opacity) composite.opacity=source.opacity; break; } case ColorizeCompositeOp: { if (source.opacity == TransparentOpacity) break; if (canvas.opacity == TransparentOpacity) { composite=source; break; } CompositeHCL(canvas.red,canvas.green,canvas.blue,&sans, &sans,&luma); CompositeHCL(source.red,source.green,source.blue,&hue,&chroma,&sans); HCLComposite(hue,chroma,luma,&composite.red, &composite.green,&composite.blue); if (source.opacity < canvas.opacity) composite.opacity=source.opacity; break; } case CopyRedCompositeOp: case CopyCyanCompositeOp: { composite.red=source.red; break; } case CopyGreenCompositeOp: case CopyMagentaCompositeOp: { composite.green=source.green; break; } case CopyBlueCompositeOp: case CopyYellowCompositeOp: { composite.blue=source.blue; break; } case CopyOpacityCompositeOp: { if (source.matte == MagickFalse) 
composite.opacity=(MagickRealType) (QuantumRange- MagickPixelIntensityToQuantum(&source)); else composite.opacity=source.opacity; break; } case CopyBlackCompositeOp: { if (source.colorspace != CMYKColorspace) ConvertRGBToCMYK(&source); composite.index=QuantumRange-source.index; break; } /* compose methods that are already handled */ case BlurCompositeOp: case DisplaceCompositeOp: case DistortCompositeOp: { composite=source; break; } default: break; } if (image->colorspace == CMYKColorspace) { composite.red=(MagickRealType) QuantumRange-composite.red; composite.green=(MagickRealType) QuantumRange-composite.green; composite.blue=(MagickRealType) QuantumRange-composite.blue; composite.index=(MagickRealType) QuantumRange-composite.index; } SetPixelRed(q,clamp != MagickFalse ? ClampPixel(composite.red) : ClampToQuantum(composite.red)); SetPixelGreen(q,clamp != MagickFalse ? ClampPixel(composite.green) : ClampToQuantum(composite.green)); SetPixelBlue(q,clamp != MagickFalse ? ClampPixel(composite.blue) : ClampToQuantum(composite.blue)); SetPixelOpacity(q,clamp != MagickFalse ? ClampPixel(composite.opacity) : ClampToQuantum(composite.opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(indexes+x,clamp != MagickFalse ? 
ClampPixel(composite.index) : ClampToQuantum(composite.index)); p++; if (p >= (pixels+source_image->columns)) p=pixels; q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImageChannel) #endif proceed=SetImageProgress(image,CompositeImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); if (canvas_image != (Image * ) NULL) canvas_image=DestroyImage(canvas_image); else source_image=DestroyImage(source_image); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T e x t u r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TextureImage() repeatedly tiles the texture image across and down the image % canvas. % % The format of the TextureImage method is: % % MagickBooleanType TextureImage(Image *image,const Image *texture) % % A description of each parameter follows: % % o image: the image. % % o texture: This image is the texture to layer on the background. 
%
*/

/*
  TextureImage() repeatedly tiles `texture` across and down `image`.

  Two code paths:
    * generic path  -- per-tile CompositeImage() calls, taken whenever the
      compose operator (or the presence of an alpha channel on either image)
      requires real pixel blending;
    * optimized path -- raw row copies of the tiled texture via cache views,
      taken for the plain Copy / opaque-Over case.

  Returns MagickTrue on success, MagickFalse on failure (NULL texture,
  storage-class or clone failure, or any per-row pixel I/O error).
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture)
{
#define TextureImageTag  "Texture/Image"

  CacheView
    *image_view,
    *texture_view;

  ExceptionInfo
    *exception;

  Image
    *texture_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (texture == (const Image *) NULL)
    return(MagickFalse);
  /* Pixels are written in place below, so the image must be DirectClass. */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  exception=(&image->exception);
  /*
    Clone the texture so its colorspace and virtual-pixel method can be
    changed without touching the caller's image.
  */
  texture_image=CloneImage(texture,0,0,MagickTrue,exception);
  if (texture_image == (const Image *) NULL)
    return(MagickFalse);
  (void) TransformImageColorspace(texture_image,image->colorspace);
  /* Tile virtual pixels: reads past the texture edge wrap around. */
  (void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod);
  status=MagickTrue;
  if ((image->compose != CopyCompositeOp) &&
      ((image->compose != OverCompositeOp) || (image->matte != MagickFalse) ||
       (texture_image->matte != MagickFalse)))
    {
      /*
        Tile texture onto the image background.
      */
      for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows)
      {
        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        for (x=0; x < (ssize_t) image->columns; x+=(ssize_t)
          texture_image->columns)
        {
          MagickBooleanType
            thread_status;

          /* Composite one full tile; tile_offset shifts the texture phase. */
          thread_status=CompositeImage(image,image->compose,texture_image,x+
            texture_image->tile_offset.x,y+texture_image->tile_offset.y);
          if (thread_status == MagickFalse)
            {
              status=thread_status;
              break;
            }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType)
              y,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      /* Final progress tick, then release the texture clone. */
      (void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
        image->rows,image->rows);
      texture_image=DestroyImage(texture_image);
      return(status);
    }
  /*
    Tile texture onto the image background (optimized).
  */
  status=MagickTrue;
  texture_view=AcquireVirtualCacheView(texture_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,texture_image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const IndexPacket
      *texture_indexes;

    register const PixelPacket
      *p;

    register IndexPacket
      *indexes;

    register ssize_t
      x;

    register PixelPacket
      *q;

    size_t
      width;

    /* A failure in any thread stops the remaining rows doing real work. */
    if (status == MagickFalse)
      continue;
    /*
      Fetch one texture row (modular row index wraps vertically) and queue
      the matching destination row for writing.
    */
    p=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,(y+
      texture_image->tile_offset.y) % texture_image->rows,
      texture_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    texture_indexes=GetCacheViewVirtualIndexQueue(texture_view);
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    /*
      Copy the same texture row repeatedly across the destination row; the
      last copy is clipped to the image width.  `p` is deliberately not
      advanced -- each copy restarts at the row start, which is what tiles
      the texture horizontally.
    */
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t)
      texture_image->columns)
    {
      width=texture_image->columns;
      if ((x+(ssize_t) width) > (ssize_t) image->columns)
        width=image->columns-x;
      (void) CopyMagickMemory(q,p,width*sizeof(*p));
      if ((image->colorspace == CMYKColorspace) &&
          (texture_image->colorspace == CMYKColorspace))
        {
          /* Also tile the black-channel/colormap indexes for CMYK. */
          (void) CopyMagickMemory(indexes,texture_indexes,width*
            sizeof(*indexes));
          indexes+=width;
        }
      q+=width;
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TextureImage)
#endif
        proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  texture_view=DestroyCacheView(texture_view);
  image_view=DestroyCacheView(image_view);
  texture_image=DestroyImage(texture_image);
  return(status);
}
pr81052.c
/* PR middle-end/81052 */
/* { dg-do compile } */
/* { dg-options "-fopenmp-simd -O2" } */

/* Compile-only regression test: a "return" inside the body of an OpenMP
   `simd` loop branches out of the structured block, which the compiler
   must diagnose rather than crash (PR81052 was an ICE here at -O2).
   The dg-error directives pin the expected diagnostic to the offending
   lines -- do not move the `return 0;` statements off those lines.  */

int
foo (int x, int y)
{
  int i;
#pragma omp simd
  for (i = x; i < y; ++i)
    return 0;	/* { dg-error "invalid branch to/from OpenMP structured block" } */
  return 1;
}

#ifdef __cplusplus
/* Same check through a C++ template, forced to instantiate via the
   initializer of `x` below; diagnostic expected for C++ targets only.  */
template <typename T>
T
bar (T x, T y)
{
  T i;
#pragma omp simd
  for (i = x; i < y; ++i)
    return 0;	/* { dg-error "invalid branch to/from OpenMP structured block" "" { target c++ } } */
  return 1;
}

int x = bar (1, 7);
#endif
GB_binop__pair_fc64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pair_fc64) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pair_fc64) // C+=b function (dense accum): GB (_Cdense_accumb__pair_fc64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_fc64) // C=scalar+B GB ((none)) // C=scalar+B' GB ((none)) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: GxB_FC64_t // A type: GxB_FC64_t // A pattern? 1 // B type: GxB_FC64_t // B pattern? 
1 // BinaryOp: cij = GxB_CMPLX(1,0) #define GB_ATYPE \ GxB_FC64_t #define GB_BTYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ ; // true if values of A are not used #define GB_A_IS_PATTERN \ 1 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ ; // true if values of B are not used #define GB_B_IS_PATTERN \ 1 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GxB_CMPLX(1,0) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PAIR || GxB_NO_FC64 || GxB_NO_PAIR_FC64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__pair_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pair_fc64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pair_fc64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC64_t GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t 
*restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pair_fc64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; GxB_FC64_t alpha_scalar ; GxB_FC64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((GxB_FC64_t *) alpha_scalar_in)) ; beta_scalar = (*((GxB_FC64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const int 
C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ; GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; ; ; Cx [p] = GxB_CMPLX(1,0) ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ; GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; ; ; Cx [p] = GxB_CMPLX(1,0) ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = GxB_CMPLX(1,0) ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = GxB_CMPLX(1,0) ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
GB_binop__hypot_fp64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): this file is machine-generated for the HYPOT binary operator
// specialized to FP64 (double).  Any fix belongs in the Generator/ sources,
// not here; the comments added below are review annotations only.  Each
// function body is produced by #include'ing a shared template that expands
// the GB_* macros defined in this file.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__hypot_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_08__hypot_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_02__hypot_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_04__hypot_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__hypot_fp64)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__hypot_fp64)
// C+=b function (dense accum):     GB (_Cdense_accumb__hypot_fp64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__hypot_fp64)
// C=scalar+B                       GB (_bind1st__hypot_fp64)
// C=scalar+B'                      GB (_bind1st_tran__hypot_fp64)
// C=A+scalar                       GB (_bind2nd__hypot_fp64)
// C=A'+scalar                      GB (_bind2nd_tran__hypot_fp64)

// C type:   double
// A type:   double
// A pattern? 0
// B type:   double
// B pattern? 0

// BinaryOp: cij = hypot (aij, bij)

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    double

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    double aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
// NOTE(review): the trailing backslash after "0" splices the following blank
// line into the macro — a harmless generator artifact; keep the blank line.
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    double bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
// NOTE(review): same trailing-backslash artifact as GB_A_IS_PATTERN above.
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    double t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = hypot (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_HYPOT || GxB_NO_FP64 || GxB_NO_HYPOT_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// NOTE(review): "#if 0" blocks throughout this file are kernels that the
// generator did not instantiate for this operator/type (named "(none)" above).

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__hypot_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__hypot_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__hypot_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable (the return above always fires) — harmless
    // generator artifact; do not "fix" here, fix the Generator if at all.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__hypot_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read by the template when is_eWiseUnion is true
    double alpha_scalar ;
    double beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((double *) alpha_scalar_in)) ;
        beta_scalar  = (*((double *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__hypot_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__hypot_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__hypot_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__hypot_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__hypot_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double   x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        double bij = GBX (Bx, p, false) ;
        Cx [p] = hypot (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__hypot_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double   y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        double aij = GBX (Ax, p, false) ;
        Cx [p] = hypot (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = hypot (x, aij) ;                  \
}

GrB_Info GB (_bind1st_tran__hypot_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent use
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = hypot (aij, y) ;                  \
}

GrB_Info GB (_bind2nd_tran__hypot_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__identity_int64_int32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// NOTE(review): machine-generated kernel for the IDENTITY unary operator,
// typecasting int32_t inputs to int64_t outputs.  Fixes belong in the
// Generator/ sources; the comments added below are review annotations only.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_int64_int32
// op(A') function:  GB_tran__identity_int64_int32

// C type:   int64_t
// A type:   int32_t
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: output equals the cast input)
#define GB_OP(z, x) \
    z = x ;

// casting (widening int32_t -> int64_t; always value-preserving)
#define GB_CASTING(z, x) \
    int64_t z = (int64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__identity_int64_int32
(
    int64_t *restrict Cx,
    const int32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // element-wise copy-with-cast over the anz entries, statically scheduled
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__identity_int64_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
lock-nested-unrelated.c
/* * lock-nested-unrelated.c -- Archer testcase */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // // See tools/archer/LICENSE.txt for details. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // RUN: %libarcher-compile-and-run-race | FileCheck %s // RUN: %libarcher-compile-and-run-race-noserial | FileCheck %s // REQUIRES: tsan #include <omp.h> #include <stdio.h> int main(int argc, char *argv[]) { int var = 0; omp_nest_lock_t lock; omp_init_nest_lock(&lock); #pragma omp parallel num_threads(2) shared(var) { omp_set_nest_lock(&lock); omp_set_nest_lock(&lock); // Dummy locking. omp_unset_nest_lock(&lock); omp_unset_nest_lock(&lock); var++; } omp_destroy_nest_lock(&lock); fprintf(stderr, "DONE\n"); } // CHECK: WARNING: ThreadSanitizer: data race // CHECK-NEXT: {{(Write|Read)}} of size 4 // CHECK-NEXT: #0 {{.*}}lock-nested-unrelated.c:33 // CHECK: Previous write of size 4 // CHECK-NEXT: #0 {{.*}}lock-nested-unrelated.c:33 // CHECK: DONE // CHECK: ThreadSanitizer: reported 1 warnings
ParFriends.h
#ifndef _PAR_FRIENDS_H_ #define _PAR_FRIENDS_H_ #include "mpi.h" #include "sys/time.h" #include <iostream> #include "SpParMat.h" #include "SpParHelper.h" #include "MPIType.h" #include "Friends.h" #include "OptBuf.h" using namespace std; template <class IT, class NT, class DER> class SpParMat; /*************************************************************************************************/ /**************************** FRIEND FUNCTIONS FOR PARALLEL CLASSES ******************************/ /*************************************************************************************************/ /** * Parallel C = A*B routine that uses a double buffered broadcasting scheme * Most memory efficient version available. Total stages: 2*sqrt(p) * Memory requirement during first sqrt(p) stages: <= (3/2)*(nnz(A)+nnz(B))+(1/2)*nnz(C) * Memory requirement during second sqrt(p) stages: <= nnz(A)+nnz(B)+nnz(C) * Final memory requirement: nnz(C) if clearA and clearB are true **/ template <typename SR, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB> SpParMat<IU,typename promote_trait<NU1,NU2>::T_promote,typename promote_trait<UDERA,UDERB>::T_promote> Mult_AnXBn_DoubleBuff (SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool clearA = false, bool clearB = false ) { typedef typename promote_trait<NU1,NU2>::T_promote N_promote; typedef typename promote_trait<UDERA,UDERB>::T_promote DER_promote; IU ncolA = A.getncol(); IU nrowB = B.getnrow(); if(ncolA != nrowB) { ostringstream outs; outs << "Can not multiply, dimensions does not match"<< endl; outs << ncolA << " != " << nrowB << endl; SpParHelper::Print(outs.str()); MPI::COMM_WORLD.Abort(DIMMISMATCH); return SpParMat< IU,N_promote,DER_promote >(); } int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy); IU C_m = A.spSeq->getnrow(); IU C_n = B.spSeq->getncol(); UDERA * 
A1seq = new UDERA(); UDERA * A2seq = new UDERA(); UDERB * B1seq = new UDERB(); UDERB * B2seq = new UDERB(); (A.spSeq)->Split( *A1seq, *A2seq); const_cast< UDERB* >(B.spSeq)->Transpose(); (B.spSeq)->Split( *B1seq, *B2seq); GridC->GetWorld().Barrier(); IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages); IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages); SpParHelper::GetSetSizes( *A1seq, ARecvSizes, (A.commGrid)->GetRowWorld()); SpParHelper::GetSetSizes( *B1seq, BRecvSizes, (B.commGrid)->GetColWorld()); // Remotely fetched matrices are stored as pointers UDERA * ARecv; UDERB * BRecv; vector< SpTuples<IU,N_promote> *> tomerge; int Aself = (A.commGrid)->GetRankInProcRow(); int Bself = (B.commGrid)->GetRankInProcCol(); for(int i = 0; i < stages; ++i) { vector<IU> ess; if(i == Aself) { ARecv = A1seq; // shallow-copy } else { ess.resize(UDERA::esscount); for(int j=0; j< UDERA::esscount; ++j) { ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row } ARecv = new UDERA(); // first, create the object } SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i); // then, receive its elements ess.clear(); if(i == Bself) { BRecv = B1seq; // shallow-copy } else { ess.resize(UDERB::esscount); for(int j=0; j< UDERB::esscount; ++j) { ess[j] = BRecvSizes[j][i]; } BRecv = new UDERB(); } SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i); // then, receive its elements SpTuples<IU,N_promote> * C_cont = MultiplyReturnTuples<SR> (*ARecv, *BRecv, // parameters themselves false, true, // transpose information (B is transposed) i != Aself, // 'delete A' condition i != Bself); // 'delete B' condition if(!C_cont->isZero()) tomerge.push_back(C_cont); } if(clearA) { delete A1seq; } if(clearB) { delete B1seq; } // Set the new dimensions SpParHelper::GetSetSizes( *A2seq, ARecvSizes, (A.commGrid)->GetRowWorld()); SpParHelper::GetSetSizes( *B2seq, BRecvSizes, (B.commGrid)->GetColWorld()); // Start the second round for(int i = 0; i 
< stages; ++i) { vector<IU> ess; if(i == Aself) { ARecv = A2seq; // shallow-copy } else { ess.resize(UDERA::esscount); for(int j=0; j< UDERA::esscount; ++j) { ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row } ARecv = new UDERA(); // first, create the object } SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i); // then, receive its elements ess.clear(); if(i == Bself) { BRecv = B2seq; // shallow-copy } else { ess.resize(UDERB::esscount); for(int j=0; j< UDERB::esscount; ++j) { ess[j] = BRecvSizes[j][i]; } BRecv = new UDERB(); } SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i); // then, receive its elements SpTuples<IU,N_promote> * C_cont = MultiplyReturnTuples<SR> (*ARecv, *BRecv, // parameters themselves false, true, // transpose information (B is transposed) i != Aself, // 'delete A' condition i != Bself); // 'delete B' condition if(!C_cont->isZero()) tomerge.push_back(C_cont); } SpHelper::deallocate2D(ARecvSizes, UDERA::esscount); SpHelper::deallocate2D(BRecvSizes, UDERB::esscount); if(clearA) { delete A2seq; delete A.spSeq; A.spSeq = NULL; } else { (A.spSeq)->Merge(*A1seq, *A2seq); delete A1seq; delete A2seq; } if(clearB) { delete B2seq; delete B.spSeq; B.spSeq = NULL; } else { (B.spSeq)->Merge(*B1seq, *B2seq); delete B1seq; delete B2seq; const_cast< UDERB* >(B.spSeq)->Transpose(); // transpose back to original } DER_promote * C = new DER_promote(MergeAll<SR>(tomerge, C_m, C_n,true), false, NULL); // First get the result in SpTuples, then convert to UDER return SpParMat<IU,N_promote,DER_promote> (C, GridC); // return the result object } /** * Parallel A = B*C routine that uses only MPI-1 features * Relies on simple blocking broadcast **/ template <typename SR, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB> SpParMat<IU,typename promote_trait<NU1,NU2>::T_promote,typename promote_trait<UDERA,UDERB>::T_promote> Mult_AnXBn_Synch (SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool 
clearA = false, bool clearB = false ) { typedef typename promote_trait<NU1,NU2>::T_promote N_promote; typedef typename promote_trait<UDERA,UDERB>::T_promote DER_promote; IU ncolA = A.getncol(); IU nrowB = B.getnrow(); if(ncolA != nrowB) { ostringstream outs; outs << "Can not multiply, dimensions does not match"<< endl; outs << ncolA << " != " << nrowB << endl; SpParHelper::Print(outs.str()); MPI::COMM_WORLD.Abort(DIMMISMATCH); return SpParMat< IU,N_promote,DER_promote >(); } int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy); IU C_m = A.spSeq->getnrow(); IU C_n = B.spSeq->getncol(); const_cast< UDERB* >(B.spSeq)->Transpose(); GridC->GetWorld().Barrier(); IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages); IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages); SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld()); SpParHelper::GetSetSizes( *(B.spSeq), BRecvSizes, (B.commGrid)->GetColWorld()); // Remotely fetched matrices are stored as pointers UDERA * ARecv; UDERB * BRecv; vector< SpTuples<IU,N_promote> *> tomerge; int Aself = (A.commGrid)->GetRankInProcRow(); int Bself = (B.commGrid)->GetRankInProcCol(); for(int i = 0; i < stages; ++i) { vector<IU> ess; if(i == Aself) { ARecv = A.spSeq; // shallow-copy } else { ess.resize(UDERA::esscount); for(int j=0; j< UDERA::esscount; ++j) { ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row } ARecv = new UDERA(); // first, create the object } SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i); // then, receive its elements ess.clear(); if(i == Bself) { BRecv = B.spSeq; // shallow-copy } else { ess.resize(UDERB::esscount); for(int j=0; j< UDERB::esscount; ++j) { ess[j] = BRecvSizes[j][i]; } BRecv = new UDERB(); } SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i); // then, receive its 
elements
	// Local multiply of the fetched pieces; the SpTuples result is kept for a final merge.
	SpTuples<IU,N_promote> * C_cont = MultiplyReturnTuples<SR>
					(*ARecv, *BRecv, // parameters themselves
					false, true,	// transpose information (B is transposed)
					i != Aself, 	// 'delete A' condition
					i != Bself);	// 'delete B' condition
	if(!C_cont->isZero())
		tomerge.push_back(C_cont);

#ifndef NDEBUG
	ostringstream outs;
	outs << i << "th SUMMA iteration"<< endl;
	SpParHelper::Print(outs.str());
#endif
}
if(clearA && A.spSeq != NULL)
{
	delete A.spSeq;
	A.spSeq = NULL;
}
if(clearB && B.spSeq != NULL)
{
	delete B.spSeq;
	B.spSeq = NULL;
}
SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);

// First get the result in SpTuples, then convert to UDER
// (the trailing 'true' tells MergeAll to delete the tomerge arrays)
DER_promote * C = new DER_promote(MergeAll<SR>(tomerge, C_m, C_n,true), false, NULL);

if(!clearB)
	const_cast< UDERB* >(B.spSeq)->Transpose();	// transpose back to original

return SpParMat<IU,N_promote,DER_promote> (C, GridC);	// return the result object
}

/**
 * Parallel A = B*C routine that uses one-sided MPI-2 features.
 * General active target synchronization via MPI_Win_Post, MPI_Win_Start, MPI_Win_Complete, MPI_Win_Wait.
 * Tested on my dual core Macbook with 1,4,9,16,25 MPI processes.
 * No memory hog: splits each matrix into two along the column and prefetches the next
 * half matrix while computing on the current one (software double buffering).
 * NOTE(review): the Barrier()/Print()/debug-file calls throughout are instrumentation only.
 **/
template <typename SR, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat<IU,typename promote_trait<NU1,NU2>::T_promote,typename promote_trait<UDERA,UDERB>::T_promote>
Mult_AnXBn_ActiveTarget (const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B )
{
	typedef typename promote_trait<NU1,NU2>::T_promote N_promote;
	typedef typename promote_trait<UDERA,UDERB>::T_promote DER_promote;

	// Conformity check: inner dimensions must agree.
	if(A.getncol() != B.getnrow())
	{
		cout<<"Can not multiply, dimensions does not match"<<endl;
		MPI::COMM_WORLD.Abort(DIMMISMATCH);
		return SpParMat< IU,N_promote,DER_promote >();
	}

	int stages, Aoffset, Boffset; 	// stages = inner dimension of matrix blocks
	shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, Aoffset, Boffset);

	IU C_m = A.spSeq->getnrow();
	IU C_n = B.spSeq->getncol();

	UDERA A1seq, A2seq;
	(A.spSeq)->Split( A1seq, A2seq);

	// ABAB: It should be able to perform split/merge with the transpose option [single function call]
	const_cast< UDERB* >(B.spSeq)->Transpose();

	UDERB B1seq, B2seq;
	(B.spSeq)->Split( B1seq, B2seq);

	// Create row and column windows (collective operation, i.e. everybody exposes its window to others)
	vector<MPI::Win> rowwins1, rowwins2, colwins1, colwins2;
	SpParHelper::SetWindows((A.commGrid)->GetRowWorld(), A1seq, rowwins1);
	SpParHelper::SetWindows((A.commGrid)->GetRowWorld(), A2seq, rowwins2);
	SpParHelper::SetWindows((B.commGrid)->GetColWorld(), B1seq, colwins1);
	SpParHelper::SetWindows((B.commGrid)->GetColWorld(), B2seq, colwins2);

	SpParHelper::SetWinErrHandler(rowwins1);	// set the error handler to THROW_EXCEPTIONS
	SpParHelper::SetWinErrHandler(rowwins2);
	SpParHelper::SetWinErrHandler(colwins1);
	SpParHelper::SetWinErrHandler(colwins2);

	// ABAB: We can optimize the call to create windows in the absence of passive synchronization
	// MPI_Info info;
	// MPI_Info_create ( &info );
	// MPI_Info_set( info, "no_locks", "true" );
	// MPI_Win_create( . . ., info, . . .
	); // MPI_Info_free( &info );

	IU ** ARecvSizes1 = SpHelper::allocate2D<IU>(UDERA::esscount, stages);
	IU ** ARecvSizes2 = SpHelper::allocate2D<IU>(UDERA::esscount, stages);
	IU ** BRecvSizes1 = SpHelper::allocate2D<IU>(UDERB::esscount, stages);
	IU ** BRecvSizes2 = SpHelper::allocate2D<IU>(UDERB::esscount, stages);

	SpParHelper::GetSetSizes( A1seq, ARecvSizes1, (A.commGrid)->GetRowWorld());
	SpParHelper::GetSetSizes( A2seq, ARecvSizes2, (A.commGrid)->GetRowWorld());
	SpParHelper::GetSetSizes( B1seq, BRecvSizes1, (B.commGrid)->GetColWorld());
	SpParHelper::GetSetSizes( B2seq, BRecvSizes2, (B.commGrid)->GetColWorld());

	// Remotely fetched matrices are stored as pointers
	UDERA * ARecv1, * ARecv2;
	UDERB * BRecv1, * BRecv2;
	vector< SpTuples<IU,N_promote> *> tomerge;

	MPI::Group row_group = (A.commGrid)->GetRowWorld().Get_group();
	MPI::Group col_group = (B.commGrid)->GetColWorld().Get_group();

	int Aself = (A.commGrid)->GetRankInProcRow();
	int Bself = (B.commGrid)->GetRankInProcCol();

	GridC->GetWorld().Barrier();

	// Debug instrumentation: dump the local split sizes to a per-process file.
	SpParHelper::Print("Writing to file\n");
	ofstream oput;
	GridC->OpenDebugFile("deb", oput);
	oput << "A1seq: " << A1seq.getnrow() << " " << A1seq.getncol() << " " << A1seq.getnnz() << endl;
	oput << "A2seq: " << A2seq.getnrow() << " " << A2seq.getncol() << " " << A2seq.getnnz() << endl;
	oput << "B1seq: " << B1seq.getnrow() << " " << B1seq.getncol() << " " << B1seq.getnnz() << endl;
	oput << "B2seq: " << B2seq.getnrow() << " " << B2seq.getncol() << " " << B2seq.getnnz() << endl;
	SpParHelper::Print("Wrote to file\n");
	GridC->GetWorld().Barrier();

	// Start exposure epochs to all windows
	// NOTE(review): MPI exceptions are caught by value here; catching by reference
	// would be conventional C++ — confirm before changing.
	try
	{
		SpParHelper::PostExposureEpoch(Aself, rowwins1, row_group);
		SpParHelper::PostExposureEpoch(Aself, rowwins2, row_group);
		SpParHelper::PostExposureEpoch(Bself, colwins1, col_group);
		SpParHelper::PostExposureEpoch(Bself, colwins2, col_group);
	}
	catch(MPI::Exception e)
	{
		oput << "Exception while posting exposure epoch" << endl;
		oput << e.Get_error_string() << endl;
	}

	GridC->GetWorld().Barrier();
	SpParHelper::Print("Exposure epochs posted\n");
	GridC->GetWorld().Barrier();

	int Aowner = (0+Aoffset) % stages;
	int Bowner = (0+Boffset) % stages;
	try
	{
		// Fetch first halves and prefetch second halves for stage 0.
		SpParHelper::AccessNFetch(ARecv1, Aowner, rowwins1, row_group, ARecvSizes1);
		SpParHelper::AccessNFetch(ARecv2, Aowner, rowwins2, row_group, ARecvSizes2);	// Start prefetching next half
		for(int j=0; j< rowwins1.size(); ++j)	// wait for the first half to complete
			rowwins1[j].Complete();

		SpParHelper::AccessNFetch(BRecv1, Bowner, colwins1, col_group, BRecvSizes1);
		SpParHelper::AccessNFetch(BRecv2, Bowner, colwins2, col_group, BRecvSizes2);	// Start prefetching next half
		for(int j=0; j< colwins1.size(); ++j)
			colwins1[j].Complete();
	}
	catch(MPI::Exception e)
	{
		oput << "Exception while starting access epoch or the first fetch" << endl;
		oput << e.Get_error_string() << endl;
	}

	// Pipeline: multiply the ready half while the other half is in flight.
	for(int i = 1; i < stages; ++i)
	{
		SpParHelper::Print("Stage starting\n");
		SpTuples<IU,N_promote> * C_cont = MultiplyReturnTuples<SR>(*ARecv1, *BRecv1, false, true);
		SpParHelper::Print("Multiplied\n");
		if(!C_cont->isZero())
			tomerge.push_back(C_cont);
		SpParHelper::Print("Pushed back\n");
		GridC->GetWorld().Barrier();

		// NOTE(review): remoteA/remoteB are declared but never used in this view.
		bool remoteA = false;
		bool remoteB = false;
		delete ARecv1;		// free the memory of the previous first half
		for(int j=0; j< rowwins2.size(); ++j)	// wait for the previous second half to complete
			rowwins2[j].Complete();
		SpParHelper::Print("Completed A\n");

		delete BRecv1;
		for(int j=0; j< colwins2.size(); ++j)	// wait for the previous second half to complete
			colwins2[j].Complete();
		SpParHelper::Print("Completed B\n");
		GridC->GetWorld().Barrier();

		Aowner = (i+Aoffset) % stages;
		Bowner = (i+Boffset) % stages;

		// start fetching the current first half
		SpParHelper::AccessNFetch(ARecv1, Aowner, rowwins1, row_group, ARecvSizes1);
		SpParHelper::AccessNFetch(BRecv1, Bowner, colwins1, col_group, BRecvSizes1);
		SpParHelper::Print("Fetched next\n");
		GridC->GetWorld().Barrier();

		// while multiplying the already completed previous
second halfs
		C_cont = MultiplyReturnTuples<SR>(*ARecv2, *BRecv2, false, true);
		if(!C_cont->isZero())
			tomerge.push_back(C_cont);
		SpParHelper::Print("Multiplied and pushed\n");
		GridC->GetWorld().Barrier();

		delete ARecv2;	// free memory of the previous second half
		delete BRecv2;
		for(int j=0; j< rowwins1.size(); ++j)	// wait for the current first half to complete
			rowwins1[j].Complete();
		for(int j=0; j< colwins1.size(); ++j)
			colwins1[j].Complete();
		SpParHelper::Print("Completed next\n");
		GridC->GetWorld().Barrier();

		// start prefetching the current second half
		SpParHelper::AccessNFetch(ARecv2, Aowner, rowwins2, row_group, ARecvSizes2);
		SpParHelper::AccessNFetch(BRecv2, Bowner, colwins2, col_group, BRecvSizes2);
	}
	//SpParHelper::Print("Stages finished\n");

	// Drain the pipeline: last first-half, then last second-half.
	SpTuples<IU,N_promote> * C_cont = MultiplyReturnTuples<SR>(*ARecv1, *BRecv1, false, true);
	if(!C_cont->isZero())
		tomerge.push_back(C_cont);

	delete ARecv1;		// free the memory of the previous first half
	for(int j=0; j< rowwins2.size(); ++j)	// wait for the previous second half to complete
		rowwins2[j].Complete();
	delete BRecv1;
	for(int j=0; j< colwins2.size(); ++j)	// wait for the previous second half to complete
		colwins2[j].Complete();

	C_cont = MultiplyReturnTuples<SR>(*ARecv2, *BRecv2, false, true);
	if(!C_cont->isZero())
		tomerge.push_back(C_cont);

	delete ARecv2;
	delete BRecv2;

	SpHelper::deallocate2D(ARecvSizes1, UDERA::esscount);
	SpHelper::deallocate2D(ARecvSizes2, UDERA::esscount);
	SpHelper::deallocate2D(BRecvSizes1, UDERB::esscount);
	SpHelper::deallocate2D(BRecvSizes2, UDERB::esscount);

	// First get the result in SpTuples, then convert to UDER
	DER_promote * C = new DER_promote(MergeAll<SR>(tomerge, C_m, C_n), false, NULL);
	for(int i=0; i<tomerge.size(); ++i)
	{
		delete tomerge[i];
	}

	// MPI_Win_Wait() works like a barrier as it waits for all origins to finish their remote memory operation on "this" window
	SpParHelper::WaitNFree(rowwins1);
	SpParHelper::WaitNFree(rowwins2);
	SpParHelper::WaitNFree(colwins1);
	SpParHelper::WaitNFree(colwins2);

	// Restore A and B to their pre-call state (they were split/transposed above).
	(A.spSeq)->Merge(A1seq, A2seq);
	(B.spSeq)->Merge(B1seq, B2seq);

	row_group.Free();
	col_group.Free();

	const_cast< UDERB* >(B.spSeq)->Transpose();	// transpose back to original
	return SpParMat<IU,N_promote,DER_promote> (C, GridC);	// return the result object
}

/**
 * Parallel A = B*C routine that uses one-sided MPI-2 features.
 * Passive target synchronization via MPI_Win_Lock, MPI_Win_Unlock.
 * No memory hog: splits the matrix into two along the column, prefetches the next
 * half matrix while computing on the current one.
 **/
template <typename SR, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat<IU,typename promote_trait<NU1,NU2>::T_promote,typename promote_trait<UDERA,UDERB>::T_promote>
Mult_AnXBn_PassiveTarget (const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B )
{
	typedef typename promote_trait<NU1,NU2>::T_promote N_promote;
	typedef typename promote_trait<UDERA,UDERB>::T_promote DER_promote;

	// Conformity check: inner dimensions must agree.
	if(A.getncol() != B.getnrow())
	{
		cout<<"Can not multiply, dimensions does not match"<<endl;
		MPI::COMM_WORLD.Abort(DIMMISMATCH);
		return SpParMat< IU,N_promote,DER_promote >();
	}

	int stages, Aoffset, Boffset; 	// stages = inner dimension of matrix blocks
	shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, Aoffset, Boffset);

	IU C_m = A.spSeq->getnrow();
	IU C_n = B.spSeq->getncol();

	UDERA A1seq, A2seq;
	(A.spSeq)->Split( A1seq, A2seq);

	// ABAB: It should be able to perform split/merge with the transpose option [single function call]
	const_cast< UDERB* >(B.spSeq)->Transpose();

	UDERB B1seq, B2seq;
	(B.spSeq)->Split( B1seq, B2seq);

	// Create row and column windows (collective operation, i.e.
everybody exposes its window to others)
	vector<MPI::Win> rowwins1, rowwins2, colwins1, colwins2;
	SpParHelper::SetWindows((A.commGrid)->GetRowWorld(), A1seq, rowwins1);
	SpParHelper::SetWindows((A.commGrid)->GetRowWorld(), A2seq, rowwins2);
	SpParHelper::SetWindows((B.commGrid)->GetColWorld(), B1seq, colwins1);
	SpParHelper::SetWindows((B.commGrid)->GetColWorld(), B2seq, colwins2);

	IU ** ARecvSizes1 = SpHelper::allocate2D<IU>(UDERA::esscount, stages);
	IU ** ARecvSizes2 = SpHelper::allocate2D<IU>(UDERA::esscount, stages);
	IU ** BRecvSizes1 = SpHelper::allocate2D<IU>(UDERB::esscount, stages);
	IU ** BRecvSizes2 = SpHelper::allocate2D<IU>(UDERB::esscount, stages);

	SpParHelper::GetSetSizes( A1seq, ARecvSizes1, (A.commGrid)->GetRowWorld());
	SpParHelper::GetSetSizes( A2seq, ARecvSizes2, (A.commGrid)->GetRowWorld());
	SpParHelper::GetSetSizes( B1seq, BRecvSizes1, (B.commGrid)->GetColWorld());
	SpParHelper::GetSetSizes( B2seq, BRecvSizes2, (B.commGrid)->GetColWorld());

	// Remotely fetched matrices are stored as pointers
	UDERA * ARecv1, * ARecv2;
	UDERB * BRecv1, * BRecv2;
	vector< SpTuples<IU,N_promote> *> tomerge;	// sorted triples to be merged

	MPI::Group row_group = (A.commGrid)->GetRowWorld().Get_group();
	MPI::Group col_group = (B.commGrid)->GetColWorld().Get_group();

	int Aself = (A.commGrid)->GetRankInProcRow();
	int Bself = (B.commGrid)->GetRankInProcCol();

	int Aowner = (0+Aoffset) % stages;
	int Bowner = (0+Boffset) % stages;

	// Fetch first halves for stage 0 and prefetch second halves (lock-based epochs).
	SpParHelper::LockNFetch(ARecv1, Aowner, rowwins1, row_group, ARecvSizes1);
	SpParHelper::LockNFetch(ARecv2, Aowner, rowwins2, row_group, ARecvSizes2);	// Start prefetching next half
	SpParHelper::LockNFetch(BRecv1, Bowner, colwins1, col_group, BRecvSizes1);
	SpParHelper::LockNFetch(BRecv2, Bowner, colwins2, col_group, BRecvSizes2);	// Start prefetching next half

	// Finish the first halfs
	SpParHelper::UnlockWindows(Aowner, rowwins1);
	SpParHelper::UnlockWindows(Bowner, colwins1);

	// Pipeline: multiply the ready half while the other half is in flight.
	for(int i = 1; i < stages; ++i)
	{
		SpTuples<IU,N_promote> * C_cont = MultiplyReturnTuples<SR>(*ARecv1, *BRecv1, false, true);
		if(!C_cont->isZero())
			tomerge.push_back(C_cont);

		// NOTE(review): remoteA/remoteB are declared but never used in this view.
		bool remoteA = false;
		bool remoteB = false;
		delete ARecv1;		// free the memory of the previous first half
		delete BRecv1;

		SpParHelper::UnlockWindows(Aowner, rowwins2);	// Finish the second half
		SpParHelper::UnlockWindows(Bowner, colwins2);

		Aowner = (i+Aoffset) % stages;
		Bowner = (i+Boffset) % stages;

		// start fetching the current first half
		SpParHelper::LockNFetch(ARecv1, Aowner, rowwins1, row_group, ARecvSizes1);
		SpParHelper::LockNFetch(BRecv1, Bowner, colwins1, col_group, BRecvSizes1);

		// while multiplying the already completed previous second halfs
		C_cont = MultiplyReturnTuples<SR>(*ARecv2, *BRecv2, false, true);
		if(!C_cont->isZero())
			tomerge.push_back(C_cont);

		delete ARecv2;	// free memory of the previous second half
		delete BRecv2;

		// wait for the current first half to complete
		SpParHelper::UnlockWindows(Aowner, rowwins1);
		SpParHelper::UnlockWindows(Bowner, colwins1);

		// start prefetching the current second half
		SpParHelper::LockNFetch(ARecv2, Aowner, rowwins2, row_group, ARecvSizes2);
		SpParHelper::LockNFetch(BRecv2, Bowner, colwins2, col_group, BRecvSizes2);
	}

	// Drain the pipeline: last first-half, then last second-half.
	SpTuples<IU,N_promote> * C_cont = MultiplyReturnTuples<SR>(*ARecv1, *BRecv1, false, true);
	if(!C_cont->isZero())
		tomerge.push_back(C_cont);

	delete ARecv1;		// free the memory of the previous first half
	delete BRecv1;

	SpParHelper::UnlockWindows(Aowner, rowwins2);
	SpParHelper::UnlockWindows(Bowner, colwins2);

	C_cont = MultiplyReturnTuples<SR>(*ARecv2, *BRecv2, false, true);
	if(!C_cont->isZero())
		tomerge.push_back(C_cont);

	delete ARecv2;
	delete BRecv2;

	SpHelper::deallocate2D(ARecvSizes1, UDERA::esscount);
	SpHelper::deallocate2D(ARecvSizes2, UDERA::esscount);
	SpHelper::deallocate2D(BRecvSizes1, UDERB::esscount);
	SpHelper::deallocate2D(BRecvSizes2, UDERB::esscount);

	DER_promote * C = new DER_promote(MergeAll<SR>(tomerge, C_m, C_n), false, NULL);	// First get the result in SpTuples, then convert to UDER
	for(int i=0; i<tomerge.size(); ++i)
	{
		delete tomerge[i];
	}
	SpParHelper::FreeWindows(rowwins1);
	SpParHelper::FreeWindows(rowwins2);
	SpParHelper::FreeWindows(colwins1);
	SpParHelper::FreeWindows(colwins2);

	// Restore A and B to their pre-call state (they were split/transposed above).
	(A.spSeq)->Merge(A1seq, A2seq);
	(B.spSeq)->Merge(B1seq, B2seq);

	row_group.Free();
	col_group.Free();

	const_cast< UDERB* >(B.spSeq)->Transpose();	// transpose back to original
	return SpParMat<IU,N_promote,DER_promote> (C, GridC);	// return the result object
}

/**
 * Parallel A = B*C routine that uses one-sided MPI-2 features.
 * Synchronization is through MPI_Win_Fence.
 * Buggy as of September, 2009
 **/
template <typename SR, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat<IU,typename promote_trait<NU1,NU2>::T_promote,typename promote_trait<UDERA,UDERB>::T_promote>
Mult_AnXBn_Fence (const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B )
{
	typedef typename promote_trait<NU1,NU2>::T_promote N_promote;
	typedef typename promote_trait<UDERA,UDERB>::T_promote DER_promote;

	// Conformity check: inner dimensions must agree.
	if(A.getncol() != B.getnrow())
	{
		cout<<"Can not multiply, dimensions does not match"<<endl;
		MPI::COMM_WORLD.Abort(DIMMISMATCH);
		return SpParMat< IU,N_promote,DER_promote >();
	}

	int stages, Aoffset, Boffset; 	// stages = inner dimension of matrix blocks
	shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, Aoffset, Boffset);

	ofstream oput;
	GridC->OpenDebugFile("deb", oput);

	const_cast< UDERB* >(B.spSeq)->Transpose();

	// set row & col window handles
	// Two window sets are created over the SAME local data so that fetches into
	// "next" buffers can be fenced independently from the "current" ones.
	vector<MPI::Win> rowwindows, colwindows;
	vector<MPI::Win> rowwinnext, colwinnext;
	SpParHelper::SetWindows((A.commGrid)->GetRowWorld(), *(A.spSeq), rowwindows);
	SpParHelper::SetWindows((B.commGrid)->GetColWorld(), *(B.spSeq), colwindows);
	SpParHelper::SetWindows((A.commGrid)->GetRowWorld(), *(A.spSeq), rowwinnext);
	SpParHelper::SetWindows((B.commGrid)->GetColWorld(), *(B.spSeq), colwinnext);

	IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages);
	IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages);

	SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld());
	SpParHelper::GetSetSizes( *(B.spSeq), BRecvSizes, (B.commGrid)->GetColWorld());

	UDERA * ARecv, * ARecvNext;
	UDERB * BRecv, * BRecvNext;
	vector< SpTuples<IU,N_promote> *> tomerge;

	// Prefetch first
	// Opening fences (MPI_MODE_NOPRECEDE: no preceding RMA epoch on these windows).
	for(int j=0; j< rowwindows.size(); ++j)
		MPI_Win_fence(MPI_MODE_NOPRECEDE, rowwindows[j]);
	for(int j=0; j< colwindows.size(); ++j)
		MPI_Win_fence(MPI_MODE_NOPRECEDE, colwindows[j]);
	for(int j=0; j< rowwinnext.size(); ++j)
		MPI_Win_fence(MPI_MODE_NOPRECEDE, rowwinnext[j]);
	for(int j=0; j< colwinnext.size(); ++j)
		MPI_Win_fence(MPI_MODE_NOPRECEDE, colwinnext[j]);

	int Aownind = (0+Aoffset) % stages;
	int Bownind = (0+Boffset) % stages;
	if(Aownind == (A.commGrid)->GetRankInProcRow())
	{
		ARecv = A.spSeq;	// shallow-copy
	}
	else
	{
		vector<IU> ess1(UDERA::esscount);		// pack essentials to a vector
		for(int j=0; j< UDERA::esscount; ++j)
		{
			ess1[j] = ARecvSizes[j][Aownind];
		}
		ARecv = new UDERA();	// create the object first
		oput << "For A (out), Fetching " << (void*)rowwindows[0] << endl;
		SpParHelper::FetchMatrix(*ARecv, ess1, rowwindows, Aownind);	// fetch its elements later
	}
	if(Bownind == (B.commGrid)->GetRankInProcCol())
	{
		BRecv = B.spSeq;	// shallow-copy
	}
	else
	{
		vector<IU> ess2(UDERB::esscount);		// pack essentials to a vector
		for(int j=0; j< UDERB::esscount; ++j)
		{
			ess2[j] = BRecvSizes[j][Bownind];
		}
		BRecv = new UDERB();
		oput << "For B (out), Fetching " << (void*)colwindows[0] << endl;
		SpParHelper::FetchMatrix(*BRecv, ess2, colwindows, Bownind);	// No lock version, only get !
	}

	int Aownprev = Aownind;
	int Bownprev = Bownind;

	// Alternate between the two window sets stage by stage: fetch into one set
	// while fencing (completing) the other, then multiply the completed pieces.
	for(int i = 1; i < stages; ++i)
	{
		Aownind = (i+Aoffset) % stages;
		Bownind = (i+Boffset) % stages;

		if(i % 2 == 1)	// Fetch RecvNext via winnext, fence on Recv via windows
		{
			if(Aownind == (A.commGrid)->GetRankInProcRow())
			{
				ARecvNext = A.spSeq;	// shallow-copy
			}
			else
			{
				vector<IU> ess1(UDERA::esscount);		// pack essentials to a vector
				for(int j=0; j< UDERA::esscount; ++j)
				{
					ess1[j] = ARecvSizes[j][Aownind];
				}
				ARecvNext = new UDERA();	// create the object first
				oput << "For A, Fetching " << (void*) rowwinnext[0] << endl;
				SpParHelper::FetchMatrix(*ARecvNext, ess1, rowwinnext, Aownind);
			}

			if(Bownind == (B.commGrid)->GetRankInProcCol())
			{
				BRecvNext = B.spSeq;	// shallow-copy
			}
			else
			{
				vector<IU> ess2(UDERB::esscount);		// pack essentials to a vector
				for(int j=0; j< UDERB::esscount; ++j)
				{
					ess2[j] = BRecvSizes[j][Bownind];
				}
				BRecvNext = new UDERB();
				oput << "For B, Fetching " << (void*)colwinnext[0] << endl;
				SpParHelper::FetchMatrix(*BRecvNext, ess2, colwinnext, Bownind);	// No lock version, only get !
			}

			oput << "Fencing " << (void*) rowwindows[0] << endl;
			oput << "Fencing " << (void*) colwindows[0] << endl;

			for(int j=0; j< rowwindows.size(); ++j)
				MPI_Win_fence(MPI_MODE_NOSTORE, rowwindows[j]);		// Synch using "other" windows
			for(int j=0; j< colwindows.size(); ++j)
				MPI_Win_fence(MPI_MODE_NOSTORE, colwindows[j]);

			SpTuples<IU,N_promote> * C_cont = MultiplyReturnTuples<SR>(*ARecv, *BRecv, false, true);
			if(!C_cont->isZero())
				tomerge.push_back(C_cont);

			// Free the fetched pieces unless they are shallow copies of the local submatrix.
			if(Aownprev != (A.commGrid)->GetRankInProcRow()) delete ARecv;
			if(Bownprev != (B.commGrid)->GetRankInProcCol()) delete BRecv;

			Aownprev = Aownind;
			Bownprev = Bownind;
		}
		else	// fetch to Recv via windows, fence on RecvNext via winnext
		{
			if(Aownind == (A.commGrid)->GetRankInProcRow())
			{
				ARecv = A.spSeq;	// shallow-copy
			}
			else
			{
				vector<IU> ess1(UDERA::esscount);		// pack essentials to a vector
				for(int j=0; j< UDERA::esscount; ++j)
				{
					ess1[j] = ARecvSizes[j][Aownind];
				}
				ARecv = new UDERA();	// create the object first
				oput << "For A, Fetching " << (void*) rowwindows[0] << endl;
				SpParHelper::FetchMatrix(*ARecv, ess1, rowwindows, Aownind);
			}

			if(Bownind == (B.commGrid)->GetRankInProcCol())
			{
				BRecv = B.spSeq;	// shallow-copy
			}
			else
			{
				vector<IU> ess2(UDERB::esscount);		// pack essentials to a vector
				for(int j=0; j< UDERB::esscount; ++j)
				{
					ess2[j] = BRecvSizes[j][Bownind];
				}
				BRecv = new UDERB();
				oput << "For B, Fetching " << (void*)colwindows[0] << endl;
				SpParHelper::FetchMatrix(*BRecv, ess2, colwindows, Bownind);	// No lock version, only get !
} oput << "Fencing " << (void*) rowwinnext[0] << endl; oput << "Fencing " << (void*) rowwinnext[0] << endl; for(int j=0; j< rowwinnext.size(); ++j) MPI_Win_fence(MPI_MODE_NOSTORE, rowwinnext[j]); // Synch using "other" windows for(int j=0; j< colwinnext.size(); ++j) MPI_Win_fence(MPI_MODE_NOSTORE, colwinnext[j]); SpTuples<IU,N_promote> * C_cont = MultiplyReturnTuples<SR>(*ARecvNext, *BRecvNext, false, true); if(!C_cont->isZero()) tomerge.push_back(C_cont); if(Aownprev != (A.commGrid)->GetRankInProcRow()) delete ARecvNext; if(Bownprev != (B.commGrid)->GetRankInProcCol()) delete BRecvNext; Aownprev = Aownind; Bownprev = Bownind; } } if(stages % 2 == 1) // fence on Recv via windows { oput << "Fencing " << (void*) rowwindows[0] << endl; oput << "Fencing " << (void*) colwindows[0] << endl; for(int j=0; j< rowwindows.size(); ++j) MPI_Win_fence(MPI_MODE_NOSUCCEED, rowwindows[j]); // Synch using "prev" windows for(int j=0; j< colwindows.size(); ++j) MPI_Win_fence(MPI_MODE_NOSUCCEED, colwindows[j]); SpTuples<IU,N_promote> * C_cont = MultiplyReturnTuples<SR>(*ARecv, *BRecv, false, true); if(!C_cont->isZero()) tomerge.push_back(C_cont); if(Aownprev != (A.commGrid)->GetRankInProcRow()) delete ARecv; if(Bownprev != (B.commGrid)->GetRankInProcRow()) delete BRecv; } else // fence on RecvNext via winnext { oput << "Fencing " << (void*) rowwinnext[0] << endl; oput << "Fencing " << (void*) colwinnext[0] << endl; for(int j=0; j< rowwinnext.size(); ++j) MPI_Win_fence(MPI_MODE_NOSUCCEED, rowwinnext[j]); // Synch using "prev" windows for(int j=0; j< colwinnext.size(); ++j) MPI_Win_fence(MPI_MODE_NOSUCCEED, colwinnext[j]); SpTuples<IU,N_promote> * C_cont = MultiplyReturnTuples<SR>(*ARecvNext, *BRecvNext, false, true); if(!C_cont->isZero()) tomerge.push_back(C_cont); if(Aownprev != (A.commGrid)->GetRankInProcRow()) delete ARecvNext; if(Bownprev != (B.commGrid)->GetRankInProcRow()) delete BRecvNext; } for(int i=0; i< rowwindows.size(); ++i) { rowwindows[i].Free(); rowwinnext[i].Free(); } 
for(int i=0; i< colwindows.size(); ++i) { colwindows[i].Free(); colwinnext[i].Free(); } GridC->GetWorld().Barrier(); IU C_m = A.spSeq->getnrow(); IU C_n = B.spSeq->getncol(); DER_promote * C = new DER_promote(MergeAll<SR>(tomerge, C_m, C_n), false, NULL); // First get the result in SpTuples, then convert to UDER for(int i=0; i<tomerge.size(); ++i) { delete tomerge[i]; } SpHelper::deallocate2D(ARecvSizes, UDERA::esscount); SpHelper::deallocate2D(BRecvSizes, UDERB::esscount); const_cast< UDERB* >(B.spSeq)->Transpose(); // transpose back to original return SpParMat<IU,N_promote,DER_promote> (C, GridC); // return the result object } // Randomly permutes an already existing vector // Preserves the data distribution (doesn't rebalance) template <typename IU> void RandPerm(SpParVec<IU,IU> & V) { SpParHelper::Print("COMBBLAS: This version of RandPerm(SpParVec &) is obsolete, please use DenseParVec::RandPerm()\n"); MPI::Intracomm DiagWorld = V.commGrid->GetDiagWorld(); if(DiagWorld != MPI::COMM_NULL) // Diagonal processors only { pair<double,IU> * vecpair = new pair<double,IU>[V.getlocnnz()]; int nproc = DiagWorld.Get_size(); int diagrank = DiagWorld.Get_rank(); long * dist = new long[nproc]; dist[diagrank] = (long) V.getlocnnz(); DiagWorld.Allgather(MPI::IN_PLACE, 0, MPIType<long>(), dist, 1, MPIType<long>()); MTRand M; // generate random numbers with Mersenne Twister for(int i=0; i<V.getlocnnz(); ++i) { vecpair[i].first = M.rand(); vecpair[i].second = V.num[i]; } // less< pair<T1,T2> > works correctly (sorts wrt first elements) vpsort::parallel_sort (vecpair, vecpair + V.getlocnnz(), dist, DiagWorld); vector< IU > nind(V.getlocnnz()); vector< IU > nnum(V.getlocnnz()); for(int i=0; i<V.getlocnnz(); ++i) { nind[i] = i; nnum[i] = vecpair[i].second; } delete [] vecpair; delete [] dist; V.ind.swap(nind); V.num.swap(nnum); } } template <typename SR, typename IU, typename NUM, typename NUV, typename UDER> DenseParVec<IU,typename promote_trait<NUM,NUV>::T_promote> SpMV (const 
SpParMat<IU,NUM,UDER> & A, const DenseParVec<IU,NUV> & x ) { typedef typename promote_trait<NUM,NUV>::T_promote T_promote; IU ncolA = A.getncol(); if(ncolA != x.getTotalLength()) { ostringstream outs; outs << "Can not multiply, dimensions does not match"<< endl; outs << ncolA << " != " << x.getTotalLength() << endl; SpParHelper::Print(outs.str()); MPI::COMM_WORLD.Abort(DIMMISMATCH); } if(!(*A.commGrid == *x.commGrid)) { cout << "Grids are not comparable for SpMV" << endl; MPI::COMM_WORLD.Abort(GRIDMISMATCH); } MPI::Intracomm DiagWorld = x.commGrid->GetDiagWorld(); MPI::Intracomm ColWorld = x.commGrid->GetColWorld(); MPI::Intracomm RowWorld = x.commGrid->GetRowWorld(); int diaginrow = x.commGrid->GetDiagOfProcRow(); int diagincol = x.commGrid->GetDiagOfProcCol(); T_promote id = (T_promote) 0; // do we need a better identity? DenseParVec<IU, T_promote> y ( x.commGrid, id); IU ysize = A.getlocalrows(); if(x.diagonal) { IU size = x.arr.size(); ColWorld.Bcast(&size, 1, MPIType<IU>(), diagincol); ColWorld.Bcast(const_cast<NUV*>(&x.arr[0]), size, MPIType<NUV>(), diagincol); T_promote * localy = new T_promote[ysize]; fill_n(localy, ysize, id); dcsc_gespmv<SR>(*(A.spSeq), &x.arr[0], localy); // IntraComm::Reduce(sendbuf, recvbuf, count, type, op, root) RowWorld.Reduce(MPI::IN_PLACE, localy, ysize, MPIType<T_promote>(), SR::mpi_op(), diaginrow); y.arr.resize(ysize); copy(localy, localy+ysize, y.arr.begin()); delete [] localy; } else { IU size; ColWorld.Bcast(&size, 1, MPIType<IU>(), diagincol); NUV * localx = new NUV[size]; ColWorld.Bcast(localx, size, MPIType<NUV>(), diagincol); T_promote * localy = new T_promote[ysize]; fill_n(localy, ysize, id); dcsc_gespmv<SR>(*(A.spSeq), localx, localy); delete [] localx; RowWorld.Reduce(localy, NULL, ysize, MPIType<T_promote>(), SR::mpi_op(), diaginrow); delete [] localy; } return y; } template <typename SR, typename IU, typename NUM, typename NUV, typename UDER> SpParVec<IU,typename promote_trait<NUM,NUV>::T_promote> SpMV (const 
SpParMat<IU,NUM,UDER> & A, const SpParVec<IU,NUV> & x )
{
	typedef typename promote_trait<NUM,NUV>::T_promote T_promote;

	// Conformity check: A's column count must match x's global length.
	IU ncolA = A.getncol();
	if(ncolA != x.getTotalLength())
	{
		ostringstream outs;
		outs << "Can not multiply, dimensions does not match"<< endl;
		outs << ncolA << " != " << x.getTotalLength() << endl;
		SpParHelper::Print(outs.str());
		MPI::COMM_WORLD.Abort(DIMMISMATCH);
	}
	if(!(*A.commGrid == *x.commGrid))
	{
		cout << "Grids are not comparable for SpMV" << endl;
		MPI::COMM_WORLD.Abort(GRIDMISMATCH);
	}

	MPI::Intracomm DiagWorld = x.commGrid->GetDiagWorld();
	MPI::Intracomm ColWorld = x.commGrid->GetColWorld();
	MPI::Intracomm RowWorld = x.commGrid->GetRowWorld();
	int diaginrow = x.commGrid->GetDiagOfProcRow();
	int diagincol = x.commGrid->GetDiagOfProcCol();

	SpParVec<IU, T_promote> y ( x.commGrid);	// identity doesn't matter for sparse vectors
	IU ysize = A.getlocalrows();
	if(x.diagonal)
	{
		// Diagonal process: broadcast own sparse piece (indices + values) down the
		// processor column, multiply locally, then gather and merge partial results.
		IU nnzx = x.getlocnnz();
		ColWorld.Bcast(&nnzx, 1, MPIType<IU>(), diagincol);
		ColWorld.Bcast(const_cast<IU*>(&x.ind[0]), nnzx, MPIType<IU>(), diagincol);
		ColWorld.Bcast(const_cast<NUV*>(&x.num[0]), nnzx, MPIType<NUV>(), diagincol);

		// define a SPA-like data structure
		T_promote * localy = new T_promote[ysize];
		bool * isthere = new bool[ysize];
		vector<IU> nzinds;	// nonzero indices
		fill_n(isthere, ysize, false);

		// serial SpMV with sparse vector
		vector< IU > indy;
		vector< T_promote > numy;
		dcsc_gespmv<SR>(*(A.spSeq), &x.ind[0], &x.num[0], nnzx, indy, numy);

		int proccols = x.commGrid->GetGridCols();
		int * gsizes = new int[proccols];	// # of processor columns = number of processors in the RowWorld
		int mysize = indy.size();
		RowWorld.Gather(&mysize, 1, MPI::INT, gsizes, 1, MPI::INT, diaginrow);
		int maxnnz = std::accumulate(gsizes, gsizes+proccols, 0);
		int * dpls = new int[proccols]();	// displacements (zero initialized pid)
		std::partial_sum(gsizes, gsizes+proccols-1, dpls+1);
		IU * indbuf = new IU[maxnnz];
		T_promote * numbuf = new T_promote[maxnnz];

		// IntraComm::GatherV(sendbuf, int
sentcnt, sendtype, recvbuf, int * recvcnts, int * displs, recvtype, root)
		RowWorld.Gatherv(&(indy[0]), mysize, MPIType<IU>(), indbuf, gsizes, dpls, MPIType<IU>(), diaginrow);
		RowWorld.Gatherv(&(numy[0]), mysize, MPIType<T_promote>(), numbuf, gsizes, dpls, MPIType<T_promote>(), diaginrow);

		// Merge the gathered (index, value) pairs with a SPA: first hit assigns,
		// subsequent hits accumulate under the semiring's add.
		for(int i=0; i< maxnnz; ++i)
		{
			if(!isthere[indbuf[i]])
			{
				localy[indbuf[i]] = numbuf[i];	// initial assignment
				nzinds.push_back(indbuf[i]);
				isthere[indbuf[i]] = true;
			}
			else
			{
				localy[indbuf[i]] = SR::add(localy[indbuf[i]], numbuf[i]);
			}
		}
		DeleteAll(gsizes, dpls, indbuf, numbuf,isthere);

		sort(nzinds.begin(), nzinds.end());
		int nnzy = nzinds.size();
		y.ind.resize(nnzy);
		y.num.resize(nnzy);
		for(int i=0; i< nnzy; ++i)
		{
			y.ind[i] = nzinds[i];
			y.num[i] = localy[nzinds[i]];
		}
		y.length = ysize;
		delete [] localy;
	}
	else
	{
		// Off-diagonal process: receive the sparse x piece, multiply locally, and
		// ship the partial result to the diagonal (no receive buffers here).
		IU nnzx;
		ColWorld.Bcast(&nnzx, 1, MPIType<IU>(), diagincol);

		IU * xinds = new IU[nnzx];
		NUV * xnums = new NUV[nnzx];
		ColWorld.Bcast(xinds, nnzx, MPIType<IU>(), diagincol);
		ColWorld.Bcast(xnums, nnzx, MPIType<NUV>(), diagincol);

		// serial SpMV with sparse vector
		vector< IU > indy;
		vector< T_promote > numy;
		dcsc_gespmv<SR>(*(A.spSeq), xinds, xnums, nnzx, indy, numy);

		int mysize = indy.size();
		RowWorld.Gather(&mysize, 1, MPI::INT, NULL, 1, MPI::INT, diaginrow);

		// IntraComm::GatherV(sendbuf, int sentcnt, sendtype, recvbuf, int * recvcnts, int * displs, recvtype, root)
		RowWorld.Gatherv(&(indy[0]), mysize, MPIType<IU>(), NULL, NULL, NULL, MPIType<IU>(), diaginrow);
		RowWorld.Gatherv(&(numy[0]), mysize, MPIType<T_promote>(), NULL, NULL, NULL, MPIType<T_promote>(), diaginrow);

		delete [] xinds;
		delete [] xnums;
	}
	return y;
}

// Forward declaration of the buffered (OptBuf) overload defined below.
template <typename SR, typename IU, typename NUM, typename UDER>
FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote>
SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue, OptBuf<IU, typename promote_trait<NUM,IU>::T_promote > & optbuf);

template <typename SR, typename IU, typename NUM, typename UDER>
FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote> SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue) { typedef typename promote_trait<NUM,IU>::T_promote T_promote; OptBuf<IU, T_promote > optbuf = OptBuf<IU, T_promote >(); return SpMV<SR>(A, x, indexisvalue, optbuf); } //! The last parameter is a hint to the function //! If indexisvalues = true, then we do not need to transfer values for x //! This happens for BFS iterations with boolean matrices and integer rhs vectors template <typename SR, typename IU, typename NUM, typename UDER> FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote> SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue, OptBuf<IU, typename promote_trait<NUM,IU>::T_promote > & optbuf) { typedef typename promote_trait<NUM,IU>::T_promote T_promote; IU ncolA = A.getncol(); if(ncolA != x.TotalLength()) { ostringstream outs; outs << "Can not multiply, dimensions does not match"<< endl; outs << ncolA << " != " << x.TotalLength() << endl; SpParHelper::Print(outs.str()); MPI::COMM_WORLD.Abort(DIMMISMATCH); } if(!(*A.commGrid == *x.commGrid)) { cout << "Grids are not comparable for SpMV" << endl; MPI::COMM_WORLD.Abort(GRIDMISMATCH); } MPI::Intracomm World = x.commGrid->GetWorld(); MPI::Intracomm ColWorld = x.commGrid->GetColWorld(); MPI::Intracomm RowWorld = x.commGrid->GetRowWorld(); IU xlocnz = x.getlocnnz(); IU roffst = x.RowLenUntil(); IU luntil = x.LengthUntil(); IU trxlocnz, roffset, lenuntil; int diagneigh = x.commGrid->GetComplementRank(); World.Sendrecv(&xlocnz, 1, MPIType<IU>(), diagneigh, TRNNZ, &trxlocnz, 1, MPIType<IU>(), diagneigh, TRNNZ); World.Sendrecv(&roffst, 1, MPIType<IU>(), diagneigh, TROST, &roffset, 1, MPIType<IU>(), diagneigh, TROST); World.Sendrecv(&luntil, 1, MPIType<IU>(), diagneigh, TRLUT, &lenuntil, 1, MPIType<IU>(), diagneigh, TRLUT); // ABAB: Important observation is that local indices (given by x.ind) is 32-bit addressible // Copy 
them to 32 bit integers and transfer that to save 50% of off-node bandwidth IU * trxinds = new IU[trxlocnz]; IU * trxnums; World.Sendrecv(const_cast<IU*>(&x.ind[0]), xlocnz, MPIType<IU>(), diagneigh, TRI, trxinds, trxlocnz, MPIType<IU>(), diagneigh, TRI); if(!indexisvalue) { trxnums = new IU[trxlocnz]; World.Sendrecv(const_cast<IU*>(&x.num[0]), xlocnz, MPIType<IU>(), diagneigh, TRX, trxnums, trxlocnz, MPIType<IU>(), diagneigh, TRX); } transform(trxinds, trxinds+trxlocnz, trxinds, bind2nd(plus<IU>(), roffset)); // fullydist indexing (p pieces) -> matrix indexing (sqrt(p) pieces) int colneighs = ColWorld.Get_size(); int colrank = ColWorld.Get_rank(); int * colnz = new int[colneighs]; colnz[colrank] = static_cast<int>(trxlocnz); ColWorld.Allgather(MPI::IN_PLACE, 1, MPI::INT, colnz, 1, MPI::INT); int * dpls = new int[colneighs](); // displacements (zero initialized pid) std::partial_sum(colnz, colnz+colneighs-1, dpls+1); int accnz = std::accumulate(colnz, colnz+colneighs, 0); IU * indacc = new IU[accnz]; IU * numacc = new IU[accnz]; // ABAB: Future issues here, colnz is of type int (MPI limitation) // What if the aggregate vector size along the processor row/column is not 32-bit addressible? // This will happen when n/sqrt(p) > 2^31 // Currently we can solve a small problem (scale 32) with 4096 processor // For a medium problem (scale 35), we'll need 32K processors which gives sqrt(p) ~ 180 // 2^35 / 180 ~ 2^29 / 3 which is not an issue ! 
#ifdef TIMING World.Barrier(); double t0=MPI::Wtime(); #endif ColWorld.Allgatherv(trxinds, trxlocnz, MPIType<IU>(), indacc, colnz, dpls, MPIType<IU>()); #ifdef TIMING World.Barrier(); double t1=MPI::Wtime(); cblas_allgathertime += (t1-t0); #endif delete [] trxinds; if(indexisvalue) { IU lenuntilcol; if(colrank == 0) { lenuntilcol = lenuntil; } ColWorld.Bcast(&lenuntilcol, 1, MPIType<IU>(), 0); transform(indacc, indacc+accnz, numacc, bind2nd(plus<IU>(), lenuntilcol)); // fill numerical values from indices } else { ColWorld.Allgatherv(trxnums, trxlocnz, MPIType<IU>(), numacc, colnz, dpls, MPIType<IU>()); delete [] trxnums; } DeleteAll(colnz,dpls); int rowneighs = RowWorld.Get_size(); int * sendcnt = new int[rowneighs](); FullyDistSpVec<IU, T_promote> y ( x.commGrid, A.getnrow()); // identity doesn't matter for sparse vectors IU * sendindbuf; T_promote * sendnumbuf; int * sdispls; if(optbuf.totmax > 0) // graph500 optimization enabled { if(A.spSeq->getnsplit() > 0) { SpParHelper::Print("Preallocated buffers can not be used with multithreaded code yet\n"); // sendindbuf/sendnumbuf/sdispls are all allocated and filled by dcsc_gespmv_threaded int totalsent = dcsc_gespmv_threaded<SR> (*(A.spSeq), indacc, numacc, static_cast<IU>(accnz), sendindbuf, sendnumbuf, sdispls, rowneighs); for(int i=0; i<rowneighs-1; ++i) sendcnt[i] = sdispls[i+1] + sdispls[i]; sendcnt[rowneighs-1] = totalsent - sdispls[rowneighs-1]; } else { dcsc_gespmv<SR> (*(A.spSeq), indacc, numacc, static_cast<IU>(accnz), optbuf.inds, optbuf.nums, sendcnt, optbuf.dspls, rowneighs); } DeleteAll(indacc,numacc); } else { if(A.spSeq->getnsplit() > 0) { // sendindbuf/sendnumbuf/sdispls are all allocated and filled by dcsc_gespmv_threaded int totalsent = dcsc_gespmv_threaded<SR> (*(A.spSeq), indacc, numacc, static_cast<IU>(accnz), sendindbuf, sendnumbuf, sdispls, rowneighs); DeleteAll(indacc, numacc); for(int i=0; i<rowneighs-1; ++i) sendcnt[i] = sdispls[i+1] - sdispls[i]; sendcnt[rowneighs-1] = totalsent - 
sdispls[rowneighs-1]; } else { // serial SpMV with sparse vector vector< IU > indy; vector< T_promote > numy; dcsc_gespmv<SR>(*(A.spSeq), indacc, numacc, static_cast<IU>(accnz), indy, numy); // actual multiplication DeleteAll(indacc, numacc); IU bufsize = indy.size(); // as compact as possible sendindbuf = new IU[bufsize]; sendnumbuf = new T_promote[bufsize]; IU perproc = A.getlocalrows() / static_cast<IU>(rowneighs); int k = 0; // index to buffer for(int i=0; i<rowneighs; ++i) { IU end_this = (i==rowneighs-1) ? A.getlocalrows(): (i+1)*perproc; while(k < bufsize && indy[k] < end_this) { sendindbuf[k] = indy[k] - i*perproc; sendnumbuf[k] = numy[k]; ++sendcnt[i]; ++k; } } sdispls = new int[rowneighs](); partial_sum(sendcnt, sendcnt+rowneighs-1, sdispls+1); } } int * rdispls = new int[rowneighs]; int * recvcnt = new int[rowneighs]; RowWorld.Alltoall(sendcnt, 1, MPI::INT, recvcnt, 1, MPI::INT); // share the request counts // receive displacements are exact whereas send displacements have slack rdispls[0] = 0; for(int i=0; i<rowneighs-1; ++i) { rdispls[i+1] = rdispls[i] + recvcnt[i]; } int totrecv = accumulate(recvcnt,recvcnt+rowneighs,0); IU * recvindbuf = new IU[totrecv]; T_promote * recvnumbuf = new T_promote[totrecv]; #ifdef TIMING World.Barrier(); double t2=MPI::Wtime(); #endif if(optbuf.totmax > 0 && A.spSeq->getnsplit() == 0) // graph500 optimization enabled { RowWorld.Alltoallv(optbuf.inds, sendcnt, optbuf.dspls, MPIType<IU>(), recvindbuf, recvcnt, rdispls, MPIType<IU>()); RowWorld.Alltoallv(optbuf.nums, sendcnt, optbuf.dspls, MPIType<T_promote>(), recvnumbuf, recvcnt, rdispls, MPIType<T_promote>()); // T_promote=NUM delete [] sendcnt; } else { /* ofstream oput; x.commGrid->OpenDebugFile("Send", oput); oput << "To displacements: "; copy(sdispls, sdispls+rowneighs, ostream_iterator<int>(oput, " ")); oput << endl; oput << "To counts: "; copy(sendcnt, sendcnt+rowneighs, ostream_iterator<int>(oput, " ")); oput << endl; for(int i=0; i< rowneighs; ++i) { oput << "To 
neighbor: " << i << endl; copy(sendindbuf+sdispls[i], sendindbuf+sdispls[i]+sendcnt[i], ostream_iterator<IU>(oput, " ")); oput << endl; copy(sendnumbuf+sdispls[i], sendnumbuf+sdispls[i]+sendcnt[i], ostream_iterator<T_promote>(oput, " ")); oput << endl; } oput.close(); */ RowWorld.Alltoallv(sendindbuf, sendcnt, sdispls, MPIType<IU>(), recvindbuf, recvcnt, rdispls, MPIType<IU>()); RowWorld.Alltoallv(sendnumbuf, sendcnt, sdispls, MPIType<T_promote>(), recvnumbuf, recvcnt, rdispls, MPIType<T_promote>()); DeleteAll(sendindbuf, sendnumbuf); DeleteAll(sendcnt, sdispls); } #ifdef TIMING World.Barrier(); double t3=MPI::Wtime(); cblas_alltoalltime += (t3-t2); #endif // ofstream output; // A.commGrid->OpenDebugFile("Recv", output); // copy(recvindbuf, recvindbuf+totrecv, ostream_iterator<IU>(output," ")); output << endl; // output.close(); #ifndef HEAPMERGE // Alternative 1: SPA-like data structure DeleteAll(recvcnt, rdispls); IU ysize = y.MyLocLength(); // my local length is only O(n/p) T_promote * localy = new T_promote[ysize]; bool * isthere = new bool[ysize]; vector<IU> nzinds; // nonzero indices fill_n(isthere, ysize, false); for(int i=0; i< totrecv; ++i) { IU topush = recvindbuf[i]; if(!isthere[topush]) { localy[topush] = recvnumbuf[i]; // initial assignment nzinds.push_back(topush); isthere[topush] = true; } else { localy[topush] = SR::add(localy[topush], recvnumbuf[i]); } } DeleteAll(isthere, recvindbuf, recvnumbuf); sort(nzinds.begin(), nzinds.end()); int nnzy = nzinds.size(); y.ind.resize(nnzy); y.num.resize(nnzy); for(int i=0; i< nnzy; ++i) { y.ind[i] = nzinds[i]; y.num[i] = localy[nzinds[i]]; } delete [] localy; #else // Alternative 2: Heap-merge IU hsize = 0; IU inf = numeric_limits<IU>::min(); IU sup = numeric_limits<IU>::max(); KNHeap< IU, IU > sHeap(sup, inf); int * processed = new int[rowneighs](); for(int i=0; i<rowneighs; ++i) { if(recvcnt[i] > 0) { // key, proc_id sHeap.insert(recvindbuf[rdispls[i]], i); ++hsize; } } IU key, locv; if(hsize > 0) { 
sHeap.deleteMin(&key, &locv); y.ind.push_back(key); y.num.push_back(recvnumbuf[rdispls[locv]]); // nothing is processed yet if( (++(processed[locv])) < recvcnt[locv] ) sHeap.insert(recvindbuf[rdispls[locv]+processed[locv]], locv); else --hsize; } while(hsize > 0) { sHeap.deleteMin(&key, &locv); IU deref = rdispls[locv] + processed[locv]; if(y.ind.back() == key) // y.ind is surely not empty { y.num.back() = SR::add(y.num.back(), recvnumbuf[deref]); // ABAB: Benchmark actually allows us to be non-deterministic in terms of parent selection // We can just skip this addition operator (if it's a max/min select) } else { y.ind.push_back(key); y.num.push_back(recvnumbuf[deref]); } if( (++(processed[locv])) < recvcnt[locv] ) sHeap.insert(recvindbuf[rdispls[locv]+processed[locv]], locv); else --hsize; } DeleteAll(recvcnt, rdispls,processed); DeleteAll(recvindbuf, recvnumbuf); #endif return y; } template <typename SR, typename IU, typename NUM, typename NUV, typename UDER> FullyDistVec<IU,typename promote_trait<NUM,NUV>::T_promote> SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistVec<IU,NUV> & x ) { typedef typename promote_trait<NUM,NUV>::T_promote T_promote; IU ncolA = A.getncol(); if(ncolA != x.TotalLength()) { ostringstream outs; outs << "Can not multiply, dimensions does not match"<< endl; outs << ncolA << " != " << x.TotalLength() << endl; SpParHelper::Print(outs.str()); MPI::COMM_WORLD.Abort(DIMMISMATCH); } if(!(*A.commGrid == *x.commGrid)) { cout << "Grids are not comparable for SpMV" << endl; MPI::COMM_WORLD.Abort(GRIDMISMATCH); } MPI::Intracomm World = x.commGrid->GetWorld(); MPI::Intracomm ColWorld = x.commGrid->GetColWorld(); MPI::Intracomm RowWorld = x.commGrid->GetRowWorld(); int xsize = (int) x.LocArrSize(); int trxsize = 0; int diagneigh = x.commGrid->GetComplementRank(); World.Sendrecv(&xsize, 1, MPI::INT, diagneigh, TRX, &trxsize, 1, MPI::INT, diagneigh, TRX); NUV * trxnums = new NUV[trxsize]; World.Sendrecv(const_cast<NUV*>(&x.arr[0]), xsize, 
MPIType<NUV>(), diagneigh, TRX, trxnums, trxsize, MPIType<NUV>(), diagneigh, TRX); int colneighs = ColWorld.Get_size(); int colrank = ColWorld.Get_rank(); int * colsize = new int[colneighs]; colsize[colrank] = trxsize; ColWorld.Allgather(MPI::IN_PLACE, 1, MPI::INT, colsize, 1, MPI::INT); int * dpls = new int[colneighs](); // displacements (zero initialized pid) std::partial_sum(colsize, colsize+colneighs-1, dpls+1); int accsize = std::accumulate(colsize, colsize+colneighs, 0); NUV * numacc = new NUV[accsize]; ColWorld.Allgatherv(trxnums, trxsize, MPIType<NUV>(), numacc, colsize, dpls, MPIType<NUV>()); delete [] trxnums; // serial SpMV with dense vector T_promote id = SR::id(); IU ysize = A.getlocalrows(); T_promote * localy = new T_promote[ysize]; fill_n(localy, ysize, id); dcsc_gespmv<SR>(*(A.spSeq), numacc, localy); //ofstream oput; //A.commGrid->OpenDebugFile("localy", oput); //copy(localy, localy+ysize, ostream_iterator<T_promote>(oput, " ")); oput << endl; //oput.close(); DeleteAll(numacc,colsize, dpls); // FullyDistVec<IT,NT>(shared_ptr<CommGrid> grid, IT globallen, NT initval, NT id) FullyDistVec<IU, T_promote> y ( x.commGrid, A.getnrow(), id, id); IU yintlen = y.MyRowLength(); int rowneighs = RowWorld.Get_size(); int rowrank = RowWorld.Get_rank(); IU begptr, endptr; for(int i=0; i< rowneighs; ++i) { begptr = y.RowLenUntil(i); if(i == rowneighs-1) { endptr = ysize; } else { endptr = y.RowLenUntil(i+1); } // IntraComm::Reduce(sendbuf, recvbuf, count, type, op, root), recvbuf is irrelevant except root RowWorld.Reduce(localy+begptr, &(y.arr[0]), endptr-begptr, MPIType<T_promote>(), SR::mpi_op(), i); } delete [] localy; return y; } template <typename SR, typename IU, typename NUM, typename NUV, typename UDER> FullyDistSpVec<IU,typename promote_trait<NUM,NUV>::T_promote> SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,NUV> & x) { typedef typename promote_trait<NUM,NUV>::T_promote T_promote; IU ncolA = A.getncol(); if(ncolA != x.TotalLength()) { 
ostringstream outs; outs << "Can not multiply, dimensions does not match"<< endl; outs << ncolA << " != " << x.TotalLength() << endl; SpParHelper::Print(outs.str()); MPI::COMM_WORLD.Abort(DIMMISMATCH); } if(!(*A.commGrid == *x.commGrid)) { cout << "Grids are not comparable for SpMV" << endl; MPI::COMM_WORLD.Abort(GRIDMISMATCH); } MPI::Intracomm World = x.commGrid->GetWorld(); MPI::Intracomm ColWorld = x.commGrid->GetColWorld(); MPI::Intracomm RowWorld = x.commGrid->GetRowWorld(); int xlocnz = (int) x.getlocnnz(); int trxlocnz = 0; int roffst = x.RowLenUntil(); int offset; int diagneigh = x.commGrid->GetComplementRank(); World.Sendrecv(&xlocnz, 1, MPI::INT, diagneigh, TRX, &trxlocnz, 1, MPI::INT, diagneigh, TRX); World.Sendrecv(&roffst, 1, MPI::INT, diagneigh, TROST, &offset, 1, MPI::INT, diagneigh, TROST); IU * trxinds = new IU[trxlocnz]; NUV * trxnums = new NUV[trxlocnz]; World.Sendrecv(const_cast<IU*>(&x.ind[0]), xlocnz, MPIType<IU>(), diagneigh, TRX, trxinds, trxlocnz, MPIType<IU>(), diagneigh, TRX); World.Sendrecv(const_cast<NUV*>(&x.num[0]), xlocnz, MPIType<NUV>(), diagneigh, TRX, trxnums, trxlocnz, MPIType<NUV>(), diagneigh, TRX); transform(trxinds, trxinds+trxlocnz, trxinds, bind2nd(plus<IU>(), offset)); // fullydist indexing (n pieces) -> matrix indexing (sqrt(p) pieces) int colneighs = ColWorld.Get_size(); int colrank = ColWorld.Get_rank(); int * colnz = new int[colneighs]; colnz[colrank] = trxlocnz; ColWorld.Allgather(MPI::IN_PLACE, 1, MPI::INT, colnz, 1, MPI::INT); int * dpls = new int[colneighs](); // displacements (zero initialized pid) std::partial_sum(colnz, colnz+colneighs-1, dpls+1); int accnz = std::accumulate(colnz, colnz+colneighs, 0); IU * indacc = new IU[accnz]; NUV * numacc = new NUV[accnz]; // ABAB: Future issues here, colnz is of type int (MPI limitation) // What if the aggregate vector size along the processor row/column is not 32-bit addressible? 
ColWorld.Allgatherv(trxinds, trxlocnz, MPIType<IU>(), indacc, colnz, dpls, MPIType<IU>()); ColWorld.Allgatherv(trxnums, trxlocnz, MPIType<NUV>(), numacc, colnz, dpls, MPIType<NUV>()); DeleteAll(trxinds, trxnums); // serial SpMV with sparse vector vector< IU > indy; vector< T_promote > numy; dcsc_gespmv<SR>(*(A.spSeq), indacc, numacc, static_cast<IU>(accnz), indy, numy); // actual multiplication DeleteAll(indacc, numacc); DeleteAll(colnz, dpls); FullyDistSpVec<IU, T_promote> y ( x.commGrid, A.getnrow()); // identity doesn't matter for sparse vectors IU yintlen = y.MyRowLength(); int rowneighs = RowWorld.Get_size(); vector< vector<IU> > sendind(rowneighs); vector< vector<T_promote> > sendnum(rowneighs); typename vector<IU>::size_type outnz = indy.size(); for(typename vector<IU>::size_type i=0; i< outnz; ++i) { IU locind; int rown = y.OwnerWithinRow(yintlen, indy[i], locind); sendind[rown].push_back(locind); sendnum[rown].push_back(numy[i]); } IU * sendindbuf = new IU[outnz]; T_promote * sendnumbuf = new T_promote[outnz]; int * sendcnt = new int[rowneighs]; int * sdispls = new int[rowneighs]; for(int i=0; i<rowneighs; ++i) sendcnt[i] = sendind[i].size(); int * rdispls = new int[rowneighs]; int * recvcnt = new int[rowneighs]; RowWorld.Alltoall(sendcnt, 1, MPI::INT, recvcnt, 1, MPI::INT); // share the request counts sdispls[0] = 0; rdispls[0] = 0; for(int i=0; i<rowneighs-1; ++i) { sdispls[i+1] = sdispls[i] + sendcnt[i]; rdispls[i+1] = rdispls[i] + recvcnt[i]; } int totrecv = accumulate(recvcnt,recvcnt+rowneighs,0); IU * recvindbuf = new IU[totrecv]; T_promote * recvnumbuf = new T_promote[totrecv]; for(int i=0; i<rowneighs; ++i) { copy(sendind[i].begin(), sendind[i].end(), sendindbuf+sdispls[i]); vector<IU>().swap(sendind[i]); } for(int i=0; i<rowneighs; ++i) { copy(sendnum[i].begin(), sendnum[i].end(), sendnumbuf+sdispls[i]); vector<T_promote>().swap(sendnum[i]); } RowWorld.Alltoallv(sendindbuf, sendcnt, sdispls, MPIType<IU>(), recvindbuf, recvcnt, rdispls, 
MPIType<IU>()); RowWorld.Alltoallv(sendnumbuf, sendcnt, sdispls, MPIType<T_promote>(), recvnumbuf, recvcnt, rdispls, MPIType<T_promote>()); DeleteAll(sendindbuf, sendnumbuf); DeleteAll(sendcnt, recvcnt, sdispls, rdispls); // define a SPA-like data structure IU ysize = y.MyLocLength(); T_promote * localy = new T_promote[ysize]; bool * isthere = new bool[ysize]; vector<IU> nzinds; // nonzero indices fill_n(isthere, ysize, false); for(int i=0; i< totrecv; ++i) { if(!isthere[recvindbuf[i]]) { localy[recvindbuf[i]] = recvnumbuf[i]; // initial assignment nzinds.push_back(recvindbuf[i]); isthere[recvindbuf[i]] = true; } else { localy[recvindbuf[i]] = SR::add(localy[recvindbuf[i]], recvnumbuf[i]); } } DeleteAll(isthere, recvindbuf, recvnumbuf); sort(nzinds.begin(), nzinds.end()); int nnzy = nzinds.size(); y.ind.resize(nnzy); y.num.resize(nnzy); for(int i=0; i< nnzy; ++i) { y.ind[i] = nzinds[i]; y.num[i] = localy[nzinds[i]]; } delete [] localy; return y; } ///////////////////// // Apply // based on SpMV template <typename _BinaryOperation, typename IU, typename NUM, typename NUV, typename UDER> void ColWiseApply (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,NUV> & x, _BinaryOperation __binary_op) { if(!(*A.commGrid == *x.commGrid)) { cout << "Grids are not comparable for ColWiseApply" << endl; MPI::COMM_WORLD.Abort(GRIDMISMATCH); } MPI::Intracomm World = x.commGrid->GetWorld(); MPI::Intracomm ColWorld = x.commGrid->GetColWorld(); MPI::Intracomm RowWorld = x.commGrid->GetRowWorld(); int xlocnz = (int) x.getlocnnz(); int trxlocnz = 0; int roffst = x.RowLenUntil(); int offset; int diagneigh = x.commGrid->GetComplementRank(); World.Sendrecv(&xlocnz, 1, MPI::INT, diagneigh, TRX, &trxlocnz, 1, MPI::INT, diagneigh, TRX); World.Sendrecv(&roffst, 1, MPI::INT, diagneigh, TROST, &offset, 1, MPI::INT, diagneigh, TROST); IU * trxinds = new IU[trxlocnz]; NUV * trxnums = new NUV[trxlocnz]; World.Sendrecv(const_cast<IU*>(&x.ind[0]), xlocnz, MPIType<IU>(), diagneigh, TRX, 
trxinds, trxlocnz, MPIType<IU>(), diagneigh, TRX); World.Sendrecv(const_cast<NUV*>(&x.num[0]), xlocnz, MPIType<NUV>(), diagneigh, TRX, trxnums, trxlocnz, MPIType<NUV>(), diagneigh, TRX); transform(trxinds, trxinds+trxlocnz, trxinds, bind2nd(plus<IU>(), offset)); // fullydist indexing (n pieces) -> matrix indexing (sqrt(p) pieces) int colneighs = ColWorld.Get_size(); int colrank = ColWorld.Get_rank(); int * colnz = new int[colneighs]; colnz[colrank] = trxlocnz; ColWorld.Allgather(MPI::IN_PLACE, 1, MPI::INT, colnz, 1, MPI::INT); int * dpls = new int[colneighs](); // displacements (zero initialized pid) std::partial_sum(colnz, colnz+colneighs-1, dpls+1); int accnz = std::accumulate(colnz, colnz+colneighs, 0); IU * indacc = new IU[accnz]; NUV * numacc = new NUV[accnz]; // ABAB: Future issues here, colnz is of type int (MPI limitation) // What if the aggregate vector size along the processor row/column is not 32-bit addressible? ColWorld.Allgatherv(trxinds, trxlocnz, MPIType<IU>(), indacc, colnz, dpls, MPIType<IU>()); ColWorld.Allgatherv(trxnums, trxlocnz, MPIType<NUV>(), numacc, colnz, dpls, MPIType<NUV>()); DeleteAll(trxinds, trxnums); // serial SpMV with sparse vector //dcsc_gespmv<SR>(*(A.spSeq), indacc, numacc, static_cast<IU>(accnz), indy, numy); // actual multiplication dcsc_colwise_apply(*(A.spSeq), indacc, numacc, static_cast<IU>(accnz), __binary_op); // actual operation DeleteAll(indacc, numacc); DeleteAll(colnz, dpls); } ///////////////////// template <typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB> SpParMat<IU,typename promote_trait<NU1,NU2>::T_promote,typename promote_trait<UDERA,UDERB>::T_promote> EWiseMult (const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B , bool exclude) { typedef typename promote_trait<NU1,NU2>::T_promote N_promote; typedef typename promote_trait<UDERA,UDERB>::T_promote DER_promote; if(*(A.commGrid) == *(B.commGrid)) { DER_promote * result = new DER_promote( 
EWiseMult(*(A.spSeq),*(B.spSeq),exclude) ); return SpParMat<IU, N_promote, DER_promote> (result, A.commGrid); } else { cout << "Grids are not comparable elementwise multiplication" << endl; MPI::COMM_WORLD.Abort(GRIDMISMATCH); return SpParMat< IU,N_promote,DER_promote >(); } } template <typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB, typename _BinaryOperation> SpParMat<IU,typename promote_trait<NU1,NU2>::T_promote,typename promote_trait<UDERA,UDERB>::T_promote> EWiseApply (const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B, _BinaryOperation __binary_op, bool notB, const NU2& defaultBVal) { typedef typename promote_trait<NU1,NU2>::T_promote N_promote; typedef typename promote_trait<UDERA,UDERB>::T_promote DER_promote; if(*(A.commGrid) == *(B.commGrid)) { DER_promote * result = new DER_promote( EWiseApply(*(A.spSeq),*(B.spSeq), __binary_op, notB, defaultBVal) ); return SpParMat<IU, N_promote, DER_promote> (result, A.commGrid); } else { cout << "Grids are not comparable elementwise apply" << endl; MPI::COMM_WORLD.Abort(GRIDMISMATCH); return SpParMat< IU,N_promote,DER_promote >(); } } /** * if exclude is true, then we prune all entries W[i] != zero from V * if exclude is false, then we perform a proper elementwise multiplication **/ template <typename IU, typename NU1, typename NU2> SpParVec<IU,typename promote_trait<NU1,NU2>::T_promote> EWiseMult (const SpParVec<IU,NU1> & V, const DenseParVec<IU,NU2> & W , bool exclude, NU2 zero) { typedef typename promote_trait<NU1,NU2>::T_promote T_promote; if(*(V.commGrid) == *(W.commGrid)) { SpParVec< IU, T_promote> Product(V.commGrid); Product.length = V.length; if(Product.diagonal) { if(exclude) { IU size= V.ind.size(); for(IU i=0; i<size; ++i) { if(W.arr.size() <= V.ind[i] || W.arr[V.ind[i]] == zero) // keep only those { Product.ind.push_back(V.ind[i]); Product.num.push_back(V.num[i]); } } } else { IU size= V.ind.size(); for(IU i=0; i<size; ++i) { if(W.arr.size() > V.ind[i] && 
W.arr[V.ind[i]] != zero) // keep only those { Product.ind.push_back(V.ind[i]); Product.num.push_back(V.num[i] * W.arr[V.ind[i]]); } } } } return Product; } else { cout << "Grids are not comparable elementwise multiplication" << endl; MPI::COMM_WORLD.Abort(GRIDMISMATCH); return SpParVec< IU,T_promote>(); } } /** * if exclude is true, then we prune all entries W[i] != zero from V * if exclude is false, then we perform a proper elementwise multiplication **/ template <typename IU, typename NU1, typename NU2> FullyDistSpVec<IU,typename promote_trait<NU1,NU2>::T_promote> EWiseMult (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , bool exclude, NU2 zero) { typedef typename promote_trait<NU1,NU2>::T_promote T_promote; if(*(V.commGrid) == *(W.commGrid)) { FullyDistSpVec< IU, T_promote> Product(V.commGrid); if(V.glen != W.glen) { cerr << "Vector dimensions don't match for EWiseMult\n"; MPI::COMM_WORLD.Abort(DIMMISMATCH); } else { Product.glen = V.glen; IU size= V.getlocnnz(); if(exclude) { #if defined(_OPENMP) && defined(CBLAS_EXPERIMENTAL) // not faster than serial int actual_splits = cblas_splits * 1; // 1 is the parallel slackness vector <IU> tlosizes (actual_splits, 0); vector < vector<IU> > tlinds(actual_splits); vector < vector<T_promote> > tlnums(actual_splits); IU tlsize = size / actual_splits; #pragma omp parallel for //schedule(dynamic, 1) for(IU t = 0; t < actual_splits; ++t) { IU tlbegin = t*tlsize; IU tlend = (t==actual_splits-1)? 
size : (t+1)*tlsize; for(IU i=tlbegin; i<tlend; ++i) { if(W.arr[V.ind[i]] == zero) // keep only those { tlinds[t].push_back(V.ind[i]); tlnums[t].push_back(V.num[i]); tlosizes[t]++; } } } vector<IU> prefix_sum(actual_splits+1,0); partial_sum(tlosizes.begin(), tlosizes.end(), prefix_sum.begin()+1); Product.ind.resize(prefix_sum[actual_splits]); Product.num.resize(prefix_sum[actual_splits]); #pragma omp parallel for //schedule(dynamic, 1) for(IU t=0; t< actual_splits; ++t) { copy(tlinds[t].begin(), tlinds[t].end(), Product.ind.begin()+prefix_sum[t]); copy(tlnums[t].begin(), tlnums[t].end(), Product.num.begin()+prefix_sum[t]); } #else for(IU i=0; i<size; ++i) { if(W.arr[V.ind[i]] == zero) // keep only those { Product.ind.push_back(V.ind[i]); Product.num.push_back(V.num[i]); } } #endif } else { for(IU i=0; i<size; ++i) { if(W.arr[V.ind[i]] != zero) // keep only those { Product.ind.push_back(V.ind[i]); Product.num.push_back(V.num[i] * W.arr[V.ind[i]]); } } } } return Product; } else { cout << "Grids are not comparable elementwise multiplication" << endl; MPI::COMM_WORLD.Abort(GRIDMISMATCH); return FullyDistSpVec< IU,T_promote>(); } } template <typename IU, typename NU1, typename NU2, typename _BinaryOperation> FullyDistSpVec<IU,typename promote_trait<NU1,NU2>::T_promote> EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, typename promote_trait<NU1,NU2>::T_promote zero) { typedef typename promote_trait<NU1,NU2>::T_promote T_promote; if(*(V.commGrid) == *(W.commGrid)) { //V.zero = zero; //W.zero = zero; FullyDistSpVec< IU, T_promote> Product(V.commGrid); Product.zero = zero; FullyDistVec< IU, NU1> DV (V); if(V.glen != W.glen) { cerr << "Vector dimensions don't match for EWiseMult\n"; MPI::COMM_WORLD.Abort(DIMMISMATCH); } else { Product.glen = V.glen; Product.zero = zero; IU size= W.LocArrSize(); IU sp_iter = 0; for(IU i=0; i<size; ++i) { T_promote pro; if(V.ind[sp_iter] == i) { pro = _binary_op(V.num[i], W.arr[i]); 
sp_iter++; } else { pro = _binary_op(zero, W.arr[i]); } if ( pro != zero) // keep only those { Product.ind.push_back(i); Product.num.push_back(pro); } } } return Product; } else { cout << "Grids are not comparable elementwise multiplication" << endl; MPI::COMM_WORLD.Abort(GRIDMISMATCH); return FullyDistSpVec< IU,T_promote>(); } } #endif
convolution_3x3_pack1to4_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_pack1to4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; #if __ARM_NEON && __aarch64__ Mat top_blob_fp32(outw, outh, opt.num_threads, (size_t)4u * 4 * 2, 4 * 2, opt.workspace_allocator); #else Mat top_blob_fp32(outw, outh, opt.num_threads, (size_t)4u * 4, 4, opt.workspace_allocator); #endif const float* bias = _bias; int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ int nn_outch = 0; nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; Mat out0 = top_blob_fp32.channel(get_omp_thread_num()); float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f); float32x4_t _bias1 = bias ? 
vld1q_f32((const float*)bias + (p + 1) * 4) : vdupq_n_f32(0.f); { float* ptr = (float*)out0; for (int i = 0; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { vst1q_f32(ptr, _bias0); vst1q_f32(ptr + 4, _bias0); vst1q_f32(ptr + 8, _bias0); vst1q_f32(ptr + 12, _bias0); vst1q_f32(ptr + 16, _bias1); vst1q_f32(ptr + 20, _bias1); vst1q_f32(ptr + 24, _bias1); vst1q_f32(ptr + 28, _bias1); ptr += 32; } for (; j + 1 < outw; j += 2) { vst1q_f32(ptr, _bias0); vst1q_f32(ptr + 4, _bias0); vst1q_f32(ptr + 8, _bias1); vst1q_f32(ptr + 12, _bias1); ptr += 16; } for (; j < outw; j++) { vst1q_f32(ptr, _bias0); vst1q_f32(ptr + 4, _bias1); ptr += 8; } } } const unsigned short* k0 = kernel.channel(p); const unsigned short* k1 = kernel.channel(p + 1); int q = 0; for (; q < inch - 1; q++) { float* outptr0 = out0; const Mat img0 = bottom_blob.channel(q); const unsigned short* r0 = img0.row<const unsigned short>(0); const unsigned short* r1 = img0.row<const unsigned short>(1); const unsigned short* r2 = img0.row<const unsigned short>(2); float32x4_t _k00_0 = vcvt_f32_bf16(vld1_u16(k0)); float32x4_t _k01_0 = vcvt_f32_bf16(vld1_u16(k0 + 4)); float32x4_t _k02_0 = vcvt_f32_bf16(vld1_u16(k0 + 8)); float32x4_t _k10_0 = vcvt_f32_bf16(vld1_u16(k0 + 12)); float32x4_t _k11_0 = vcvt_f32_bf16(vld1_u16(k0 + 16)); float32x4_t _k12_0 = vcvt_f32_bf16(vld1_u16(k0 + 20)); float32x4_t _k20_0 = vcvt_f32_bf16(vld1_u16(k0 + 24)); float32x4_t _k21_0 = vcvt_f32_bf16(vld1_u16(k0 + 28)); float32x4_t _k22_0 = vcvt_f32_bf16(vld1_u16(k0 + 32)); float32x4_t _k00_1 = vcvt_f32_bf16(vld1_u16(k1)); float32x4_t _k01_1 = vcvt_f32_bf16(vld1_u16(k1 + 4)); float32x4_t _k02_1 = vcvt_f32_bf16(vld1_u16(k1 + 8)); float32x4_t _k10_1 = vcvt_f32_bf16(vld1_u16(k1 + 12)); float32x4_t _k11_1 = vcvt_f32_bf16(vld1_u16(k1 + 16)); float32x4_t _k12_1 = vcvt_f32_bf16(vld1_u16(k1 + 20)); float32x4_t _k20_1 = vcvt_f32_bf16(vld1_u16(k1 + 24)); float32x4_t _k21_1 = vcvt_f32_bf16(vld1_u16(k1 + 28)); float32x4_t _k22_1 = 
vcvt_f32_bf16(vld1_u16(k1 + 32)); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { asm volatile( "prfm pldl1keep, [%1, #64] \n" "ld1 {v0.4h}, [%1], #8 \n" "ld1 {v1.s}[0], [%1] \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0] \n" "fmla v24.4s, %8.4s, v0.s[0] \n" "fmla v25.4s, %8.4s, v0.s[1] \n" "fmla v26.4s, %8.4s, v0.s[2] \n" "fmla v27.4s, %8.4s, v0.s[3] \n" "fmla v28.4s, %17.4s, v0.s[0] \n" "fmla v29.4s, %17.4s, v0.s[1] \n" "fmla v30.4s, %17.4s, v0.s[2] \n" "fmla v31.4s, %17.4s, v0.s[3] \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v2.4h}, [%2], #8 \n" "ld1 {v3.s}[0], [%2] \n" "fmla v24.4s, %9.4s, v0.s[1] \n" "fmla v25.4s, %9.4s, v0.s[2] \n" "fmla v26.4s, %9.4s, v0.s[3] \n" "fmla v27.4s, %9.4s, v1.s[0] \n" "fmla v28.4s, %18.4s, v0.s[1] \n" "fmla v29.4s, %18.4s, v0.s[2] \n" "fmla v30.4s, %18.4s, v0.s[3] \n" "fmla v31.4s, %18.4s, v1.s[0] \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v24.4s, %10.4s, v0.s[2] \n" "fmla v25.4s, %10.4s, v0.s[3] \n" "fmla v26.4s, %10.4s, v1.s[0] \n" "fmla v27.4s, %10.4s, v1.s[1] \n" "fmla v28.4s, %19.4s, v0.s[2] \n" "fmla v29.4s, %19.4s, v0.s[3] \n" "fmla v30.4s, %19.4s, v1.s[0] \n" "fmla v31.4s, %19.4s, v1.s[1] \n" "fmla v24.4s, %11.4s, v2.s[0] \n" "fmla v25.4s, %11.4s, v2.s[1] \n" "fmla v26.4s, %11.4s, v2.s[2] \n" "fmla v27.4s, %11.4s, v2.s[3] \n" "fmla v28.4s, %20.4s, v2.s[0] \n" "fmla v29.4s, %20.4s, v2.s[1] \n" "fmla v30.4s, %20.4s, v2.s[2] \n" "fmla v31.4s, %20.4s, v2.s[3] \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v0.4h}, [%3], #8 \n" "ld1 {v1.s}[0], [%3] \n" "fmla v24.4s, %12.4s, v2.s[1] \n" "fmla v25.4s, %12.4s, v2.s[2] \n" "fmla v26.4s, %12.4s, v2.s[3] \n" "fmla v27.4s, %12.4s, v3.s[0] \n" "fmla v28.4s, %21.4s, v2.s[1] \n" "fmla v29.4s, %21.4s, v2.s[2] \n" "fmla v30.4s, %21.4s, v2.s[3] \n" "fmla v31.4s, %21.4s, v3.s[0] 
\n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "fmla v24.4s, %13.4s, v2.s[2] \n" "fmla v25.4s, %13.4s, v2.s[3] \n" "fmla v26.4s, %13.4s, v3.s[0] \n" "fmla v27.4s, %13.4s, v3.s[1] \n" "fmla v28.4s, %22.4s, v2.s[2] \n" "fmla v29.4s, %22.4s, v2.s[3] \n" "fmla v30.4s, %22.4s, v3.s[0] \n" "fmla v31.4s, %22.4s, v3.s[1] \n" "fmla v24.4s, %14.4s, v0.s[0] \n" "fmla v25.4s, %14.4s, v0.s[1] \n" "fmla v26.4s, %14.4s, v0.s[2] \n" "fmla v27.4s, %14.4s, v0.s[3] \n" "fmla v28.4s, %23.4s, v0.s[0] \n" "fmla v29.4s, %23.4s, v0.s[1] \n" "fmla v30.4s, %23.4s, v0.s[2] \n" "fmla v31.4s, %23.4s, v0.s[3] \n" "fmla v24.4s, %15.4s, v0.s[1] \n" "fmla v25.4s, %15.4s, v0.s[2] \n" "fmla v26.4s, %15.4s, v0.s[3] \n" "fmla v27.4s, %15.4s, v1.s[0] \n" "fmla v28.4s, %24.4s, v0.s[1] \n" "fmla v29.4s, %24.4s, v0.s[2] \n" "fmla v30.4s, %24.4s, v0.s[3] \n" "fmla v31.4s, %24.4s, v1.s[0] \n" "sub %0, %0, #64 \n" "fmla v24.4s, %16.4s, v0.s[2] \n" "fmla v25.4s, %16.4s, v0.s[3] \n" "fmla v26.4s, %16.4s, v1.s[0] \n" "fmla v27.4s, %16.4s, v1.s[1] \n" "fmla v28.4s, %25.4s, v0.s[2] \n" "fmla v29.4s, %25.4s, v0.s[3] \n" "fmla v30.4s, %25.4s, v1.s[0] \n" "fmla v31.4s, %25.4s, v1.s[1] \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00_0), // %8 "w"(_k01_0), // %9 "w"(_k02_0), // %10 "w"(_k10_0), // %11 "w"(_k11_0), // %12 "w"(_k12_0), // %13 "w"(_k20_0), // %14 "w"(_k21_0), // %15 "w"(_k22_0), // %16 "w"(_k00_1), // %17 "w"(_k01_1), // %18 "w"(_k02_1), // %19 "w"(_k10_1), // %20 "w"(_k11_1), // %21 "w"(_k12_1), // %22 "w"(_k20_1), // %23 "w"(_k21_1), // %24 "w"(_k22_1) // %25 : "memory", "v0", "v1", "v2", "v3", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; j + 1 < outw; j += 2) { asm volatile( "prfm pldl1keep, [%1, #64] \n" "ld1 {v0.4h}, [%1] \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v24.4s, v25.4s, 
v26.4s, v27.4s}, [%0] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v24.4s, %8.4s, v0.s[0] \n" "fmla v25.4s, %8.4s, v0.s[1] \n" "fmla v26.4s, %17.4s, v0.s[0] \n" "fmla v27.4s, %17.4s, v0.s[1] \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v1.4h}, [%2] \n" "fmla v24.4s, %9.4s, v0.s[1] \n" "fmla v25.4s, %9.4s, v0.s[2] \n" "fmla v26.4s, %18.4s, v0.s[1] \n" "fmla v27.4s, %18.4s, v0.s[2] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v24.4s, %10.4s, v0.s[2] \n" "fmla v25.4s, %10.4s, v0.s[3] \n" "fmla v26.4s, %19.4s, v0.s[2] \n" "fmla v27.4s, %19.4s, v0.s[3] \n" "fmla v24.4s, %11.4s, v1.s[0] \n" "fmla v25.4s, %11.4s, v1.s[1] \n" "fmla v26.4s, %20.4s, v1.s[0] \n" "fmla v27.4s, %20.4s, v1.s[1] \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v0.4h}, [%3] \n" "fmla v24.4s, %12.4s, v1.s[1] \n" "fmla v25.4s, %12.4s, v1.s[2] \n" "fmla v26.4s, %21.4s, v1.s[1] \n" "fmla v27.4s, %21.4s, v1.s[2] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v24.4s, %13.4s, v1.s[2] \n" "fmla v25.4s, %13.4s, v1.s[3] \n" "fmla v26.4s, %22.4s, v1.s[2] \n" "fmla v27.4s, %22.4s, v1.s[3] \n" "fmla v24.4s, %14.4s, v0.s[0] \n" "fmla v25.4s, %14.4s, v0.s[1] \n" "fmla v26.4s, %23.4s, v0.s[0] \n" "fmla v27.4s, %23.4s, v0.s[1] \n" "add %1, %1, #4 \n" "fmla v24.4s, %15.4s, v0.s[1] \n" "fmla v25.4s, %15.4s, v0.s[2] \n" "fmla v26.4s, %24.4s, v0.s[1] \n" "fmla v27.4s, %24.4s, v0.s[2] \n" "add %2, %2, #4 \n" "fmla v24.4s, %16.4s, v0.s[2] \n" "fmla v25.4s, %16.4s, v0.s[3] \n" "fmla v26.4s, %25.4s, v0.s[2] \n" "fmla v27.4s, %25.4s, v0.s[3] \n" "add %3, %3, #4 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00_0), // %8 "w"(_k01_0), // %9 "w"(_k02_0), // %10 "w"(_k10_0), // %11 "w"(_k11_0), // %12 "w"(_k12_0), // %13 "w"(_k20_0), // %14 "w"(_k21_0), // %15 "w"(_k22_0), // %16 "w"(_k00_1), // %17 "w"(_k01_1), // %18 "w"(_k02_1), // %19 "w"(_k10_1), // %20 "w"(_k11_1), // %21 "w"(_k12_1), // %22 "w"(_k20_1), // %23 "w"(_k21_1), // 
%24 "w"(_k22_1) // %25 : "memory", "v0", "v1", "v24", "v25", "v26", "v27"); } for (; j < outw; j++) { float32x4_t _sum00 = vld1q_f32(outptr0); float32x4_t _sum10 = vld1q_f32(outptr0 + 4); float32x4_t _r0 = vcvt_f32_bf16(vld1_u16(r0)); float32x4_t _r1 = vcvt_f32_bf16(vld1_u16(r1)); float32x4_t _r2 = vcvt_f32_bf16(vld1_u16(r2)); _sum00 = vfmaq_laneq_f32(_sum00, _k00_0, _r0, 0); _sum00 = vfmaq_laneq_f32(_sum00, _k01_0, _r0, 1); _sum00 = vfmaq_laneq_f32(_sum00, _k02_0, _r0, 2); _sum00 = vfmaq_laneq_f32(_sum00, _k10_0, _r1, 0); _sum00 = vfmaq_laneq_f32(_sum00, _k11_0, _r1, 1); _sum00 = vfmaq_laneq_f32(_sum00, _k12_0, _r1, 2); _sum00 = vfmaq_laneq_f32(_sum00, _k20_0, _r2, 0); _sum00 = vfmaq_laneq_f32(_sum00, _k21_0, _r2, 1); _sum00 = vfmaq_laneq_f32(_sum00, _k22_0, _r2, 2); _sum10 = vfmaq_laneq_f32(_sum10, _k00_1, _r0, 0); _sum10 = vfmaq_laneq_f32(_sum10, _k01_1, _r0, 1); _sum10 = vfmaq_laneq_f32(_sum10, _k02_1, _r0, 2); _sum10 = vfmaq_laneq_f32(_sum10, _k10_1, _r1, 0); _sum10 = vfmaq_laneq_f32(_sum10, _k11_1, _r1, 1); _sum10 = vfmaq_laneq_f32(_sum10, _k12_1, _r1, 2); _sum10 = vfmaq_laneq_f32(_sum10, _k20_1, _r2, 0); _sum10 = vfmaq_laneq_f32(_sum10, _k21_1, _r2, 1); _sum10 = vfmaq_laneq_f32(_sum10, _k22_1, _r2, 2); vst1q_f32(outptr0, _sum00); vst1q_f32(outptr0 + 4, _sum10); r0 += 1; r1 += 1; r2 += 1; outptr0 += 8; } r0 += 2; r1 += 2; r2 += 2; } k0 += 9 * 4; k1 += 9 * 4; } for (; q < inch; q++) { unsigned short* outptr0_bf16 = top_blob.channel(p); unsigned short* outptr1_bf16 = top_blob.channel(p + 1); const float* outptr0 = out0; const Mat img0 = bottom_blob.channel(q); const unsigned short* r0 = img0.row<const unsigned short>(0); const unsigned short* r1 = img0.row<const unsigned short>(1); const unsigned short* r2 = img0.row<const unsigned short>(2); float32x4_t _k00_0 = vcvt_f32_bf16(vld1_u16(k0)); float32x4_t _k01_0 = vcvt_f32_bf16(vld1_u16(k0 + 4)); float32x4_t _k02_0 = vcvt_f32_bf16(vld1_u16(k0 + 8)); float32x4_t _k10_0 = vcvt_f32_bf16(vld1_u16(k0 + 12)); 
float32x4_t _k11_0 = vcvt_f32_bf16(vld1_u16(k0 + 16)); float32x4_t _k12_0 = vcvt_f32_bf16(vld1_u16(k0 + 20)); float32x4_t _k20_0 = vcvt_f32_bf16(vld1_u16(k0 + 24)); float32x4_t _k21_0 = vcvt_f32_bf16(vld1_u16(k0 + 28)); float32x4_t _k22_0 = vcvt_f32_bf16(vld1_u16(k0 + 32)); float32x4_t _k00_1 = vcvt_f32_bf16(vld1_u16(k1)); float32x4_t _k01_1 = vcvt_f32_bf16(vld1_u16(k1 + 4)); float32x4_t _k02_1 = vcvt_f32_bf16(vld1_u16(k1 + 8)); float32x4_t _k10_1 = vcvt_f32_bf16(vld1_u16(k1 + 12)); float32x4_t _k11_1 = vcvt_f32_bf16(vld1_u16(k1 + 16)); float32x4_t _k12_1 = vcvt_f32_bf16(vld1_u16(k1 + 20)); float32x4_t _k20_1 = vcvt_f32_bf16(vld1_u16(k1 + 24)); float32x4_t _k21_1 = vcvt_f32_bf16(vld1_u16(k1 + 28)); float32x4_t _k22_1 = vcvt_f32_bf16(vld1_u16(k1 + 32)); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { asm volatile( "prfm pldl1keep, [%3, #64] \n" "ld1 {v0.4h}, [%3], #8 \n" "ld1 {v1.s}[0], [%3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" "fmla v24.4s, %12.4s, v0.s[0] \n" "fmla v25.4s, %12.4s, v0.s[1] \n" "fmla v26.4s, %12.4s, v0.s[2] \n" "fmla v27.4s, %12.4s, v0.s[3] \n" "fmla v28.4s, %21.4s, v0.s[0] \n" "fmla v29.4s, %21.4s, v0.s[1] \n" "fmla v30.4s, %21.4s, v0.s[2] \n" "fmla v31.4s, %21.4s, v0.s[3] \n" "fmla v24.4s, %13.4s, v0.s[1] \n" "fmla v25.4s, %13.4s, v0.s[2] \n" "fmla v26.4s, %13.4s, v0.s[3] \n" "fmla v27.4s, %13.4s, v1.s[0] \n" "fmla v28.4s, %22.4s, v0.s[1] \n" "fmla v29.4s, %22.4s, v0.s[2] \n" "fmla v30.4s, %22.4s, v0.s[3] \n" "fmla v31.4s, %22.4s, v1.s[0] \n" "prfm pldl1keep, [%4, #64] \n" "ld1 {v2.4h}, [%4], #8 \n" "ld1 {v3.s}[0], [%4] \n" "fmla v24.4s, %14.4s, v0.s[2] \n" "fmla v25.4s, %14.4s, v0.s[3] \n" "fmla v26.4s, %14.4s, v1.s[0] \n" "fmla v27.4s, %14.4s, v1.s[1] \n" "fmla v28.4s, %23.4s, v0.s[2] \n" "fmla v29.4s, %23.4s, v0.s[3] \n" "fmla 
v30.4s, %23.4s, v1.s[0] \n" "fmla v31.4s, %23.4s, v1.s[1] \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v24.4s, %15.4s, v2.s[0] \n" "fmla v25.4s, %15.4s, v2.s[1] \n" "fmla v26.4s, %15.4s, v2.s[2] \n" "fmla v27.4s, %15.4s, v2.s[3] \n" "fmla v28.4s, %24.4s, v2.s[0] \n" "fmla v29.4s, %24.4s, v2.s[1] \n" "fmla v30.4s, %24.4s, v2.s[2] \n" "fmla v31.4s, %24.4s, v2.s[3] \n" "fmla v24.4s, %16.4s, v2.s[1] \n" "fmla v25.4s, %16.4s, v2.s[2] \n" "fmla v26.4s, %16.4s, v2.s[3] \n" "fmla v27.4s, %16.4s, v3.s[0] \n" "fmla v28.4s, %25.4s, v2.s[1] \n" "fmla v29.4s, %25.4s, v2.s[2] \n" "fmla v30.4s, %25.4s, v2.s[3] \n" "fmla v31.4s, %25.4s, v3.s[0] \n" "prfm pldl1keep, [%5, #64] \n" "ld1 {v0.4h}, [%5], #8 \n" "ld1 {v1.s}[0], [%5] \n" "fmla v24.4s, %17.4s, v2.s[2] \n" "fmla v25.4s, %17.4s, v2.s[3] \n" "fmla v26.4s, %17.4s, v3.s[0] \n" "fmla v27.4s, %17.4s, v3.s[1] \n" "fmla v28.4s, %26.4s, v2.s[2] \n" "fmla v29.4s, %26.4s, v2.s[3] \n" "fmla v30.4s, %26.4s, v3.s[0] \n" "fmla v31.4s, %26.4s, v3.s[1] \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "fmla v24.4s, %18.4s, v0.s[0] \n" "fmla v25.4s, %18.4s, v0.s[1] \n" "fmla v26.4s, %18.4s, v0.s[2] \n" "fmla v27.4s, %18.4s, v0.s[3] \n" "fmla v28.4s, %27.4s, v0.s[0] \n" "fmla v29.4s, %27.4s, v0.s[1] \n" "fmla v30.4s, %27.4s, v0.s[2] \n" "fmla v31.4s, %27.4s, v0.s[3] \n" "fmla v24.4s, %19.4s, v0.s[1] \n" "fmla v25.4s, %19.4s, v0.s[2] \n" "fmla v26.4s, %19.4s, v0.s[3] \n" "fmla v27.4s, %19.4s, v1.s[0] \n" "fmla v28.4s, %28.4s, v0.s[1] \n" "fmla v29.4s, %28.4s, v0.s[2] \n" "fmla v30.4s, %28.4s, v0.s[3] \n" "fmla v31.4s, %28.4s, v1.s[0] \n" "fmla v24.4s, %20.4s, v0.s[2] \n" "fmla v25.4s, %20.4s, v0.s[3] \n" "fmla v26.4s, %20.4s, v1.s[0] \n" "fmla v27.4s, %20.4s, v1.s[1] \n" "fmla v28.4s, %29.4s, v0.s[2] \n" "fmla v29.4s, %29.4s, v0.s[3] \n" "fmla v30.4s, %29.4s, v1.s[0] \n" "fmla v31.4s, %29.4s, v1.s[1] \n" "shrn v24.4h, v24.4s, #16 \n" "shrn v25.4h, v25.4s, #16 \n" "shrn v26.4h, v26.4s, #16 \n" "shrn v27.4h, 
v27.4s, #16 \n" "shrn v28.4h, v28.4s, #16 \n" "shrn v29.4h, v29.4s, #16 \n" "shrn v30.4h, v30.4s, #16 \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%0], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr1_bf16), // %1 "=r"(outptr0), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2) // %5 : "0"(outptr0_bf16), "1"(outptr1_bf16), "2"(outptr0), "3"(r0), "4"(r1), "5"(r2), "w"(_k00_0), // %12 "w"(_k01_0), // %13 "w"(_k02_0), // %14 "w"(_k10_0), // %15 "w"(_k11_0), // %16 "w"(_k12_0), // %17 "w"(_k20_0), // %18 "w"(_k21_0), // %19 "w"(_k22_0), // %20 "w"(_k00_1), // %21 "w"(_k01_1), // %22 "w"(_k02_1), // %23 "w"(_k10_1), // %24 "w"(_k11_1), // %25 "w"(_k12_1), // %26 "w"(_k20_1), // %27 "w"(_k21_1), // %28 "w"(_k22_1) // %29 : "memory", "v0", "v1", "v2", "v3", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; j + 1 < outw; j += 2) { asm volatile( "prfm pldl1keep, [%3, #64] \n" "ld1 {v0.4h}, [%3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "shll v0.4s, v0.4h, #16 \n" "fmla v24.4s, %12.4s, v0.s[0] \n" "fmla v25.4s, %12.4s, v0.s[1] \n" "fmla v26.4s, %21.4s, v0.s[0] \n" "fmla v27.4s, %21.4s, v0.s[1] \n" "prfm pldl1keep, [%4, #64] \n" "ld1 {v1.4h}, [%4] \n" "fmla v24.4s, %13.4s, v0.s[1] \n" "fmla v25.4s, %13.4s, v0.s[2] \n" "fmla v26.4s, %22.4s, v0.s[1] \n" "fmla v27.4s, %22.4s, v0.s[2] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v24.4s, %14.4s, v0.s[2] \n" "fmla v25.4s, %14.4s, v0.s[3] \n" "fmla v26.4s, %23.4s, v0.s[2] \n" "fmla v27.4s, %23.4s, v0.s[3] \n" "fmla v24.4s, %15.4s, v1.s[0] \n" "fmla v25.4s, %15.4s, v1.s[1] \n" "fmla v26.4s, %24.4s, v1.s[0] \n" "fmla v27.4s, %24.4s, v1.s[1] \n" "prfm pldl1keep, [%5, #64] \n" "ld1 {v0.4h}, [%5] \n" "fmla v24.4s, %16.4s, v1.s[1] \n" "fmla v25.4s, %16.4s, v1.s[2] \n" "fmla v26.4s, %25.4s, v1.s[1] \n" "fmla v27.4s, %25.4s, v1.s[2] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v24.4s, %17.4s, v1.s[2] \n" "fmla 
v25.4s, %17.4s, v1.s[3] \n" "fmla v26.4s, %26.4s, v1.s[2] \n" "fmla v27.4s, %26.4s, v1.s[3] \n" "fmla v24.4s, %18.4s, v0.s[0] \n" "fmla v25.4s, %18.4s, v0.s[1] \n" "fmla v26.4s, %27.4s, v0.s[0] \n" "fmla v27.4s, %27.4s, v0.s[1] \n" "fmla v24.4s, %19.4s, v0.s[1] \n" "fmla v25.4s, %19.4s, v0.s[2] \n" "fmla v26.4s, %28.4s, v0.s[1] \n" "fmla v27.4s, %28.4s, v0.s[2] \n" "add %3, %3, #4 \n" "fmla v24.4s, %20.4s, v0.s[2] \n" "fmla v25.4s, %20.4s, v0.s[3] \n" "fmla v26.4s, %29.4s, v0.s[2] \n" "fmla v27.4s, %29.4s, v0.s[3] \n" "add %4, %4, #4 \n" "shrn v24.4h, v24.4s, #16 \n" "shrn v25.4h, v25.4s, #16 \n" "shrn v26.4h, v26.4s, #16 \n" "shrn v27.4h, v27.4s, #16 \n" "add %5, %5, #4 \n" "st1 {v24.4h, v25.4h}, [%0], #16 \n" "st1 {v26.4h, v27.4h}, [%1], #16 \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr1_bf16), // %1 "=r"(outptr0), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2) // %5 : "0"(outptr0_bf16), "1"(outptr1_bf16), "2"(outptr0), "3"(r0), "4"(r1), "5"(r2), "w"(_k00_0), // %12 "w"(_k01_0), // %13 "w"(_k02_0), // %14 "w"(_k10_0), // %15 "w"(_k11_0), // %16 "w"(_k12_0), // %17 "w"(_k20_0), // %18 "w"(_k21_0), // %19 "w"(_k22_0), // %20 "w"(_k00_1), // %21 "w"(_k01_1), // %22 "w"(_k02_1), // %23 "w"(_k10_1), // %24 "w"(_k11_1), // %25 "w"(_k12_1), // %26 "w"(_k20_1), // %27 "w"(_k21_1), // %28 "w"(_k22_1) // %29 : "memory", "v0", "v1", "v24", "v25", "v26", "v27"); } for (; j < outw; j++) { float32x4_t _sum00 = vld1q_f32(outptr0); float32x4_t _sum10 = vld1q_f32(outptr0 + 4); float32x4_t _r0 = vcvt_f32_bf16(vld1_u16(r0)); float32x4_t _r1 = vcvt_f32_bf16(vld1_u16(r1)); float32x4_t _r2 = vcvt_f32_bf16(vld1_u16(r2)); _sum00 = vfmaq_laneq_f32(_sum00, _k00_0, _r0, 0); _sum00 = vfmaq_laneq_f32(_sum00, _k01_0, _r0, 1); _sum00 = vfmaq_laneq_f32(_sum00, _k02_0, _r0, 2); _sum00 = vfmaq_laneq_f32(_sum00, _k10_0, _r1, 0); _sum00 = vfmaq_laneq_f32(_sum00, _k11_0, _r1, 1); _sum00 = vfmaq_laneq_f32(_sum00, _k12_0, _r1, 2); _sum00 = vfmaq_laneq_f32(_sum00, _k20_0, _r2, 0); _sum00 = 
vfmaq_laneq_f32(_sum00, _k21_0, _r2, 1); _sum00 = vfmaq_laneq_f32(_sum00, _k22_0, _r2, 2); _sum10 = vfmaq_laneq_f32(_sum10, _k00_1, _r0, 0); _sum10 = vfmaq_laneq_f32(_sum10, _k01_1, _r0, 1); _sum10 = vfmaq_laneq_f32(_sum10, _k02_1, _r0, 2); _sum10 = vfmaq_laneq_f32(_sum10, _k10_1, _r1, 0); _sum10 = vfmaq_laneq_f32(_sum10, _k11_1, _r1, 1); _sum10 = vfmaq_laneq_f32(_sum10, _k12_1, _r1, 2); _sum10 = vfmaq_laneq_f32(_sum10, _k20_1, _r2, 0); _sum10 = vfmaq_laneq_f32(_sum10, _k21_1, _r2, 1); _sum10 = vfmaq_laneq_f32(_sum10, _k22_1, _r2, 2); vst1_u16(outptr0_bf16, vcvt_bf16_f32(_sum00)); vst1_u16(outptr1_bf16, vcvt_bf16_f32(_sum10)); r0 += 1; r1 += 1; r2 += 1; outptr0 += 8; outptr0_bf16 += 4; outptr1_bf16 += 4; } r0 += 2; r1 += 2; r2 += 2; } k0 += 9 * 4; k1 += 9 * 4; } } #endif // __ARM_NEON && __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out0 = top_blob_fp32.channel(get_omp_thread_num()); float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f); out0.fill(_bias0); const unsigned short* k0 = kernel.channel(p); int q = 0; for (; q < inch - 1; q++) { float* outptr0 = out0.row(0); const Mat img0 = bottom_blob.channel(q); const unsigned short* r0 = img0.row<unsigned short>(0); const unsigned short* r1 = img0.row<unsigned short>(1); const unsigned short* r2 = img0.row<unsigned short>(2); float32x4_t _k00 = vcvt_f32_bf16(vld1_u16(k0)); float32x4_t _k01 = vcvt_f32_bf16(vld1_u16(k0 + 4)); float32x4_t _k02 = vcvt_f32_bf16(vld1_u16(k0 + 8)); float32x4_t _k10 = vcvt_f32_bf16(vld1_u16(k0 + 12)); float32x4_t _k11 = vcvt_f32_bf16(vld1_u16(k0 + 16)); float32x4_t _k12 = vcvt_f32_bf16(vld1_u16(k0 + 20)); float32x4_t _k20 = vcvt_f32_bf16(vld1_u16(k0 + 24)); float32x4_t _k21 = vcvt_f32_bf16(vld1_u16(k0 + 28)); float32x4_t _k22 = vcvt_f32_bf16(vld1_u16(k0 + 32)); int i = 0; for (; i < outh; i++) { int j = 0; #if __aarch64__ for (; j + 7 < outw; j += 8) { asm volatile( "prfm pldl1keep, [%0, 
#512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n" // "prfm pldl1keep, [%0, #512] \n" "ld1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0] \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v0.4h, v1.4h}, [%1], #16 \n" "ld1 {v2.s}[0], [%1] \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "fmla v24.4s, %8.4s, v0.s[0] \n" "fmla v25.4s, %8.4s, v0.s[1] \n" "fmla v26.4s, %8.4s, v0.s[2] \n" "fmla v27.4s, %8.4s, v0.s[3] \n" "fmla v28.4s, %8.4s, v1.s[0] \n" "fmla v29.4s, %8.4s, v1.s[1] \n" "fmla v30.4s, %8.4s, v1.s[2] \n" "fmla v31.4s, %8.4s, v1.s[3] \n" "fmla v24.4s, %9.4s, v0.s[1] \n" "fmla v25.4s, %9.4s, v0.s[2] \n" "fmla v26.4s, %9.4s, v0.s[3] \n" "fmla v27.4s, %9.4s, v1.s[0] \n" "fmla v28.4s, %9.4s, v1.s[1] \n" "fmla v29.4s, %9.4s, v1.s[2] \n" "fmla v30.4s, %9.4s, v1.s[3] \n" "fmla v31.4s, %9.4s, v2.s[0] \n" "fmla v24.4s, %10.4s, v0.s[2] \n" "fmla v25.4s, %10.4s, v0.s[3] \n" "fmla v26.4s, %10.4s, v1.s[0] \n" "fmla v27.4s, %10.4s, v1.s[1] \n" "fmla v28.4s, %10.4s, v1.s[2] \n" "fmla v29.4s, %10.4s, v1.s[3] \n" "fmla v30.4s, %10.4s, v2.s[0] \n" "fmla v31.4s, %10.4s, v2.s[1] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v4.4h, v5.4h}, [%2], #16 \n" "ld1 {v2.s}[0], [%2] \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "fmla v24.4s, %11.4s, v4.s[0] \n" "fmla v25.4s, %11.4s, v4.s[1] \n" "fmla v26.4s, %11.4s, v4.s[2] \n" "fmla v27.4s, %11.4s, v4.s[3] \n" "fmla v28.4s, %11.4s, v5.s[0] \n" "fmla v29.4s, %11.4s, v5.s[1] \n" "fmla v30.4s, %11.4s, v5.s[2] \n" "fmla v31.4s, %11.4s, v5.s[3] \n" "fmla v24.4s, %12.4s, v4.s[1] \n" "fmla v25.4s, %12.4s, v4.s[2] \n" "fmla v26.4s, %12.4s, v4.s[3] \n" "fmla v27.4s, %12.4s, v5.s[0] \n" "fmla v28.4s, %12.4s, v5.s[1] \n" "fmla v29.4s, %12.4s, v5.s[2] \n" "fmla v30.4s, %12.4s, v5.s[3] \n" "fmla v31.4s, %12.4s, v2.s[0] \n" "fmla v24.4s, %13.4s, v4.s[2] \n" "fmla v25.4s, %13.4s, v4.s[3] \n" "fmla v26.4s, %13.4s, v5.s[0] \n" "fmla v27.4s, %13.4s, v5.s[1] \n" "fmla v28.4s, %13.4s, 
v5.s[2] \n" "fmla v29.4s, %13.4s, v5.s[3] \n" "fmla v30.4s, %13.4s, v2.s[0] \n" "fmla v31.4s, %13.4s, v2.s[1] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4h, v1.4h}, [%3], #16 \n" "ld1 {v2.s}[0], [%3] \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "fmla v24.4s, %14.4s, v0.s[0] \n" "fmla v25.4s, %14.4s, v0.s[1] \n" "fmla v26.4s, %14.4s, v0.s[2] \n" "fmla v27.4s, %14.4s, v0.s[3] \n" "fmla v28.4s, %14.4s, v1.s[0] \n" "fmla v29.4s, %14.4s, v1.s[1] \n" "fmla v30.4s, %14.4s, v1.s[2] \n" "fmla v31.4s, %14.4s, v1.s[3] \n" "fmla v24.4s, %15.4s, v0.s[1] \n" "fmla v25.4s, %15.4s, v0.s[2] \n" "fmla v26.4s, %15.4s, v0.s[3] \n" "fmla v27.4s, %15.4s, v1.s[0] \n" "fmla v28.4s, %15.4s, v1.s[1] \n" "fmla v29.4s, %15.4s, v1.s[2] \n" "fmla v30.4s, %15.4s, v1.s[3] \n" "fmla v31.4s, %15.4s, v2.s[0] \n" "sub %0, %0, #64 \n" "fmla v24.4s, %16.4s, v0.s[2] \n" "fmla v25.4s, %16.4s, v0.s[3] \n" "fmla v26.4s, %16.4s, v1.s[0] \n" "fmla v27.4s, %16.4s, v1.s[1] \n" "fmla v28.4s, %16.4s, v1.s[2] \n" "fmla v29.4s, %16.4s, v1.s[3] \n" "fmla v30.4s, %16.4s, v2.s[0] \n" "fmla v31.4s, %16.4s, v2.s[1] \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "v0", "v1", "v2", "v4", "v5", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } #endif // __aarch64__ for (; j + 3 < outw; j += 4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #64] \n" "ld1 {v0.4h}, [%1], #8 \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0] \n" "shll v0.4s, v0.4h, #16 \n" "ld1 {v1.s}[0], [%1] \n" "fmla v24.4s, %8.4s, v0.s[0] \n" "fmla v25.4s, %8.4s, v0.s[1] \n" "fmla v26.4s, %8.4s, v0.s[2] \n" "fmla v27.4s, %8.4s, 
v0.s[3] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v24.4s, %9.4s, v0.s[1] \n" "fmla v25.4s, %9.4s, v0.s[2] \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v2.4h}, [%2], #8 \n" "fmla v26.4s, %9.4s, v0.s[3] \n" "fmla v27.4s, %9.4s, v1.s[0] \n" "ld1 {v3.s}[0], [%2] \n" "fmla v24.4s, %10.4s, v0.s[2] \n" "fmla v25.4s, %10.4s, v0.s[3] \n" "shll v2.4s, v2.4h, #16 \n" "fmla v26.4s, %10.4s, v1.s[0] \n" "fmla v27.4s, %10.4s, v1.s[1] \n" "fmla v24.4s, %11.4s, v2.s[0] \n" "fmla v25.4s, %11.4s, v2.s[1] \n" "fmla v26.4s, %11.4s, v2.s[2] \n" "fmla v27.4s, %11.4s, v2.s[3] \n" "shll v3.4s, v3.4h, #16 \n" "fmla v24.4s, %12.4s, v2.s[1] \n" "fmla v25.4s, %12.4s, v2.s[2] \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v0.4h}, [%3], #8 \n" "fmla v26.4s, %12.4s, v2.s[3] \n" "fmla v27.4s, %12.4s, v3.s[0] \n" "ld1 {v1.s}[0], [%3] \n" "fmla v24.4s, %13.4s, v2.s[2] \n" "fmla v25.4s, %13.4s, v2.s[3] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v26.4s, %13.4s, v3.s[0] \n" "fmla v27.4s, %13.4s, v3.s[1] \n" "fmla v24.4s, %14.4s, v0.s[0] \n" "fmla v25.4s, %14.4s, v0.s[1] \n" "fmla v26.4s, %14.4s, v0.s[2] \n" "fmla v27.4s, %14.4s, v0.s[3] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v24.4s, %15.4s, v0.s[1] \n" "fmla v25.4s, %15.4s, v0.s[2] \n" "fmla v26.4s, %15.4s, v0.s[3] \n" "fmla v27.4s, %15.4s, v1.s[0] \n" "fmla v24.4s, %16.4s, v0.s[2] \n" "fmla v25.4s, %16.4s, v0.s[3] \n" "fmla v26.4s, %16.4s, v1.s[0] \n" "fmla v27.4s, %16.4s, v1.s[1] \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "v0", "v1", "v2", "v3", "v24", "v25", "v26", "v27"); #else // __aarch64__ asm volatile( "pld [%0, #512] \n" "vldm %0, {d24-d31} \n" "pld [%1, #64] \n" "vld1.u16 {d1}, [%1]! 
\n" "vld1.u32 {d2[0]}, [%1] \n" "vshll.u16 q0, d1, #16 \n" "vshll.u16 q1, d2, #16 \n" "vmla.f32 q12, %q8, d0[0] \n" "vmla.f32 q13, %q8, d0[1] \n" "vmla.f32 q14, %q8, d1[0] \n" "vmla.f32 q15, %q8, d1[1] \n" "vmla.f32 q12, %q9, d0[1] \n" "vmla.f32 q13, %q9, d1[0] \n" "vmla.f32 q14, %q9, d1[1] \n" "vmla.f32 q15, %q9, d2[0] \n" "vmla.f32 q12, %q10, d1[0] \n" "vmla.f32 q13, %q10, d1[1] \n" "vmla.f32 q14, %q10, d2[0] \n" "vmla.f32 q15, %q10, d2[1] \n" "pld [%2, #64] \n" "vld1.u16 {d5}, [%2]! \n" "vld1.u32 {d3[0]}, [%2] \n" "vshll.u16 q2, d5, #16 \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q12, %q11, d4[0] \n" "vmla.f32 q13, %q11, d4[1] \n" "vmla.f32 q14, %q11, d5[0] \n" "vmla.f32 q15, %q11, d5[1] \n" "vmla.f32 q12, %q12, d4[1] \n" "vmla.f32 q13, %q12, d5[0] \n" "vmla.f32 q14, %q12, d5[1] \n" "vmla.f32 q15, %q12, d2[0] \n" "vmla.f32 q12, %q13, d5[0] \n" "vmla.f32 q13, %q13, d5[1] \n" "vmla.f32 q14, %q13, d2[0] \n" "vmla.f32 q15, %q13, d2[1] \n" "pld [%3, #64] \n" "vld1.u16 {d1}, [%3]! \n" "vld1.u32 {d2[0]}, [%3] \n" "vshll.u16 q0, d1, #16 \n" "vshll.u16 q1, d2, #16 \n" "vmla.f32 q12, %q14, d0[0] \n" "vmla.f32 q13, %q14, d0[1] \n" "vmla.f32 q14, %q14, d1[0] \n" "vmla.f32 q15, %q14, d1[1] \n" "vmla.f32 q12, %q15, d0[1] \n" "vmla.f32 q13, %q15, d1[0] \n" "vmla.f32 q14, %q15, d1[1] \n" "vmla.f32 q15, %q15, d2[0] \n" "vmla.f32 q12, %q16, d1[0] \n" "vmla.f32 q13, %q16, d1[1] \n" "vmla.f32 q14, %q16, d2[0] \n" "vmla.f32 q15, %q16, d2[1] \n" "vstm %0!, {d24-d31} \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "q0", "q1", "q2", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; j + 1 < outw; j += 2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #64] \n" "ld1 {v0.4h}, [%1] \n" "prfm pldl1keep, [%0, #256] \n" "ld1 {v28.4s, v29.4s}, 
[%0] \n" "shll v0.4s, v0.4h, #16 \n" "fmul v24.4s, %8.4s, v0.s[0] \n" "fmul v25.4s, %8.4s, v0.s[1] \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v1.4h}, [%2] \n" "fmul v26.4s, %9.4s, v0.s[1] \n" "fmul v27.4s, %9.4s, v0.s[2] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v28.4s, %10.4s, v0.s[2] \n" "fmla v29.4s, %10.4s, v0.s[3] \n" "fmla v24.4s, %11.4s, v1.s[0] \n" "fmla v25.4s, %11.4s, v1.s[1] \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v0.4h}, [%3] \n" "fmla v26.4s, %12.4s, v1.s[1] \n" "fmla v27.4s, %12.4s, v1.s[2] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v28.4s, %13.4s, v1.s[2] \n" "fmla v29.4s, %13.4s, v1.s[3] \n" "fmla v24.4s, %14.4s, v0.s[0] \n" "fmla v25.4s, %14.4s, v0.s[1] \n" "fmla v26.4s, %15.4s, v0.s[1] \n" "fmla v27.4s, %15.4s, v0.s[2] \n" "fmla v28.4s, %16.4s, v0.s[2] \n" "fmla v29.4s, %16.4s, v0.s[3] \n" "add %1, %1, #4 \n" "fadd v24.4s, v24.4s, v26.4s \n" "fadd v25.4s, v25.4s, v27.4s \n" "add %2, %2, #4 \n" "fadd v28.4s, v28.4s, v24.4s \n" "fadd v29.4s, v29.4s, v25.4s \n" "add %3, %3, #4 \n" "st1 {v28.4s, v29.4s}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "v0", "v1", "v24", "v25", "v26", "v27", "v28", "v29"); #else // __aarch64__ asm volatile( "pld [%1, #64] \n" "vld1.u16 {d1}, [%1] \n" "pld [%0, #256] \n" "vld1.f32 {d24-d27}, [%0 :128] \n" "vshll.u16 q0, d1, #16 \n" "vmul.f32 q14, %q8, d0[0] \n" "vmul.f32 q15, %q8, d0[1] \n" "vmla.f32 q12, %q9, d0[1] \n" "vmla.f32 q13, %q9, d1[0] \n" "pld [%2, #64] \n" "vld1.u16 {d3}, [%2] \n" "vmla.f32 q14, %q10, d1[0] \n" "vmla.f32 q15, %q10, d1[1] \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q12, %q11, d2[0] \n" "vmla.f32 q13, %q11, d2[1] \n" "vmla.f32 q14, %q12, d2[1] \n" "vmla.f32 q15, %q12, d3[0] \n" "pld [%3, #64] \n" "vld1.u16 {d1}, [%3] \n" "vmla.f32 q12, %q13, d3[0] \n" "vmla.f32 
q13, %q13, d3[1] \n" "vshll.u16 q0, d1, #16 \n" "vmla.f32 q14, %q14, d0[0] \n" "vmla.f32 q15, %q14, d0[1] \n" "vmla.f32 q12, %q15, d0[1] \n" "vmla.f32 q13, %q15, d1[0] \n" "add %1, %1, #4 \n" "vmla.f32 q14, %q16, d1[0] \n" "vmla.f32 q15, %q16, d1[1] \n" "add %2, %2, #4 \n" "vadd.f32 q12, q12, q14 \n" "vadd.f32 q13, q13, q15 \n" "add %3, %3, #4 \n" "vst1.f32 {d24-d27}, [%0 :128]! \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "q0", "q1", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; j < outw; j++) { float32x4_t _sum0 = vld1q_f32(outptr0); float32x4_t _r0 = vcvt_f32_bf16(vld1_u16(r0)); float32x4_t _r1 = vcvt_f32_bf16(vld1_u16(r1)); float32x4_t _r2 = vcvt_f32_bf16(vld1_u16(r2)); #if __aarch64__ _sum0 = vfmaq_laneq_f32(_sum0, _k00, _r0, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k01, _r0, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k02, _r0, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k10, _r1, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k11, _r1, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k12, _r1, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k20, _r2, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k21, _r2, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k22, _r2, 2); #else _sum0 = vmlaq_lane_f32(_sum0, _k00, vget_low_f32(_r0), 0); _sum0 = vmlaq_lane_f32(_sum0, _k01, vget_low_f32(_r0), 1); _sum0 = vmlaq_lane_f32(_sum0, _k02, vget_high_f32(_r0), 0); _sum0 = vmlaq_lane_f32(_sum0, _k10, vget_low_f32(_r1), 0); _sum0 = vmlaq_lane_f32(_sum0, _k11, vget_low_f32(_r1), 1); _sum0 = vmlaq_lane_f32(_sum0, _k12, vget_high_f32(_r1), 0); _sum0 = vmlaq_lane_f32(_sum0, _k20, vget_low_f32(_r2), 0); _sum0 = vmlaq_lane_f32(_sum0, _k21, vget_low_f32(_r2), 1); _sum0 = vmlaq_lane_f32(_sum0, _k22, vget_high_f32(_r2), 0); #endif vst1q_f32(outptr0, _sum0); r0 += 1; r1 += 1; r2 += 1; outptr0 += 4; } r0 += 2; r1 
+= 2; r2 += 2; } k0 += 9 * 4; } for (; q < inch; q++) { unsigned short* outptr0_bf16 = top_blob.channel(p); const float* outptr0 = out0.row(0); const Mat img0 = bottom_blob.channel(q); const unsigned short* r0 = img0.row<unsigned short>(0); const unsigned short* r1 = img0.row<unsigned short>(1); const unsigned short* r2 = img0.row<unsigned short>(2); float32x4_t _k00 = vcvt_f32_bf16(vld1_u16(k0)); float32x4_t _k01 = vcvt_f32_bf16(vld1_u16(k0 + 4)); float32x4_t _k02 = vcvt_f32_bf16(vld1_u16(k0 + 8)); float32x4_t _k10 = vcvt_f32_bf16(vld1_u16(k0 + 12)); float32x4_t _k11 = vcvt_f32_bf16(vld1_u16(k0 + 16)); float32x4_t _k12 = vcvt_f32_bf16(vld1_u16(k0 + 20)); float32x4_t _k20 = vcvt_f32_bf16(vld1_u16(k0 + 24)); float32x4_t _k21 = vcvt_f32_bf16(vld1_u16(k0 + 28)); float32x4_t _k22 = vcvt_f32_bf16(vld1_u16(k0 + 32)); int i = 0; for (; i < outh; i++) { int j = 0; #if __aarch64__ for (; j + 7 < outw; j += 8) { asm volatile( "prfm pldl1keep, [%1, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%1], #64 \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%1], #64 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4h, v1.4h}, [%2], #16 \n" "ld1 {v2.s}[0], [%2] \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "fmla v24.4s, %10.4s, v0.s[0] \n" "fmla v25.4s, %10.4s, v0.s[1] \n" "fmla v26.4s, %10.4s, v0.s[2] \n" "fmla v27.4s, %10.4s, v0.s[3] \n" "fmla v28.4s, %10.4s, v1.s[0] \n" "fmla v29.4s, %10.4s, v1.s[1] \n" "fmla v30.4s, %10.4s, v1.s[2] \n" "fmla v31.4s, %10.4s, v1.s[3] \n" "fmla v24.4s, %11.4s, v0.s[1] \n" "fmla v25.4s, %11.4s, v0.s[2] \n" "fmla v26.4s, %11.4s, v0.s[3] \n" "fmla v27.4s, %11.4s, v1.s[0] \n" "fmla v28.4s, %11.4s, v1.s[1] \n" "fmla v29.4s, %11.4s, v1.s[2] \n" "fmla v30.4s, %11.4s, v1.s[3] \n" "fmla v31.4s, %11.4s, v2.s[0] \n" "fmla v24.4s, %12.4s, v0.s[2] \n" "fmla v25.4s, %12.4s, v0.s[3] \n" "fmla v26.4s, %12.4s, v1.s[0] \n" "fmla v27.4s, %12.4s, v1.s[1] \n" "fmla v28.4s, %12.4s, v1.s[2] \n" "fmla 
v29.4s, %12.4s, v1.s[3] \n" "fmla v30.4s, %12.4s, v2.s[0] \n" "fmla v31.4s, %12.4s, v2.s[1] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v4.4h, v5.4h}, [%3], #16 \n" "ld1 {v2.s}[0], [%3] \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "fmla v24.4s, %13.4s, v4.s[0] \n" "fmla v25.4s, %13.4s, v4.s[1] \n" "fmla v26.4s, %13.4s, v4.s[2] \n" "fmla v27.4s, %13.4s, v4.s[3] \n" "fmla v28.4s, %13.4s, v5.s[0] \n" "fmla v29.4s, %13.4s, v5.s[1] \n" "fmla v30.4s, %13.4s, v5.s[2] \n" "fmla v31.4s, %13.4s, v5.s[3] \n" "fmla v24.4s, %14.4s, v4.s[1] \n" "fmla v25.4s, %14.4s, v4.s[2] \n" "fmla v26.4s, %14.4s, v4.s[3] \n" "fmla v27.4s, %14.4s, v5.s[0] \n" "fmla v28.4s, %14.4s, v5.s[1] \n" "fmla v29.4s, %14.4s, v5.s[2] \n" "fmla v30.4s, %14.4s, v5.s[3] \n" "fmla v31.4s, %14.4s, v2.s[0] \n" "fmla v24.4s, %15.4s, v4.s[2] \n" "fmla v25.4s, %15.4s, v4.s[3] \n" "fmla v26.4s, %15.4s, v5.s[0] \n" "fmla v27.4s, %15.4s, v5.s[1] \n" "fmla v28.4s, %15.4s, v5.s[2] \n" "fmla v29.4s, %15.4s, v5.s[3] \n" "fmla v30.4s, %15.4s, v2.s[0] \n" "fmla v31.4s, %15.4s, v2.s[1] \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v0.4h, v1.4h}, [%4], #16 \n" "ld1 {v2.s}[0], [%4] \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "fmla v24.4s, %16.4s, v0.s[0] \n" "fmla v25.4s, %16.4s, v0.s[1] \n" "fmla v26.4s, %16.4s, v0.s[2] \n" "fmla v27.4s, %16.4s, v0.s[3] \n" "fmla v28.4s, %16.4s, v1.s[0] \n" "fmla v29.4s, %16.4s, v1.s[1] \n" "fmla v30.4s, %16.4s, v1.s[2] \n" "fmla v31.4s, %16.4s, v1.s[3] \n" "fmla v24.4s, %17.4s, v0.s[1] \n" "fmla v25.4s, %17.4s, v0.s[2] \n" "fmla v26.4s, %17.4s, v0.s[3] \n" "fmla v27.4s, %17.4s, v1.s[0] \n" "fmla v28.4s, %17.4s, v1.s[1] \n" "fmla v29.4s, %17.4s, v1.s[2] \n" "fmla v30.4s, %17.4s, v1.s[3] \n" "fmla v31.4s, %17.4s, v2.s[0] \n" "fmla v24.4s, %18.4s, v0.s[2] \n" "fmla v25.4s, %18.4s, v0.s[3] \n" "fmla v26.4s, %18.4s, v1.s[0] \n" "fmla v27.4s, %18.4s, v1.s[1] \n" "fmla v28.4s, %18.4s, v1.s[2] \n" "fmla v29.4s, %18.4s, 
v1.s[3] \n" "fmla v30.4s, %18.4s, v2.s[0] \n" "fmla v31.4s, %18.4s, v2.s[1] \n" "shrn v24.4h, v24.4s, #16 \n" "shrn v25.4h, v25.4s, #16 \n" "shrn v26.4h, v26.4s, #16 \n" "shrn v27.4h, v27.4s, #16 \n" "shrn v28.4h, v28.4s, #16 \n" "shrn v29.4h, v29.4s, #16 \n" "shrn v30.4h, v30.4s, #16 \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%0], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%0], #32 \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "w"(_k00), // %10 "w"(_k01), // %11 "w"(_k02), // %12 "w"(_k10), // %13 "w"(_k11), // %14 "w"(_k12), // %15 "w"(_k20), // %16 "w"(_k21), // %17 "w"(_k22) // %18 : "memory", "v0", "v1", "v2", "v4", "v5", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } #endif // __aarch64__ for (; j + 3 < outw; j += 4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%2, #64] \n" "ld1 {v0.4h}, [%2], #8 \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%1], #64 \n" "shll v0.4s, v0.4h, #16 \n" "ld1 {v1.s}[0], [%2] \n" "fmla v24.4s, %10.4s, v0.s[0] \n" "fmla v25.4s, %10.4s, v0.s[1] \n" "fmla v26.4s, %10.4s, v0.s[2] \n" "fmla v27.4s, %10.4s, v0.s[3] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v24.4s, %11.4s, v0.s[1] \n" "fmla v25.4s, %11.4s, v0.s[2] \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v2.4h}, [%3], #8 \n" "fmla v26.4s, %11.4s, v0.s[3] \n" "fmla v27.4s, %11.4s, v1.s[0] \n" "ld1 {v3.s}[0], [%3] \n" "fmla v24.4s, %12.4s, v0.s[2] \n" "fmla v25.4s, %12.4s, v0.s[3] \n" "shll v2.4s, v2.4h, #16 \n" "fmla v26.4s, %12.4s, v1.s[0] \n" "fmla v27.4s, %12.4s, v1.s[1] \n" "fmla v24.4s, %13.4s, v2.s[0] \n" "fmla v25.4s, %13.4s, v2.s[1] \n" "fmla v26.4s, %13.4s, v2.s[2] \n" "fmla v27.4s, %13.4s, v2.s[3] \n" "shll v3.4s, v3.4h, #16 \n" "fmla v24.4s, %14.4s, v2.s[1] \n" "fmla v25.4s, %14.4s, v2.s[2] \n" "prfm pldl1keep, [%4, #64] \n" "ld1 {v0.4h}, [%4], #8 \n" "fmla v26.4s, %14.4s, v2.s[3] \n" 
"fmla v27.4s, %14.4s, v3.s[0] \n" "ld1 {v1.s}[0], [%4] \n" "fmla v24.4s, %15.4s, v2.s[2] \n" "fmla v25.4s, %15.4s, v2.s[3] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v26.4s, %15.4s, v3.s[0] \n" "fmla v27.4s, %15.4s, v3.s[1] \n" "fmla v24.4s, %16.4s, v0.s[0] \n" "fmla v25.4s, %16.4s, v0.s[1] \n" "fmla v26.4s, %16.4s, v0.s[2] \n" "fmla v27.4s, %16.4s, v0.s[3] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v24.4s, %17.4s, v0.s[1] \n" "fmla v25.4s, %17.4s, v0.s[2] \n" "fmla v26.4s, %17.4s, v0.s[3] \n" "fmla v27.4s, %17.4s, v1.s[0] \n" "fmla v24.4s, %18.4s, v0.s[2] \n" "fmla v25.4s, %18.4s, v0.s[3] \n" "fmla v26.4s, %18.4s, v1.s[0] \n" "fmla v27.4s, %18.4s, v1.s[1] \n" "shrn v24.4h, v24.4s, #16 \n" "shrn v25.4h, v25.4s, #16 \n" "shrn v26.4h, v26.4s, #16 \n" "shrn v27.4h, v27.4s, #16 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%0], #32 \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "w"(_k00), // %10 "w"(_k01), // %11 "w"(_k02), // %12 "w"(_k10), // %13 "w"(_k11), // %14 "w"(_k12), // %15 "w"(_k20), // %16 "w"(_k21), // %17 "w"(_k22) // %18 : "memory", "v0", "v1", "v2", "v3", "v24", "v25", "v26", "v27"); #else // __aarch64__ asm volatile( "pld [%1, #512] \n" "vldm %1!, {d24-d31} \n" "pld [%2, #64] \n" "vld1.u16 {d1}, [%2]! \n" "vld1.u32 {d2[0]}, [%2] \n" "vshll.u16 q0, d1, #16 \n" "vshll.u16 q1, d2, #16 \n" "vmla.f32 q12, %q10, d0[0] \n" "vmla.f32 q13, %q10, d0[1] \n" "vmla.f32 q14, %q10, d1[0] \n" "vmla.f32 q15, %q10, d1[1] \n" "vmla.f32 q12, %q11, d0[1] \n" "vmla.f32 q13, %q11, d1[0] \n" "vmla.f32 q14, %q11, d1[1] \n" "vmla.f32 q15, %q11, d2[0] \n" "vmla.f32 q12, %q12, d1[0] \n" "vmla.f32 q13, %q12, d1[1] \n" "vmla.f32 q14, %q12, d2[0] \n" "vmla.f32 q15, %q12, d2[1] \n" "pld [%3, #64] \n" "vld1.u16 {d5}, [%3]! 
\n" "vld1.u32 {d3[0]}, [%3] \n" "vshll.u16 q2, d5, #16 \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q12, %q13, d4[0] \n" "vmla.f32 q13, %q13, d4[1] \n" "vmla.f32 q14, %q13, d5[0] \n" "vmla.f32 q15, %q13, d5[1] \n" "vmla.f32 q12, %q14, d4[1] \n" "vmla.f32 q13, %q14, d5[0] \n" "vmla.f32 q14, %q14, d5[1] \n" "vmla.f32 q15, %q14, d2[0] \n" "vmla.f32 q12, %q15, d5[0] \n" "vmla.f32 q13, %q15, d5[1] \n" "vmla.f32 q14, %q15, d2[0] \n" "vmla.f32 q15, %q15, d2[1] \n" "pld [%4, #64] \n" "vld1.u16 {d1}, [%4]! \n" "vld1.u32 {d2[0]}, [%4] \n" "vshll.u16 q0, d1, #16 \n" "vshll.u16 q1, d2, #16 \n" "vmla.f32 q12, %q16, d0[0] \n" "vmla.f32 q13, %q16, d0[1] \n" "vmla.f32 q14, %q16, d1[0] \n" "vmla.f32 q15, %q16, d1[1] \n" "vmla.f32 q12, %q17, d0[1] \n" "vmla.f32 q13, %q17, d1[0] \n" "vmla.f32 q14, %q17, d1[1] \n" "vmla.f32 q15, %q17, d2[0] \n" "vmla.f32 q12, %q18, d1[0] \n" "vmla.f32 q13, %q18, d1[1] \n" "vmla.f32 q14, %q18, d2[0] \n" "vmla.f32 q15, %q18, d2[1] \n" "vshrn.s32 d24, q12, #16 \n" "vshrn.s32 d25, q13, #16 \n" "vshrn.s32 d26, q14, #16 \n" "vshrn.s32 d27, q15, #16 \n" "vst1.u16 {d24-d27}, [%0 :64]! 
\n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "w"(_k00), // %10 "w"(_k01), // %11 "w"(_k02), // %12 "w"(_k10), // %13 "w"(_k11), // %14 "w"(_k12), // %15 "w"(_k20), // %16 "w"(_k21), // %17 "w"(_k22) // %18 : "memory", "q0", "q1", "q2", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; j + 1 < outw; j += 2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%2, #64] \n" "ld1 {v0.4h}, [%2] \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v28.4s, v29.4s}, [%1], #32 \n" "shll v0.4s, v0.4h, #16 \n" "fmul v24.4s, %10.4s, v0.s[0] \n" "fmul v25.4s, %10.4s, v0.s[1] \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v1.4h}, [%3] \n" "fmul v26.4s, %11.4s, v0.s[1] \n" "fmul v27.4s, %11.4s, v0.s[2] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v28.4s, %12.4s, v0.s[2] \n" "fmla v29.4s, %12.4s, v0.s[3] \n" "fmla v24.4s, %13.4s, v1.s[0] \n" "fmla v25.4s, %13.4s, v1.s[1] \n" "prfm pldl1keep, [%4, #64] \n" "ld1 {v0.4h}, [%4] \n" "fmla v26.4s, %14.4s, v1.s[1] \n" "fmla v27.4s, %14.4s, v1.s[2] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v28.4s, %15.4s, v1.s[2] \n" "fmla v29.4s, %15.4s, v1.s[3] \n" "fmla v24.4s, %16.4s, v0.s[0] \n" "fmla v25.4s, %16.4s, v0.s[1] \n" "fmla v26.4s, %17.4s, v0.s[1] \n" "fmla v27.4s, %17.4s, v0.s[2] \n" "fmla v28.4s, %18.4s, v0.s[2] \n" "fmla v29.4s, %18.4s, v0.s[3] \n" "add %2, %2, #4 \n" "fadd v24.4s, v24.4s, v26.4s \n" "fadd v25.4s, v25.4s, v27.4s \n" "add %3, %3, #4 \n" "fadd v28.4s, v28.4s, v24.4s \n" "fadd v29.4s, v29.4s, v25.4s \n" "add %4, %4, #4 \n" "shrn v28.4h, v28.4s, #16 \n" "shrn v29.4h, v29.4s, #16 \n" "st1 {v28.4h, v29.4h}, [%0], #16 \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "w"(_k00), // %10 "w"(_k01), // %11 "w"(_k02), // %12 "w"(_k10), // %13 "w"(_k11), // %14 "w"(_k12), // %15 "w"(_k20), // %16 "w"(_k21), // %17 "w"(_k22) // 
%18 : "memory", "v0", "v1", "v24", "v25", "v26", "v27", "v28", "v29"); #else // __aarch64__ asm volatile( "pld [%2, #64] \n" "vld1.u16 {d1}, [%2] \n" "pld [%1, #256] \n" "vld1.f32 {d24-d27}, [%1 :128]! \n" "vshll.u16 q0, d1, #16 \n" "vmul.f32 q14, %q10, d0[0] \n" "vmul.f32 q15, %q10, d0[1] \n" "vmla.f32 q12, %q11, d0[1] \n" "vmla.f32 q13, %q11, d1[0] \n" "pld [%3, #64] \n" "vld1.u16 {d3}, [%3] \n" "vmla.f32 q14, %q12, d1[0] \n" "vmla.f32 q15, %q12, d1[1] \n" "vshll.u16 q1, d3, #16 \n" "vmla.f32 q12, %q13, d2[0] \n" "vmla.f32 q13, %q13, d2[1] \n" "vmla.f32 q14, %q14, d2[1] \n" "vmla.f32 q15, %q14, d3[0] \n" "pld [%4, #64] \n" "vld1.u16 {d1}, [%4] \n" "vmla.f32 q12, %q15, d3[0] \n" "vmla.f32 q13, %q15, d3[1] \n" "vshll.u16 q0, d1, #16 \n" "vmla.f32 q14, %q16, d0[0] \n" "vmla.f32 q15, %q16, d0[1] \n" "vmla.f32 q12, %q17, d0[1] \n" "vmla.f32 q13, %q17, d1[0] \n" "add %2, %2, #4 \n" "vmla.f32 q14, %q18, d1[0] \n" "vmla.f32 q15, %q18, d1[1] \n" "add %3, %3, #4 \n" "vadd.f32 q12, q12, q14 \n" "vadd.f32 q13, q13, q15 \n" "add %4, %4, #4 \n" "vshrn.s32 d24, q12, #16 \n" "vshrn.s32 d25, q13, #16 \n" "vst1.f32 {d24-d25}, [%0 :64]! 
\n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "w"(_k00), // %10 "w"(_k01), // %11 "w"(_k02), // %12 "w"(_k10), // %13 "w"(_k11), // %14 "w"(_k12), // %15 "w"(_k20), // %16 "w"(_k21), // %17 "w"(_k22) // %18 : "memory", "q0", "q1", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; j < outw; j++) { float32x4_t _sum0 = vld1q_f32(outptr0); float32x4_t _r0 = vcvt_f32_bf16(vld1_u16(r0)); float32x4_t _r1 = vcvt_f32_bf16(vld1_u16(r1)); float32x4_t _r2 = vcvt_f32_bf16(vld1_u16(r2)); #if __aarch64__ _sum0 = vfmaq_laneq_f32(_sum0, _k00, _r0, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k01, _r0, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k02, _r0, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k10, _r1, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k11, _r1, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k12, _r1, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k20, _r2, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k21, _r2, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k22, _r2, 2); #else _sum0 = vmlaq_lane_f32(_sum0, _k00, vget_low_f32(_r0), 0); _sum0 = vmlaq_lane_f32(_sum0, _k01, vget_low_f32(_r0), 1); _sum0 = vmlaq_lane_f32(_sum0, _k02, vget_high_f32(_r0), 0); _sum0 = vmlaq_lane_f32(_sum0, _k10, vget_low_f32(_r1), 0); _sum0 = vmlaq_lane_f32(_sum0, _k11, vget_low_f32(_r1), 1); _sum0 = vmlaq_lane_f32(_sum0, _k12, vget_high_f32(_r1), 0); _sum0 = vmlaq_lane_f32(_sum0, _k20, vget_low_f32(_r2), 0); _sum0 = vmlaq_lane_f32(_sum0, _k21, vget_low_f32(_r2), 1); _sum0 = vmlaq_lane_f32(_sum0, _k22, vget_high_f32(_r2), 0); #endif vst1_u16(outptr0_bf16, vcvt_bf16_f32(_sum0)); r0 += 1; r1 += 1; r2 += 1; outptr0 += 4; outptr0_bf16 += 4; } r0 += 2; r1 += 2; r2 += 2; } k0 += 9 * 4; } } } static void conv3x3s2_pack1to4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; #if 
__ARM_NEON && __aarch64__ Mat top_blob_fp32(outw, outh, opt.num_threads, (size_t)4u * 4 * 2, 4 * 2, opt.workspace_allocator); #else Mat top_blob_fp32(outw, outh, opt.num_threads, (size_t)4u * 4, 4, opt.workspace_allocator); #endif const int tailstep = w - 2 * outw + w; const float* bias = _bias; int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ int nn_outch = 0; nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; Mat out0 = top_blob_fp32.channel(get_omp_thread_num()); float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f); float32x4_t _bias1 = bias ? vld1q_f32((const float*)bias + (p + 1) * 4) : vdupq_n_f32(0.f); { float* ptr = (float*)out0; for (int i = 0; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { vst1q_f32(ptr, _bias0); vst1q_f32(ptr + 4, _bias0); vst1q_f32(ptr + 8, _bias0); vst1q_f32(ptr + 12, _bias0); vst1q_f32(ptr + 16, _bias1); vst1q_f32(ptr + 20, _bias1); vst1q_f32(ptr + 24, _bias1); vst1q_f32(ptr + 28, _bias1); ptr += 32; } for (; j + 1 < outw; j += 2) { vst1q_f32(ptr, _bias0); vst1q_f32(ptr + 4, _bias0); vst1q_f32(ptr + 8, _bias1); vst1q_f32(ptr + 12, _bias1); ptr += 16; } for (; j < outw; j++) { vst1q_f32(ptr, _bias0); vst1q_f32(ptr + 4, _bias1); ptr += 8; } } } const unsigned short* k0 = kernel.channel(p); const unsigned short* k1 = kernel.channel(p + 1); int q = 0; for (; q < inch - 1; q++) { float* outptr0 = out0; const Mat img0 = bottom_blob.channel(q); const unsigned short* r0 = img0.row<const unsigned short>(0); const unsigned short* r1 = img0.row<const unsigned short>(1); const unsigned short* r2 = img0.row<const unsigned short>(2); float32x4_t _k00_0 = vcvt_f32_bf16(vld1_u16(k0)); float32x4_t _k01_0 = vcvt_f32_bf16(vld1_u16(k0 + 4)); float32x4_t _k02_0 = vcvt_f32_bf16(vld1_u16(k0 + 8)); float32x4_t _k10_0 = vcvt_f32_bf16(vld1_u16(k0 + 12)); float32x4_t _k11_0 = 
vcvt_f32_bf16(vld1_u16(k0 + 16)); float32x4_t _k12_0 = vcvt_f32_bf16(vld1_u16(k0 + 20)); float32x4_t _k20_0 = vcvt_f32_bf16(vld1_u16(k0 + 24)); float32x4_t _k21_0 = vcvt_f32_bf16(vld1_u16(k0 + 28)); float32x4_t _k22_0 = vcvt_f32_bf16(vld1_u16(k0 + 32)); float32x4_t _k00_1 = vcvt_f32_bf16(vld1_u16(k1)); float32x4_t _k01_1 = vcvt_f32_bf16(vld1_u16(k1 + 4)); float32x4_t _k02_1 = vcvt_f32_bf16(vld1_u16(k1 + 8)); float32x4_t _k10_1 = vcvt_f32_bf16(vld1_u16(k1 + 12)); float32x4_t _k11_1 = vcvt_f32_bf16(vld1_u16(k1 + 16)); float32x4_t _k12_1 = vcvt_f32_bf16(vld1_u16(k1 + 20)); float32x4_t _k20_1 = vcvt_f32_bf16(vld1_u16(k1 + 24)); float32x4_t _k21_1 = vcvt_f32_bf16(vld1_u16(k1 + 28)); float32x4_t _k22_1 = vcvt_f32_bf16(vld1_u16(k1 + 32)); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { asm volatile( // r0 "prfm pldl1keep, [%1, #128] \n" "ld1 {v0.4h, v1.4h}, [%1], #16 \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%0], #64 \n" // sum0 "shll v0.4s, v0.4h, #16 \n" // "prfm pldl1keep, [%0, #512] \n" "ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%0] \n" // sum1 "shll v1.4s, v1.4h, #16 \n" "fmla v6.4s, %8.4s, v0.s[0] \n" "fmla v7.4s, %8.4s, v0.s[2] \n" "fmla v8.4s, %8.4s, v1.s[0] \n" "fmla v9.4s, %8.4s, v1.s[2] \n" "fmla v10.4s, %17.4s, v0.s[0] \n" "fmla v11.4s, %17.4s, v0.s[2] \n" "fmla v12.4s, %17.4s, v1.s[0] \n" "fmla v13.4s, %17.4s, v1.s[2] \n" "ld1 {v4.h}[0], [%1] \n" "fmla v6.4s, %9.4s, v0.s[1] \n" "fmla v7.4s, %9.4s, v0.s[3] \n" "fmla v8.4s, %9.4s, v1.s[1] \n" "fmla v9.4s, %9.4s, v1.s[3] \n" "fmla v10.4s, %18.4s, v0.s[1] \n" "fmla v11.4s, %18.4s, v0.s[3] \n" "fmla v12.4s, %18.4s, v1.s[1] \n" "fmla v13.4s, %18.4s, v1.s[3] \n" "shll v4.4s, v4.4h, #16 \n" // r1 "prfm pldl1keep, [%2, #128] \n" "ld1 {v2.4h, v3.4h}, [%2], #16 \n" "fmla v6.4s, %10.4s, v0.s[2] \n" "fmla v7.4s, %10.4s, v1.s[0] \n" "fmla v8.4s, %10.4s, v1.s[2] \n" "fmla v9.4s, %10.4s, v4.s[0] \n" "shll v2.4s, v2.4h, #16 \n" "fmla v10.4s, %19.4s, v0.s[2] \n" "fmla 
v11.4s, %19.4s, v1.s[0] \n" "fmla v12.4s, %19.4s, v1.s[2] \n" "fmla v13.4s, %19.4s, v4.s[0] \n" "shll v3.4s, v3.4h, #16 \n" "fmla v6.4s, %11.4s, v2.s[0] \n" "fmla v7.4s, %11.4s, v2.s[2] \n" "fmla v8.4s, %11.4s, v3.s[0] \n" "fmla v9.4s, %11.4s, v3.s[2] \n" "fmla v10.4s, %20.4s, v2.s[0] \n" "fmla v11.4s, %20.4s, v2.s[2] \n" "fmla v12.4s, %20.4s, v3.s[0] \n" "fmla v13.4s, %20.4s, v3.s[2] \n" "ld1 {v5.h}[0], [%2] \n" "fmla v6.4s, %12.4s, v2.s[1] \n" "fmla v7.4s, %12.4s, v2.s[3] \n" "fmla v8.4s, %12.4s, v3.s[1] \n" "fmla v9.4s, %12.4s, v3.s[3] \n" "shll v5.4s, v5.4h, #16 \n" "fmla v10.4s, %21.4s, v2.s[1] \n" "fmla v11.4s, %21.4s, v2.s[3] \n" "fmla v12.4s, %21.4s, v3.s[1] \n" "fmla v13.4s, %21.4s, v3.s[3] \n" // r2 "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4h, v1.4h}, [%3], #16 \n" "fmla v6.4s, %13.4s, v2.s[2] \n" "fmla v7.4s, %13.4s, v3.s[0] \n" "fmla v8.4s, %13.4s, v3.s[2] \n" "fmla v9.4s, %13.4s, v5.s[0] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v10.4s, %22.4s, v2.s[2] \n" "fmla v11.4s, %22.4s, v3.s[0] \n" "fmla v12.4s, %22.4s, v3.s[2] \n" "fmla v13.4s, %22.4s, v5.s[0] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v6.4s, %14.4s, v0.s[0] \n" "fmla v7.4s, %14.4s, v0.s[2] \n" "fmla v8.4s, %14.4s, v1.s[0] \n" "fmla v9.4s, %14.4s, v1.s[2] \n" "fmla v10.4s, %23.4s, v0.s[0] \n" "fmla v11.4s, %23.4s, v0.s[2] \n" "fmla v12.4s, %23.4s, v1.s[0] \n" "fmla v13.4s, %23.4s, v1.s[2] \n" "ld1 {v4.h}[0], [%3] \n" "fmla v6.4s, %15.4s, v0.s[1] \n" "fmla v7.4s, %15.4s, v0.s[3] \n" "fmla v8.4s, %15.4s, v1.s[1] \n" "fmla v9.4s, %15.4s, v1.s[3] \n" "fmla v10.4s, %24.4s, v0.s[1] \n" "fmla v11.4s, %24.4s, v0.s[3] \n" "fmla v12.4s, %24.4s, v1.s[1] \n" "fmla v13.4s, %24.4s, v1.s[3] \n" "shll v4.4s, v4.4h, #16 \n" "fmla v6.4s, %16.4s, v0.s[2] \n" "fmla v7.4s, %16.4s, v1.s[0] \n" "fmla v8.4s, %16.4s, v1.s[2] \n" "fmla v9.4s, %16.4s, v4.s[0] \n" "sub %0, %0, #64 \n" "fmla v10.4s, %25.4s, v0.s[2] \n" "fmla v11.4s, %25.4s, v1.s[0] \n" "fmla v12.4s, %25.4s, v1.s[2] \n" "fmla v13.4s, %25.4s, v4.s[0] \n" "st1 
{v6.4s, v7.4s, v8.4s, v9.4s}, [%0], #64 \n" "st1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00_0), // %8 "w"(_k01_0), // %9 "w"(_k02_0), // %10 "w"(_k10_0), // %11 "w"(_k11_0), // %12 "w"(_k12_0), // %13 "w"(_k20_0), // %14 "w"(_k21_0), // %15 "w"(_k22_0), // %16 "w"(_k00_1), // %17 "w"(_k01_1), // %18 "w"(_k02_1), // %19 "w"(_k10_1), // %20 "w"(_k11_1), // %21 "w"(_k12_1), // %22 "w"(_k20_1), // %23 "w"(_k21_1), // %24 "w"(_k22_1) // %25 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13"); } for (; j + 1 < outw; j += 2) { asm volatile( // r0 "prfm pldl1keep, [%1, #64] \n" "ld1 {v0.4h}, [%1], #8 \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%0] \n" // sum0 sum1 "shll v0.4s, v0.4h, #16 \n" "fmla v10.4s, %8.4s, v0.s[0] \n" "fmla v11.4s, %8.4s, v0.s[2] \n" "fmla v12.4s, %17.4s, v0.s[0] \n" "fmla v13.4s, %17.4s, v0.s[2] \n" "ld1 {v1.h}[0], [%1] \n" "fmla v10.4s, %9.4s, v0.s[1] \n" "fmla v11.4s, %9.4s, v0.s[3] \n" "fmla v12.4s, %18.4s, v0.s[1] \n" "fmla v13.4s, %18.4s, v0.s[3] \n" "shll v1.4s, v1.4h, #16 \n" // r1 "prfm pldl1keep, [%2, #64] \n" "ld1 {v2.4h}, [%2], #8 \n" "fmla v10.4s, %10.4s, v0.s[2] \n" "fmla v11.4s, %10.4s, v1.s[0] \n" "fmla v12.4s, %19.4s, v0.s[2] \n" "fmla v13.4s, %19.4s, v1.s[0] \n" "shll v2.4s, v2.4h, #16 \n" "fmla v10.4s, %11.4s, v2.s[0] \n" "fmla v11.4s, %11.4s, v2.s[2] \n" "fmla v12.4s, %20.4s, v2.s[0] \n" "fmla v13.4s, %20.4s, v2.s[2] \n" "ld1 {v3.h}[0], [%2] \n" "fmla v10.4s, %12.4s, v2.s[1] \n" "fmla v11.4s, %12.4s, v2.s[3] \n" "fmla v12.4s, %21.4s, v2.s[1] \n" "fmla v13.4s, %21.4s, v2.s[3] \n" "shll v3.4s, v3.4h, #16 \n" // r2 "prfm pldl1keep, [%3, #64] \n" "ld1 {v0.4h}, [%3], #8 \n" "fmla v10.4s, %13.4s, v2.s[2] \n" "fmla v11.4s, %13.4s, v3.s[0] \n" "fmla v12.4s, %22.4s, v2.s[2] \n" "fmla v13.4s, %22.4s, v3.s[0] \n" "shll v0.4s, v0.4h, 
#16 \n" "fmla v10.4s, %14.4s, v0.s[0] \n" "fmla v11.4s, %14.4s, v0.s[2] \n" "fmla v12.4s, %23.4s, v0.s[0] \n" "fmla v13.4s, %23.4s, v0.s[2] \n" "ld1 {v1.h}[0], [%3] \n" "fmla v10.4s, %15.4s, v0.s[1] \n" "fmla v11.4s, %15.4s, v0.s[3] \n" "fmla v12.4s, %24.4s, v0.s[1] \n" "fmla v13.4s, %24.4s, v0.s[3] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v10.4s, %16.4s, v0.s[2] \n" "fmla v11.4s, %16.4s, v1.s[0] \n" "fmla v12.4s, %25.4s, v0.s[2] \n" "fmla v13.4s, %25.4s, v1.s[0] \n" "st1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00_0), // %8 "w"(_k01_0), // %9 "w"(_k02_0), // %10 "w"(_k10_0), // %11 "w"(_k11_0), // %12 "w"(_k12_0), // %13 "w"(_k20_0), // %14 "w"(_k21_0), // %15 "w"(_k22_0), // %16 "w"(_k00_1), // %17 "w"(_k01_1), // %18 "w"(_k02_1), // %19 "w"(_k10_1), // %20 "w"(_k11_1), // %21 "w"(_k12_1), // %22 "w"(_k20_1), // %23 "w"(_k21_1), // %24 "w"(_k22_1) // %25 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13"); } for (; j < outw; j++) { float32x4_t _sum0 = vld1q_f32(outptr0); float32x4_t _sum1 = vld1q_f32(outptr0 + 4); float32x4_t _r0 = vcvt_f32_bf16(vld1_u16(r0)); float32x4_t _r1 = vcvt_f32_bf16(vld1_u16(r1)); float32x4_t _r2 = vcvt_f32_bf16(vld1_u16(r2)); _sum0 = vfmaq_laneq_f32(_sum0, _k00_0, _r0, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k01_0, _r0, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k02_0, _r0, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k10_0, _r1, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k11_0, _r1, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k12_0, _r1, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k20_0, _r2, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k21_0, _r2, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k22_0, _r2, 2); _sum1 = vfmaq_laneq_f32(_sum1, _k00_1, _r0, 0); _sum1 = vfmaq_laneq_f32(_sum1, _k01_1, _r0, 1); _sum1 = vfmaq_laneq_f32(_sum1, _k02_1, _r0, 2); _sum1 = vfmaq_laneq_f32(_sum1, _k10_1, _r1, 0); _sum1 = 
vfmaq_laneq_f32(_sum1, _k11_1, _r1, 1); _sum1 = vfmaq_laneq_f32(_sum1, _k12_1, _r1, 2); _sum1 = vfmaq_laneq_f32(_sum1, _k20_1, _r2, 0); _sum1 = vfmaq_laneq_f32(_sum1, _k21_1, _r2, 1); _sum1 = vfmaq_laneq_f32(_sum1, _k22_1, _r2, 2); vst1q_f32(outptr0, _sum0); vst1q_f32(outptr0 + 4, _sum1); r0 += 2; r1 += 2; r2 += 2; outptr0 += 8; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } k0 += 9 * 4; k1 += 9 * 4; } for (; q < inch; q++) { unsigned short* outptr0_bf16 = top_blob.channel(p); unsigned short* outptr1_bf16 = top_blob.channel(p + 1); const float* outptr0 = out0; const Mat img0 = bottom_blob.channel(q); const unsigned short* r0 = img0.row<const unsigned short>(0); const unsigned short* r1 = img0.row<const unsigned short>(1); const unsigned short* r2 = img0.row<const unsigned short>(2); float32x4_t _k00_0 = vcvt_f32_bf16(vld1_u16(k0)); float32x4_t _k01_0 = vcvt_f32_bf16(vld1_u16(k0 + 4)); float32x4_t _k02_0 = vcvt_f32_bf16(vld1_u16(k0 + 8)); float32x4_t _k10_0 = vcvt_f32_bf16(vld1_u16(k0 + 12)); float32x4_t _k11_0 = vcvt_f32_bf16(vld1_u16(k0 + 16)); float32x4_t _k12_0 = vcvt_f32_bf16(vld1_u16(k0 + 20)); float32x4_t _k20_0 = vcvt_f32_bf16(vld1_u16(k0 + 24)); float32x4_t _k21_0 = vcvt_f32_bf16(vld1_u16(k0 + 28)); float32x4_t _k22_0 = vcvt_f32_bf16(vld1_u16(k0 + 32)); float32x4_t _k00_1 = vcvt_f32_bf16(vld1_u16(k1)); float32x4_t _k01_1 = vcvt_f32_bf16(vld1_u16(k1 + 4)); float32x4_t _k02_1 = vcvt_f32_bf16(vld1_u16(k1 + 8)); float32x4_t _k10_1 = vcvt_f32_bf16(vld1_u16(k1 + 12)); float32x4_t _k11_1 = vcvt_f32_bf16(vld1_u16(k1 + 16)); float32x4_t _k12_1 = vcvt_f32_bf16(vld1_u16(k1 + 20)); float32x4_t _k20_1 = vcvt_f32_bf16(vld1_u16(k1 + 24)); float32x4_t _k21_1 = vcvt_f32_bf16(vld1_u16(k1 + 28)); float32x4_t _k22_1 = vcvt_f32_bf16(vld1_u16(k1 + 32)); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { asm volatile( // r0 "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4h, v1.4h}, [%3], #16 \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v6.4s, v7.4s, v8.4s, 
v9.4s}, [%2], #64 \n" // sum0 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%2], #64 \n" // sum1 "fmla v6.4s, %12.4s, v0.s[0] \n" "fmla v7.4s, %12.4s, v0.s[2] \n" "fmla v8.4s, %12.4s, v1.s[0] \n" "fmla v9.4s, %12.4s, v1.s[2] \n" "fmla v10.4s, %21.4s, v0.s[0] \n" "fmla v11.4s, %21.4s, v0.s[2] \n" "fmla v12.4s, %21.4s, v1.s[0] \n" "fmla v13.4s, %21.4s, v1.s[2] \n" "ld1 {v4.h}[0], [%3] \n" "fmla v6.4s, %13.4s, v0.s[1] \n" "fmla v7.4s, %13.4s, v0.s[3] \n" "fmla v8.4s, %13.4s, v1.s[1] \n" "fmla v9.4s, %13.4s, v1.s[3] \n" "shll v4.4s, v4.4h, #16 \n" "fmla v10.4s, %22.4s, v0.s[1] \n" "fmla v11.4s, %22.4s, v0.s[3] \n" "fmla v12.4s, %22.4s, v1.s[1] \n" "fmla v13.4s, %22.4s, v1.s[3] \n" // r1 "prfm pldl1keep, [%4, #128] \n" "ld1 {v2.4h, v3.4h}, [%4], #16 \n" "fmla v6.4s, %14.4s, v0.s[2] \n" "fmla v7.4s, %14.4s, v1.s[0] \n" "fmla v8.4s, %14.4s, v1.s[2] \n" "fmla v9.4s, %14.4s, v4.s[0] \n" "shll v2.4s, v2.4h, #16 \n" "fmla v10.4s, %23.4s, v0.s[2] \n" "fmla v11.4s, %23.4s, v1.s[0] \n" "fmla v12.4s, %23.4s, v1.s[2] \n" "fmla v13.4s, %23.4s, v4.s[0] \n" "shll v3.4s, v3.4h, #16 \n" "fmla v6.4s, %15.4s, v2.s[0] \n" "fmla v7.4s, %15.4s, v2.s[2] \n" "fmla v8.4s, %15.4s, v3.s[0] \n" "fmla v9.4s, %15.4s, v3.s[2] \n" "fmla v10.4s, %24.4s, v2.s[0] \n" "fmla v11.4s, %24.4s, v2.s[2] \n" "fmla v12.4s, %24.4s, v3.s[0] \n" "fmla v13.4s, %24.4s, v3.s[2] \n" "ld1 {v5.h}[0], [%4] \n" "fmla v6.4s, %16.4s, v2.s[1] \n" "fmla v7.4s, %16.4s, v2.s[3] \n" "fmla v8.4s, %16.4s, v3.s[1] \n" "fmla v9.4s, %16.4s, v3.s[3] \n" "shll v5.4s, v5.4h, #16 \n" "fmla v10.4s, %25.4s, v2.s[1] \n" "fmla v11.4s, %25.4s, v2.s[3] \n" "fmla v12.4s, %25.4s, v3.s[1] \n" "fmla v13.4s, %25.4s, v3.s[3] \n" // r2 "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4h, v1.4h}, [%5], #16 \n" "fmla v6.4s, %17.4s, v2.s[2] \n" "fmla v7.4s, %17.4s, v3.s[0] \n" "fmla v8.4s, %17.4s, v3.s[2] \n" "fmla v9.4s, %17.4s, v5.s[0] \n" "shll v0.4s, v0.4h, #16 \n" "fmla 
v10.4s, %26.4s, v2.s[2] \n" "fmla v11.4s, %26.4s, v3.s[0] \n" "fmla v12.4s, %26.4s, v3.s[2] \n" "fmla v13.4s, %26.4s, v5.s[0] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v6.4s, %18.4s, v0.s[0] \n" "fmla v7.4s, %18.4s, v0.s[2] \n" "fmla v8.4s, %18.4s, v1.s[0] \n" "fmla v9.4s, %18.4s, v1.s[2] \n" "fmla v10.4s, %27.4s, v0.s[0] \n" "fmla v11.4s, %27.4s, v0.s[2] \n" "fmla v12.4s, %27.4s, v1.s[0] \n" "fmla v13.4s, %27.4s, v1.s[2] \n" "ld1 {v4.h}[0], [%5] \n" "fmla v6.4s, %19.4s, v0.s[1] \n" "fmla v7.4s, %19.4s, v0.s[3] \n" "fmla v8.4s, %19.4s, v1.s[1] \n" "fmla v9.4s, %19.4s, v1.s[3] \n" "fmla v10.4s, %28.4s, v0.s[1] \n" "fmla v11.4s, %28.4s, v0.s[3] \n" "fmla v12.4s, %28.4s, v1.s[1] \n" "fmla v13.4s, %28.4s, v1.s[3] \n" "shll v4.4s, v4.4h, #16 \n" "fmla v6.4s, %20.4s, v0.s[2] \n" "fmla v7.4s, %20.4s, v1.s[0] \n" "fmla v8.4s, %20.4s, v1.s[2] \n" "fmla v9.4s, %20.4s, v4.s[0] \n" "fmla v10.4s, %29.4s, v0.s[2] \n" "fmla v11.4s, %29.4s, v1.s[0] \n" "fmla v12.4s, %29.4s, v1.s[2] \n" "fmla v13.4s, %29.4s, v4.s[0] \n" "shrn v6.4h, v6.4s, #16 \n" "shrn v7.4h, v7.4s, #16 \n" "shrn v8.4h, v8.4s, #16 \n" "shrn v9.4h, v9.4s, #16 \n" "shrn v10.4h, v10.4s, #16 \n" "shrn v11.4h, v11.4s, #16 \n" "st1 {v6.4h, v7.4h, v8.4h, v9.4h}, [%0], #32 \n" "shrn v12.4h, v12.4s, #16 \n" "shrn v13.4h, v13.4s, #16 \n" "st1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%1], #32 \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr1_bf16), // %1 "=r"(outptr0), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2) // %5 : "0"(outptr0_bf16), "1"(outptr1_bf16), "2"(outptr0), "3"(r0), "4"(r1), "5"(r2), "w"(_k00_0), // %12 "w"(_k01_0), // %13 "w"(_k02_0), // %14 "w"(_k10_0), // %15 "w"(_k11_0), // %16 "w"(_k12_0), // %17 "w"(_k20_0), // %18 "w"(_k21_0), // %19 "w"(_k22_0), // %20 "w"(_k00_1), // %21 "w"(_k01_1), // %22 "w"(_k02_1), // %23 "w"(_k10_1), // %24 "w"(_k11_1), // %25 "w"(_k12_1), // %26 "w"(_k20_1), // %27 "w"(_k21_1), // %28 "w"(_k22_1) // %29 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", 
"v11", "v12", "v13"); } for (; j + 1 < outw; j += 2) { asm volatile( // r0 "prfm pldl1keep, [%3, #64] \n" "ld1 {v0.4h}, [%3], #8 \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%2], #64 \n" // sum0 sum1 "shll v0.4s, v0.4h, #16 \n" "fmla v10.4s, %12.4s, v0.s[0] \n" "fmla v11.4s, %12.4s, v0.s[2] \n" "fmla v12.4s, %21.4s, v0.s[0] \n" "fmla v13.4s, %21.4s, v0.s[2] \n" "ld1 {v1.h}[0], [%3] \n" "fmla v10.4s, %13.4s, v0.s[1] \n" "fmla v11.4s, %13.4s, v0.s[3] \n" "fmla v12.4s, %22.4s, v0.s[1] \n" "fmla v13.4s, %22.4s, v0.s[3] \n" "shll v1.4s, v1.4h, #16 \n" // r1 "prfm pldl1keep, [%4, #64] \n" "ld1 {v2.4h}, [%4], #8 \n" "fmla v10.4s, %14.4s, v0.s[2] \n" "fmla v11.4s, %14.4s, v1.s[0] \n" "fmla v12.4s, %23.4s, v0.s[2] \n" "fmla v13.4s, %23.4s, v1.s[0] \n" "shll v2.4s, v2.4h, #16 \n" "fmla v10.4s, %15.4s, v2.s[0] \n" "fmla v11.4s, %15.4s, v2.s[2] \n" "fmla v12.4s, %24.4s, v2.s[0] \n" "fmla v13.4s, %24.4s, v2.s[2] \n" "ld1 {v3.h}[0], [%4] \n" "fmla v10.4s, %16.4s, v2.s[1] \n" "fmla v11.4s, %16.4s, v2.s[3] \n" "fmla v12.4s, %25.4s, v2.s[1] \n" "fmla v13.4s, %25.4s, v2.s[3] \n" "shll v3.4s, v3.4h, #16 \n" // r2 "prfm pldl1keep, [%5, #64] \n" "ld1 {v0.4h}, [%5], #8 \n" "fmla v10.4s, %17.4s, v2.s[2] \n" "fmla v11.4s, %17.4s, v3.s[0] \n" "fmla v12.4s, %26.4s, v2.s[2] \n" "fmla v13.4s, %26.4s, v3.s[0] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v10.4s, %18.4s, v0.s[0] \n" "fmla v11.4s, %18.4s, v0.s[2] \n" "fmla v12.4s, %27.4s, v0.s[0] \n" "fmla v13.4s, %27.4s, v0.s[2] \n" "ld1 {v1.h}[0], [%5] \n" "fmla v10.4s, %19.4s, v0.s[1] \n" "fmla v11.4s, %19.4s, v0.s[3] \n" "fmla v12.4s, %28.4s, v0.s[1] \n" "fmla v13.4s, %28.4s, v0.s[3] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v10.4s, %20.4s, v0.s[2] \n" "fmla v11.4s, %20.4s, v1.s[0] \n" "fmla v12.4s, %29.4s, v0.s[2] \n" "fmla v13.4s, %29.4s, v1.s[0] \n" "shrn v10.4h, v10.4s, #16 \n" "shrn v11.4h, v11.4s, #16 \n" "shrn v12.4h, v12.4s, #16 \n" "shrn v13.4h, v13.4s, #16 \n" "st1 {v10.4h, v11.4h}, [%0], #16 \n" "st1 {v12.4h, 
v13.4h}, [%1], #16 \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr1_bf16), // %1 "=r"(outptr0), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2) // %5 : "0"(outptr0_bf16), "1"(outptr1_bf16), "2"(outptr0), "3"(r0), "4"(r1), "5"(r2), "w"(_k00_0), // %12 "w"(_k01_0), // %13 "w"(_k02_0), // %14 "w"(_k10_0), // %15 "w"(_k11_0), // %16 "w"(_k12_0), // %17 "w"(_k20_0), // %18 "w"(_k21_0), // %19 "w"(_k22_0), // %20 "w"(_k00_1), // %21 "w"(_k01_1), // %22 "w"(_k02_1), // %23 "w"(_k10_1), // %24 "w"(_k11_1), // %25 "w"(_k12_1), // %26 "w"(_k20_1), // %27 "w"(_k21_1), // %28 "w"(_k22_1) // %29 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13"); } for (; j < outw; j++) { float32x4_t _sum0 = vld1q_f32(outptr0); float32x4_t _sum1 = vld1q_f32(outptr0 + 4); float32x4_t _r0 = vcvt_f32_bf16(vld1_u16(r0)); float32x4_t _r1 = vcvt_f32_bf16(vld1_u16(r1)); float32x4_t _r2 = vcvt_f32_bf16(vld1_u16(r2)); _sum0 = vfmaq_laneq_f32(_sum0, _k00_0, _r0, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k01_0, _r0, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k02_0, _r0, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k10_0, _r1, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k11_0, _r1, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k12_0, _r1, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k20_0, _r2, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k21_0, _r2, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k22_0, _r2, 2); _sum1 = vfmaq_laneq_f32(_sum1, _k00_1, _r0, 0); _sum1 = vfmaq_laneq_f32(_sum1, _k01_1, _r0, 1); _sum1 = vfmaq_laneq_f32(_sum1, _k02_1, _r0, 2); _sum1 = vfmaq_laneq_f32(_sum1, _k10_1, _r1, 0); _sum1 = vfmaq_laneq_f32(_sum1, _k11_1, _r1, 1); _sum1 = vfmaq_laneq_f32(_sum1, _k12_1, _r1, 2); _sum1 = vfmaq_laneq_f32(_sum1, _k20_1, _r2, 0); _sum1 = vfmaq_laneq_f32(_sum1, _k21_1, _r2, 1); _sum1 = vfmaq_laneq_f32(_sum1, _k22_1, _r2, 2); vst1_u16(outptr0_bf16, vcvt_bf16_f32(_sum0)); vst1_u16(outptr1_bf16, vcvt_bf16_f32(_sum1)); r0 += 2; r1 += 2; r2 += 2; outptr0 += 8; outptr0_bf16 += 4; outptr1_bf16 += 4; } r0 += tailstep; r1 
+= tailstep; r2 += tailstep; } k0 += 9 * 4; k1 += 9 * 4; } } #endif // __ARM_NEON && __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out0 = top_blob_fp32.channel(get_omp_thread_num()); float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f); out0.fill(_bias0); const unsigned short* k0 = kernel.channel(p); int q = 0; for (; q < inch - 1; q++) { float* outptr0 = out0; const Mat img0 = bottom_blob.channel(q); const unsigned short* r0 = img0.row<const unsigned short>(0); const unsigned short* r1 = img0.row<const unsigned short>(1); const unsigned short* r2 = img0.row<const unsigned short>(2); float32x4_t _k00 = vcvt_f32_bf16(vld1_u16(k0)); float32x4_t _k01 = vcvt_f32_bf16(vld1_u16(k0 + 4)); float32x4_t _k02 = vcvt_f32_bf16(vld1_u16(k0 + 8)); float32x4_t _k10 = vcvt_f32_bf16(vld1_u16(k0 + 12)); float32x4_t _k11 = vcvt_f32_bf16(vld1_u16(k0 + 16)); float32x4_t _k12 = vcvt_f32_bf16(vld1_u16(k0 + 20)); float32x4_t _k20 = vcvt_f32_bf16(vld1_u16(k0 + 24)); float32x4_t _k21 = vcvt_f32_bf16(vld1_u16(k0 + 28)); float32x4_t _k22 = vcvt_f32_bf16(vld1_u16(k0 + 32)); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { #if __aarch64__ asm volatile( // r0 "prfm pldl1keep, [%1, #128] \n" "ld1 {v0.4h, v1.4h}, [%1], #16 \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%0] \n" // sum0 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "fmla v6.4s, %8.4s, v0.s[0] \n" "fmla v7.4s, %8.4s, v0.s[2] \n" "fmla v8.4s, %8.4s, v1.s[0] \n" "fmla v9.4s, %8.4s, v1.s[2] \n" "ld1 {v4.h}[0], [%1] \n" "fmla v6.4s, %9.4s, v0.s[1] \n" "fmla v7.4s, %9.4s, v0.s[3] \n" "fmla v8.4s, %9.4s, v1.s[1] \n" "fmla v9.4s, %9.4s, v1.s[3] \n" "shll v4.4s, v4.4h, #16 \n" // r1 "prfm pldl1keep, [%2, #128] \n" "ld1 {v2.4h, v3.4h}, [%2], #16 \n" "fmla v6.4s, %10.4s, v0.s[2] \n" "fmla v7.4s, %10.4s, v1.s[0] \n" "fmla v8.4s, %10.4s, v1.s[2] \n" "fmla v9.4s, %10.4s, v4.s[0] 
\n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v6.4s, %11.4s, v2.s[0] \n" "fmla v7.4s, %11.4s, v2.s[2] \n" "fmla v8.4s, %11.4s, v3.s[0] \n" "fmla v9.4s, %11.4s, v3.s[2] \n" "ld1 {v5.h}[0], [%2] \n" "fmla v6.4s, %12.4s, v2.s[1] \n" "fmla v7.4s, %12.4s, v2.s[3] \n" "fmla v8.4s, %12.4s, v3.s[1] \n" "fmla v9.4s, %12.4s, v3.s[3] \n" "shll v5.4s, v5.4h, #16 \n" // r2 "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4h, v1.4h}, [%3], #16 \n" "fmla v6.4s, %13.4s, v2.s[2] \n" "fmla v7.4s, %13.4s, v3.s[0] \n" "fmla v8.4s, %13.4s, v3.s[2] \n" "fmla v9.4s, %13.4s, v5.s[0] \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "fmla v6.4s, %14.4s, v0.s[0] \n" "fmla v7.4s, %14.4s, v0.s[2] \n" "fmla v8.4s, %14.4s, v1.s[0] \n" "fmla v9.4s, %14.4s, v1.s[2] \n" "ld1 {v4.h}[0], [%3] \n" "fmla v6.4s, %15.4s, v0.s[1] \n" "fmla v7.4s, %15.4s, v0.s[3] \n" "fmla v8.4s, %15.4s, v1.s[1] \n" "fmla v9.4s, %15.4s, v1.s[3] \n" "shll v4.4s, v4.4h, #16 \n" "fmla v6.4s, %16.4s, v0.s[2] \n" "fmla v7.4s, %16.4s, v1.s[0] \n" "fmla v8.4s, %16.4s, v1.s[2] \n" "fmla v9.4s, %16.4s, v4.s[0] \n" "st1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9"); #else // __aarch64__ asm volatile( // r0 "pld [%1, #128] \n" "vld1.u16 {d12-d13}, [%1]! 
\n" "pld [%0, #512] \n" "vldm %0, {d0-d7} \n" // sum0 "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vld1.u16 {d12[0]}, [%1] \n" "vmla.f32 q0, %q8, d8[0] \n" "vmla.f32 q1, %q8, d9[0] \n" "vmla.f32 q2, %q8, d10[0] \n" "vmla.f32 q3, %q8, d11[0] \n" "vmla.f32 q0, %q9, d8[1] \n" "vmla.f32 q1, %q9, d9[1] \n" "vshl.u32 d8, d12, #16 \n" "vmla.f32 q2, %q9, d10[1] \n" "vmla.f32 q3, %q9, d11[1] \n" // r1 "pld [%2, #128] \n" "vld1.u16 {d12-d13}, [%2]! \n" "vmla.f32 q0, %q10, d9[0] \n" "vmla.f32 q1, %q10, d10[0] \n" "vmla.f32 q2, %q10, d11[0] \n" "vmla.f32 q3, %q10, d8[0] \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vld1.u16 {d12[0]}, [%2] \n" "vmla.f32 q0, %q11, d8[0] \n" "vmla.f32 q1, %q11, d9[0] \n" "vmla.f32 q2, %q11, d10[0] \n" "vmla.f32 q3, %q11, d11[0] \n" "vmla.f32 q0, %q12, d8[1] \n" "vmla.f32 q1, %q12, d9[1] \n" "vshl.u32 d8, d12, #16 \n" "vmla.f32 q2, %q12, d10[1] \n" "vmla.f32 q3, %q12, d11[1] \n" // r2 "pld [%3, #128] \n" "vld1.u16 {d12-d13}, [%3]! \n" "vmla.f32 q0, %q13, d9[0] \n" "vmla.f32 q1, %q13, d10[0] \n" "vmla.f32 q2, %q13, d11[0] \n" "vmla.f32 q3, %q13, d8[0] \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vld1.u16 {d12[0]}, [%3] \n" "vmla.f32 q0, %q14, d8[0] \n" "vmla.f32 q1, %q14, d9[0] \n" "vmla.f32 q2, %q14, d10[0] \n" "vmla.f32 q3, %q14, d11[0] \n" "vmla.f32 q0, %q15, d8[1] \n" "vmla.f32 q1, %q15, d9[1] \n" "vshl.u32 d8, d12, #16 \n" "vmla.f32 q2, %q15, d10[1] \n" "vmla.f32 q3, %q15, d11[1] \n" "vmla.f32 q0, %q16, d9[0] \n" "vmla.f32 q1, %q16, d10[0] \n" "vmla.f32 q2, %q16, d11[0] \n" "vmla.f32 q3, %q16, d8[0] \n" "vstm %0!, {d0-d7} \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6"); #endif // __aarch64__ } for (; j + 1 < outw; j += 
2) { #if __aarch64__ asm volatile( // r0 "prfm pldl1keep, [%1, #64] \n" "ld1 {v0.4h}, [%1], #8 \n" "prfm pldl1keep, [%0, #256] \n" "ld1 {v8.4s, v9.4s}, [%0] \n" // sum0 "shll v0.4s, v0.4h, #16 \n" "fmul v6.4s, %8.4s, v0.s[0] \n" "fmul v7.4s, %8.4s, v0.s[2] \n" "ld1 {v1.h}[0], [%1] \n" "fmla v8.4s, %9.4s, v0.s[1] \n" "fmla v9.4s, %9.4s, v0.s[3] \n" "shll v1.4s, v1.4h, #16 \n" // r1 "prfm pldl1keep, [%2, #64] \n" "ld1 {v2.4h}, [%2], #8 \n" "fmla v6.4s, %10.4s, v0.s[2] \n" "fmla v7.4s, %10.4s, v1.s[0] \n" "shll v2.4s, v2.4h, #16 \n" "fmla v8.4s, %11.4s, v2.s[0] \n" "fmla v9.4s, %11.4s, v2.s[2] \n" "ld1 {v3.h}[0], [%2] \n" "fmla v6.4s, %12.4s, v2.s[1] \n" "fmla v7.4s, %12.4s, v2.s[3] \n" "shll v3.4s, v3.4h, #16 \n" // r2 "prfm pldl1keep, [%3, #64] \n" "ld1 {v0.4h}, [%3], #8 \n" "fmla v8.4s, %13.4s, v2.s[2] \n" "fmla v9.4s, %13.4s, v3.s[0] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v6.4s, %14.4s, v0.s[0] \n" "fmla v7.4s, %14.4s, v0.s[2] \n" "ld1 {v1.h}[0], [%3] \n" "fmla v8.4s, %15.4s, v0.s[1] \n" "fmla v9.4s, %15.4s, v0.s[3] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v6.4s, %16.4s, v0.s[2] \n" "fmla v7.4s, %16.4s, v1.s[0] \n" "fadd v8.4s, v8.4s, v6.4s \n" "fadd v9.4s, v9.4s, v7.4s \n" "st1 {v8.4s, v9.4s}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9"); #else // __aarch64__ asm volatile( // r0 "pld [%1, #64] \n" "vld1.u16 {d9}, [%1]! \n" "pld [%0, #256] \n" "vld1.f32 {d4-d7}, [%0] \n" // sum0 "vshll.u16 q4, d9, #16 \n" "vmul.f32 q0, %q8, d8[0] \n" "vmul.f32 q1, %q8, d9[0] \n" "vld1.u16 {d11[]}, [%1] \n" "vmla.f32 q2, %q9, d8[1] \n" "vmla.f32 q3, %q9, d9[1] \n" "vshll.u16 q5, d11, #16 \n" // r1 "pld [%2, #64] \n" "vld1.u16 {d13}, [%2]! 
\n" "vmla.f32 q0, %q10, d9[0] \n" "vmla.f32 q1, %q10, d10[0] \n" "vshll.u16 q6, d13, #16 \n" "vmla.f32 q2, %q11, d12[0] \n" "vmla.f32 q3, %q11, d13[0] \n" "vld1.u16 {d9[]}, [%2] \n" "vmla.f32 q0, %q12, d12[1] \n" "vmla.f32 q1, %q12, d13[1] \n" "vshll.u16 q4, d9, #16 \n" // r2 "pld [%3, #64] \n" "vld1.u16 {d11}, [%3]! \n" "vmla.f32 q2, %q13, d13[0] \n" "vmla.f32 q3, %q13, d8[0] \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q0, %q14, d10[0] \n" "vmla.f32 q1, %q14, d11[0] \n" "vld1.u16 {d13[]}, [%3] \n" "vmla.f32 q2, %q15, d10[1] \n" "vmla.f32 q3, %q15, d11[1] \n" "vshll.u16 q6, d13, #16 \n" "vmla.f32 q0, %q16, d11[0] \n" "vmla.f32 q1, %q16, d12[0] \n" "vadd.f32 q2, q2, q0 \n" "vadd.f32 q3, q3, q1 \n" "vst1.f32 {d4-d7}, [%0]! \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6"); #endif // __aarch64__ } for (; j < outw; j++) { float32x4_t _sum0 = vld1q_f32(outptr0); float32x4_t _r0 = vcvt_f32_bf16(vld1_u16(r0)); float32x4_t _r1 = vcvt_f32_bf16(vld1_u16(r1)); float32x4_t _r2 = vcvt_f32_bf16(vld1_u16(r2)); #if __aarch64__ _sum0 = vfmaq_laneq_f32(_sum0, _k00, _r0, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k01, _r0, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k02, _r0, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k10, _r1, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k11, _r1, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k12, _r1, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k20, _r2, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k21, _r2, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k22, _r2, 2); #else _sum0 = vmlaq_lane_f32(_sum0, _k00, vget_low_f32(_r0), 0); _sum0 = vmlaq_lane_f32(_sum0, _k01, vget_low_f32(_r0), 1); _sum0 = vmlaq_lane_f32(_sum0, _k02, vget_high_f32(_r0), 0); _sum0 = vmlaq_lane_f32(_sum0, _k10, vget_low_f32(_r1), 0); _sum0 = vmlaq_lane_f32(_sum0, 
_k11, vget_low_f32(_r1), 1); _sum0 = vmlaq_lane_f32(_sum0, _k12, vget_high_f32(_r1), 0); _sum0 = vmlaq_lane_f32(_sum0, _k20, vget_low_f32(_r2), 0); _sum0 = vmlaq_lane_f32(_sum0, _k21, vget_low_f32(_r2), 1); _sum0 = vmlaq_lane_f32(_sum0, _k22, vget_high_f32(_r2), 0); #endif vst1q_f32(outptr0, _sum0); r0 += 2; r1 += 2; r2 += 2; outptr0 += 4; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } k0 += 9 * 4; } for (; q < inch; q++) { unsigned short* outptr0_bf16 = top_blob.channel(p); const float* outptr0 = out0; const Mat img0 = bottom_blob.channel(q); const unsigned short* r0 = img0.row<const unsigned short>(0); const unsigned short* r1 = img0.row<const unsigned short>(1); const unsigned short* r2 = img0.row<const unsigned short>(2); float32x4_t _k00 = vcvt_f32_bf16(vld1_u16(k0)); float32x4_t _k01 = vcvt_f32_bf16(vld1_u16(k0 + 4)); float32x4_t _k02 = vcvt_f32_bf16(vld1_u16(k0 + 8)); float32x4_t _k10 = vcvt_f32_bf16(vld1_u16(k0 + 12)); float32x4_t _k11 = vcvt_f32_bf16(vld1_u16(k0 + 16)); float32x4_t _k12 = vcvt_f32_bf16(vld1_u16(k0 + 20)); float32x4_t _k20 = vcvt_f32_bf16(vld1_u16(k0 + 24)); float32x4_t _k21 = vcvt_f32_bf16(vld1_u16(k0 + 28)); float32x4_t _k22 = vcvt_f32_bf16(vld1_u16(k0 + 32)); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { #if __aarch64__ asm volatile( // r0 "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4h, v1.4h}, [%2], #16 \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%1], #64 \n" // sum0 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "fmla v6.4s, %10.4s, v0.s[0] \n" "fmla v7.4s, %10.4s, v0.s[2] \n" "fmla v8.4s, %10.4s, v1.s[0] \n" "fmla v9.4s, %10.4s, v1.s[2] \n" "ld1 {v4.h}[0], [%2] \n" "fmla v6.4s, %11.4s, v0.s[1] \n" "fmla v7.4s, %11.4s, v0.s[3] \n" "fmla v8.4s, %11.4s, v1.s[1] \n" "fmla v9.4s, %11.4s, v1.s[3] \n" "shll v4.4s, v4.4h, #16 \n" // r1 "prfm pldl1keep, [%3, #128] \n" "ld1 {v2.4h, v3.4h}, [%3], #16 \n" "fmla v6.4s, %12.4s, v0.s[2] \n" "fmla v7.4s, %12.4s, v1.s[0] \n" "fmla 
v8.4s, %12.4s, v1.s[2] \n" "fmla v9.4s, %12.4s, v4.s[0] \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v6.4s, %13.4s, v2.s[0] \n" "fmla v7.4s, %13.4s, v2.s[2] \n" "fmla v8.4s, %13.4s, v3.s[0] \n" "fmla v9.4s, %13.4s, v3.s[2] \n" "ld1 {v5.h}[0], [%3] \n" "fmla v6.4s, %14.4s, v2.s[1] \n" "fmla v7.4s, %14.4s, v2.s[3] \n" "fmla v8.4s, %14.4s, v3.s[1] \n" "fmla v9.4s, %14.4s, v3.s[3] \n" "shll v5.4s, v5.4h, #16 \n" // r2 "prfm pldl1keep, [%4, #128] \n" "ld1 {v0.4h, v1.4h}, [%4], #16 \n" "fmla v6.4s, %15.4s, v2.s[2] \n" "fmla v7.4s, %15.4s, v3.s[0] \n" "fmla v8.4s, %15.4s, v3.s[2] \n" "fmla v9.4s, %15.4s, v5.s[0] \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "fmla v6.4s, %16.4s, v0.s[0] \n" "fmla v7.4s, %16.4s, v0.s[2] \n" "fmla v8.4s, %16.4s, v1.s[0] \n" "fmla v9.4s, %16.4s, v1.s[2] \n" "ld1 {v4.h}[0], [%4] \n" "fmla v6.4s, %17.4s, v0.s[1] \n" "fmla v7.4s, %17.4s, v0.s[3] \n" "fmla v8.4s, %17.4s, v1.s[1] \n" "fmla v9.4s, %17.4s, v1.s[3] \n" "shll v4.4s, v4.4h, #16 \n" "fmla v6.4s, %18.4s, v0.s[2] \n" "fmla v7.4s, %18.4s, v1.s[0] \n" "fmla v8.4s, %18.4s, v1.s[2] \n" "fmla v9.4s, %18.4s, v4.s[0] \n" "shrn v6.4h, v6.4s, #16 \n" "shrn v7.4h, v7.4s, #16 \n" "shrn v8.4h, v8.4s, #16 \n" "shrn v9.4h, v9.4s, #16 \n" "st1 {v6.4h, v7.4h, v8.4h, v9.4h}, [%0], #32 \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "w"(_k00), // %10 "w"(_k01), // %11 "w"(_k02), // %12 "w"(_k10), // %13 "w"(_k11), // %14 "w"(_k12), // %15 "w"(_k20), // %16 "w"(_k21), // %17 "w"(_k22) // %18 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9"); #else // __aarch64__ asm volatile( // r0 "pld [%2, #128] \n" "vld1.u16 {d12-d13}, [%2]! 
\n" "pld [%1, #512] \n" "vldm %1!, {d0-d7} \n" // sum0 "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vld1.u16 {d12[0]}, [%2] \n" "vmla.f32 q0, %q10, d8[0] \n" "vmla.f32 q1, %q10, d9[0] \n" "vmla.f32 q2, %q10, d10[0] \n" "vmla.f32 q3, %q10, d11[0] \n" "vmla.f32 q0, %q11, d8[1] \n" "vmla.f32 q1, %q11, d9[1] \n" "vshl.u32 d8, d12, #16 \n" "vmla.f32 q2, %q11, d10[1] \n" "vmla.f32 q3, %q11, d11[1] \n" // r1 "pld [%3, #128] \n" "vld1.u16 {d12-d13}, [%3]! \n" "vmla.f32 q0, %q12, d9[0] \n" "vmla.f32 q1, %q12, d10[0] \n" "vmla.f32 q2, %q12, d11[0] \n" "vmla.f32 q3, %q12, d8[0] \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vld1.u16 {d12[0]}, [%3] \n" "vmla.f32 q0, %q13, d8[0] \n" "vmla.f32 q1, %q13, d9[0] \n" "vmla.f32 q2, %q13, d10[0] \n" "vmla.f32 q3, %q13, d11[0] \n" "vmla.f32 q0, %q14, d8[1] \n" "vmla.f32 q1, %q14, d9[1] \n" "vshl.u32 d8, d12, #16 \n" "vmla.f32 q2, %q14, d10[1] \n" "vmla.f32 q3, %q14, d11[1] \n" // r2 "pld [%4, #128] \n" "vld1.u16 {d12-d13}, [%4]! \n" "vmla.f32 q0, %q15, d9[0] \n" "vmla.f32 q1, %q15, d10[0] \n" "vmla.f32 q2, %q15, d11[0] \n" "vmla.f32 q3, %q15, d8[0] \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vld1.u16 {d12[0]}, [%4] \n" "vmla.f32 q0, %q16, d8[0] \n" "vmla.f32 q1, %q16, d9[0] \n" "vmla.f32 q2, %q16, d10[0] \n" "vmla.f32 q3, %q16, d11[0] \n" "vmla.f32 q0, %q17, d8[1] \n" "vmla.f32 q1, %q17, d9[1] \n" "vshl.u32 d8, d12, #16 \n" "vmla.f32 q2, %q17, d10[1] \n" "vmla.f32 q3, %q17, d11[1] \n" "vmla.f32 q0, %q18, d9[0] \n" "vmla.f32 q1, %q18, d10[0] \n" "vmla.f32 q2, %q18, d11[0] \n" "vmla.f32 q3, %q18, d8[0] \n" "vshrn.u32 d0, q0, #16 \n" "vshrn.u32 d1, q1, #16 \n" "vshrn.u32 d2, q2, #16 \n" "vshrn.u32 d3, q3, #16 \n" "vst1.u16 {d0-d3}, [%0 :64]! 
\n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "w"(_k00), // %10 "w"(_k01), // %11 "w"(_k02), // %12 "w"(_k10), // %13 "w"(_k11), // %14 "w"(_k12), // %15 "w"(_k20), // %16 "w"(_k21), // %17 "w"(_k22) // %18 : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6"); #endif // __aarch64__ } for (; j + 1 < outw; j += 2) { #if __aarch64__ asm volatile( // r0 "prfm pldl1keep, [%2, #64] \n" "ld1 {v0.4h}, [%2], #8 \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v8.4s, v9.4s}, [%1], #32 \n" // sum0 "shll v0.4s, v0.4h, #16 \n" "fmul v6.4s, %10.4s, v0.s[0] \n" "fmul v7.4s, %10.4s, v0.s[2] \n" "ld1 {v1.h}[0], [%2] \n" "fmla v8.4s, %11.4s, v0.s[1] \n" "fmla v9.4s, %11.4s, v0.s[3] \n" "shll v1.4s, v1.4h, #16 \n" // r1 "prfm pldl1keep, [%3, #64] \n" "ld1 {v2.4h}, [%3], #8 \n" "fmla v6.4s, %12.4s, v0.s[2] \n" "fmla v7.4s, %12.4s, v1.s[0] \n" "shll v2.4s, v2.4h, #16 \n" "fmla v8.4s, %13.4s, v2.s[0] \n" "fmla v9.4s, %13.4s, v2.s[2] \n" "ld1 {v3.h}[0], [%3] \n" "fmla v6.4s, %14.4s, v2.s[1] \n" "fmla v7.4s, %14.4s, v2.s[3] \n" "shll v3.4s, v3.4h, #16 \n" // r2 "prfm pldl1keep, [%4, #64] \n" "ld1 {v0.4h}, [%4], #8 \n" "fmla v8.4s, %15.4s, v2.s[2] \n" "fmla v9.4s, %15.4s, v3.s[0] \n" "shll v0.4s, v0.4h, #16 \n" "fmla v6.4s, %16.4s, v0.s[0] \n" "fmla v7.4s, %16.4s, v0.s[2] \n" "ld1 {v1.h}[0], [%4] \n" "fmla v8.4s, %17.4s, v0.s[1] \n" "fmla v9.4s, %17.4s, v0.s[3] \n" "shll v1.4s, v1.4h, #16 \n" "fmla v6.4s, %18.4s, v0.s[2] \n" "fmla v7.4s, %18.4s, v1.s[0] \n" "fadd v8.4s, v8.4s, v6.4s \n" "fadd v9.4s, v9.4s, v7.4s \n" "shrn v8.4h, v8.4s, #16 \n" "shrn v9.4h, v9.4s, #16 \n" "st1 {v8.4h, v9.4h}, [%0], #16 \n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "w"(_k00), // %10 "w"(_k01), // %11 "w"(_k02), // %12 "w"(_k10), // %13 "w"(_k11), // %14 "w"(_k12), // %15 
"w"(_k20), // %16 "w"(_k21), // %17 "w"(_k22) // %18 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9"); #else // __aarch64__ asm volatile( // r0 "pld [%2, #64] \n" "vld1.u16 {d9}, [%2]! \n" "pld [%1, #256] \n" "vld1.f32 {d4-d7}, [%1]! \n" // sum0 "vshll.u16 q4, d9, #16 \n" "vmul.f32 q0, %q10, d8[0] \n" "vmul.f32 q1, %q10, d9[0] \n" "vld1.u16 {d11[]}, [%2] \n" "vmla.f32 q2, %q11, d8[1] \n" "vmla.f32 q3, %q11, d9[1] \n" "vshll.u16 q5, d11, #16 \n" // r1 "pld [%3, #64] \n" "vld1.u16 {d13}, [%3]! \n" "vmla.f32 q0, %q12, d9[0] \n" "vmla.f32 q1, %q12, d10[0] \n" "vshll.u16 q6, d13, #16 \n" "vmla.f32 q2, %q13, d12[0] \n" "vmla.f32 q3, %q13, d13[0] \n" "vld1.u16 {d9[]}, [%3] \n" "vmla.f32 q0, %q14, d12[1] \n" "vmla.f32 q1, %q14, d13[1] \n" "vshll.u16 q4, d9, #16 \n" // r2 "pld [%4, #64] \n" "vld1.u16 {d11}, [%4]! \n" "vmla.f32 q2, %q15, d13[0] \n" "vmla.f32 q3, %q15, d8[0] \n" "vshll.u16 q5, d11, #16 \n" "vmla.f32 q0, %q16, d10[0] \n" "vmla.f32 q1, %q16, d11[0] \n" "vld1.u16 {d13[]}, [%4] \n" "vmla.f32 q2, %q17, d10[1] \n" "vmla.f32 q3, %q17, d11[1] \n" "vshll.u16 q6, d13, #16 \n" "vmla.f32 q0, %q18, d11[0] \n" "vmla.f32 q1, %q18, d12[0] \n" "vadd.f32 q2, q2, q0 \n" "vadd.f32 q3, q3, q1 \n" "vshrn.u32 d2, q2, #16 \n" "vshrn.u32 d3, q3, #16 \n" "vst1.u16 {d2-d3}, [%0 :64]! 
\n" : "=r"(outptr0_bf16), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0_bf16), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "w"(_k00), // %10 "w"(_k01), // %11 "w"(_k02), // %12 "w"(_k10), // %13 "w"(_k11), // %14 "w"(_k12), // %15 "w"(_k20), // %16 "w"(_k21), // %17 "w"(_k22) // %18 : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6"); #endif // __aarch64__ } for (; j < outw; j++) { float32x4_t _sum0 = vld1q_f32(outptr0); float32x4_t _r0 = vcvt_f32_bf16(vld1_u16(r0)); float32x4_t _r1 = vcvt_f32_bf16(vld1_u16(r1)); float32x4_t _r2 = vcvt_f32_bf16(vld1_u16(r2)); #if __aarch64__ _sum0 = vfmaq_laneq_f32(_sum0, _k00, _r0, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k01, _r0, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k02, _r0, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k10, _r1, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k11, _r1, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k12, _r1, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k20, _r2, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k21, _r2, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k22, _r2, 2); #else _sum0 = vmlaq_lane_f32(_sum0, _k00, vget_low_f32(_r0), 0); _sum0 = vmlaq_lane_f32(_sum0, _k01, vget_low_f32(_r0), 1); _sum0 = vmlaq_lane_f32(_sum0, _k02, vget_high_f32(_r0), 0); _sum0 = vmlaq_lane_f32(_sum0, _k10, vget_low_f32(_r1), 0); _sum0 = vmlaq_lane_f32(_sum0, _k11, vget_low_f32(_r1), 1); _sum0 = vmlaq_lane_f32(_sum0, _k12, vget_high_f32(_r1), 0); _sum0 = vmlaq_lane_f32(_sum0, _k20, vget_low_f32(_r2), 0); _sum0 = vmlaq_lane_f32(_sum0, _k21, vget_low_f32(_r2), 1); _sum0 = vmlaq_lane_f32(_sum0, _k22, vget_high_f32(_r2), 0); #endif vst1_u16(outptr0_bf16, vcvt_bf16_f32(_sum0)); r0 += 2; r1 += 2; r2 += 2; outptr0 += 4; outptr0_bf16 += 4; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } k0 += 9 * 4; } } }
visual-effects.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % V V IIIII SSSSS U U AAA L % % V V I SS U U A A L % % V V I SSS U U AAAAA L % % V V I SS U U A A L % % V IIIII SSSSS UUU A A LLLLL % % % % EEEEE FFFFF FFFFF EEEEE CCCC TTTTT SSSSS % % E F F E C T SS % % EEE FFF FFF EEE C T SSS % % E F F E C T SS % % EEEEE F F EEEEE CCCC T SSSSS % % % % % % MagickCore Image Special Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/layer.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/transform.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" #include "MagickCore/visual-effects.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d d N o i s e I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AddNoiseImage() adds random noise to the image. % % The format of the AddNoiseImage method is: % % Image *AddNoiseImage(const Image *image,const NoiseType noise_type, % const double attenuate,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o noise_type: The type of noise: Uniform, Gaussian, Multiplicative, % Impulse, Laplacian, or Poisson. % % o attenuate: attenuate the random distribution. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type, const double attenuate,ExceptionInfo *exception) { #define AddNoiseImageTag "AddNoise/Image" CacheView *image_view, *noise_view; Image *noise_image; MagickBooleanType status; MagickOffsetType progress; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif /* Initialize noise image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) noise_image=AccelerateAddNoiseImage(image,noise_type,attenuate,exception); if (noise_image != (Image *) NULL) return(noise_image); #endif noise_image=CloneImage(image,0,0,MagickTrue,exception); if (noise_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse) { noise_image=DestroyImage(noise_image); return((Image *) NULL); } /* Add noise in each row. 
*/ status=MagickTrue; progress=0; random_info=AcquireRandomInfoThreadSet(); image_view=AcquireVirtualCacheView(image,exception); noise_view=AcquireAuthenticCacheView(noise_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,noise_image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait noise_traits=GetPixelChannelTraits(noise_image,channel); if ((traits == UndefinedPixelTrait) || (noise_traits == UndefinedPixelTrait)) continue; if ((noise_traits & CopyPixelTrait) != 0) { SetPixelChannel(noise_image,channel,p[i],q); continue; } SetPixelChannel(noise_image,channel,ClampToQuantum( GenerateDifferentialNoise(random_info[id],p[i],noise_type,attenuate)), q); } p+=GetPixelChannels(image); q+=GetPixelChannels(noise_image); } sync=SyncCacheViewAuthenticPixels(noise_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,AddNoiseImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } noise_view=DestroyCacheView(noise_view); 
image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); if (status == MagickFalse) noise_image=DestroyImage(noise_image); return(noise_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B l u e S h i f t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BlueShiftImage() mutes the colors of the image to simulate a scene at % nighttime in the moonlight. % % The format of the BlueShiftImage method is: % % Image *BlueShiftImage(const Image *image,const double factor, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o factor: the shift factor. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *BlueShiftImage(const Image *image,const double factor, ExceptionInfo *exception) { #define BlueShiftImageTag "BlueShift/Image" CacheView *image_view, *shift_view; Image *shift_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Allocate blue shift image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); shift_image=CloneImage(image,0,0,MagickTrue,exception); if (shift_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(shift_image,DirectClass,exception) == MagickFalse) { shift_image=DestroyImage(shift_image); return((Image *) NULL); } /* Blue-shift DirectClass image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); shift_view=AcquireAuthenticCacheView(shift_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,shift_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; Quantum quantum; const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { quantum=GetPixelRed(image,p); if (GetPixelGreen(image,p) < quantum) quantum=GetPixelGreen(image,p); if (GetPixelBlue(image,p) < quantum) quantum=GetPixelBlue(image,p); pixel.red=0.5*(GetPixelRed(image,p)+factor*quantum); pixel.green=0.5*(GetPixelGreen(image,p)+factor*quantum); pixel.blue=0.5*(GetPixelBlue(image,p)+factor*quantum); quantum=GetPixelRed(image,p); if (GetPixelGreen(image,p) > quantum) quantum=GetPixelGreen(image,p); if (GetPixelBlue(image,p) > quantum) quantum=GetPixelBlue(image,p); pixel.red=0.5*(pixel.red+factor*quantum); pixel.green=0.5*(pixel.green+factor*quantum); pixel.blue=0.5*(pixel.blue+factor*quantum); SetPixelRed(shift_image,ClampToQuantum(pixel.red),q); SetPixelGreen(shift_image,ClampToQuantum(pixel.green),q); SetPixelBlue(shift_image,ClampToQuantum(pixel.blue),q); p+=GetPixelChannels(image); q+=GetPixelChannels(shift_image); } sync=SyncCacheViewAuthenticPixels(shift_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; 
proceed=SetImageProgress(image,BlueShiftImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); shift_view=DestroyCacheView(shift_view); if (status == MagickFalse) shift_image=DestroyImage(shift_image); return(shift_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C h a r c o a l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CharcoalImage() creates a new image that is a copy of an existing one with % the edge highlighted. It allocates the memory necessary for the new Image % structure and returns a pointer to the new image. % % The format of the CharcoalImage method is: % % Image *CharcoalImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the pixel neighborhood. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. 
% */
MagickExport Image *CharcoalImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *charcoal_image,
    *edge_image;

  MagickBooleanType
    status;

  /*
    Validate arguments (standard MagickCore entry checks).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The charcoal sketch is built from the image's edge map.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    return((Image *) NULL);
  /* drop the alpha trait so later passes treat the edge map as opaque */
  edge_image->alpha_trait=UndefinedPixelTrait;
  charcoal_image=(Image *) NULL;
  /*
    Clamp out-of-range edge values, then soften the strokes with a
    Gaussian blur; the edge map is released in either outcome.
  */
  status=ClampImage(edge_image,exception);
  if (status != MagickFalse)
    charcoal_image=BlurImage(edge_image,radius,sigma,exception);
  edge_image=DestroyImage(edge_image);
  if (charcoal_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Finish the effect: stretch contrast, invert, and convert to gray.
    Any failure releases the partial result and returns NULL.
  */
  status=NormalizeImage(charcoal_image,exception);
  if (status != MagickFalse)
    status=NegateImage(charcoal_image,MagickFalse,exception);
  if (status != MagickFalse)
    status=GrayscaleImage(charcoal_image,image->intensity,exception);
  if (status == MagickFalse)
    charcoal_image=DestroyImage(charcoal_image);
  return(charcoal_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o l o r i z e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ColorizeImage() blends the fill color with each pixel in the image.
%  A percentage blend is specified with opacity.  Control the application
%  of different color components by specifying a different percentage for
%  each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue).
%
%  The format of the ColorizeImage method is:
%
%      Image *ColorizeImage(const Image *image,const char *blend,
%        const PixelInfo *colorize,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
% % o blend: A character string indicating the level of blending as a % percentage. % % o colorize: A color value. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ColorizeImage(const Image *image,const char *blend, const PixelInfo *colorize,ExceptionInfo *exception) { #define ColorizeImageTag "Colorize/Image" #define Colorize(pixel,blend_percentage,colorize) \ (((pixel)*(100.0-(blend_percentage))+(colorize)*(blend_percentage))/100.0) CacheView *image_view; GeometryInfo geometry_info; Image *colorize_image; MagickBooleanType status; MagickOffsetType progress; MagickStatusType flags; PixelInfo blend_percentage; ssize_t y; /* Allocate colorized image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); colorize_image=CloneImage(image,0,0,MagickTrue,exception); if (colorize_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(colorize_image,DirectClass,exception) == MagickFalse) { colorize_image=DestroyImage(colorize_image); return((Image *) NULL); } if ((IsGrayColorspace(colorize_image->colorspace) != MagickFalse) || (IsPixelInfoGray(colorize) != MagickFalse)) (void) SetImageColorspace(colorize_image,sRGBColorspace,exception); if ((colorize_image->alpha_trait == UndefinedPixelTrait) && (colorize->alpha_trait != UndefinedPixelTrait)) (void) SetImageAlpha(colorize_image,OpaqueAlpha,exception); if (blend == (const char *) NULL) return(colorize_image); GetPixelInfo(colorize_image,&blend_percentage); flags=ParseGeometry(blend,&geometry_info); blend_percentage.red=geometry_info.rho; blend_percentage.green=geometry_info.rho; blend_percentage.blue=geometry_info.rho; blend_percentage.black=geometry_info.rho; blend_percentage.alpha=(MagickRealType) TransparentAlpha; if 
((flags & SigmaValue) != 0) blend_percentage.green=geometry_info.sigma; if ((flags & XiValue) != 0) blend_percentage.blue=geometry_info.xi; if ((flags & PsiValue) != 0) blend_percentage.alpha=geometry_info.psi; if (blend_percentage.colorspace == CMYKColorspace) { if ((flags & PsiValue) != 0) blend_percentage.black=geometry_info.psi; if ((flags & ChiValue) != 0) blend_percentage.alpha=geometry_info.chi; } /* Colorize DirectClass image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(colorize_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(colorize_image,colorize_image,colorize_image->rows,1) #endif for (y=0; y < (ssize_t) colorize_image->rows; y++) { MagickBooleanType sync; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,colorize_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) colorize_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(colorize_image); i++) { PixelTrait traits = GetPixelChannelTraits(colorize_image, (PixelChannel) i); if (traits == UndefinedPixelTrait) continue; if ((traits & CopyPixelTrait) != 0) continue; SetPixelChannel(colorize_image,(PixelChannel) i,ClampToQuantum( Colorize(q[i],GetPixelInfoChannel(&blend_percentage,(PixelChannel) i), GetPixelInfoChannel(colorize,(PixelChannel) i))),q); } q+=GetPixelChannels(colorize_image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ColorizeImageTag,progress, colorize_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); if 
(status == MagickFalse) colorize_image=DestroyImage(colorize_image); return(colorize_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o l o r M a t r i x I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ColorMatrixImage() applies color transformation to an image. This method % permits saturation changes, hue rotation, luminance to alpha, and various % other effects. Although variable-sized transformation matrices can be used, % typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA % (or RGBA with offsets). The matrix is similar to those used by Adobe Flash % except offsets are in column 6 rather than 5 (in support of CMYKA images) % and offsets are normalized (divide Flash offset by 255). % % The format of the ColorMatrixImage method is: % % Image *ColorMatrixImage(const Image *image, % const KernelInfo *color_matrix,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o color_matrix: the color matrix. % % o exception: return any errors or warnings in this structure. 
% */

/*
  FUTURE: modify to make use of a MagickMatrix Multiply function that should
  be provided in "matrix.c" (ASIDE: actually distorts should do this too but
  currently doesn't).
*/
MagickExport Image *ColorMatrixImage(const Image *image,
  const KernelInfo *color_matrix,ExceptionInfo *exception)
{
#define ColorMatrixImageTag  "ColorMatrix/Image"

  CacheView
    *color_view,
    *image_view;

  /*
    Identity transform by default; entries the caller's kernel does not
    supply keep these values.
  */
  double
    ColorMatrix[6][6] =
    {
      { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }
    };

  Image
    *color_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    u,
    v,
    y;

  /*
    Map given color_matrix, into a 6x6 matrix RGBKA and a constant
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  i=0;
  for (v=0; v < (ssize_t) color_matrix->height; v++)
    for (u=0; u < (ssize_t) color_matrix->width; u++)
    {
      /* kernel values outside the 6x6 window are consumed but discarded */
      if ((v < 6) && (u < 6))
        ColorMatrix[v][u]=color_matrix->values[i];
      i++;
    }
  /*
    Initialize color image.
*/ color_image=CloneImage(image,0,0,MagickTrue,exception); if (color_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(color_image,DirectClass,exception) == MagickFalse) { color_image=DestroyImage(color_image); return((Image *) NULL); } if (image->debug != MagickFalse) { char format[MagickPathExtent], *message; (void) LogMagickEvent(TransformEvent,GetMagickModule(), " ColorMatrix image with color matrix:"); message=AcquireString(""); for (v=0; v < 6; v++) { *message='\0'; (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v); (void) ConcatenateString(&message,format); for (u=0; u < 6; u++) { (void) FormatLocaleString(format,MagickPathExtent,"%+f ", ColorMatrix[v][u]); (void) ConcatenateString(&message,format); } (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message); } message=DestroyString(message); } /* Apply the ColorMatrix to image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); color_view=AcquireAuthenticCacheView(color_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,color_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } GetPixelInfo(image,&pixel); for (x=0; x < (ssize_t) image->columns; x++) { ssize_t h; size_t height; GetPixelInfoPixel(image,p,&pixel); height=color_matrix->height > 6 ? 
6UL : color_matrix->height; for (h=0; h < (ssize_t) height; h++) { double sum; sum=ColorMatrix[h][0]*GetPixelRed(image,p)+ColorMatrix[h][1]* GetPixelGreen(image,p)+ColorMatrix[h][2]*GetPixelBlue(image,p); if (image->colorspace == CMYKColorspace) sum+=ColorMatrix[h][3]*GetPixelBlack(image,p); if (image->alpha_trait != UndefinedPixelTrait) sum+=ColorMatrix[h][4]*GetPixelAlpha(image,p); sum+=QuantumRange*ColorMatrix[h][5]; switch (h) { case 0: pixel.red=sum; break; case 1: pixel.green=sum; break; case 2: pixel.blue=sum; break; case 3: pixel.black=sum; break; case 4: pixel.alpha=sum; break; default: break; } } SetPixelViaPixelInfo(color_image,&pixel,q); p+=GetPixelChannels(image); q+=GetPixelChannels(color_image); } if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ColorMatrixImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } color_view=DestroyCacheView(color_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) color_image=DestroyImage(color_image); return(color_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I m p l o d e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ImplodeImage() creates a new image that is a copy of an existing % one with the image pixels "implode" by the specified percentage. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. 
%
%  The format of the ImplodeImage method is:
%
%      Image *ImplodeImage(const Image *image,const double amount,
%        const PixelInterpolateMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o implode_image: Method ImplodeImage returns a pointer to the image
%      after it is implode.  A null image is returned if there is a memory
%      shortage.
%
%    o image: the image.
%
%    o amount: Define the extent of the implosion.
%
%    o method: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ImplodeImage(const Image *image,const double amount,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ImplodeImageTag  "Implode/Image"

  CacheView
    *canvas_view,
    *implode_view,
    *interpolate_view;

  double
    radius;

  Image
    *canvas_image,
    *implode_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize implode image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Force an opaque alpha channel when the source has none but its
    background is not opaque, so edge interpolation blends consistently.
  */
  if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
      (canvas_image->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlphaChannel(canvas_image,OpaqueAlphaChannel,exception);
  implode_image=CloneImage(canvas_image,0,0,MagickTrue,exception);
  if (implode_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(implode_image,DirectClass,exception) == MagickFalse)
    {
      canvas_image=DestroyImage(canvas_image);
      implode_image=DestroyImage(implode_image);
      return((Image *) NULL);
    }
  /*
    Compute scaling factor.  The implosion acts within the largest ellipse
    inscribed in the image; scale maps it onto a circle of the given radius.
  */
  scale.x=1.0;
  scale.y=1.0;
  center.x=0.5*canvas_image->columns;
  center.y=0.5*canvas_image->rows;
  radius=center.x;
  if (canvas_image->columns > canvas_image->rows)
    scale.y=(double) canvas_image->columns*PerceptibleReciprocal((double)
      canvas_image->rows);
  else
    if (canvas_image->columns < canvas_image->rows)
      {
        scale.x=(double) canvas_image->rows*PerceptibleReciprocal((double)
          canvas_image->columns);
        radius=center.y;
      }
  /*
    Implode image.
  */
  status=MagickTrue;
  progress=0;
  canvas_view=AcquireVirtualCacheView(canvas_image,exception);
  interpolate_view=AcquireVirtualCacheView(canvas_image,exception);
  implode_view=AcquireAuthenticCacheView(implode_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas_image,implode_image,canvas_image->rows,1)
#endif
  for (y=0; y < (ssize_t) canvas_image->rows; y++)
  {
    double
      distance;

    PointInfo
      delta;

    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) canvas_image->columns; x++)
    {
      ssize_t
        i;

      /*
        Determine if the pixel is within an ellipse.  Pixels outside are
        copied through unchanged; pixels inside are resampled toward (or
        away from, for negative amount) the center.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        for (i=0; i < (ssize_t) GetPixelChannels(canvas_image); i++)
        {
          PixelChannel channel = GetPixelChannelChannel(canvas_image,i);
          PixelTrait traits = GetPixelChannelTraits(canvas_image,channel);
          PixelTrait implode_traits = GetPixelChannelTraits(implode_image,
            channel);
          if ((traits == UndefinedPixelTrait) ||
              (implode_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(implode_image,channel,p[i],q);
        }
      else
        {
          double
            factor;

          /*
            Implode the pixel: the displacement factor grows toward the
            center following a sine profile raised to -amount.
          */
          factor=1.0;
          if (distance > 0.0)
            factor=pow(sin(MagickPI*sqrt((double)
              distance)*PerceptibleReciprocal(radius)/2),-amount);
          status=InterpolatePixelChannels(canvas_image,interpolate_view,
            implode_image,method,(double) (factor*delta.x*
            PerceptibleReciprocal(scale.x)+center.x),(double) (factor*delta.y*
            PerceptibleReciprocal(scale.y)+center.y),q,exception);
          if (status == MagickFalse)
            break;
        }
      p+=GetPixelChannels(canvas_image);
      q+=GetPixelChannels(implode_image);
    }
    if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
      status=MagickFalse;
    if (canvas_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(canvas_image,ImplodeImageTag,progress,
          canvas_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  implode_view=DestroyCacheView(implode_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas_image=DestroyImage(canvas_image);
  if (status == MagickFalse)
    implode_image=DestroyImage(implode_image);
  return(implode_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o r p h   I m a g e s                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  The MorphImages() method requires a minimum of two images.
The first
%  image is transformed into the second by a number of intervening images
%  as specified by frames.
%
%  The format of the MorphImage method is:
%
%      Image *MorphImages(const Image *image,const size_t number_frames,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o number_frames:  Define the number of in-between image to generate.
%      The more in-between frames, the smoother the morph.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphImages(const Image *image,const size_t number_frames,
  ExceptionInfo *exception)
{
#define MorphImageTag  "Morph/Image"

  double
    alpha,
    beta;

  Image
    *morph_image,
    *morph_images;

  MagickBooleanType
    status;

  MagickOffsetType
    scene;

  const Image
    *next;

  ssize_t
    n;

  ssize_t
    y;

  /*
    Clone first frame in sequence.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  morph_images=CloneImage(image,0,0,MagickTrue,exception);
  if (morph_images == (Image *) NULL)
    return((Image *) NULL);
  if (GetNextImageInList(image) == (Image *) NULL)
    {
      /*
        Morph single image: simply replicate the lone frame number_frames
        times.
      */
      for (n=1; n < (ssize_t) number_frames; n++)
      {
        morph_image=CloneImage(image,0,0,MagickTrue,exception);
        if (morph_image == (Image *) NULL)
          {
            morph_images=DestroyImageList(morph_images);
            return((Image *) NULL);
          }
        AppendImageToList(&morph_images,morph_image);
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            /*
              NOTE(review): status is written here before it is first
              initialized (that happens below, after this branch), and the
              value is never read on this path -- the assignment has no
              effect; confirm intent.
            */
            proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) n,
              number_frames);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      return(GetFirstImageInList(morph_images));
    }
  /*
    Morph image sequence.
  */
  status=MagickTrue;
  scene=0;
  next=image;
  for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next))
  {
    for (n=0; n < (ssize_t) number_frames; n++)
    {
      CacheView
        *image_view,
        *morph_view;

      /*
        Frame n blends the current frame (weight alpha) with the next
        frame (weight beta); both are resized to the interpolated geometry.
      */
      beta=(double) (n+1.0)/(double) (number_frames+1.0);
      alpha=1.0-beta;
      morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta*
        GetNextImageInList(next)->columns+0.5),(size_t) (alpha*next->rows+beta*
        GetNextImageInList(next)->rows+0.5),next->filter,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      status=SetImageStorageClass(morph_image,DirectClass,exception);
      if (status == MagickFalse)
        {
          /*
            NOTE(review): this failure path destroys only morph_image;
            morph_images is leaked.  It should likely also call
            DestroyImageList(morph_images) -- confirm and fix upstream.
          */
          morph_image=DestroyImage(morph_image);
          return((Image *) NULL);
        }
      AppendImageToList(&morph_images,morph_image);
      morph_images=GetLastImageInList(morph_images);
      morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns,
        morph_images->rows,GetNextImageInList(next)->filter,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      image_view=AcquireVirtualCacheView(morph_image,exception);
      morph_view=AcquireAuthenticCacheView(morph_images,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(morph_image,morph_image,morph_image->rows,1)
#endif
      for (y=0; y < (ssize_t) morph_images->rows; y++)
      {
        MagickBooleanType
          sync;

        const Quantum
          *magick_restrict p;

        ssize_t
          x;

        Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1,
          exception);
        q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1,
          exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) morph_images->columns; x++)
        {
          ssize_t
            i;

          /*
            Blend each channel of the next-frame pixel (p) into the
            already-written current-frame pixel (q).
          */
          for (i=0; i < (ssize_t) GetPixelChannels(morph_image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(morph_image,i);
            PixelTrait traits = GetPixelChannelTraits(morph_image,channel);
            PixelTrait morph_traits=GetPixelChannelTraits(morph_images,channel);
            if ((traits == UndefinedPixelTrait) ||
                (morph_traits == UndefinedPixelTrait))
              continue;
            if ((morph_traits & CopyPixelTrait) != 0)
              {
                SetPixelChannel(morph_image,channel,p[i],q);
                continue;
              }
            SetPixelChannel(morph_image,channel,ClampToQuantum(alpha*
              GetPixelChannel(morph_images,channel,q)+beta*p[i]),q);
          }
          p+=GetPixelChannels(morph_image);
          q+=GetPixelChannels(morph_images);
        }
        sync=SyncCacheViewAuthenticPixels(morph_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      morph_view=DestroyCacheView(morph_view);
      image_view=DestroyCacheView(image_view);
      morph_image=DestroyImage(morph_image);
    }
    if (n < (ssize_t) number_frames)
      break;
    /*
      Clone last frame in sequence.
    */
    morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception);
    if (morph_image == (Image *) NULL)
      {
        morph_images=DestroyImageList(morph_images);
        return((Image *) NULL);
      }
    AppendImageToList(&morph_images,morph_image);
    morph_images=GetLastImageInList(morph_images);
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,MorphImageTag,scene,
          GetImageListLength(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
    scene++;
  }
  /* A remaining frame here means the loop aborted early. */
  if (GetNextImageInList(next) != (Image *) NULL)
    {
      morph_images=DestroyImageList(morph_images);
      return((Image *) NULL);
    }
  return(GetFirstImageInList(morph_images));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     P l a s m a I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PlasmaImage() initializes an image with plasma fractal values.  The image
%  must be initialized with a base color and the random number generator
%  seeded before this method is called.
%
%  The format of the PlasmaImage method is:
%
%      MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment,
%        size_t attenuate,size_t depth,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o segment:   Define the region to apply plasma fractals values.
%
%    o attenuate: Define the plasma attenuation factor.
%
%    o depth: Limit the plasma recursion depth.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Return the given pixel value perturbed by uniform noise in
  [-noise/2, +noise/2), clamped to the quantum range.
*/
static inline Quantum PlasmaPixel(RandomInfo *magick_restrict random_info,
  const double pixel,const double noise)
{
  MagickRealType
    plasma;

  plasma=pixel+noise*GetPseudoRandomValue(random_info)-noise/2.0;
  return(ClampToQuantum(plasma));
}

/*
  Recursive worker for PlasmaImage(): subdivide the segment into quadrants
  until depth is exhausted, then write noise-perturbed midpoint pixels
  (left, right, bottom, top, middle) averaged from the segment corners.
*/
static MagickBooleanType PlasmaImageProxy(Image *image,CacheView *image_view,
  CacheView *u_view,CacheView *v_view,RandomInfo *magick_restrict random_info,
  const SegmentInfo *magick_restrict segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  double
    plasma;

  MagickStatusType
    status;

  const Quantum
    *magick_restrict u,
    *magick_restrict v;

  Quantum
    *magick_restrict q;

  ssize_t
    i;

  ssize_t
    x,
    x_mid,
    y,
    y_mid;

  /* Degenerate (point) segment: nothing to do. */
  if ((fabs(segment->x2-segment->x1) < MagickEpsilon) &&
      (fabs(segment->y2-segment->y1) < MagickEpsilon))
    return(MagickTrue);
  if (depth != 0)
    {
      SegmentInfo
        local_info;

      /*
        Divide the area into quadrants and recurse.  Attenuation grows with
        each level so deeper noise contributions shrink.
      */
      depth--;
      attenuate++;
      x_mid=CastDoubleToLong(ceil((segment->x1+segment->x2)/2-0.5));
      y_mid=CastDoubleToLong(ceil((segment->y1+segment->y2)/2-0.5));
      local_info=(*segment);
      local_info.x2=(double) x_mid;
      local_info.y2=(double) y_mid;
      status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      local_info=(*segment);
      local_info.y1=(double) y_mid;
      local_info.x2=(double) x_mid;
      status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y2=(double) y_mid;
      status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y1=(double) y_mid;
      status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      return(status == 0 ? MagickFalse : MagickTrue);
    }
  x_mid=CastDoubleToLong(ceil((segment->x1+segment->x2)/2-0.5));
  y_mid=CastDoubleToLong(ceil((segment->y1+segment->y2)/2-0.5));
  if ((fabs(segment->x1-x_mid) < MagickEpsilon) &&
      (fabs(segment->x2-x_mid) < MagickEpsilon) &&
      (fabs(segment->y1-y_mid) < MagickEpsilon) &&
      (fabs(segment->y2-y_mid) < MagickEpsilon))
    return(MagickFalse);
  /*
    Average pixels and apply plasma.  Noise amplitude halves with each
    attenuation step.
  */
  status=MagickTrue;
  plasma=(double) QuantumRange/(2.0*attenuate);
  if ((fabs(segment->x1-x_mid) >= MagickEpsilon) ||
      (fabs(segment->x2-x_mid) >= MagickEpsilon))
    {
      /*
        Left pixel.
        NOTE(review): on a NULL pixel fetch this branch returns MagickTrue
        while the right-pixel branch below returns MagickFalse -- the
        inconsistency looks unintentional; confirm which is correct.
      */
      x=CastDoubleToLong(ceil(segment->x1-0.5));
      u=GetCacheViewVirtualPixels(u_view,x,CastDoubleToLong(ceil(
        segment->y1-0.5)),1,1,exception);
      v=GetCacheViewVirtualPixels(v_view,x,CastDoubleToLong(ceil(
        segment->y2-0.5)),1,1,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
      }
      status=SyncCacheViewAuthenticPixels(image_view,exception);
      if (fabs(segment->x1-segment->x2) >= MagickEpsilon)
        {
          /*
            Right pixel.
          */
          x=CastDoubleToLong(ceil(segment->x2-0.5));
          u=GetCacheViewVirtualPixels(u_view,x,CastDoubleToLong(ceil(
            segment->y1-0.5)),1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,x,CastDoubleToLong(ceil(
            segment->y2-0.5)),1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickFalse);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
          }
          status=SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->y1-y_mid) >= MagickEpsilon) ||
      (fabs(segment->y2-y_mid) >= MagickEpsilon))
    {
      /*
        NOTE(review): the second operand below tests y2 against y_mid while
        the first tests x1 against x_mid; by symmetry with the block above
        one would expect x2 vs x_mid here -- confirm intent before changing.
      */
      if ((fabs(segment->x1-x_mid) >= MagickEpsilon) ||
          (fabs(segment->y2-y_mid) >= MagickEpsilon))
        {
          /*
            Bottom pixel.
          */
          y=CastDoubleToLong(ceil(segment->y2-0.5));
          u=GetCacheViewVirtualPixels(u_view,CastDoubleToLong(ceil(
            segment->x1-0.5)),y,1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,CastDoubleToLong(ceil(
            segment->x2-0.5)),y,1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
          }
          status=SyncCacheViewAuthenticPixels(image_view,exception);
        }
      if (fabs(segment->y1-segment->y2) >= MagickEpsilon)
        {
          /*
            Top pixel.
          */
          y=CastDoubleToLong(ceil(segment->y1-0.5));
          u=GetCacheViewVirtualPixels(u_view,CastDoubleToLong(ceil(
            segment->x1-0.5)),y,1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,CastDoubleToLong(ceil(
            segment->x2-0.5)),y,1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
          }
          status=SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->x1-segment->x2) >= MagickEpsilon) ||
      (fabs(segment->y1-segment->y2) >= MagickEpsilon))
    {
      /*
        Middle pixel: averaged from the two opposite segment corners.
      */
      x=CastDoubleToLong(ceil(segment->x1-0.5));
      y=CastDoubleToLong(ceil(segment->y1-0.5));
      u=GetCacheViewVirtualPixels(u_view,x,y,1,1,exception);
      x=CastDoubleToLong(ceil(segment->x2-0.5));
      y=CastDoubleToLong(ceil(segment->y2-0.5));
      v=GetCacheViewVirtualPixels(v_view,x,y,1,1,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception);
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
      }
      status=SyncCacheViewAuthenticPixels(image_view,exception);
    }
  /* Segments below 3x3 are finished; larger ones signal more work. */
  if ((fabs(segment->x2-segment->x1) < 3.0) &&
      (fabs(segment->y2-segment->y1) < 3.0))
    return(status == 0 ? MagickFalse : MagickTrue);
  return(MagickFalse);
}

MagickExport MagickBooleanType PlasmaImage(Image *image,
  const SegmentInfo *segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *u_view,
    *v_view;

  MagickBooleanType
    status;

  RandomInfo
    *random_info;

  assert(image != (Image *) NULL);
  /*
    NOTE(review): the debug trace below is logged twice (before and after
    the signature assert); the duplication looks accidental.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  image_view=AcquireAuthenticCacheView(image,exception);
  u_view=AcquireVirtualCacheView(image,exception);
  v_view=AcquireVirtualCacheView(image,exception);
  random_info=AcquireRandomInfo();
  status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,segment,
    attenuate,depth,exception);
  random_info=DestroyRandomInfo(random_info);
  v_view=DestroyCacheView(v_view);
  u_view=DestroyCacheView(u_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     P o l a r o i d I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PolaroidImage() simulates a Polaroid picture.
%
%  The format of the PolaroidImage method is:
%
%      Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
%        const char *caption,const double angle,
%        const PixelInterpolateMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o caption: the Polaroid caption.
%
%    o angle: Apply the effect along this angle.
%
%    o method: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
  const char *caption,const double angle,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
  Image
    *bend_image,
    *caption_image,
    *flop_image,
    *picture_image,
    *polaroid_image,
    *rotate_image,
    *trim_image;

  size_t
    height;

  ssize_t
    quantum;

  /*
    Simulate a Polaroid picture.  The border width (quantum) scales with
    the larger image dimension, with a floor of 10 pixels.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
    image->rows)/25.0,10.0);
  height=image->rows+2*quantum;
  caption_image=(Image *) NULL;
  if (caption != (const char *) NULL)
    {
      char
        *text;

      /*
        Generate caption image.
      */
      caption_image=CloneImage(image,image->columns,1,MagickTrue,exception);
      if (caption_image == (Image *) NULL)
        return((Image *) NULL);
      text=InterpretImageProperties((ImageInfo *) NULL,(Image *) image,caption,
        exception);
      if (text != (char *) NULL)
        {
          char
            geometry[MagickPathExtent];

          DrawInfo
            *annotate_info;

          MagickBooleanType
            status;

          ssize_t
            count;

          TypeMetric
            metrics;

          annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info);
          (void) CloneString(&annotate_info->text,text);
          /* Word-wrap the caption and size the caption strip to fit. */
          count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,
            &metrics,&text,exception);
          status=SetImageExtent(caption_image,image->columns,(size_t)
            ((count+1)*(metrics.ascent-metrics.descent)+0.5),exception);
          if (status == MagickFalse)
            caption_image=DestroyImage(caption_image);
          else
            {
              caption_image->background_color=image->border_color;
              (void) SetImageBackgroundColor(caption_image,exception);
              (void) CloneString(&annotate_info->text,text);
              (void) FormatLocaleString(geometry,MagickPathExtent,"+0+%.20g",
                metrics.ascent);
              if (annotate_info->gravity == UndefinedGravity)
                (void) CloneString(&annotate_info->geometry,AcquireString(
                  geometry));
              (void) AnnotateImage(caption_image,annotate_info,exception);
              height+=caption_image->rows;
            }
          annotate_info=DestroyDrawInfo(annotate_info);
          text=DestroyString(text);
        }
    }
  /* Compose the picture: image plus border, plus caption strip if any. */
  picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue,
    exception);
  if (picture_image == (Image *) NULL)
    {
      if (caption_image != (Image *) NULL)
        caption_image=DestroyImage(caption_image);
      return((Image *) NULL);
    }
  picture_image->background_color=image->border_color;
  (void) SetImageBackgroundColor(picture_image,exception);
  (void) CompositeImage(picture_image,image,OverCompositeOp,MagickTrue,quantum,
    quantum,exception);
  if (caption_image != (Image *) NULL)
    {
      (void) CompositeImage(picture_image,caption_image,OverCompositeOp,
        MagickTrue,quantum,(ssize_t) (image->rows+3*quantum/2),exception);
      caption_image=DestroyImage(caption_image);
    }
  (void) QueryColorCompliance("none",AllCompliance,
    &picture_image->background_color,exception);
  (void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel,exception);
  /*
    Bend the picture: rotate 90 degrees, wave, rotate back -- WaveImage only
    distorts along the x axis, so the rotation pair makes the wave vertical.
  */
  rotate_image=RotateImage(picture_image,90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0*
    picture_image->columns,method,exception);
  picture_image=DestroyImage(picture_image);
  if (bend_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=bend_image;
  rotate_image=RotateImage(picture_image,-90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  picture_image->background_color=image->background_color;
  /* Cast a soft shadow behind the bent picture. */
  polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3,
    exception);
  if (polaroid_image == (Image *) NULL)
    {
      /* DestroyImage() returns NULL, so this returns a null image. */
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  flop_image=FlopImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (flop_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  polaroid_image=flop_image;
  (void) CompositeImage(polaroid_image,picture_image,OverCompositeOp,
    MagickTrue,(ssize_t) (-0.01*picture_image->columns/2.0),0L,exception);
  picture_image=DestroyImage(picture_image);
  (void) QueryColorCompliance("none",AllCompliance,
    &polaroid_image->background_color,exception);
  /* Tilt the finished polaroid and trim the surrounding transparency. */
  rotate_image=RotateImage(polaroid_image,angle,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=rotate_image;
  trim_image=TrimImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (trim_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=trim_image;
  return(polaroid_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e p i a T o n e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SepiaToneImage() applies a special effect to the image, similar to the
%  effect achieved in a photo darkroom by sepia toning.  Threshold ranges from
%  0 to QuantumRange and is a measure of the extent of the sepia toning.  A
%  threshold of 80% is a good starting point for a reasonable tone.
%
%  The format of the SepiaToneImage method is:
%
%      Image *SepiaToneImage(const Image *image,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: the tone threshold.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define SepiaToneImageTag  "SepiaTone/Image"

  CacheView
    *image_view,
    *sepia_view;

  Image
    *sepia_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize sepia-toned image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sepia_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sepia_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sepia_image,DirectClass,exception) == MagickFalse)
    {
      sepia_image=DestroyImage(sepia_image);
      return((Image *) NULL);
    }
  /*
    Tone each row of the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  sepia_view=AcquireAuthenticCacheView(sepia_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sepia_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity,
        tone;

      /*
        Derive R/G/B from the pixel intensity with per-channel thresholds
        (green and blue shifted by sixths of the threshold) to produce the
        brownish sepia cast.
      */
      intensity=GetPixelIntensity(image,p);
      tone=intensity > threshold ? (double) QuantumRange : intensity+
        (double) QuantumRange-threshold;
      SetPixelRed(sepia_image,ClampToQuantum(tone),q);
      tone=intensity > (7.0*threshold/6.0) ? (double) QuantumRange :
        intensity+(double) QuantumRange-7.0*threshold/6.0;
      SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
      tone=intensity < (threshold/6.0) ? 0 : intensity-threshold/6.0;
      SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
      /*
        Floor green and blue at threshold/7 by reading back the values just
        written to q (note the accessors pass `image`, whose channel map
        matches the clone).
      */
      tone=threshold/7.0;
      if ((double) GetPixelGreen(image,q) < tone)
        SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
      if ((double) GetPixelBlue(image,q) < tone)
        SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
      SetPixelAlpha(sepia_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(sepia_image);
    }
    if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SepiaToneImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sepia_view=DestroyCacheView(sepia_view);
  image_view=DestroyCacheView(image_view);
  (void) NormalizeImage(sepia_image,exception);
  (void) ContrastImage(sepia_image,MagickTrue,exception);
  if (status == MagickFalse)
    sepia_image=DestroyImage(sepia_image);
  return(sepia_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a d o w I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShadowImage() simulates a shadow from the specified image and returns it.
%
%  The format of the ShadowImage method is:
%
%      Image *ShadowImage(const Image *image,const double alpha,
%        const double sigma,const ssize_t x_offset,const ssize_t y_offset,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o alpha: percentage transparency.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o x_offset: the shadow x-offset.
%
%    o y_offset: the shadow y-offset.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadowImage(const Image *image,const double alpha,
  const double sigma,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define ShadowImageTag "Shadow/Image"

  CacheView
    *image_view;

  ChannelType
    channel_mask;

  Image
    *border_image,
    *clone_image,
    *shadow_image;

  MagickBooleanType
    status;

  PixelInfo
    background_color;

  RectangleInfo
    border_info;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Work on a clone so the caller's image is untouched; the clone is
    consumed by BorderImage() below.
  */
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(clone_image,sRGBColorspace,exception);
  (void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod,
    exception);
  /*
    Add a transparent border wide enough (~2*sigma) for the blur below to
    bleed into, so the shadow is not clipped at the image edge.
  */
  border_info.width=(size_t) floor(2.0*sigma+0.5);
  border_info.height=(size_t) floor(2.0*sigma+0.5);
  border_info.x=0;
  border_info.y=0;
  (void) QueryColorCompliance("none",AllCompliance,&clone_image->border_color,
    exception);
  clone_image->alpha_trait=BlendPixelTrait;
  border_image=BorderImage(clone_image,&border_info,OverCompositeOp,exception);
  clone_image=DestroyImage(clone_image);
  if (border_image == (Image *) NULL)
    return((Image *) NULL);
  if (border_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel,exception);
  /*
    Shadow image: flatten every pixel to the background color, keeping only
    the (scaled) alpha so the image's silhouette survives.
  */
  status=MagickTrue;
  background_color=border_image->background_color;
  background_color.alpha_trait=BlendPixelTrait;
  image_view=AcquireAuthenticCacheView(border_image,exception);
  for (y=0; y < (ssize_t) border_image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) border_image->columns; x++)
    {
      /* alpha is a percentage (0..100) applied to the pixel's own alpha. */
      if (border_image->alpha_trait != UndefinedPixelTrait)
        background_color.alpha=GetPixelAlpha(border_image,q)*alpha/100.0;
      SetPixelViaPixelInfo(border_image,&background_color,q);
      q+=GetPixelChannels(border_image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    {
      border_image=DestroyImage(border_image);
      return((Image *) NULL);
    }
  /*
    Blur only the alpha channel to soften the silhouette, then restore the
    previous channel mask on the result.
  */
  channel_mask=SetImageChannelMask(border_image,AlphaChannel);
  shadow_image=BlurImage(border_image,0.0,sigma,exception);
  border_image=DestroyImage(border_image);
  if (shadow_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetPixelChannelMask(shadow_image,channel_mask);
  if (shadow_image->page.width == 0)
    shadow_image->page.width=shadow_image->columns;
  if (shadow_image->page.height == 0)
    shadow_image->page.height=shadow_image->rows;
  /*
    Encode the requested offset (minus the border padding added above) in the
    page geometry so a later composite places the shadow correctly.
  */
  shadow_image->page.width+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.height+=y_offset-(ssize_t) border_info.height;
  shadow_image->page.x+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.y+=y_offset-(ssize_t) border_info.height;
  return(shadow_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S k e t c h I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SketchImage() simulates a pencil sketch.
%  We convolve the image with a
%  Gaussian operator of the given radius and standard deviation (sigma).  For
%  reasonable results, radius should be larger than sigma.  Use a radius of 0
%  and SketchImage() selects a suitable radius for you.  Angle gives the angle
%  of the sketch.
%
%  The format of the SketchImage method is:
%
%      Image *SketchImage(const Image *image,const double radius,
%        const double sigma,const double angle,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the
%      center pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o angle: apply the effect along this angle.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SketchImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
  CacheView
    *random_view;

  Image
    *blend_image,
    *blur_image,
    *dodge_image,
    *random_image,
    *sketch_image;

  MagickBooleanType
    status;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Sketch image: start from a 2x-sized canvas of per-pixel noise; the noise
    is later motion-blurred, edge-detected and composited back.
  */
  random_image=CloneImage(image,image->columns << 1,image->rows << 1,
    MagickTrue,exception);
  if (random_image == (Image *) NULL)
    return((Image *) NULL);
  status=MagickTrue;
  random_info=AcquireRandomInfoThreadSet();
  random_view=AcquireAuthenticCacheView(random_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    Only parallelize when no secret key is set (key == ~0UL); otherwise the
    row-by-row random sequence must stay reproducible.
  */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(random_image,random_image,random_image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) random_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) random_image->columns; x++)
    {
      double
        value;

      ssize_t
        i;

      /* One random value per pixel, written to every defined channel. */
      value=GetPseudoRandomValue(random_info[id]);
      for (i=0; i < (ssize_t) GetPixelChannels(random_image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=ClampToQuantum(QuantumRange*value);
      }
      q+=GetPixelChannels(random_image);
    }
    if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  random_view=DestroyCacheView(random_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    {
      /* DestroyImage() returns NULL, so this returns NULL to the caller. */
      random_image=DestroyImage(random_image);
      return(random_image);
    }
  /*
    Streak the noise along the sketch angle, extract edges, then normalize
    and invert to get pencil-like strokes; crop back to the source size.
  */
  blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception);
  random_image=DestroyImage(random_image);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  dodge_image=EdgeImage(blur_image,radius,exception);
  blur_image=DestroyImage(blur_image);
  if (dodge_image == (Image *) NULL)
    return((Image *) NULL);
  status=ClampImage(dodge_image,exception);
  if (status != MagickFalse)
    status=NormalizeImage(dodge_image,exception);
  if (status != MagickFalse)
    status=NegateImage(dodge_image,MagickFalse,exception);
  if (status != MagickFalse)
    status=TransformImage(&dodge_image,(char *) NULL,"50%",exception);
  sketch_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sketch_image == (Image *) NULL)
    {
      dodge_image=DestroyImage(dodge_image);
      return((Image *) NULL);
    }
  /* Color-dodge the strokes onto the original... */
  (void) CompositeImage(sketch_image,dodge_image,ColorDodgeCompositeOp,
    MagickTrue,0,0,exception);
  dodge_image=DestroyImage(dodge_image);
  /* ...then blend 20% of the original back in to restore some color. */
  blend_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blend_image == (Image *) NULL)
    {
      sketch_image=DestroyImage(sketch_image);
      return((Image *) NULL);
    }
  if (blend_image->alpha_trait != BlendPixelTrait)
    (void) SetImageAlpha(blend_image,TransparentAlpha,exception);
  (void) SetImageArtifact(blend_image,"compose:args","20x80");
  (void) CompositeImage(sketch_image,blend_image,BlendCompositeOp,MagickTrue,
    0,0,exception);
  blend_image=DestroyImage(blend_image);
  return(sketch_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S o l a r i z e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SolarizeImage() applies a special effect to the image, similar to the effect
%  achieved in a photo darkroom by selectively exposing areas of photo
%  sensitive paper to light.  Threshold ranges from 0 to QuantumRange and is a
%  measure of the extent of the solarization.
%
%  The format of the SolarizeImage method is:
%
%      MagickBooleanType SolarizeImage(Image *image,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: Define the extent of the solarization.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
  const double threshold,ExceptionInfo *exception)
{
#define SolarizeImageTag "Solarize/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  if (image->storage_class == PseudoClass)
    {
      ssize_t
        i;

      /*
        Solarize colormap: PseudoClass images only need their palette
        inverted above the threshold, not every pixel.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((double) image->colormap[i].red > threshold)
          image->colormap[i].red=QuantumRange-image->colormap[i].red;
        if ((double) image->colormap[i].green > threshold)
          image->colormap[i].green=QuantumRange-image->colormap[i].green;
        if ((double) image->colormap[i].blue > threshold)
          image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
      }
      return(SyncImage(image,exception));
    }
  /*
    Solarize image: invert, in place, every updatable channel value that
    exceeds the threshold.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if ((double) q[i] > threshold)
          q[i]=QuantumRange-q[i];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SolarizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S t e g a n o I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SteganoImage() hides a digital watermark within the image.  Recover
%  the hidden watermark later to prove that the authenticity of an image.
%  Offset defines the start position within the image to hide the watermark.
%
%  The format of the SteganoImage method is:
%
%      Image *SteganoImage(const Image *image,Image *watermark,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o watermark: the watermark image.
% % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SteganoImage(const Image *image,const Image *watermark, ExceptionInfo *exception) { #define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0) #define SetBit(alpha,i,set) (Quantum) ((set) != 0 ? (size_t) (alpha) \ | (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i))) #define SteganoImageTag "Stegano/Image" CacheView *stegano_view, *watermark_view; Image *stegano_image; int c; MagickBooleanType status; PixelInfo pixel; Quantum *q; ssize_t x; size_t depth, one; ssize_t i, j, k, y; /* Initialize steganographic image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(watermark != (const Image *) NULL); assert(watermark->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); one=1UL; stegano_image=CloneImage(image,0,0,MagickTrue,exception); if (stegano_image == (Image *) NULL) return((Image *) NULL); stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH; if (SetImageStorageClass(stegano_image,DirectClass,exception) == MagickFalse) { stegano_image=DestroyImage(stegano_image); return((Image *) NULL); } /* Hide watermark in low-order bits of image. 
*/ c=0; i=0; j=0; depth=stegano_image->depth; k=stegano_image->offset; status=MagickTrue; watermark_view=AcquireVirtualCacheView(watermark,exception); stegano_view=AcquireAuthenticCacheView(stegano_image,exception); for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--) { for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++) { for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++) { ssize_t offset; (void) GetOneCacheViewVirtualPixelInfo(watermark_view,x,y,&pixel, exception); offset=k/(ssize_t) stegano_image->columns; if (offset >= (ssize_t) stegano_image->rows) break; q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t) stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1, exception); if (q == (Quantum *) NULL) break; switch (c) { case 0: { SetPixelRed(stegano_image,SetBit(GetPixelRed(stegano_image,q),j, GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q); break; } case 1: { SetPixelGreen(stegano_image,SetBit(GetPixelGreen(stegano_image,q),j, GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q); break; } case 2: { SetPixelBlue(stegano_image,SetBit(GetPixelBlue(stegano_image,q),j, GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q); break; } } if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse) break; c++; if (c == 3) c=0; k++; if (k == (ssize_t) (stegano_image->columns*stegano_image->columns)) k=0; if (k == stegano_image->offset) j++; } } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType) (depth-i),depth); if (proceed == MagickFalse) status=MagickFalse; } } stegano_view=DestroyCacheView(stegano_view); watermark_view=DestroyCacheView(watermark_view); if (status == MagickFalse) stegano_image=DestroyImage(stegano_image); return(stegano_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S t e r e o A n a g l y 
p h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % StereoAnaglyphImage() combines two images and produces a single image that % is the composite of a left and right image of a stereo pair. Special % red-green stereo glasses are required to view this effect. % % The format of the StereoAnaglyphImage method is: % % Image *StereoImage(const Image *left_image,const Image *right_image, % ExceptionInfo *exception) % Image *StereoAnaglyphImage(const Image *left_image, % const Image *right_image,const ssize_t x_offset,const ssize_t y_offset, % ExceptionInfo *exception) % % A description of each parameter follows: % % o left_image: the left image. % % o right_image: the right image. % % o exception: return any errors or warnings in this structure. % % o x_offset: amount, in pixels, by which the left image is offset to the % right of the right image. % % o y_offset: amount, in pixels, by which the left image is offset to the % bottom of the right image. 
%
%
*/
MagickExport Image *StereoImage(const Image *left_image,
  const Image *right_image,ExceptionInfo *exception)
{
  /* Convenience wrapper: anaglyph with no left-image offset. */
  return(StereoAnaglyphImage(left_image,right_image,0,0,exception));
}

MagickExport Image *StereoAnaglyphImage(const Image *left_image,
  const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define StereoImageTag "Stereo/Image"

  const Image
    *image;

  Image
    *stereo_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(left_image != (const Image *) NULL);
  assert(left_image->signature == MagickCoreSignature);
  if (left_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      left_image->filename);
  assert(right_image != (const Image *) NULL);
  assert(right_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=left_image;
  if ((left_image->columns != right_image->columns) ||
      (left_image->rows != right_image->rows))
    ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer");
  /*
    Initialize stereo image attributes.
  */
  stereo_image=CloneImage(left_image,left_image->columns,left_image->rows,
    MagickTrue,exception);
  if (stereo_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(stereo_image,DirectClass,exception) == MagickFalse)
    {
      stereo_image=DestroyImage(stereo_image);
      return((Image *) NULL);
    }
  (void) SetImageColorspace(stereo_image,sRGBColorspace,exception);
  /*
    Copy left image to red channel and right image to blue channel.
  */
  status=MagickTrue;
  for (y=0; y < (ssize_t) stereo_image->rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    Quantum
      *magick_restrict r;

    p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1,
      exception);
    q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception);
    r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL) ||
        (r == (Quantum *) NULL))
      {
        /*
          Fix: record the failure so the partially-written result is
          destroyed below and NULL is returned with the exception set,
          matching the error convention of the other effects in this file
          (previously the loop broke with status still MagickTrue).
        */
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) stereo_image->columns; x++)
    {
      SetPixelRed(stereo_image,GetPixelRed(left_image,p),r);
      SetPixelGreen(stereo_image,GetPixelGreen(right_image,q),r);
      SetPixelBlue(stereo_image,GetPixelBlue(right_image,q),r);
      if ((GetPixelAlphaTraits(stereo_image) & CopyPixelTrait) != 0)
        SetPixelAlpha(stereo_image,(GetPixelAlpha(left_image,p)+
          GetPixelAlpha(right_image,q))/2,r);
      p+=GetPixelChannels(left_image);
      q+=GetPixelChannels(right_image);
      r+=GetPixelChannels(stereo_image);
    }
    if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse)
      {
        /* Fix: propagate sync failures too (see note above). */
        status=MagickFalse;
        break;
      }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y,
          stereo_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  if (status == MagickFalse)
    stereo_image=DestroyImage(stereo_image);
  return(stereo_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S w i r l I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SwirlImage() swirls the pixels about the center of the image, where
%  degrees indicates the sweep of the arc through which each pixel is moved.
%  You get a more dramatic effect as the degrees move from 1 to 360.
%
%  The format of the SwirlImage method is:
%
%      Image *SwirlImage(const Image *image,double degrees,
%        const PixelInterpolateMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o degrees: Define the tightness of the swirling effect.
%
%    o method: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SwirlImage(const Image *image,double degrees,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define SwirlImageTag "Swirl/Image"

  CacheView
    *canvas_view,
    *interpolate_view,
    *swirl_view;

  double
    radius;

  Image
    *canvas_image,
    *swirl_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize swirl image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  swirl_image=CloneImage(canvas_image,0,0,MagickTrue,exception);
  if (swirl_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(swirl_image,DirectClass,exception) == MagickFalse)
    {
      canvas_image=DestroyImage(canvas_image);
      swirl_image=DestroyImage(swirl_image);
      return((Image *) NULL);
    }
  if (swirl_image->background_color.alpha_trait != UndefinedPixelTrait)
    (void) SetImageAlphaChannel(swirl_image,OnAlphaChannel,exception);
  /*
    Compute scaling factor: normalize the longer axis so the swirl region is
    an ellipse inscribed in the image.
  */
  center.x=(double) canvas_image->columns/2.0;
  center.y=(double) canvas_image->rows/2.0;
  radius=MagickMax(center.x,center.y);
  scale.x=1.0;
  scale.y=1.0;
  if (canvas_image->columns > canvas_image->rows)
    scale.y=(double) canvas_image->columns/(double) canvas_image->rows;
  else
    if (canvas_image->columns < canvas_image->rows)
      scale.x=(double) canvas_image->rows/(double) canvas_image->columns;
  degrees=(double) DegreesToRadians(degrees);
  /*
    Swirl image.
  */
  status=MagickTrue;
  progress=0;
  canvas_view=AcquireVirtualCacheView(canvas_image,exception);
  interpolate_view=AcquireVirtualCacheView(image,exception);
  swirl_view=AcquireAuthenticCacheView(swirl_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas_image,swirl_image,canvas_image->rows,1)
#endif
  for (y=0; y < (ssize_t) canvas_image->rows; y++)
  {
    double
      distance;

    PointInfo
      delta;

    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) canvas_image->columns; x++)
    {
      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        {
          ssize_t
            i;

          /* Outside the ellipse: copy the pixel through unchanged. */
          for (i=0; i < (ssize_t) GetPixelChannels(canvas_image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(canvas_image,i);
            PixelTrait traits = GetPixelChannelTraits(canvas_image,channel);
            PixelTrait swirl_traits = GetPixelChannelTraits(swirl_image,
              channel);
            if ((traits == UndefinedPixelTrait) ||
                (swirl_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(swirl_image,channel,p[i],q);
          }
        }
      else
        {
          double
            cosine,
            factor,
            sine;

          /*
            Swirl the pixel: rotation falls off quadratically with distance
            from the center (factor in [0,1]).
          */
          factor=1.0-sqrt((double) distance)/radius;
          sine=sin((double) (degrees*factor*factor));
          cosine=cos((double) (degrees*factor*factor));
          status=InterpolatePixelChannels(canvas_image,interpolate_view,
            swirl_image,method,((cosine*delta.x-sine*delta.y)/scale.x+center.x),
            (double) ((sine*delta.x+cosine*delta.y)/scale.y+center.y),q,
            exception);
          if (status == MagickFalse)
            break;
        }
      p+=GetPixelChannels(canvas_image);
      q+=GetPixelChannels(swirl_image);
    }
    if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse)
      status=MagickFalse;
    if (canvas_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(canvas_image,SwirlImageTag,progress,
          canvas_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  swirl_view=DestroyCacheView(swirl_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas_image=DestroyImage(canvas_image);
  if (status == MagickFalse)
    swirl_image=DestroyImage(swirl_image);
  return(swirl_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T i n t I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TintImage() applies a color vector to each pixel in the image.
%  The length
%  of the vector is 0 for black and white and at its maximum for the midtones.
%  The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5))))
%
%  The format of the TintImage method is:
%
%      Image *TintImage(const Image *image,const char *blend,
%        const PixelInfo *tint,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o blend: A color value used for tinting.
%
%    o tint: A color value used for tinting.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TintImage(const Image *image,const char *blend,
  const PixelInfo *tint,ExceptionInfo *exception)
{
#define TintImageTag "Tint/Image"

  CacheView
    *image_view,
    *tint_view;

  double
    intensity;

  GeometryInfo
    geometry_info;

  Image
    *tint_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    color_vector;

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Allocate tint image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  tint_image=CloneImage(image,0,0,MagickTrue,exception);
  if (tint_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(tint_image,DirectClass,exception) == MagickFalse)
    {
      tint_image=DestroyImage(tint_image);
      return((Image *) NULL);
    }
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsPixelInfoGray(tint) == MagickFalse))
    (void) SetImageColorspace(tint_image,sRGBColorspace,exception);
  /* Without blend percentages there is nothing to do: return the clone. */
  if (blend == (const char *) NULL)
    return(tint_image);
  /*
    Determine RGB values of the color: blend is a geometry string of
    per-channel percentages (rho/sigma/xi/psi[/chi]).
  */
  GetPixelInfo(image,&color_vector);
  flags=ParseGeometry(blend,&geometry_info);
  color_vector.red=geometry_info.rho;
  color_vector.green=geometry_info.rho;
  color_vector.blue=geometry_info.rho;
  color_vector.alpha=(MagickRealType) OpaqueAlpha;
  if ((flags & SigmaValue) != 0)
    color_vector.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    color_vector.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    color_vector.alpha=geometry_info.psi;
  if (image->colorspace == CMYKColorspace)
    {
      color_vector.black=geometry_info.rho;
      if ((flags & PsiValue) != 0)
        color_vector.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        color_vector.alpha=geometry_info.chi;
    }
  /* NULL image: intensity is computed from the tint color alone. */
  intensity=(double) GetPixelInfoIntensity((const Image *) NULL,tint);
  color_vector.red=(double) (color_vector.red*tint->red/100.0-intensity);
  color_vector.green=(double) (color_vector.green*tint->green/100.0-intensity);
  color_vector.blue=(double) (color_vector.blue*tint->blue/100.0-intensity);
  color_vector.black=(double) (color_vector.black*tint->black/100.0-intensity);
  color_vector.alpha=(double) (color_vector.alpha*tint->alpha/100.0-intensity);
  /*
    Tint image: each channel gets the color vector weighted by the midtone
    function f(x)=1-4*(x-0.5)^2, so shadows and highlights are untouched.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  tint_view=AcquireAuthenticCacheView(tint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,tint_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelInfo
        pixel;

      double
        weight;

      GetPixelInfo(image,&pixel);
      weight=QuantumScale*GetPixelRed(image,p)-0.5;
      pixel.red=(MagickRealType) GetPixelRed(image,p)+color_vector.red*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelGreen(image,p)-0.5;
      pixel.green=(MagickRealType) GetPixelGreen(image,p)+color_vector.green*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelBlue(image,p)-0.5;
      pixel.blue=(MagickRealType) GetPixelBlue(image,p)+color_vector.blue*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelBlack(image,p)-0.5;
      pixel.black=(MagickRealType) GetPixelBlack(image,p)+color_vector.black*
        (1.0-(4.0*(weight*weight)));
      pixel.alpha=(MagickRealType) GetPixelAlpha(image,p);
      SetPixelViaPixelInfo(tint_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(tint_image);
    }
    if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TintImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  tint_view=DestroyCacheView(tint_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    tint_image=DestroyImage(tint_image);
  return(tint_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     V i g n e t t e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  VignetteImage() softens the edges of the image in vignette style.
%
%  The format of the VignetteImage method is:
%
%      Image *VignetteImage(const Image *image,const double radius,
%        const double sigma,const ssize_t x,const ssize_t y,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o x, y: Define the x and y ellipse offset.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *VignetteImage(const Image *image,const double radius,
  const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
  char
    ellipse[MagickPathExtent];

  DrawInfo
    *draw_info;

  Image
    *canvas,
    *blur_image,
    *oval_image,
    *vignette_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(canvas,DirectClass,exception) == MagickFalse)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  canvas->alpha_trait=BlendPixelTrait;
  /*
    Build the vignette mask: a white ellipse (inset by x,y) on a black
    background, then blur it to feather the edge.
  */
  oval_image=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue,
    exception);
  if (oval_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  (void) QueryColorCompliance("#000000",AllCompliance,
    &oval_image->background_color,exception);
  (void) SetImageBackgroundColor(oval_image,exception);
  draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->stroke,
    exception);
  (void) FormatLocaleString(ellipse,MagickPathExtent,"ellipse %g,%g,%g,%g,"
    "0.0,360.0",image->columns/2.0,image->rows/2.0,image->columns/2.0-x,
    image->rows/2.0-y);
  draw_info->primitive=AcquireString(ellipse);
  (void) DrawImage(oval_image,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  blur_image=BlurImage(oval_image,radius,sigma,exception);
  oval_image=DestroyImage(oval_image);
  if (blur_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  /* Use the blurred mask's intensity as the canvas alpha, then flatten. */
  blur_image->alpha_trait=UndefinedPixelTrait;
  (void) CompositeImage(canvas,blur_image,IntensityCompositeOp,MagickTrue,
    0,0,exception);
  blur_image=DestroyImage(blur_image);
  vignette_image=MergeImageLayers(canvas,FlattenLayer,exception);
  canvas=DestroyImage(canvas);
  if (vignette_image != (Image *) NULL)
    (void) TransformImageColorspace(vignette_image,image->colorspace,exception);
  return(vignette_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     W a v e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WaveImage() creates a "ripple" effect in the image by shifting the pixels
%  vertically along a sine wave whose amplitude and wavelength is specified
%  by the given parameters.
%
%  The format of the WaveImage method is:
%
%      Image *WaveImage(const Image *image,const double amplitude,
%        const double wave_length,const PixelInterpolateMethod method,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o amplitude, wave_length: Define the amplitude and wave length of the
%      sine wave.
%
%    o interpolate: the pixel interpolation method.
% % o exception: return any errors or warnings in this structure. % */ MagickExport Image *WaveImage(const Image *image,const double amplitude, const double wave_length,const PixelInterpolateMethod method, ExceptionInfo *exception) { #define WaveImageTag "Wave/Image" CacheView *canvas_image_view, *wave_view; float *sine_map; Image *canvas_image, *wave_image; MagickBooleanType status; MagickOffsetType progress; ssize_t i; ssize_t y; /* Initialize wave image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); canvas_image=CloneImage(image,0,0,MagickTrue,exception); if (canvas_image == (Image *) NULL) return((Image *) NULL); if ((canvas_image->alpha_trait == UndefinedPixelTrait) && (canvas_image->background_color.alpha != OpaqueAlpha)) (void) SetImageAlpha(canvas_image,OpaqueAlpha,exception); wave_image=CloneImage(canvas_image,canvas_image->columns,(size_t) (canvas_image->rows+2.0*fabs(amplitude)),MagickTrue,exception); if (wave_image == (Image *) NULL) { canvas_image=DestroyImage(canvas_image); return((Image *) NULL); } if (SetImageStorageClass(wave_image,DirectClass,exception) == MagickFalse) { canvas_image=DestroyImage(canvas_image); wave_image=DestroyImage(wave_image); return((Image *) NULL); } /* Allocate sine map. */ sine_map=(float *) AcquireQuantumMemory((size_t) wave_image->columns, sizeof(*sine_map)); if (sine_map == (float *) NULL) { canvas_image=DestroyImage(canvas_image); wave_image=DestroyImage(wave_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (i=0; i < (ssize_t) wave_image->columns; i++) sine_map[i]=(float) fabs(amplitude)+amplitude*sin((double) ((2.0*MagickPI*i)*PerceptibleReciprocal(wave_length))); /* Wave image. 
*/ status=MagickTrue; progress=0; canvas_image_view=AcquireVirtualCacheView(canvas_image,exception); wave_view=AcquireAuthenticCacheView(wave_image,exception); (void) SetCacheViewVirtualPixelMethod(canvas_image_view, BackgroundVirtualPixelMethod); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(canvas_image,wave_image,wave_image->rows,1) #endif for (y=0; y < (ssize_t) wave_image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(canvas_image_view,0,y,canvas_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) wave_image->columns; x++) { status=InterpolatePixelChannels(canvas_image,canvas_image_view, wave_image,method,(double) x,(double) (y-sine_map[x]),q,exception); if (status == MagickFalse) break; p+=GetPixelChannels(canvas_image); q+=GetPixelChannels(wave_image); } if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(canvas_image,WaveImageTag,progress, canvas_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } wave_view=DestroyCacheView(wave_view); canvas_image_view=DestroyCacheView(canvas_image_view); canvas_image=DestroyImage(canvas_image); sine_map=(float *) RelinquishMagickMemory(sine_map); if (status == MagickFalse) wave_image=DestroyImage(wave_image); return(wave_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W a v e l e t D e n o i s e I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WaveletDenoiseImage() removes noise from the image using a wavelet % transform. The wavelet transform is a fast hierarchical scheme for % processing an image using a set of consecutive lowpass and high_pass filters, % followed by a decimation. This results in a decomposition into different % scales which can be regarded as different “frequency bands”, determined by % the mother wavelet. Adapted from dcraw.c by David Coffin. % % The format of the WaveletDenoiseImage method is: % % Image *WaveletDenoiseImage(const Image *image,const double threshold, % const double softness,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: set the threshold for smoothing. % % o softness: attenuate the smoothing threshold. % % o exception: return any errors or warnings in this structure. % */ static inline void HatTransform(const float *magick_restrict pixels, const size_t stride,const size_t extent,const size_t scale,float *kernel) { const float *magick_restrict p, *magick_restrict q, *magick_restrict r; ssize_t i; p=pixels; q=pixels+scale*stride; r=pixels+scale*stride; for (i=0; i < (ssize_t) scale; i++) { kernel[i]=0.25f*(*p+(*p)+(*q)+(*r)); p+=stride; q-=stride; r+=stride; } for ( ; i < (ssize_t) (extent-scale); i++) { kernel[i]=0.25f*(2.0f*(*p)+*(p-scale*stride)+*(p+scale*stride)); p+=stride; } q=p-scale*stride; r=pixels+stride*(extent-2); for ( ; i < (ssize_t) extent; i++) { kernel[i]=0.25f*(*p+(*p)+(*q)+(*r)); p+=stride; q+=stride; r-=stride; } } MagickExport Image *WaveletDenoiseImage(const Image *image, const double threshold,const double softness,ExceptionInfo *exception) { CacheView *image_view, *noise_view; float *kernel, *pixels; Image *noise_image; MagickBooleanType status; MagickSizeType number_pixels; MemoryInfo *pixels_info; ssize_t channel; static const float noise_levels[] = { 0.8002f, 0.2735f, 0.1202f, 0.0585f, 0.0291f, 
0.0152f, 0.0080f, 0.0044f }; /* Initialize noise image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) noise_image=AccelerateWaveletDenoiseImage(image,threshold,exception); if (noise_image != (Image *) NULL) return(noise_image); #endif noise_image=CloneImage(image,0,0,MagickTrue,exception); if (noise_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse) { noise_image=DestroyImage(noise_image); return((Image *) NULL); } if (AcquireMagickResource(WidthResource,4*image->columns) == MagickFalse) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); pixels_info=AcquireVirtualMemory(3*image->columns,image->rows* sizeof(*pixels)); kernel=(float *) AcquireQuantumMemory(MagickMax(image->rows,image->columns)+1, GetOpenMPMaximumThreads()*sizeof(*kernel)); if ((pixels_info == (MemoryInfo *) NULL) || (kernel == (float *) NULL)) { if (kernel != (float *) NULL) kernel=(float *) RelinquishMagickMemory(kernel); if (pixels_info != (MemoryInfo *) NULL) pixels_info=RelinquishVirtualMemory(pixels_info); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } pixels=(float *) GetVirtualMemoryBlob(pixels_info); status=MagickTrue; number_pixels=(MagickSizeType) image->columns*image->rows; image_view=AcquireAuthenticCacheView(image,exception); noise_view=AcquireAuthenticCacheView(noise_image,exception); for (channel=0; channel < (ssize_t) GetPixelChannels(image); channel++) { ssize_t i; size_t high_pass, low_pass; ssize_t level, y; PixelChannel pixel_channel; PixelTrait traits; if (status == MagickFalse) continue; traits=GetPixelChannelTraits(image,(PixelChannel) channel); if (traits == 
UndefinedPixelTrait) continue; pixel_channel=GetPixelChannelChannel(image,channel); if ((pixel_channel != RedPixelChannel) && (pixel_channel != GreenPixelChannel) && (pixel_channel != BluePixelChannel)) continue; /* Copy channel from image to wavelet pixel array. */ i=0; for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; p=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) image->columns; x++) { pixels[i++]=(float) p[channel]; p+=GetPixelChannels(image); } } /* Low pass filter outputs are called approximation kernel & high pass filters are referred to as detail kernel. The detail kernel have high values in the noisy parts of the signal. */ high_pass=0; for (level=0; level < 5; level++) { double magnitude; ssize_t x; low_pass=(size_t) (number_pixels*((level & 0x01)+1)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,1) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); float *magick_restrict p, *magick_restrict q; ssize_t c; p=kernel+id*image->columns; q=pixels+y*image->columns; HatTransform(q+high_pass,1,image->columns,((size_t) 1UL << level),p); q+=low_pass; for (c=0; c < (ssize_t) image->columns; c++) *q++=(*p++); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,1) \ magick_number_threads(image,image,image->columns,1) #endif for (x=0; x < (ssize_t) image->columns; x++) { const int id = GetOpenMPThreadId(); float *magick_restrict p, *magick_restrict q; ssize_t r; p=kernel+id*image->rows; q=pixels+x+low_pass; HatTransform(q,image->columns,image->rows,((size_t) 1UL << level),p); for (r=0; r < (ssize_t) image->rows; r++) { *q=(*p++); q+=image->columns; } } /* To threshold, each coefficient is compared to a threshold value and attenuated / shrunk by some factor. 
*/ magnitude=threshold*noise_levels[level]; for (i=0; i < (ssize_t) number_pixels; ++i) { pixels[high_pass+i]-=pixels[low_pass+i]; if (pixels[high_pass+i] < -magnitude) pixels[high_pass+i]+=magnitude-softness*magnitude; else if (pixels[high_pass+i] > magnitude) pixels[high_pass+i]-=magnitude-softness*magnitude; else pixels[high_pass+i]*=softness; if (high_pass != 0) pixels[i]+=pixels[high_pass+i]; } high_pass=low_pass; } /* Reconstruct image from the thresholded wavelet kernel. */ i=0; for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; Quantum *magick_restrict q; ssize_t x; ssize_t offset; q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; break; } offset=GetPixelChannelOffset(noise_image,pixel_channel); for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType pixel; pixel=(MagickRealType) pixels[i]+pixels[low_pass+i]; q[offset]=ClampToQuantum(pixel); i++; q+=GetPixelChannels(noise_image); } sync=SyncCacheViewAuthenticPixels(noise_view,exception); if (sync == MagickFalse) status=MagickFalse; } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,AddNoiseImageTag,(MagickOffsetType) channel,GetPixelChannels(image)); if (proceed == MagickFalse) status=MagickFalse; } } noise_view=DestroyCacheView(noise_view); image_view=DestroyCacheView(image_view); kernel=(float *) RelinquishMagickMemory(kernel); pixels_info=RelinquishVirtualMemory(pixels_info); if (status == MagickFalse) noise_image=DestroyImage(noise_image); return(noise_image); }
parallel-reduction-nowait.c
/* * parallel-reduction-nowait.c -- Archer testcase */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // // See tools/archer/LICENSE.txt for details. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // RUN: %libarcher-compile-and-run | FileCheck %s #include <omp.h> #include <stdio.h> int main(int argc, char *argv[]) { int var = 0, i; int sum1 = 0; int sum2 = 0; // Number of threads is empirical: We need enough threads so that // the reduction is really performed hierarchically in the barrier! #pragma omp parallel num_threads(5) reduction(+ : var) { #pragma omp for schedule(static) nowait reduction(+ : sum1) for (i = 0; i < 5; i++) sum1 += i; #pragma omp for schedule(static) reduction(+ : sum2) for (i = 0; i < 5; i++) sum2 += i; var = sum1 + sum2; } fprintf(stderr, "DONE\n"); int error = (var != 100); return error; } // CHECK-NOT: ThreadSanitizer: data race // CHECK-NOT: ThreadSanitizer: reported // CHECK: DONE
conv2x2s1_neon.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#include "option.h"
#include "mat.h"

namespace ncnn{

// 2x2 convolution, stride 1, NEON-optimized.
// Output channels are computed in parallel; input channels are consumed two
// at a time (with a single-channel tail loop), accumulating into `out`.
// Kernel layout: 4 weights per (outch, inch) pair, i.e. kernel[p*inch*4 + q*4].
// Assumes top_blob is pre-sized so outw = w - 1 and outh = h - 1 — TODO confirm
// against the caller.
static void conv2x2s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        // Seed the whole output channel with its bias (0 when bias is absent);
        // the channel loops below accumulate on top of it.
        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        int q = 0;

        // Main loop: two input channels per iteration.
        for (; q+1<inch; q+=2)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);
            const float* img1 = bottom_blob.channel(q+1);

            const float* kernel0 = kernel + p*inch*4 + q*4;
            const float* kernel1 = kernel0 + 4;

            // Two consecutive input rows per image (2x2 window, stride 1).
            const float* r00 = img0;
            const float* r01 = img0 + w;
            const float* r10 = img1;
            const float* r11 = img1 + w;

#if __ARM_NEON
            float32x4_t _k0 = vld1q_f32(kernel0);
            float32x4_t _k1 = vld1q_f32(kernel1);
#endif // __ARM_NEON

            for (int i = 0; i < outh; i++)
            {
#if __ARM_NEON
                // Vectorize 4 output pixels at a time; `remain` handles the tail.
                int nn = outw >> 2;
                int remain = outw & 3;
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                // v8/v9 accumulate partial sums; ext builds the x+1 shifted
                // vectors; loaded row vectors are recycled via orr (move).
                asm volatile(
                    "prfm       pldl1keep, [%1, #128]      \n"
                    "ld1        {v0.4s}, [%1], #16         \n"
                    "prfm       pldl1keep, [%2, #128]      \n"
                    "ld1        {v2.4s}, [%2], #16         \n"
                    "prfm       pldl1keep, [%3, #128]      \n"
                    "ld1        {v12.4s}, [%3], #16        \n"
                    "prfm       pldl1keep, [%4, #128]      \n"
                    "ld1        {v14.4s}, [%4], #16        \n"
                    "0:                                    \n"
                    "prfm       pldl1keep, [%5, #128]      \n"
                    "ld1        {v9.4s}, [%5]              \n"
                    "fmul       v8.4s, v0.4s, %12.s[0]     \n"
                    "fmla       v9.4s, v2.4s, %12.s[2]     \n"
                    "prfm       pldl1keep, [%1, #128]      \n"
                    "ld1        {v1.4s}, [%1], #16         \n"
                    "prfm       pldl1keep, [%2, #128]      \n"
                    "ld1        {v3.4s}, [%2], #16         \n"
                    "ext        v10.16b, v0.16b, v1.16b, #4 \n"
                    "ext        v11.16b, v2.16b, v3.16b, #4 \n"
                    "fmla       v8.4s, v12.4s, %13.s[0]    \n"
                    "fmla       v9.4s, v14.4s, %13.s[2]    \n"
                    "prfm       pldl1keep, [%3, #128]      \n"
                    "ld1        {v13.4s}, [%3], #16        \n"
                    "prfm       pldl1keep, [%4, #128]      \n"
                    "ld1        {v15.4s}, [%4], #16        \n"
                    "fmla       v8.4s, v10.4s, %12.s[1]    \n"
                    "fmla       v9.4s, v11.4s, %12.s[3]    \n"
                    "ext        v10.16b, v12.16b, v13.16b, #4 \n"
                    "ext        v11.16b, v14.16b, v15.16b, #4 \n"
                    "fmla       v8.4s, v10.4s, %13.s[1]    \n"
                    "fmla       v9.4s, v11.4s, %13.s[3]    \n"
                    "orr        v0.16b, v1.16b, v1.16b     \n"
                    "orr        v2.16b, v3.16b, v3.16b     \n"
                    "fadd       v8.4s, v8.4s, v9.4s        \n"
                    "orr        v12.16b, v13.16b, v13.16b  \n"
                    "orr        v14.16b, v15.16b, v15.16b  \n"
                    "subs       %w0, %w0, #1               \n"
                    "st1        {v8.4s}, [%5], #16         \n"
                    "bne        0b                         \n"
                    "sub        %1, %1, #16                \n"
                    "sub        %2, %2, #16                \n"
                    "sub        %3, %3, #16                \n"
                    "sub        %4, %4, #16                \n"
                    : "=r"(nn),     // %0
                      "=r"(r00),    // %1
                      "=r"(r01),    // %2
                      "=r"(r10),    // %3
                      "=r"(r11),    // %4
                      "=r"(outptr)  // %5
                    : "0"(nn),
                      "1"(r00),
                      "2"(r01),
                      "3"(r10),
                      "4"(r11),
                      "5"(outptr),
                      "w"(_k0),     // %12
                      "w"(_k1)      // %13
                    : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
                );
                }
#else
                if (nn > 0)
                {
                // ARMv7 variant of the same schedule, q-register encoding.
                asm volatile(
                    "pld        [%1, #128]          \n"
                    "vld1.f32   {d0-d1}, [%1]!      \n"
                    "pld        [%2, #128]          \n"
                    "vld1.f32   {d4-d5}, [%2]!      \n"
                    "pld        [%3, #128]          \n"
                    "vld1.f32   {d24-d25}, [%3]!    \n"
                    "pld        [%4, #128]          \n"
                    "vld1.f32   {d28-d29}, [%4]!    \n"
                    "0:                             \n"
                    "pld        [%5, #128]          \n"
                    "vld1.f32   {d18-d19}, [%5]     \n"// q9 = sum
                    "vmul.f32   q8, q0, %e12[0]     \n"
                    "vmla.f32   q9, q2, %f12[0]     \n"
                    "pld        [%1, #128]          \n"
                    "vld1.f32   {d2-d3}, [%1]!      \n"
                    "pld        [%2, #128]          \n"
                    "vld1.f32   {d6-d7}, [%2]!      \n"
                    "vext.f32   q10, q0, q1, #1     \n"
                    "vext.f32   q11, q2, q3, #1     \n"
                    "vmla.f32   q8, q12, %e13[0]    \n"
                    "vmla.f32   q9, q14, %f13[0]    \n"
                    "pld        [%3, #128]          \n"
                    "vld1.f32   {d26-d27}, [%3]!    \n"
                    "pld        [%4, #128]          \n"
                    "vld1.f32   {d30-d31}, [%4]!    \n"
                    "vmla.f32   q8, q10, %e12[1]    \n"
                    "vmla.f32   q9, q11, %f12[1]    \n"
                    "vext.f32   q10, q12, q13, #1   \n"
                    "vext.f32   q11, q14, q15, #1   \n"
                    "vmla.f32   q8, q10, %e13[1]    \n"
                    "vmla.f32   q9, q11, %f13[1]    \n"
                    "vorr       q0, q1, q1          \n"
                    "vorr       q2, q3, q3          \n"
                    "vadd.f32   q8, q8, q9          \n"
                    "vorr       q12, q13, q13       \n"
                    "vorr       q14, q15, q15       \n"
                    "subs       %0, #1              \n"
                    "vst1.f32   {d16-d17}, [%5]!    \n"
                    "bne        0b                  \n"
                    "sub        %1, #16             \n"
                    "sub        %2, #16             \n"
                    "sub        %3, #16             \n"
                    "sub        %4, #16             \n"
                    : "=r"(nn),     // %0
                      "=r"(r00),    // %1
                      "=r"(r01),    // %2
                      "=r"(r10),    // %3
                      "=r"(r11),    // %4
                      "=r"(outptr)  // %5
                    : "0"(nn),
                      "1"(r00),
                      "2"(r01),
                      "3"(r10),
                      "4"(r11),
                      "5"(outptr),
                      "w"(_k0),     // %12
                      "w"(_k1)      // %13
                    : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // Scalar (or intrinsic) tail: one output pixel per iteration.
                for (; remain>0; remain--)
                {
#if __ARM_NEON
                    // (r00,r01)·_k0 + (r10,r11)·_k1, horizontally reduced.
                    float32x2_t _r00 = vld1_f32(r00);
                    float32x2_t _r01 = vld1_f32(r01);
                    float32x4_t _r00r1 = vcombine_f32(_r00, _r01);

                    float32x4_t _s0s1 = vmulq_f32(_r00r1, _k0);

                    float32x2_t _r10 = vld1_f32(r10);
                    float32x2_t _r11 = vld1_f32(r11);
                    float32x4_t _r10r1 = vcombine_f32(_r10, _r11);

                    _s0s1 = vmlaq_f32(_s0s1, _r10r1, _k1);

                    float32x2_t _s = vadd_f32(vget_low_f32(_s0s1), vget_high_f32(_s0s1));
                    _s = vpadd_f32(_s, _s);

                    *outptr += vget_lane_f32(_s, 0);
#else
                    float sum = 0.f;

                    sum += r00[0] * kernel0[0];
                    sum += r00[1] * kernel0[1];
                    sum += r01[0] * kernel0[2];
                    sum += r01[1] * kernel0[3];

                    sum += r10[0] * kernel1[0];
                    sum += r10[1] * kernel1[1];
                    sum += r11[0] * kernel1[2];
                    sum += r11[1] * kernel1[3];

                    *outptr += sum;
#endif // __ARM_NEON
                    r00 += 1;
                    r01 += 1;
                    r10 += 1;
                    r11 += 1;
                    outptr++;
                }

                // Skip the last input column of the row (2-wide window).
                r00 += 1;
                r01 += 1;
                r10 += 1;
                r11 += 1;
            }
        }

        // Tail loop: one remaining input channel (odd inch).
        for (; q<inch; q++)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);

            const float* kernel0 = kernel + p*inch*4 + q*4;

            const float* r0 = img0;
            const float* r1 = img0 + w;

#if __ARM_NEON
            // Each weight broadcast into its own vector for the asm kernels.
            float32x4_t _k0 = vdupq_n_f32(kernel0[0]);
            float32x4_t _k1 = vdupq_n_f32(kernel0[1]);
            float32x4_t _k2 = vdupq_n_f32(kernel0[2]);
            float32x4_t _k3 = vdupq_n_f32(kernel0[3]);
#endif // __ARM_NEON

            for (int i = 0; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;
                int remain = outw & 3;
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                asm volatile(
                    "prfm       pldl1keep, [%1, #128]      \n"
                    "ld1        {v0.4s}, [%1], #16         \n"
                    "prfm       pldl1keep, [%2, #128]      \n"
                    "ld1        {v2.4s}, [%2], #16         \n"
                    "0:                                    \n"
                    "prfm       pldl1keep, [%3, #128]      \n"
                    "ld1        {v9.4s}, [%3]              \n"
                    "fmul       v8.4s, v0.4s, %8.4s        \n"
                    "fmla       v9.4s, v2.4s, %10.4s       \n"
                    "prfm       pldl1keep, [%1, #128]      \n"
                    "ld1        {v1.4s}, [%1], #16         \n"
                    "ext        v10.16b, v0.16b, v1.16b, #4 \n"
                    "fmla       v8.4s, v10.4s, %9.4s       \n"
                    "prfm       pldl1keep, [%2, #128]      \n"
                    "ld1        {v3.4s}, [%2], #16         \n"
                    "ext        v11.16b, v2.16b, v3.16b, #4 \n"
                    "fmla       v9.4s, v11.4s, %11.4s      \n"
                    "orr        v0.16b, v1.16b, v1.16b     \n"
                    "fadd       v8.4s, v8.4s, v9.4s        \n"
                    "orr        v2.16b, v3.16b, v3.16b     \n"
                    "subs       %w0, %w0, #1               \n"
                    "st1        {v8.4s}, [%3], #16         \n"
                    "bne        0b                         \n"
                    "sub        %1, %1, #16                \n"
                    "sub        %2, %2, #16                \n"
                    : "=r"(nn),     // %0
                      "=r"(r0),     // %1
                      "=r"(r1),     // %2
                      "=r"(outptr)  // %3
                    : "0"(nn),
                      "1"(r0),
                      "2"(r1),
                      "3"(outptr),
                      "w"(_k0),     // %8
                      "w"(_k1),     // %9
                      "w"(_k2),     // %10
                      "w"(_k3)      // %11
                    : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11"
                );
                }
#else
                if (nn > 0)
                {
                asm volatile(
                    "pld        [%1, #128]          \n"
                    "vld1.f32   {d0-d1}, [%1]!      \n"
                    "pld        [%2, #128]          \n"
                    "vld1.f32   {d4-d5}, [%2]!      \n"
                    "0:                             \n"
                    "pld        [%3, #128]          \n"
                    "vld1.f32   {d18-d19}, [%3]     \n"// q9 = sum
                    "vmul.f32   q8, q0, %q8         \n"
                    "vmla.f32   q9, q2, %q10        \n"
                    "pld        [%1, #128]          \n"
                    "vld1.f32   {d2-d3}, [%1]!      \n"
                    "vext.f32   q10, q0, q1, #1     \n"
                    "vmla.f32   q8, q10, %q9        \n"
                    "pld        [%2, #128]          \n"
                    "vld1.f32   {d6-d7}, [%2]!      \n"
                    "vext.f32   q11, q2, q3, #1     \n"
                    "vmla.f32   q9, q11, %q11       \n"
                    "vorr       q0, q1, q1          \n"
                    "vadd.f32   q8, q8, q9          \n"
                    "vorr       q2, q3, q3          \n"
                    "subs       %0, #1              \n"
                    "vst1.f32   {d16-d17}, [%3]!    \n"
                    "bne        0b                  \n"
                    "sub        %1, #16             \n"
                    "sub        %2, #16             \n"
                    : "=r"(nn),     // %0
                      "=r"(r0),     // %1
                      "=r"(r1),     // %2
                      "=r"(outptr)  // %3
                    : "0"(nn),
                      "1"(r0),
                      "2"(r1),
                      "3"(outptr),
                      "w"(_k0),     // %8
                      "w"(_k1),     // %9
                      "w"(_k2),     // %10
                      "w"(_k3)      // %11
                    : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"
                );
                }
#endif // __aarch64__
#endif // __ARM_NEON
#if __ARM_NEON
                // All 4 weights in one vector for the intrinsic tail path.
                float32x4_t _k0123 = vld1q_f32(kernel0);
#endif
                for (; remain>0; remain--)
                {
#if __ARM_NEON
                    float32x2_t _r0 = vld1_f32(r0);
                    float32x2_t _r1 = vld1_f32(r1);
                    float32x4_t _r0r1 = vcombine_f32(_r0, _r1);

                    float32x4_t _s0s1 = vmulq_f32(_r0r1, _k0123);
                    float32x2_t _s = vadd_f32(vget_low_f32(_s0s1), vget_high_f32(_s0s1));
                    _s = vpadd_f32(_s, _s);

                    *outptr += vget_lane_f32(_s, 0);
#else
                    float sum = 0.f;

                    sum += r0[0] * kernel0[0];
                    sum += r0[1] * kernel0[1];
                    sum += r1[0] * kernel0[2];
                    sum += r1[1] * kernel0[3];

                    *outptr += sum;
#endif
                    r0 += 1;
                    r1 += 1;
                    outptr++;
                }

                // Skip the last input column of the row (2-wide window).
                r0 += 1;
                r1 += 1;
            }
        }
    }
}

}
GB_critical_section.c
//------------------------------------------------------------------------------
// Source/Template/GB_critical_section: execute code in a critical section
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// All access to the global matrix queue, via GB_queue_* operations, must
// be done through a critical section.  No other part of SuiteSparse:GraphBLAS
// uses this critical section; it is only used for accessing the global matrix
// queue via GB_queue_*.  All GB_queue_* operations use the GB_CRITICAL macro
// to check the result, and if the critical section fails (ok == false),
// they return GrB_PANIC.

// Critical sections for Windows threads and ANSI C11 threads are listed below
// as drafts, but these threading models are not yet supported.

// This is a template fragment: the includer defines GB_CRITICAL_SECTION (the
// guarded statement), `ok`, and the GB_sync lock object before #including it.

{

    //--------------------------------------------------------------------------
    // POSIX pthreads
    //--------------------------------------------------------------------------

    #if defined (USER_POSIX_THREADS)
    {
        // NOTE(review): if pthread_mutex_lock fails, GB_CRITICAL_SECTION still
        // runs without the lock; callers detect ok == false and panic after
        // the fact, per the GB_CRITICAL contract above.
        ok = (pthread_mutex_lock (&GB_sync) == 0) ;
        GB_CRITICAL_SECTION ;
        ok = ok && (pthread_mutex_unlock (&GB_sync) == 0) ;
    }

    //--------------------------------------------------------------------------
    // Microsoft Windows
    //--------------------------------------------------------------------------

    #elif defined (USER_WINDOWS_THREADS)
    {
        // This should work, per the Windows spec, but is not yet supported.
        // (Enter/LeaveCriticalSection have no failure return, so ok is
        // untouched on this path.)
        EnterCriticalSection (&GB_sync) ;
        GB_CRITICAL_SECTION ;
        LeaveCriticalSection (&GB_sync) ;
    }

    //--------------------------------------------------------------------------
    // ANSI C11 threads
    //--------------------------------------------------------------------------

    #elif defined (USER_ANSI_THREADS)
    {
        // This should work per the ANSI C11 Spec, but is not yet supported.
        ok = (mtx_lock (&GB_sync) == thrd_success) ;
        GB_CRITICAL_SECTION ;
        ok = ok && (mtx_unlock (&GB_sync) == thrd_success) ;
    }

    //--------------------------------------------------------------------------
    // OpenMP
    //--------------------------------------------------------------------------

    #else   // USER_OPENMP_THREADS or USER_NO_THREADS
    {
        // default: use a named OpenMP critical section.  If OpenMP is not
        // available, then the #pragma is ignored and this becomes vanilla,
        // single-threaded code.
        #pragma omp critical(GB_critical_section)
        GB_CRITICAL_SECTION ;
    }
    #endif
}

// The macro is consumed by this template; undefine it so the next #include
// of this file starts clean.
#undef GB_CRITICAL_SECTION
H2Pack_3D_kernels.h
#ifndef __H2PACK_3D_KERNELS_H__
#define __H2PACK_3D_KERNELS_H__

#include <math.h>
#include "H2Pack_config.h"
#include "ASTER/include/aster.h"

// Parameter list shared by all kernel-matrix evaluation functions:
// coord0/coord1 : SoA point coordinates, x/y/z rows of length ld0/ld1;
// n0/n1 : number of points in each set; param : kernel-specific constants;
// mat/ldm : output matrix block and its leading dimension.
#ifndef KRNL_EVAL_PARAM
#define KRNL_EVAL_PARAM \
    const DTYPE *coord0, const int ld0, const int n0, \
    const DTYPE *coord1, const int ld1, const int n1, \
    const void *param, DTYPE * __restrict mat, const int ldm
#endif

// Parameter list for matrix-free matvec kernels: x_out += K(coord0,coord1) * x_in
#ifndef KRNL_MV_PARAM
#define KRNL_MV_PARAM \
    const DTYPE *coord0, const int ld0, const int n0, \
    const DTYPE *coord1, const int ld1, const int n1, \
    const void *param, const DTYPE *x_in, DTYPE * __restrict x_out
#endif

// Parameter list for "bi-matvec" kernels that fuse two products in one pass:
// x_out_0 += K * x_in_0  and  x_out_1 += K^T * x_in_1
#ifndef KRNL_BIMV_PARAM
#define KRNL_BIMV_PARAM \
    const DTYPE *coord0, const int ld0, const int n0, \
    const DTYPE *coord1, const int ld1, const int n1, \
    const void *param, const DTYPE *x_in_0, const DTYPE *x_in_1, \
    DTYPE * __restrict x_out_0, DTYPE * __restrict x_out_1
#endif

// Unpack the x/y/z coordinate rows from the SoA coordinate blocks
#define EXTRACT_3D_COORD() \
    const DTYPE *x0 = coord0 + ld0 * 0; \
    const DTYPE *y0 = coord0 + ld0 * 1; \
    const DTYPE *z0 = coord0 + ld0 * 2; \
    const DTYPE *x1 = coord1 + ld1 * 0; \
    const DTYPE *y1 = coord1 + ld1 * 1; \
    const DTYPE *z1 = coord1 + ld1 * 2;

// When counting bimv flops, report effective flops (1 / sqrt(x) == 2 flops)
// instead of achieved flops (1 / sqrt(x) == 1 + NEWTON_ITER * 4 flops)

#ifdef __cplusplus
extern "C" {
#endif

// ============================================================ //
// ====================   Coulomb Kernel   ==================== //
// ============================================================ //

// NOTE(review): non-static `const int` in a header has external linkage in C;
// including this header from several translation units may cause duplicate
// symbols — confirm it is only included once per program.
const int Coulomb_3D_krnl_bimv_flop = 14;

// Coulomb kernel matrix block: mat[i][j] = 1 / |p0_i - p1_j|,
// with the r == 0 (coincident point) entry set to 0 in the scalar tail.
// vec_* wrappers (vec_t, vec_frsqrt_t, ...) come from the ASTER header;
// vec_frsqrt_pf_t() is presumably the prefactor that corrects the
// rsqrt approximation — TODO confirm against aster.h.
static void Coulomb_3D_eval_intrin_t(KRNL_EVAL_PARAM)
{
    EXTRACT_3D_COORD();
    const int n1_vec = (n1 / SIMD_LEN) * SIMD_LEN;   // vectorized portion of n1
    const vec_t frsqrt_pf = vec_frsqrt_pf_t();
    for (int i = 0; i < n0; i++)
    {
        DTYPE *mat_irow = mat + i * ldm;
        vec_t x0_iv = vec_bcast_t(x0 + i);
        vec_t y0_iv = vec_bcast_t(y0 + i);
        vec_t z0_iv = vec_bcast_t(z0 + i);
        // SIMD main loop: unaligned loads, full SIMD_LEN batches only
        for (int j = 0; j < n1_vec; j += SIMD_LEN)
        {
            vec_t dx = vec_sub_t(x0_iv, vec_loadu_t(x1 + j));
            vec_t dy = vec_sub_t(y0_iv, vec_loadu_t(y1 + j));
            vec_t dz = vec_sub_t(z0_iv, vec_loadu_t(z1 + j));
            vec_t r2 = vec_mul_t(dx, dx);
            r2 = vec_fmadd_t(dy, dy, r2);
            r2 = vec_fmadd_t(dz, dz, r2);
            vec_t rinv = vec_mul_t(frsqrt_pf, vec_frsqrt_t(r2));
            vec_storeu_t(mat_irow + j, rinv);
        }
        const DTYPE x0_i = x0[i];
        const DTYPE y0_i = y0[i];
        const DTYPE z0_i = z0[i];
        // scalar tail: remaining j, with explicit r == 0 guard
        for (int j = n1_vec; j < n1; j++)
        {
            DTYPE dx = x0_i - x1[j];
            DTYPE dy = y0_i - y1[j];
            DTYPE dz = z0_i - z1[j];
            DTYPE r2 = dx * dx + dy * dy + dz * dz;
            mat_irow[j] = (r2 == 0.0) ? 0.0 : (1.0 / DSQRT(r2));
        }
    }
}

// Fused bi-matvec for the Coulomb kernel: accumulates
// x_out_0[i] += sum_j K_ij * x_in_0[j] and x_out_1[j] += K_ij * x_in_1[i].
// Processes two target rows per iteration (i += 2) and uses aligned loads
// with no scalar tail — assumes n0 even and n1 padded to SIMD_LEN;
// TODO confirm the padding contract with the caller.
static void Coulomb_3D_krnl_bimv_intrin_t(KRNL_BIMV_PARAM)
{
    EXTRACT_3D_COORD();
    const vec_t frsqrt_pf = vec_frsqrt_pf_t();
    for (int i = 0; i < n0; i += 2)
    {
        vec_t sum_v0 = vec_zero_t();
        vec_t sum_v1 = vec_zero_t();
        const vec_t x0_i0v = vec_bcast_t(x0 + i);
        const vec_t y0_i0v = vec_bcast_t(y0 + i);
        const vec_t z0_i0v = vec_bcast_t(z0 + i);
        const vec_t x0_i1v = vec_bcast_t(x0 + i + 1);
        const vec_t y0_i1v = vec_bcast_t(y0 + i + 1);
        const vec_t z0_i1v = vec_bcast_t(z0 + i + 1);
        const vec_t x_in_1_i0v = vec_bcast_t(x_in_1 + i);
        const vec_t x_in_1_i1v = vec_bcast_t(x_in_1 + i + 1);
        for (int j = 0; j < n1; j += SIMD_LEN)
        {
            vec_t d0, d1, jv, r20, r21;
            // squared distances of sources j to targets i and i+1
            jv  = vec_load_t(x1 + j);
            d0  = vec_sub_t(x0_i0v, jv);
            d1  = vec_sub_t(x0_i1v, jv);
            r20 = vec_mul_t(d0, d0);
            r21 = vec_mul_t(d1, d1);
            jv  = vec_load_t(y1 + j);
            d0  = vec_sub_t(y0_i0v, jv);
            d1  = vec_sub_t(y0_i1v, jv);
            r20 = vec_fmadd_t(d0, d0, r20);
            r21 = vec_fmadd_t(d1, d1, r21);
            jv  = vec_load_t(z1 + j);
            d0  = vec_sub_t(z0_i0v, jv);
            d1  = vec_sub_t(z0_i1v, jv);
            r20 = vec_fmadd_t(d0, d0, r20);
            r21 = vec_fmadd_t(d1, d1, r21);
            d0 = vec_load_t(x_in_0 + j);     // forward input slice
            d1 = vec_load_t(x_out_1 + j);    // transpose output accumulator
            r20 = vec_mul_t(frsqrt_pf, vec_frsqrt_t(r20));   // K_{i,j}
            r21 = vec_mul_t(frsqrt_pf, vec_frsqrt_t(r21));   // K_{i+1,j}
            sum_v0 = vec_fmadd_t(d0, r20, sum_v0);
            sum_v1 = vec_fmadd_t(d0, r21, sum_v1);
            d1 = vec_fmadd_t(x_in_1_i0v, r20, d1);
            d1 = vec_fmadd_t(x_in_1_i1v, r21, d1);
            vec_store_t(x_out_1 + j, d1);
        }
        x_out_0[i]   += vec_reduce_add_t(sum_v0);
        x_out_0[i+1] += vec_reduce_add_t(sum_v1);
    }
}

// ============================================================ //
// ====================   Gaussian Kernel   =================== //
// ============================================================ //

const int Gaussian_3D_krnl_bimv_flop = 14;

// Gaussian kernel matrix block: mat[i][j] = exp(-l * r^2) with l = param[0].
// NOTE(review): the scalar tail calls exp() while other scalar tails in this
// file use the DEXP/DSQRT wrappers — consider DEXP here for float DTYPE.
static void Gaussian_3D_eval_intrin_t(KRNL_EVAL_PARAM)
{
    EXTRACT_3D_COORD();
    const int n1_vec = (n1 / SIMD_LEN) * SIMD_LEN;   // vectorized portion of n1
    const DTYPE *param_ = (DTYPE*) param;
    const DTYPE neg_l = -param_[0];
    const vec_t neg_l_v = vec_set1_t(neg_l);
    for (int i = 0; i < n0; i++)
    {
        DTYPE *mat_irow = mat + i * ldm;
        vec_t x0_iv = vec_bcast_t(x0 + i);
        vec_t y0_iv = vec_bcast_t(y0 + i);
        vec_t z0_iv = vec_bcast_t(z0 + i);
        for (int j = 0; j < n1_vec; j += SIMD_LEN)
        {
            vec_t dx = vec_sub_t(x0_iv, vec_loadu_t(x1 + j));
            vec_t dy = vec_sub_t(y0_iv, vec_loadu_t(y1 + j));
            vec_t dz = vec_sub_t(z0_iv, vec_loadu_t(z1 + j));
            vec_t r2 = vec_mul_t(dx, dx);
            r2 = vec_fmadd_t(dy, dy, r2);
            r2 = vec_fmadd_t(dz, dz, r2);
            r2 = vec_exp_t(vec_mul_t(neg_l_v, r2));   // exp(-l * r^2)
            vec_storeu_t(mat_irow + j, r2);
        }
        const DTYPE x0_i = x0[i];
        const DTYPE y0_i = y0[i];
        const DTYPE z0_i = z0[i];
        // scalar tail for the remaining j
        for (int j = n1_vec; j < n1; j++)
        {
            DTYPE dx = x0_i - x1[j];
            DTYPE dy = y0_i - y1[j];
            DTYPE dz = z0_i - z1[j];
            DTYPE r2 = dx * dx + dy * dy + dz * dz;
            mat_irow[j] = exp(neg_l * r2);
        }
    }
}

// Fused bi-matvec for the Gaussian kernel; same two-row / aligned-load
// structure as Coulomb_3D_krnl_bimv_intrin_t (assumes n0 even, n1 padded
// to SIMD_LEN — TODO confirm padding contract).
static void Gaussian_3D_krnl_bimv_intrin_t(KRNL_BIMV_PARAM)
{
    EXTRACT_3D_COORD();
    const DTYPE *param_ = (DTYPE*) param;
    const DTYPE neg_l = -param_[0];
    const vec_t neg_l_v = vec_set1_t(neg_l);
    for (int i = 0; i < n0; i += 2)
    {
        vec_t sum_v0 = vec_zero_t();
        vec_t sum_v1 = vec_zero_t();
        const vec_t x0_i0v = vec_bcast_t(x0 + i);
        const vec_t y0_i0v = vec_bcast_t(y0 + i);
        const vec_t z0_i0v = vec_bcast_t(z0 + i);
        const vec_t x0_i1v = vec_bcast_t(x0 + i + 1);
        const vec_t y0_i1v = vec_bcast_t(y0 + i + 1);
        const vec_t z0_i1v = vec_bcast_t(z0 + i + 1);
        const vec_t x_in_1_i0v = vec_bcast_t(x_in_1 + i);
        const vec_t x_in_1_i1v = vec_bcast_t(x_in_1 + i + 1);
        for (int j = 0; j < n1; j += SIMD_LEN)
        {
            vec_t d0, d1, jv, r20, r21;
            jv  = vec_load_t(x1 + j);
            d0  = vec_sub_t(x0_i0v, jv);
            d1  = vec_sub_t(x0_i1v, jv);
            r20 = vec_mul_t(d0, d0);
            r21 = vec_mul_t(d1, d1);
            jv  = vec_load_t(y1 + j);
            d0  = vec_sub_t(y0_i0v, jv);
            d1  = vec_sub_t(y0_i1v, jv);
            r20 = vec_fmadd_t(d0, d0, r20);
            r21 = vec_fmadd_t(d1, d1, r21);
            jv  = vec_load_t(z1 + j);
            d0  = vec_sub_t(z0_i0v, jv);
            d1  = vec_sub_t(z0_i1v, jv);
            r20 = vec_fmadd_t(d0, d0, r20);
            r21 = vec_fmadd_t(d1, d1, r21);
            d0 = vec_load_t(x_in_0 + j);
            d1 = vec_load_t(x_out_1 + j);
            r20 = vec_exp_t(vec_mul_t(neg_l_v, r20));
            r21 = vec_exp_t(vec_mul_t(neg_l_v, r21));
            sum_v0 = vec_fmadd_t(d0, r20, sum_v0);
            sum_v1 = vec_fmadd_t(d0, r21, sum_v1);
            d1 = vec_fmadd_t(x_in_1_i0v, r20, d1);
            d1 = vec_fmadd_t(x_in_1_i1v, r21, d1);
            vec_store_t(x_out_1 + j, d1);
        }
        x_out_0[i]   += vec_reduce_add_t(sum_v0);
        x_out_0[i+1] += vec_reduce_add_t(sum_v1);
    }
}

// ============================================================ //
// ==================   Exponential Kernel   ================== //
// ============================================================ //

const int Expon_3D_krnl_bimv_flop = 15;

// Exponential kernel matrix block: mat[i][j] = exp(-l * r) with l = param[0].
static void Expon_3D_eval_intrin_t(KRNL_EVAL_PARAM)
{
    EXTRACT_3D_COORD();
    const int n1_vec = (n1 / SIMD_LEN) * SIMD_LEN;
    const DTYPE *param_ = (DTYPE*) param;
    const DTYPE neg_l = -param_[0];
    const vec_t neg_l_v = vec_set1_t(neg_l);
    for (int i = 0; i < n0; i++)
    {
        DTYPE *mat_irow = mat + i * ldm;
        vec_t x0_iv = vec_bcast_t(x0 + i);
        vec_t y0_iv = vec_bcast_t(y0 + i);
        vec_t z0_iv = vec_bcast_t(z0 + i);
        for (int j = 0; j < n1_vec; j += SIMD_LEN)
        {
            vec_t dx = vec_sub_t(x0_iv, vec_loadu_t(x1 + j));
            vec_t dy = vec_sub_t(y0_iv, vec_loadu_t(y1 + j));
            vec_t dz = vec_sub_t(z0_iv, vec_loadu_t(z1 + j));
            vec_t r2 = vec_mul_t(dx, dx);
            r2 = vec_fmadd_t(dy, dy, r2);
            r2 = vec_fmadd_t(dz, dz, r2);
            r2 = vec_mul_t(neg_l_v, vec_sqrt_t(r2));   // -l * r
            r2 = vec_exp_t(r2);
            vec_storeu_t(mat_irow + j, r2);
        }
        const DTYPE x0_i = x0[i];
        const DTYPE y0_i = y0[i];
        const DTYPE z0_i = z0[i];
        for (int j = n1_vec; j <
n1; j++)
        {
            DTYPE dx = x0_i - x1[j];
            DTYPE dy = y0_i - y1[j];
            DTYPE dz = z0_i - z1[j];
            DTYPE r2 = dx * dx + dy * dy + dz * dz;
            // NOTE(review): exp/sqrt here vs DEXP/DSQRT elsewhere — consider
            // the D* wrappers for float DTYPE consistency.
            mat_irow[j] = exp(neg_l * sqrt(r2));
        }
    }
}

// Fused bi-matvec for the exponential kernel exp(-l * r); same two-row /
// aligned-load structure as the other *_krnl_bimv_intrin_t kernels
// (assumes n0 even, n1 padded to SIMD_LEN — TODO confirm).
static void Expon_3D_krnl_bimv_intrin_t(KRNL_BIMV_PARAM)
{
    EXTRACT_3D_COORD();
    const DTYPE *param_ = (DTYPE*) param;
    const DTYPE neg_l = -param_[0];
    const vec_t neg_l_v = vec_set1_t(neg_l);
    for (int i = 0; i < n0; i += 2)
    {
        vec_t sum_v0 = vec_zero_t();
        vec_t sum_v1 = vec_zero_t();
        const vec_t x0_i0v = vec_bcast_t(x0 + i);
        const vec_t y0_i0v = vec_bcast_t(y0 + i);
        const vec_t z0_i0v = vec_bcast_t(z0 + i);
        const vec_t x0_i1v = vec_bcast_t(x0 + i + 1);
        const vec_t y0_i1v = vec_bcast_t(y0 + i + 1);
        const vec_t z0_i1v = vec_bcast_t(z0 + i + 1);
        const vec_t x_in_1_i0v = vec_bcast_t(x_in_1 + i);
        const vec_t x_in_1_i1v = vec_bcast_t(x_in_1 + i + 1);
        for (int j = 0; j < n1; j += SIMD_LEN)
        {
            vec_t d0, d1, jv, r20, r21;
            jv  = vec_load_t(x1 + j);
            d0  = vec_sub_t(x0_i0v, jv);
            d1  = vec_sub_t(x0_i1v, jv);
            r20 = vec_mul_t(d0, d0);
            r21 = vec_mul_t(d1, d1);
            jv  = vec_load_t(y1 + j);
            d0  = vec_sub_t(y0_i0v, jv);
            d1  = vec_sub_t(y0_i1v, jv);
            r20 = vec_fmadd_t(d0, d0, r20);
            r21 = vec_fmadd_t(d1, d1, r21);
            jv  = vec_load_t(z1 + j);
            d0  = vec_sub_t(z0_i0v, jv);
            d1  = vec_sub_t(z0_i1v, jv);
            r20 = vec_fmadd_t(d0, d0, r20);
            r21 = vec_fmadd_t(d1, d1, r21);
            d0 = vec_load_t(x_in_0 + j);
            d1 = vec_load_t(x_out_1 + j);
            r20 = vec_mul_t(neg_l_v, vec_sqrt_t(r20));
            r21 = vec_mul_t(neg_l_v, vec_sqrt_t(r21));
            r20 = vec_exp_t(r20);
            r21 = vec_exp_t(r21);
            sum_v0 = vec_fmadd_t(d0, r20, sum_v0);
            sum_v1 = vec_fmadd_t(d0, r21, sum_v1);
            d1 = vec_fmadd_t(x_in_1_i0v, r20, d1);
            d1 = vec_fmadd_t(x_in_1_i1v, r21, d1);
            vec_store_t(x_out_1 + j, d1);
        }
        x_out_0[i]   += vec_reduce_add_t(sum_v0);
        x_out_0[i+1] += vec_reduce_add_t(sum_v1);
    }
}

// ============================================================ //
// ===================   Matern 3/2 Kernel   ================== //
// ============================================================ //

const int Matern32_3D_krnl_bimv_flop = 17;

// -sqrt(3), folded into the length-scale parameter below
#define NSQRT3 -1.7320508075688772

// Matern 3/2 kernel matrix block: with k = -sqrt(3) * param[0] * r this
// computes (1 - k) * exp(k), i.e. (1 + sqrt(3)*l*r) * exp(-sqrt(3)*l*r);
// param[0] presumably is the inverse length-scale — TODO confirm.
static void Matern32_3D_eval_intrin_t(KRNL_EVAL_PARAM)
{
    EXTRACT_3D_COORD();
    const int n1_vec = (n1 / SIMD_LEN) * SIMD_LEN;
    const DTYPE *param_ = (DTYPE*) param;
    const DTYPE nsqrt3_l = NSQRT3 * param_[0];
    const vec_t nsqrt3_l_v = vec_set1_t(nsqrt3_l);
    const vec_t v_1 = vec_set1_t(1.0);
    for (int i = 0; i < n0; i++)
    {
        DTYPE *mat_irow = mat + i * ldm;
        vec_t x0_iv = vec_bcast_t(x0 + i);
        vec_t y0_iv = vec_bcast_t(y0 + i);
        vec_t z0_iv = vec_bcast_t(z0 + i);
        for (int j = 0; j < n1_vec; j += SIMD_LEN)
        {
            vec_t dx = vec_sub_t(x0_iv, vec_loadu_t(x1 + j));
            vec_t dy = vec_sub_t(y0_iv, vec_loadu_t(y1 + j));
            vec_t dz = vec_sub_t(z0_iv, vec_loadu_t(z1 + j));
            vec_t r = vec_mul_t(dx, dx);
            r = vec_fmadd_t(dy, dy, r);
            r = vec_fmadd_t(dz, dz, r);
            r = vec_sqrt_t(r);
            r = vec_mul_t(r, nsqrt3_l_v);                      // k = -sqrt(3)*l*r
            r = vec_mul_t(vec_sub_t(v_1, r), vec_exp_t(r));    // (1-k)*exp(k)
            vec_storeu_t(mat_irow + j, r);
        }
        const DTYPE x0_i = x0[i];
        const DTYPE y0_i = y0[i];
        const DTYPE z0_i = z0[i];
        // scalar tail, same formula
        for (int j = n1_vec; j < n1; j++)
        {
            DTYPE dx = x0_i - x1[j];
            DTYPE dy = y0_i - y1[j];
            DTYPE dz = z0_i - z1[j];
            DTYPE r = sqrt(dx * dx + dy * dy + dz * dz);
            r = r * nsqrt3_l;
            r = (1.0 - r) * exp(r);
            mat_irow[j] = r;
        }
    }
}

// Fused bi-matvec for the Matern 3/2 kernel (same structure as above bimv
// kernels; assumes n0 even, n1 padded to SIMD_LEN — TODO confirm).
static void Matern32_3D_krnl_bimv_intrin_t(KRNL_BIMV_PARAM)
{
    EXTRACT_3D_COORD();
    const DTYPE *param_ = (DTYPE*) param;
    const DTYPE nsqrt3_l = NSQRT3 * param_[0];
    const vec_t nsqrt3_l_v = vec_set1_t(nsqrt3_l);
    const vec_t v_1 = vec_set1_t(1.0);
    for (int i = 0; i < n0; i += 2)
    {
        vec_t sum_v0 = vec_zero_t();
        vec_t sum_v1 = vec_zero_t();
        const vec_t x0_i0v = vec_bcast_t(x0 + i);
        const vec_t y0_i0v = vec_bcast_t(y0 + i);
        const vec_t z0_i0v = vec_bcast_t(z0 + i);
        const vec_t x0_i1v = vec_bcast_t(x0 + i + 1);
        const vec_t y0_i1v = vec_bcast_t(y0 + i + 1);
        const vec_t z0_i1v = vec_bcast_t(z0 + i + 1);
        const vec_t x_in_1_i0v = vec_bcast_t(x_in_1 + i);
        const vec_t x_in_1_i1v = vec_bcast_t(x_in_1 + i + 1);
        for (int j = 0; j < n1; j += SIMD_LEN)
        {
            vec_t d0, d1, jv,
r0, r1;
            jv = vec_load_t(x1 + j);
            d0 = vec_sub_t(x0_i0v, jv);
            d1 = vec_sub_t(x0_i1v, jv);
            r0 = vec_mul_t(d0, d0);
            r1 = vec_mul_t(d1, d1);
            jv = vec_load_t(y1 + j);
            d0 = vec_sub_t(y0_i0v, jv);
            d1 = vec_sub_t(y0_i1v, jv);
            r0 = vec_fmadd_t(d0, d0, r0);
            r1 = vec_fmadd_t(d1, d1, r1);
            jv = vec_load_t(z1 + j);
            d0 = vec_sub_t(z0_i0v, jv);
            d1 = vec_sub_t(z0_i1v, jv);
            r0 = vec_fmadd_t(d0, d0, r0);
            r1 = vec_fmadd_t(d1, d1, r1);
            r0 = vec_sqrt_t(r0);
            r1 = vec_sqrt_t(r1);
            d0 = vec_load_t(x_in_0 + j);    // forward input slice
            d1 = vec_load_t(x_out_1 + j);   // transpose output accumulator
            r0 = vec_mul_t(r0, nsqrt3_l_v);
            r1 = vec_mul_t(r1, nsqrt3_l_v);
            r0 = vec_mul_t(vec_sub_t(v_1, r0), vec_exp_t(r0));   // (1-k)*exp(k)
            r1 = vec_mul_t(vec_sub_t(v_1, r1), vec_exp_t(r1));
            sum_v0 = vec_fmadd_t(d0, r0, sum_v0);
            sum_v1 = vec_fmadd_t(d0, r1, sum_v1);
            d1 = vec_fmadd_t(x_in_1_i0v, r0, d1);
            d1 = vec_fmadd_t(x_in_1_i1v, r1, d1);
            vec_store_t(x_out_1 + j, d1);
        }
        x_out_0[i]   += vec_reduce_add_t(sum_v0);
        x_out_0[i+1] += vec_reduce_add_t(sum_v1);
    }
}

// ============================================================ //
// ===================   Matern 5/2 Kernel   ================== //
// ============================================================ //

const int Matern52_3D_krnl_bimv_flop = 20;

// -sqrt(5) and 1/3, folded into the Matern 5/2 polynomial below
#define NSQRT5 -2.2360679774997896
#define _1o3 0.3333333333333333

// Matern 5/2 kernel matrix block: with k = -sqrt(5) * param[0] * r this
// computes (1 - k + k^2/3) * exp(k); param[0] presumably is the inverse
// length-scale — TODO confirm.
static void Matern52_3D_eval_intrin_t(KRNL_EVAL_PARAM)
{
    EXTRACT_3D_COORD();
    const int n1_vec = (n1 / SIMD_LEN) * SIMD_LEN;
    const DTYPE *param_ = (DTYPE*) param;
    const DTYPE nsqrt5_l = NSQRT5 * param_[0];
    const vec_t nsqrt5_l_v = vec_set1_t(nsqrt5_l);
    const vec_t v_1 = vec_set1_t(1.0);
    const vec_t v_1o3 = vec_set1_t(_1o3);
    for (int i = 0; i < n0; i++)
    {
        DTYPE *mat_irow = mat + i * ldm;
        vec_t x0_iv = vec_bcast_t(x0 + i);
        vec_t y0_iv = vec_bcast_t(y0 + i);
        vec_t z0_iv = vec_bcast_t(z0 + i);
        for (int j = 0; j < n1_vec; j += SIMD_LEN)
        {
            vec_t dx = vec_sub_t(x0_iv, vec_loadu_t(x1 + j));
            vec_t dy = vec_sub_t(y0_iv, vec_loadu_t(y1 + j));
            vec_t dz = vec_sub_t(z0_iv, vec_loadu_t(z1 + j));
            vec_t r = vec_mul_t(dx, dx);
            r = vec_fmadd_t(dy, dy, r);
            r = vec_fmadd_t(dz, dz, r);
            r = vec_sqrt_t(r);
            vec_t lk = vec_mul_t(nsqrt5_l_v, r);     // k = -sqrt(5)*l*r
            vec_t val = vec_sub_t(v_1, lk);          // 1 - k
            vec_t lk2 = vec_mul_t(lk, lk);
            val = vec_fmadd_t(v_1o3, lk2, val);      // 1 - k + k^2/3
            val = vec_mul_t(val, vec_exp_t(lk));
            vec_storeu_t(mat_irow + j, val);
        }
        const DTYPE x0_i = x0[i];
        const DTYPE y0_i = y0[i];
        const DTYPE z0_i = z0[i];
        // scalar tail, same formula
        for (int j = n1_vec; j < n1; j++)
        {
            DTYPE dx = x0_i - x1[j];
            DTYPE dy = y0_i - y1[j];
            DTYPE dz = z0_i - z1[j];
            DTYPE r = sqrt(dx * dx + dy * dy + dz * dz);
            DTYPE lk = nsqrt5_l * r;
            DTYPE val = (1.0 - lk + _1o3 * lk * lk) * exp(lk);
            mat_irow[j] = val;
        }
    }
}

// Fused bi-matvec for the Matern 5/2 kernel (same structure as the other
// bimv kernels; assumes n0 even, n1 padded to SIMD_LEN — TODO confirm).
static void Matern52_3D_krnl_bimv_intrin_t(KRNL_BIMV_PARAM)
{
    EXTRACT_3D_COORD();
    const DTYPE *param_ = (DTYPE*) param;
    const DTYPE nsqrt5_l = NSQRT5 * param_[0];
    const vec_t nsqrt5_l_v = vec_set1_t(nsqrt5_l);
    const vec_t v_1 = vec_set1_t(1.0);
    const vec_t v_1o3 = vec_set1_t(_1o3);
    for (int i = 0; i < n0; i += 2)
    {
        vec_t sum_v0 = vec_zero_t();
        vec_t sum_v1 = vec_zero_t();
        const vec_t x0_i0v = vec_bcast_t(x0 + i);
        const vec_t y0_i0v = vec_bcast_t(y0 + i);
        const vec_t z0_i0v = vec_bcast_t(z0 + i);
        const vec_t x0_i1v = vec_bcast_t(x0 + i + 1);
        const vec_t y0_i1v = vec_bcast_t(y0 + i + 1);
        const vec_t z0_i1v = vec_bcast_t(z0 + i + 1);
        const vec_t x_in_1_i0v = vec_bcast_t(x_in_1 + i);
        const vec_t x_in_1_i1v = vec_bcast_t(x_in_1 + i + 1);
        for (int j = 0; j < n1; j += SIMD_LEN)
        {
            vec_t d0, d1, jv, r0, r1, lk0, lk1, lk02, lk12, val0, val1;
            jv = vec_load_t(x1 + j);
            d0 = vec_sub_t(x0_i0v, jv);
            d1 = vec_sub_t(x0_i1v, jv);
            r0 = vec_mul_t(d0, d0);
            r1 = vec_mul_t(d1, d1);
            jv = vec_load_t(y1 + j);
            d0 = vec_sub_t(y0_i0v, jv);
            d1 = vec_sub_t(y0_i1v, jv);
            r0 = vec_fmadd_t(d0, d0, r0);
            r1 = vec_fmadd_t(d1, d1, r1);
            jv = vec_load_t(z1 + j);
            d0 = vec_sub_t(z0_i0v, jv);
            d1 = vec_sub_t(z0_i1v, jv);
            r0 = vec_fmadd_t(d0, d0, r0);
            r1 = vec_fmadd_t(d1, d1, r1);
            r0 = vec_sqrt_t(r0);
            r1 = vec_sqrt_t(r1);
            d0 = vec_load_t(x_in_0 + j);
            d1 = vec_load_t(x_out_1 + j);
            lk0 = vec_mul_t(nsqrt5_l_v,
r0);
            val0 = vec_sub_t(v_1, lk0);
            lk02 = vec_mul_t(lk0, lk0);
            val0 = vec_fmadd_t(v_1o3, lk02, val0);      // 1 - k + k^2/3
            val0 = vec_mul_t(val0, vec_exp_t(lk0));
            lk1 = vec_mul_t(nsqrt5_l_v, r1);
            val1 = vec_sub_t(v_1, lk1);
            lk12 = vec_mul_t(lk1, lk1);
            val1 = vec_fmadd_t(v_1o3, lk12, val1);
            val1 = vec_mul_t(val1, vec_exp_t(lk1));
            sum_v0 = vec_fmadd_t(d0, val0, sum_v0);
            sum_v1 = vec_fmadd_t(d0, val1, sum_v1);
            d1 = vec_fmadd_t(x_in_1_i0v, val0, d1);
            d1 = vec_fmadd_t(x_in_1_i1v, val1, d1);
            vec_store_t(x_out_1 + j, d1);
        }
        x_out_0[i]   += vec_reduce_add_t(sum_v0);
        x_out_0[i+1] += vec_reduce_add_t(sum_v1);
    }
}

// ============================================================ //
// ===================   Quadratic Kernel   =================== //
// ============================================================ //

const int Quadratic_3D_krnl_bimv_flop = 15;

// Rational-quadratic kernel matrix block:
// mat[i][j] = (1 + c * r^2)^a with c = param[0], a = param[1].
static void Quadratic_3D_eval_intrin_t(KRNL_EVAL_PARAM)
{
    EXTRACT_3D_COORD();
    const int n1_vec = (n1 / SIMD_LEN) * SIMD_LEN;
    const DTYPE *param_ = (DTYPE*) param;
    const DTYPE c = param_[0];
    const DTYPE a = param_[1];
    const vec_t vec_c = vec_set1_t(c);
    const vec_t vec_a = vec_set1_t(a);
    const vec_t vec_1 = vec_set1_t(1.0);
    for (int i = 0; i < n0; i++)
    {
        DTYPE *mat_irow = mat + i * ldm;
        vec_t x0_iv = vec_bcast_t(x0 + i);
        vec_t y0_iv = vec_bcast_t(y0 + i);
        vec_t z0_iv = vec_bcast_t(z0 + i);
        for (int j = 0; j < n1_vec; j += SIMD_LEN)
        {
            vec_t dx = vec_sub_t(x0_iv, vec_loadu_t(x1 + j));
            vec_t dy = vec_sub_t(y0_iv, vec_loadu_t(y1 + j));
            vec_t dz = vec_sub_t(z0_iv, vec_loadu_t(z1 + j));
            vec_t r2 = vec_mul_t(dx, dx);
            r2 = vec_fmadd_t(dy, dy, r2);
            r2 = vec_fmadd_t(dz, dz, r2);
            r2 = vec_fmadd_t(r2, vec_c, vec_1);   // 1 + c * r^2
            r2 = vec_pow_t(r2, vec_a);            // (1 + c*r^2)^a
            vec_storeu_t(mat_irow + j, r2);
        }
        const DTYPE x0_i = x0[i];
        const DTYPE y0_i = y0[i];
        const DTYPE z0_i = z0[i];
        // scalar tail, same formula
        for (int j = n1_vec; j < n1; j++)
        {
            DTYPE dx = x0_i - x1[j];
            DTYPE dy = y0_i - y1[j];
            DTYPE dz = z0_i - z1[j];
            DTYPE r2 = dx * dx + dy * dy + dz * dz;
            r2 = 1.0 + c * r2;
            r2 = DPOW(r2, a);
            mat_irow[j] = r2;
        }
    }
}

// Fused bi-matvec for the quadratic kernel (same structure as the other
// bimv kernels; assumes n0 even, n1 padded to SIMD_LEN — TODO confirm).
static void Quadratic_3D_krnl_bimv_intrin_t(KRNL_BIMV_PARAM)
{
    EXTRACT_3D_COORD();
    const DTYPE *param_ = (DTYPE*) param;
    const vec_t vec_c = vec_bcast_t(param_ + 0);
    const vec_t vec_a = vec_bcast_t(param_ + 1);
    const vec_t vec_1 = vec_set1_t(1.0);
    for (int i = 0; i < n0; i += 2)
    {
        vec_t sum_v0 = vec_zero_t();
        vec_t sum_v1 = vec_zero_t();
        const vec_t x0_i0v = vec_bcast_t(x0 + i);
        const vec_t y0_i0v = vec_bcast_t(y0 + i);
        const vec_t z0_i0v = vec_bcast_t(z0 + i);
        const vec_t x0_i1v = vec_bcast_t(x0 + i + 1);
        const vec_t y0_i1v = vec_bcast_t(y0 + i + 1);
        const vec_t z0_i1v = vec_bcast_t(z0 + i + 1);
        const vec_t x_in_1_i0v = vec_bcast_t(x_in_1 + i);
        const vec_t x_in_1_i1v = vec_bcast_t(x_in_1 + i + 1);
        for (int j = 0; j < n1; j += SIMD_LEN)
        {
            vec_t d0, d1, jv, r20, r21;
            jv = vec_load_t(x1 + j);
            d0 = vec_sub_t(x0_i0v, jv);
            d1 = vec_sub_t(x0_i1v, jv);
            r20 = vec_mul_t(d0, d0);
            r21 = vec_mul_t(d1, d1);
            jv = vec_load_t(y1 + j);
            d0 = vec_sub_t(y0_i0v, jv);
            d1 = vec_sub_t(y0_i1v, jv);
            r20 = vec_fmadd_t(d0, d0, r20);
            r21 = vec_fmadd_t(d1, d1, r21);
            jv = vec_load_t(z1 + j);
            d0 = vec_sub_t(z0_i0v, jv);
            d1 = vec_sub_t(z0_i1v, jv);
            r20 = vec_fmadd_t(d0, d0, r20);
            r21 = vec_fmadd_t(d1, d1, r21);
            d0 = vec_load_t(x_in_0 + j);
            d1 = vec_load_t(x_out_1 + j);
            r20 = vec_fmadd_t(r20, vec_c, vec_1);
            r21 = vec_fmadd_t(r21, vec_c, vec_1);
            r20 = vec_pow_t(r20, vec_a);
            r21 = vec_pow_t(r21, vec_a);
            sum_v0 = vec_fmadd_t(d0, r20, sum_v0);
            sum_v1 = vec_fmadd_t(d0, r21, sum_v1);
            d1 = vec_fmadd_t(x_in_1_i0v, r20, d1);
            d1 = vec_fmadd_t(x_in_1_i1v, r21, d1);
            vec_store_t(x_out_1 + j, d1);
        }
        x_out_0[i]   += vec_reduce_add_t(sum_v0);
        x_out_0[i+1] += vec_reduce_add_t(sum_v1);
    }
}

// ============================================================ //
// =====================   Stokes Kernel   ==================== //
// ============================================================ //

// Unpack Stokes constants from param: eta = viscosity-like parameter,
// a = particle radius (presumably — TODO confirm against caller);
// C = 1/(6*pi*a*eta), Ca3o4 = C * a * 3/4.
#define CALC_STOKES_CONST() \
    const DTYPE *param_ = (DTYPE*) param; \
    const DTYPE eta = param_[0]; \
    const DTYPE a = param_[1]; \
    const DTYPE C = 1.0 / (6.0 * M_PI * a * eta); \
    const DTYPE Ca3o4 = C * a * 0.75;

const int Stokes_krnl_bimv_flop = 48;

// Stokes kernel 3x3 block per point pair: t1 * (I + rhat * rhat^T), with
// t1 = Ca3o4 / r off the diagonal case and t1 = C, rhat = 0 when r == 0.
// Scalar (non-SIMD) reference implementation.
static void Stokes_eval_std(KRNL_EVAL_PARAM)
{
    EXTRACT_3D_COORD();
    CALC_STOKES_CONST();
    for (int i = 0; i < n0; i++)
    {
        DTYPE tx = x0[i];
        DTYPE ty = y0[i];
        DTYPE tz = z0[i];
        for (int j = 0; j < n1; j++)
        {
            DTYPE dx = tx - x1[j];
            DTYPE dy = ty - y1[j];
            DTYPE dz = tz - z1[j];
            DTYPE r2 = dx * dx + dy * dy + dz * dz;
            DTYPE inv_r, t1;
            if (r2 == 0.0)
            {
                // coincident points: direction zeroed, diagonal value C
                inv_r = 0.0;
                t1 = C;
            } else {
                inv_r = 1.0 / sqrt(r2);
                t1 = inv_r * Ca3o4;
            }
            dx *= inv_r;
            dy *= inv_r;
            dz *= inv_r;
            int base = 3 * i * ldm + 3 * j;
            DTYPE tmp;
            // local shorthand for the 3x3 output block
            #define krnl(k, l) mat[base + k * ldm + l]
            tmp = t1 * dx;
            krnl(0, 0) = tmp * dx + t1;
            krnl(0, 1) = tmp * dy;
            krnl(0, 2) = tmp * dz;
            tmp = t1 * dy;
            krnl(1, 0) = tmp * dx;
            krnl(1, 1) = tmp * dy + t1;
            krnl(1, 2) = tmp * dz;
            tmp = t1 * dz;
            krnl(2, 0) = tmp * dx;
            krnl(2, 1) = tmp * dy;
            krnl(2, 2) = tmp * dz + t1;
            #undef krnl
        }
    }
}

// Fused bi-matvec for the Stokes kernel; vectors have 3 components per point,
// stored as three rows of stride ld0/ld1.  Assumes n1 is a multiple of
// SIMD_LEN_D (aligned loads, no tail) — TODO confirm padding contract.
static void Stokes_krnl_bimv_intrin_t(KRNL_BIMV_PARAM)
{
    EXTRACT_3D_COORD();
    CALC_STOKES_CONST();
    for (int i = 0; i < n0; i++)
    {
        vec_t txv = vec_bcast_t(x0 + i);
        vec_t tyv = vec_bcast_t(y0 + i);
        vec_t tzv = vec_bcast_t(z0 + i);
        vec_t x_in_1_i0 = vec_bcast_t(x_in_1 + i + 0 * ld0);
        vec_t x_in_1_i1 = vec_bcast_t(x_in_1 + i + 1 * ld0);
        vec_t x_in_1_i2 = vec_bcast_t(x_in_1 + i + 2 * ld0);
        vec_t xo0_0 = vec_zero_t();
        vec_t xo0_1 = vec_zero_t();
        vec_t xo0_2 = vec_zero_t();
        vec_t frsqrt_pf = vec_frsqrt_pf_t();
        for (int j = 0; j < n1; j += SIMD_LEN_D)
        {
            vec_t dx = vec_sub_t(txv, vec_load_t(x1 + j));
            vec_t dy = vec_sub_t(tyv, vec_load_t(y1 + j));
            vec_t dz = vec_sub_t(tzv, vec_load_t(z1 + j));
            vec_t r2 = vec_mul_t(dx, dx);
            r2 = vec_fmadd_t(dy, dy, r2);
            r2 = vec_fmadd_t(dz, dz, r2);
            vec_t inv_r = vec_mul_t(vec_frsqrt_t(r2), frsqrt_pf);
            dx = vec_mul_t(dx, inv_r);
            dy = vec_mul_t(dy, inv_r);
            dz = vec_mul_t(dz, inv_r);
            // select t1 = C for coincident points, Ca3o4 / r otherwise
            vec_cmp_t r2_eq_0 = vec_cmp_eq_t(r2, vec_zero_t());
            vec_t tmp0 = vec_set1_t(C);
            vec_t tmp1 = vec_mul_t(inv_r, vec_set1_t(Ca3o4));
            vec_t t1 = vec_blend_t(tmp1, tmp0, r2_eq_0);
            vec_t x_in_0_j0 = vec_load_t(x_in_0 + j + ld1 * 0);
            vec_t x_in_0_j1 = vec_load_t(x_in_0 + j + ld1 * 1);
            vec_t x_in_0_j2 = vec_load_t(x_in_0 + j + ld1 * 2);
            // tmp0 = rhat . x_in_0, tmp1 = rhat . x_in_1
            tmp0 = vec_mul_t(x_in_0_j0, dx);
            tmp0 = vec_fmadd_t(x_in_0_j1, dy, tmp0);
            tmp0 = vec_fmadd_t(x_in_0_j2, dz, tmp0);
            tmp1 = vec_mul_t(x_in_1_i0, dx);
            tmp1 = vec_fmadd_t(x_in_1_i1, dy, tmp1);
            tmp1 = vec_fmadd_t(x_in_1_i2, dz, tmp1);
            xo0_0 = vec_fmadd_t(t1, vec_fmadd_t(dx, tmp0, x_in_0_j0), xo0_0);
            xo0_1 = vec_fmadd_t(t1, vec_fmadd_t(dy, tmp0, x_in_0_j1), xo0_1);
            xo0_2 = vec_fmadd_t(t1, vec_fmadd_t(dz, tmp0, x_in_0_j2), xo0_2);
            DTYPE *x_out_1_0 = x_out_1 + j + 0 * ld1;
            DTYPE *x_out_1_1 = x_out_1 + j + 1 * ld1;
            DTYPE *x_out_1_2 = x_out_1 + j + 2 * ld1;
            vec_t xo1_0 = vec_load_t(x_out_1_0);
            vec_t xo1_1 = vec_load_t(x_out_1_1);
            vec_t xo1_2 = vec_load_t(x_out_1_2);
            xo1_0 = vec_fmadd_t(t1, vec_fmadd_t(dx, tmp1, x_in_1_i0), xo1_0);
            xo1_1 = vec_fmadd_t(t1, vec_fmadd_t(dy, tmp1, x_in_1_i1), xo1_1);
            xo1_2 = vec_fmadd_t(t1, vec_fmadd_t(dz, tmp1, x_in_1_i2), xo1_2);
            vec_store_t(x_out_1_0, xo1_0);
            vec_store_t(x_out_1_1, xo1_1);
            vec_store_t(x_out_1_2, xo1_2);
        }
        x_out_0[i + 0 * ld0] += vec_reduce_add_t(xo0_0);
        x_out_0[i + 1 * ld0] += vec_reduce_add_t(xo0_1);
        x_out_0[i + 2 * ld0] += vec_reduce_add_t(xo0_2);
    }
}

// ============================================================ //
// ======================   RPY Kernel   ====================== //
// ============================================================ //

const int RPY_krnl_bimv_flop = 82;
const int RPY_krnl_mv_flop = 70;

// Rotne-Prager-Yamakawa 3x3 tensor blocks for polydisperse particles:
// row 3 of each coordinate block holds the particle radii.  Three cases by
// separation r vs radii ta, sa: non-overlapping, partially overlapping,
// and fully contained.  Scalar reference implementation.
static void RPY_eval_std(KRNL_EVAL_PARAM)
{
    EXTRACT_3D_COORD();
    // Radii
    const DTYPE *a0 = coord0 + ld0 * 3;
    const DTYPE *a1 = coord1 + ld1 * 3;
    const DTYPE *param_ = (DTYPE*) param;
    const DTYPE eta = param_[0];
    const DTYPE C = 1.0 / (6.0 * M_PI * eta);
    for (int i = 0; i < n0; i++)
    {
        DTYPE tx = x0[i];
        DTYPE ty = y0[i];
        DTYPE tz = z0[i];
        DTYPE ta = a0[i];
        for (int j = 0; j < n1; j++)
        {
            DTYPE dx = tx - x1[j];
            DTYPE dy = ty - y1[j];
            DTYPE dz = tz - z1[j];
            DTYPE sa = a1[j];
            DTYPE r2 = dx * dx + dy * dy + dz * dz;
            DTYPE r = DSQRT(r2);
            DTYPE inv_r = (r == 0.0) ? 0.0 : 1.0 / r;
            DTYPE inv_r2 = inv_r * inv_r;
            dx *= inv_r;
            dy *= inv_r;
            dz *= inv_r;
            DTYPE t1, t2, tmp0, tmp1, tmp2;
            DTYPE ta_p_sa = ta + sa;
            DTYPE ta_m_sa = ta - sa;
            if (r > ta_p_sa)
            {
                // non-overlapping spheres
                tmp0 = C * 0.75 * inv_r;
                tmp1 = (ta * ta + sa * sa) * inv_r2;
                t1 = tmp0 * (1.0 + tmp1 / 3.0);
                t2 = tmp0 * (1.0 - tmp1);
            } else if (r > DABS(ta_m_sa)) {
                // partially overlapping spheres
                tmp0 = ta_m_sa * ta_m_sa;
                tmp1 = (inv_r2 * inv_r * C) / (ta * sa * 32.0);
                t1 = tmp0 + 3.0 * r2;
                t1 = tmp1 * (16.0 * r2 * r * ta_p_sa - t1 * t1);
                t2 = tmp0 - r2;
                t2 = tmp1 * 3.0 * t2 * t2;
            } else {
                // one sphere fully inside the other (includes r == 0)
                t1 = C / (ta > sa ? ta : sa);
                t2 = 0.0;
            }
            // write the 3x3 block: t1 * I + t2 * rhat * rhat^T
            DTYPE *krnl_ptr = mat + 3 * i * ldm + 3 * j;
            tmp0 = t2 * dx;
            tmp1 = t2 * dy;
            tmp2 = t2 * dz;
            krnl_ptr[0 * ldm + 0] = tmp0 * dx + t1;
            krnl_ptr[0 * ldm + 1] = tmp0 * dy;
            krnl_ptr[0 * ldm + 2] = tmp0 * dz;
            krnl_ptr[1 * ldm + 0] = tmp1 * dx;
            krnl_ptr[1 * ldm + 1] = tmp1 * dy + t1;
            krnl_ptr[1 * ldm + 2] = tmp1 * dz;
            krnl_ptr[2 * ldm + 0] = tmp2 * dx;
            krnl_ptr[2 * ldm + 1] = tmp2 * dy;
            krnl_ptr[2 * ldm + 2] = tmp2 * dz + t1;
        }
    }
}

// Precompute the Ewald-summation work buffer for the periodic RPY kernel:
// real-space lattice shifts for (2*nr+1)^3 images, and wavenumber tables
// (k, 1/k, scaling factor m2, k-vector components) for the half k-lattice.
// The buffer is malloc'd here; ownership passes to the caller via *workbuf_
// (the caller frees it).  NOTE(review): malloc result is not checked.
static void RPY_Ewald_init_workbuf(const DTYPE L, const DTYPE xi, const int nr, const int nk, DTYPE **workbuf_)
{
    const int r_size = (2*nr+1) * (2*nr+1) * (2*nr+1);
    const int k_size = ((2*nk+1) * (2*nk+1) * (2*nk+1) - 1) / 2;
    DTYPE *workbuf = (DTYPE*) malloc(sizeof(DTYPE) * (3 * r_size + 6 * k_size));
    // carve the single allocation into 3 real-space and 6 k-space tables
    DTYPE *rx_shift_arr = workbuf;
    DTYPE *ry_shift_arr = rx_shift_arr + r_size;
    DTYPE *rz_shift_arr = ry_shift_arr + r_size;
    DTYPE *k_arr    = rz_shift_arr + r_size;
    DTYPE *kinv_arr = k_arr    + k_size;
    DTYPE *m2_arr   = kinv_arr + k_size;
    DTYPE *kx_arr   = m2_arr   + k_size;
    DTYPE *ky_arr   = kx_arr   + k_size;
    DTYPE *kz_arr   = ky_arr   + k_size;
    const DTYPE V_inv = 1.0 / (L * L * L);
    const DTYPE _2_PI_o_L = 2.0 * M_PI / L;
    int idx = 0;
    for (int ix = -nr; ix <= nr; ix++)
    for (int iy = -nr; iy <= nr; iy++)
    for (int iz = -nr; iz <= nr; iz++)
    {
        DTYPE k = _2_PI_o_L *
+ a_j * a_j);
            // 1. Real-space sum over periodic images
            #pragma omp simd
            for (int idx_r = 0; idx_r < r_size; idx_r++)
            {
                DTYPE rvec_x = dx + rx_shift_arr[idx_r];
                DTYPE rvec_y = dy + ry_shift_arr[idx_r];
                DTYPE rvec_z = dz + rz_shift_arr[idx_r];
                DTYPE r2 = rvec_x * rvec_x + rvec_y * rvec_y + rvec_z * rvec_z;
                DTYPE r4 = r2 * r2;
                DTYPE r  = DSQRT(r2);
                DTYPE rinv = 1.0 / r;
                DTYPE rinv2 = rinv * rinv;
                DTYPE rinv3 = rinv * rinv2;
                DTYPE erfc_xi_r = DERFC(xi * r);
                DTYPE pi_exp = inv_sqrt_PI * DEXP(-xi2 * r2);
                DTYPE tmp0 = 0.75 * rinv;
                DTYPE tmp1 = 0.5 * rinv3 * a3;
                DTYPE tmp2 = 4.0 * xi7 * a3 * r4;
                DTYPE tmp3 = 3.0 * xi3 * r2;
                DTYPE tmp4 = 4.0 * xi5 * a3 * r2;
                DTYPE tmp5 = 2.0 * xi3 * a3;
                DTYPE tmp6 = xi * a3 * rinv2;
                // m11: isotropic part, m12: rhat*rhat^T part of the image term
                DTYPE m11 = (tmp0 + tmp1) * erfc_xi_r + ( tmp2 + tmp3 - 5.0*tmp4 - 4.5*xi + 7.0*tmp5 + tmp6) * pi_exp;
                DTYPE m12 = (tmp0 - 3.0*tmp1) * erfc_xi_r + (-tmp2 - tmp3 + 4.0*tmp4 + 1.5*xi - tmp5 - 3.0*tmp6) * pi_exp;
                if (r2 == 0)
                {
                    // the zero-shift self image contributes nothing here
                    m11 = 0.0;
                    m12 = 0.0;
                    rvec_x = 0.0;
                    rvec_y = 0.0;
                    rvec_z = 0.0;
                } else {
                    rvec_x *= rinv;
                    rvec_y *= rinv;
                    rvec_z *= rinv;
                }
                a00 += m12 * rvec_x * rvec_x + m11;
                a10 += m12 * rvec_x * rvec_y;
                a20 += m12 * rvec_x * rvec_z;
                a11 += m12 * rvec_y * rvec_y + m11;
                a21 += m12 * rvec_y * rvec_z;
                a22 += m12 * rvec_z * rvec_z + m11;
            }  // End of idx_r loop
            // 2. Reciprocal-space sum over the precomputed half k-lattice
            #pragma omp simd
            for (int idx_k = 0; idx_k < k_size; idx_k++)
            {
                DTYPE k = k_arr[idx_k];
                DTYPE kinv = kinv_arr[idx_k];
                DTYPE m2 = m2_arr[idx_k];
                DTYPE kvec_x = kx_arr[idx_k];
                DTYPE kvec_y = ky_arr[idx_k];
                DTYPE kvec_z = kz_arr[idx_k];
                DTYPE t = m2 * DCOS(kvec_x * dx + kvec_y * dy + kvec_z * dz) * (1.0 - a3 * k * k / 3.0);
                kvec_x *= kinv;
                kvec_y *= kinv;
                kvec_z *= kinv;
                // projector (I - khat * khat^T) scaled by t
                a00 += t * (1.0 - kvec_x * kvec_x);
                a10 += t * - kvec_x * kvec_y;
                a20 += t * - kvec_x * kvec_z;
                a11 += t * (1.0 - kvec_y * kvec_y);
                a21 += t * - kvec_y * kvec_z;
                a22 += t * (1.0 - kvec_z * kvec_z);
            }  // End of idx_k loop
            DTYPE r2 = dx * dx + dy * dy + dz * dz;
            // 3. Overlap correction (i and j are different particles)
            if (r2 >= 1e-15 * 1e-15)
            {
                // wrap the separation into the primary cell, components in (-L/2, L/2]
                DTYPE rvec_x = DFMOD(dx + 2 * L, L);
                DTYPE rvec_y = DFMOD(dy + 2 * L, L);
                DTYPE rvec_z = DFMOD(dz + 2 * L, L);
                rvec_x = (rvec_x > 0.5 * L) ? rvec_x - L : rvec_x;
                rvec_y = (rvec_y > 0.5 * L) ? rvec_y - L : rvec_y;
                rvec_z = (rvec_z > 0.5 * L) ? rvec_z - L : rvec_z;
                DTYPE r2 = rvec_x * rvec_x + rvec_y * rvec_y + rvec_z * rvec_z;
                DTYPE r  = DSQRT(r2);
                DTYPE r3 = r2 * r;
                DTYPE rinv = 1.0 / r;
                DTYPE rinv3 = rinv * rinv * rinv;
                rvec_x *= rinv;
                rvec_y *= rinv;
                rvec_z *= rinv;
                DTYPE t1, t2;
                DTYPE tmp0 = (a_i * a_i + a_j * a_j) / r2;
                DTYPE tmp1 = 0.75 * rinv * (1.0 + tmp0 / 3.0);
                DTYPE tmp2 = 0.75 * rinv * (1.0 - tmp0);
                DTYPE diff_aij = a_i - a_j;
                if (r > a_i + a_j)
                {
                    // So t1 and t2 will be 0
                    t1 = tmp1;
                    t2 = tmp2;
                } else if (r > DABS(diff_aij)) {
                    // partially overlapping spheres
                    DTYPE tmp3 = rinv3 / (32.0 * a_i * a_j);
                    t1 = diff_aij * diff_aij + 3.0 * r2;
                    t1 = (16.0 * r3 * (a_i + a_j) - t1 * t1) * tmp3;
                    t2 = diff_aij * diff_aij - r2;
                    t2 = 3.0 * t2 * t2 * tmp3;
                } else {
                    // one sphere fully inside the other
                    t1 = 1.0 / (a_i > a_j ? a_i : a_j);
                    t2 = 0;
                }
                // subtract the unbounded-domain term already counted above
                t1 -= tmp1;
                t2 -= tmp2;
                a00 += t2 * rvec_x * rvec_x + t1;
                a10 += t2 * rvec_x * rvec_y;
                a20 += t2 * rvec_x * rvec_z;
                a11 += t2 * rvec_y * rvec_y + t1;
                a21 += t2 * rvec_y * rvec_z;
                a22 += t2 * rvec_z * rvec_z + t1;
            }  // End of "if (j > i)"
            // 4. Self part (i and j are the same particle)
            if (r2 < 1e-15 * 1e-15)
            {
                a00 += self_t;
                a11 += self_t;
                a22 += self_t;
            }
            // 5. Write global matrix block (symmetric 3x3, only upper
            // triangle computed, mirrored on store)
            DTYPE *mat_blk = mat + (i * 3) * ldm + (j * 3);
            mat_blk[0 * ldm + 0] = a00;
            mat_blk[0 * ldm + 1] = a10;
            mat_blk[0 * ldm + 2] = a20;
            mat_blk[1 * ldm + 0] = a10;
            mat_blk[1 * ldm + 1] = a11;
            mat_blk[1 * ldm + 2] = a21;
            mat_blk[2 * ldm + 0] = a20;
            mat_blk[2 * ldm + 1] = a21;
            mat_blk[2 * ldm + 2] = a22;
        }  // End of j loop
    }  // End of i loop
}

// Vectorized RPY matvec: x_out += K * x_in, with 3 components per point
// stored as rows of stride ld0/ld1.  The three separation cases of
// RPY_eval_std are computed branch-free and selected with vec_blend_t.
// Assumes n1 is a multiple of SIMD_LEN_D (aligned loads, no tail) —
// TODO confirm padding contract.
static void RPY_krnl_mv_intrin_t(KRNL_MV_PARAM)
{
    EXTRACT_3D_COORD();
    // Radii
    const DTYPE *a0 = coord0 + ld0 * 3;
    const DTYPE *a1 = coord1 + ld1 * 3;
    const DTYPE *param_ = (DTYPE*) param;
    const DTYPE eta = param_[0];
    const DTYPE C = 1.0 / (6.0 * M_PI * eta);
    const vec_t vC    = vec_set1_t(C);
    const vec_t vC3o4 = vec_set1_t(C * 0.75);
    const vec_t v1    = vec_set1_t(1.0);
    const vec_t v3    = vec_set1_t(3.0);
    const vec_t v16   = vec_set1_t(16.0);
    const vec_t v32   = vec_set1_t(32.0);
    const vec_t v1o3  = vec_set1_t(1.0 / 3.0);
    for (int i = 0; i < n0; i++)
    {
        vec_t txv = vec_bcast_t(x0 + i);
        vec_t tyv = vec_bcast_t(y0 + i);
        vec_t tzv = vec_bcast_t(z0 + i);
        vec_t ta  = vec_bcast_t(a0 + i);
        vec_t x_out_i0 = vec_zero_t();
        vec_t x_out_i1 = vec_zero_t();
        vec_t x_out_i2 = vec_zero_t();
        vec_t frsqrt_pf = vec_frsqrt_pf_t();
        for (int j = 0; j < n1; j += SIMD_LEN_D)
        {
            vec_t dx = vec_sub_t(txv, vec_load_t(x1 + j));
            vec_t dy = vec_sub_t(tyv, vec_load_t(y1 + j));
            vec_t dz = vec_sub_t(tzv, vec_load_t(z1 + j));
            vec_t sa = vec_load_t(a1 + j);
            vec_t r2 = vec_mul_t(dx, dx);
            r2 = vec_fmadd_t(dy, dy, r2);
            r2 = vec_fmadd_t(dz, dz, r2);
            vec_t inv_r = vec_mul_t(vec_frsqrt_t(r2), frsqrt_pf);
            vec_t inv_r2 = vec_mul_t(inv_r, inv_r);
            vec_t r = vec_mul_t(inv_r, r2);
            dx = vec_mul_t(dx, inv_r);
            dy = vec_mul_t(dy, inv_r);
            dz = vec_mul_t(dz, inv_r);
            vec_t tmp0, tmp1, t1, t2;
            vec_t t1_0, t2_0, t1_1, t2_1, t1_2, t2_2;
            vec_t ta_p_sa = vec_add_t(ta, sa);
            vec_t ta_m_sa = vec_max_t(vec_sub_t(ta, sa), vec_sub_t(sa, ta));   // |ta - sa|
            // r > ta + sa
            tmp0 = vec_mul_t(vC3o4, inv_r);
            tmp1 = vec_mul_t(vec_fmadd_t(sa, sa, vec_mul_t(ta, ta)), inv_r2);
            t1_0 = vec_mul_t(tmp0,
vec_fmadd_t(v1o3, tmp1, v1)); t2_0 = vec_mul_t(tmp0, vec_sub_t(v1, tmp1)); // ta + sa >= r > abs(ta - sa) tmp0 = vec_mul_t(ta_m_sa, ta_m_sa); tmp1 = vec_div_t(vec_mul_t(vec_mul_t(vC, inv_r2), inv_r), vec_mul_t(vec_mul_t(ta, sa), v32)); t1_1 = vec_fmadd_t(v3, r2, tmp0); t1_1 = vec_mul_t(t1_1, t1_1); t1_1 = vec_fmsub_t(vec_mul_t(v16, r2), vec_mul_t(r, ta_p_sa), t1_1); t1_1 = vec_mul_t(tmp1, t1_1); t2_1 = vec_sub_t(tmp0, r2); t2_1 = vec_mul_t(t2_1, t2_1); t2_1 = vec_mul_t(vec_mul_t(tmp1, v3), t2_1); // r <= abs(ta - sa) t1_2 = vec_div_t(vC, vec_max_t(ta, sa)); t2_2 = vec_set1_t(0.0); vec_cmp_t r_gt_ta_p_da = vec_cmp_gt_t(r, ta_p_sa); vec_cmp_t r_le_ta_m_da = vec_cmp_le_t(r, ta_m_sa); t1 = vec_blend_t(t1_1, t1_0, r_gt_ta_p_da); t1 = vec_blend_t(t1, t1_2, r_le_ta_m_da); t2 = vec_blend_t(t2_1, t2_0, r_gt_ta_p_da); t2 = vec_blend_t(t2, t2_2, r_le_ta_m_da); vec_t x_in_j0 = vec_load_t(x_in + j + ld1 * 0); vec_t x_in_j1 = vec_load_t(x_in + j + ld1 * 1); vec_t x_in_j2 = vec_load_t(x_in + j + ld1 * 2); tmp0 = vec_mul_t(x_in_j0, dx); tmp0 = vec_fmadd_t(x_in_j1, dy, tmp0); tmp0 = vec_fmadd_t(x_in_j2, dz, tmp0); tmp0 = vec_mul_t(tmp0, t2); x_out_i0 = vec_fmadd_t(dx, tmp0, x_out_i0); x_out_i1 = vec_fmadd_t(dy, tmp0, x_out_i1); x_out_i2 = vec_fmadd_t(dz, tmp0, x_out_i2); x_out_i0 = vec_fmadd_t(t1, x_in_j0, x_out_i0); x_out_i1 = vec_fmadd_t(t1, x_in_j1, x_out_i1); x_out_i2 = vec_fmadd_t(t1, x_in_j2, x_out_i2); } x_out[i + 0 * ld0] += vec_reduce_add_t(x_out_i0); x_out[i + 1 * ld0] += vec_reduce_add_t(x_out_i1); x_out[i + 2 * ld0] += vec_reduce_add_t(x_out_i2); } } static void RPY_krnl_bimv_intrin_t(KRNL_BIMV_PARAM) { EXTRACT_3D_COORD(); // Radii const DTYPE *a0 = coord0 + ld0 * 3; const DTYPE *a1 = coord1 + ld1 * 3; const DTYPE *param_ = (DTYPE*) param; const DTYPE eta = param_[0]; const DTYPE C = 1.0 / (6.0 * M_PI * eta); const vec_t vC = vec_set1_t(C); const vec_t vC3o4 = vec_set1_t(C * 0.75); const vec_t v1 = vec_set1_t(1.0); const vec_t v3 = vec_set1_t(3.0); const vec_t v16 = 
vec_set1_t(16.0); const vec_t v32 = vec_set1_t(32.0); const vec_t v1o3 = vec_set1_t(1.0 / 3.0); for (int i = 0; i < n0; i++) { vec_t txv = vec_bcast_t(x0 + i); vec_t tyv = vec_bcast_t(y0 + i); vec_t tzv = vec_bcast_t(z0 + i); vec_t ta = vec_bcast_t(a0 + i); vec_t x_in_1_i0 = vec_bcast_t(x_in_1 + i + 0 * ld0); vec_t x_in_1_i1 = vec_bcast_t(x_in_1 + i + 1 * ld0); vec_t x_in_1_i2 = vec_bcast_t(x_in_1 + i + 2 * ld0); vec_t xo0_0 = vec_zero_t(); vec_t xo0_1 = vec_zero_t(); vec_t xo0_2 = vec_zero_t(); vec_t frsqrt_pf = vec_frsqrt_pf_t(); for (int j = 0; j < n1; j += SIMD_LEN_D) { vec_t dx = vec_sub_t(txv, vec_load_t(x1 + j)); vec_t dy = vec_sub_t(tyv, vec_load_t(y1 + j)); vec_t dz = vec_sub_t(tzv, vec_load_t(z1 + j)); vec_t sa = vec_load_t(a1 + j); vec_t r2 = vec_mul_t(dx, dx); r2 = vec_fmadd_t(dy, dy, r2); r2 = vec_fmadd_t(dz, dz, r2); vec_t inv_r = vec_mul_t(vec_frsqrt_t(r2), frsqrt_pf); vec_t inv_r2 = vec_mul_t(inv_r, inv_r); vec_t r = vec_mul_t(inv_r, r2); dx = vec_mul_t(dx, inv_r); dy = vec_mul_t(dy, inv_r); dz = vec_mul_t(dz, inv_r); vec_t tmp0, tmp1, t1, t2; vec_t t1_0, t2_0, t1_1, t2_1, t1_2, t2_2; vec_t ta_p_sa = vec_add_t(ta, sa); vec_t ta_m_sa = vec_max_t(vec_sub_t(ta, sa), vec_sub_t(sa, ta)); // r > ta + sa tmp0 = vec_mul_t(vC3o4, inv_r); tmp1 = vec_mul_t(vec_fmadd_t(sa, sa, vec_mul_t(ta, ta)), inv_r2); t1_0 = vec_mul_t(tmp0, vec_fmadd_t(v1o3, tmp1, v1)); t2_0 = vec_mul_t(tmp0, vec_sub_t(v1, tmp1)); // ta + sa >= r > abs(ta - sa) tmp0 = vec_mul_t(ta_m_sa, ta_m_sa); tmp1 = vec_div_t(vec_mul_t(vec_mul_t(vC, inv_r2), inv_r), vec_mul_t(vec_mul_t(ta, sa), v32)); t1_1 = vec_fmadd_t(v3, r2, tmp0); t1_1 = vec_mul_t(t1_1, t1_1); t1_1 = vec_fmsub_t(vec_mul_t(v16, r2), vec_mul_t(r, ta_p_sa), t1_1); t1_1 = vec_mul_t(tmp1, t1_1); t2_1 = vec_sub_t(tmp0, r2); t2_1 = vec_mul_t(t2_1, t2_1); t2_1 = vec_mul_t(vec_mul_t(tmp1, v3), t2_1); // r <= abs(ta - sa) t1_2 = vec_div_t(vC, vec_max_t(ta, sa)); t2_2 = vec_set1_t(0.0); vec_cmp_t r_gt_ta_p_da = vec_cmp_gt_t(r, ta_p_sa); 
vec_cmp_t r_le_ta_m_da = vec_cmp_le_t(r, ta_m_sa); t1 = vec_blend_t(t1_1, t1_0, r_gt_ta_p_da); t1 = vec_blend_t(t1, t1_2, r_le_ta_m_da); t2 = vec_blend_t(t2_1, t2_0, r_gt_ta_p_da); t2 = vec_blend_t(t2, t2_2, r_le_ta_m_da); vec_t x_in_0_j0 = vec_load_t(x_in_0 + j + ld1 * 0); vec_t x_in_0_j1 = vec_load_t(x_in_0 + j + ld1 * 1); vec_t x_in_0_j2 = vec_load_t(x_in_0 + j + ld1 * 2); tmp0 = vec_mul_t(x_in_0_j0, dx); tmp0 = vec_fmadd_t(x_in_0_j1, dy, tmp0); tmp0 = vec_fmadd_t(x_in_0_j2, dz, tmp0); tmp0 = vec_mul_t(tmp0, t2); tmp1 = vec_mul_t(x_in_1_i0, dx); tmp1 = vec_fmadd_t(x_in_1_i1, dy, tmp1); tmp1 = vec_fmadd_t(x_in_1_i2, dz, tmp1); tmp1 = vec_mul_t(tmp1, t2); DTYPE *x_out_1_0 = x_out_1 + j + 0 * ld1; DTYPE *x_out_1_1 = x_out_1 + j + 1 * ld1; DTYPE *x_out_1_2 = x_out_1 + j + 2 * ld1; vec_t xo1_0 = vec_load_t(x_out_1_0); vec_t xo1_1 = vec_load_t(x_out_1_1); vec_t xo1_2 = vec_load_t(x_out_1_2); xo0_0 = vec_fmadd_t(dx, tmp0, xo0_0); xo0_1 = vec_fmadd_t(dy, tmp0, xo0_1); xo0_2 = vec_fmadd_t(dz, tmp0, xo0_2); xo0_0 = vec_fmadd_t(t1, x_in_0_j0, xo0_0); xo0_1 = vec_fmadd_t(t1, x_in_0_j1, xo0_1); xo0_2 = vec_fmadd_t(t1, x_in_0_j2, xo0_2); xo1_0 = vec_fmadd_t(dx, tmp1, xo1_0); xo1_1 = vec_fmadd_t(dy, tmp1, xo1_1); xo1_2 = vec_fmadd_t(dz, tmp1, xo1_2); xo1_0 = vec_fmadd_t(t1, x_in_1_i0, xo1_0); xo1_1 = vec_fmadd_t(t1, x_in_1_i1, xo1_1); xo1_2 = vec_fmadd_t(t1, x_in_1_i2, xo1_2); vec_store_t(x_out_1_0, xo1_0); vec_store_t(x_out_1_1, xo1_1); vec_store_t(x_out_1_2, xo1_2); } x_out_0[i + 0 * ld0] += vec_reduce_add_t(xo0_0); x_out_0[i + 1 * ld0] += vec_reduce_add_t(xo0_1); x_out_0[i + 2 * ld0] += vec_reduce_add_t(xo0_2); } } #ifdef __cplusplus } #endif #endif
GB_unop__isinf_bool_fc32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop_apply__isinf_bool_fc32
// op(A') function: GB_unop_tran__isinf_bool_fc32

// C type:   bool
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = (aij)
// unaryop:  cij = GB_cisinff (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: true iff the single-precision complex value is infinite
#define GB_OP(z, x) \
    z = GB_cisinff (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC32_t z = (aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = (aij) ; \
    Cx [pC] = GB_cisinff (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISINF || GxB_NO_BOOL || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Computes Cx [p] = GB_cisinff (Ax [p]) for all p in [0, anz); when A is
// bitmap (Ab != NULL), entries with Ab [p] == 0 are skipped.  Parallelized
// with a static OpenMP schedule over nthreads threads.
GrB_Info GB_unop_apply__isinf_bool_fc32
(
    bool *Cx,                       // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = (aij) ;
            Cx [p] = GB_cisinff (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = (aij) ;
            Cx [p] = GB_cisinff (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual work is in the included template GB_unop_transpose.c, driven by
// the GB_* macros defined above.
GrB_Info GB_unop_tran__isinf_bool_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__atan_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB (_unop_apply__atan_fp32_fp32)
// op(A') function: GB (_unop_tran__atan_fp32_fp32)

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = atanf (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = atanf (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ; \
    Cx [pC] = atanf (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ATAN || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Computes Cx [p] = atanf (Ax [p]) for all p in [0, anz); when A is bitmap
// (Ab != NULL), entries with Ab [p] == 0 are skipped.  Parallelized with a
// static OpenMP schedule over nthreads threads.
GrB_Info GB (_unop_apply__atan_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = atanf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = atanf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual work is in the included template GB_unop_transpose.c, driven by
// the GB_* macros defined above.
GrB_Info GB (_unop_tran__atan_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
sections-1.c
/* { dg-do compile } */

extern void bar(int);

/* "sections" worksharing with nowait: the statement before the first
   "#pragma omp section" forms the first (implicit) section; each section
   executes once, by at most one thread, with no barrier at the end.  */
void f1(void)
{
  #pragma omp sections nowait
    {
      bar (1);
      #pragma omp section
      bar (2);
      #pragma omp section
      bar (3);
      #pragma omp section
      bar (4);
      #pragma omp section
      bar (5);
    }
}

/* Same construct with an explicit first section whose body is a compound
   statement, and with the default implicit barrier at the end.  */
void f2(void)
{
  #pragma omp sections
    {
      #pragma omp section
      {
        bar (1);
        bar (1);
      }
      #pragma omp section
      bar (2);
      #pragma omp section
      bar (3);
      #pragma omp section
      bar (4);
      #pragma omp section
      bar (5);
    }
}
testing_dsyrk.c
/** * * @file testing_dsyrk.c * * PLASMA testing routines * PLASMA is a software package provided by Univ. of Tennessee, * Univ. of California Berkeley and Univ. of Colorado Denver * * @version 2.6.0 * @author Mathieu Faverge * @date 2010-11-15 * @generated d Tue Jan 7 11:45:18 2014 * **/ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <plasma.h> #include <cblas.h> #include <lapacke.h> #include <core_blas.h> #include "testing_dmain.h" static int check_solution(PLASMA_enum uplo, PLASMA_enum trans, int N, int K, double alpha, double *A, int LDA, double beta, double *Cref, double *Cplasma, int LDC); int testing_dsyrk(int argc, char **argv) { /* Check for number of arguments*/ if ( argc != 6){ USAGE("SYRK", "alpha beta M N LDA LDC", " - alpha : alpha coefficient\n" " - beta : beta coefficient\n" " - N : number of columns and rows of matrix C and number of row of matrix A\n" " - K : number of columns of matrix A\n" " - LDA : leading dimension of matrix A\n" " - LDC : leading dimension of matrix C\n"); return -1; } double alpha = (double) atol(argv[0]); double beta = (double) atol(argv[1]); int N = atoi(argv[2]); int K = atoi(argv[3]); int LDA = atoi(argv[4]); int LDC = atoi(argv[5]); int NKmax = max(N, K); double eps; int info_solution; int u, t; size_t LDAxK = LDA*NKmax; size_t LDCxN = LDC*N; double *A = (double *)malloc(LDAxK*sizeof(double)); #pragma omp register([LDAxK]A) double *C = (double *)malloc(LDCxN*sizeof(double)); #pragma omp register([LDCxN]C) double *Cinit = (double *)malloc(LDCxN*sizeof(double)); #pragma omp register([LDCxN]Cinit) double *Cfinal = (double *)malloc(LDCxN*sizeof(double)); #pragma omp register([LDCxN]Cfinal) /* Check if unable to allocate memory */ if ( (!A) || (!Cinit) || (!Cfinal) ){ printf("Out of Memory \n "); return -2; } eps = LAPACKE_dlamch_work('e'); printf("\n"); printf("------ TESTS FOR PLASMA DSYRK ROUTINE ------- \n"); printf(" Size of the Matrix A %d by %d\n", N, K); printf("\n"); printf(" 
The matrix A is randomly generated for each test.\n"); printf("============\n"); printf(" The relative machine precision (eps) is to be %e \n",eps); printf(" Computational tests pass if scaled residuals are less than 10.\n"); /*---------------------------------------------------------- * TESTING DSYRK */ /* Initialize A */ LAPACKE_dlarnv_work(IONE, ISEED, LDAxK, A); /* Initialize C */ PLASMA_dplgsy( (double)0., N, C, LDC, 51 ); for (u=0; u<2; u++) { for (t=0; t<2; t++) { memcpy(Cinit, C, LDCxN*sizeof(double)); memcpy(Cfinal, C, LDCxN*sizeof(double)); /* PLASMA DSYRK */ PLASMA_dsyrk(uplo[u], trans[t], N, K, alpha, A, LDA, beta, Cfinal, LDC); /* Check the solution */ info_solution = check_solution(uplo[u], trans[t], N, K, alpha, A, LDA, beta, Cinit, Cfinal, LDC); if (info_solution == 0) { printf("***************************************************\n"); printf(" ---- TESTING DSYRK (%5s, %s) ........... PASSED !\n", uplostr[u], transstr[t]); printf("***************************************************\n"); } else { printf("************************************************\n"); printf(" - TESTING DSYRK (%5s, %s) ... FAILED !\n", uplostr[u], transstr[t]); printf("************************************************\n"); } } } free(A); free(C); free(Cinit); free(Cfinal); return 0; } /*-------------------------------------------------------------- * Check the solution */ static int check_solution(PLASMA_enum uplo, PLASMA_enum trans, int N, int K, double alpha, double *A, int LDA, double beta, double *Cref, double *Cplasma, int LDC) { int info_solution; double Anorm, Cinitnorm, Cplasmanorm, Clapacknorm, Rnorm; double eps; double beta_const; double result; double *work = (double *)malloc(max(N, K)* sizeof(double)); beta_const = -1.0; Anorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), (trans == PlasmaNoTrans) ? N : K, (trans == PlasmaNoTrans) ? 
K : N, A, LDA, work); Cinitnorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), N, N, Cref, LDC, work); Cplasmanorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), N, N, Cplasma, LDC, work); cblas_dsyrk(CblasColMajor, (CBLAS_UPLO)uplo, (CBLAS_TRANSPOSE)trans, N, K, (alpha), A, LDA, (beta), Cref, LDC); Clapacknorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), N, N, Cref, LDC, work); cblas_daxpy(LDC*N, (beta_const), Cplasma, 1, Cref, 1); Rnorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), N, N, Cref, LDC, work); eps = LAPACKE_dlamch_work('e'); printf("Rnorm %e, Anorm %e, Cinitnorm %e, Cplasmanorm %e, Clapacknorm %e\n", Rnorm, Anorm, Cinitnorm, Cplasmanorm, Clapacknorm); result = Rnorm / ((Anorm + Cinitnorm) * N * eps); printf("============\n"); printf("Checking the norm of the difference against reference DSYRK \n"); printf("-- ||Cplasma - Clapack||_oo/((||A||_oo+||C||_oo).N.eps) = %e \n", result); if ( isinf(Clapacknorm) || isinf(Cplasmanorm) || isnan(result) || isinf(result) || (result > 10.0) ) { printf("-- The solution is suspicious ! \n"); info_solution = 1; } else { printf("-- The solution is CORRECT ! 
\n"); info_solution= 0 ; } free(work); return info_solution; } int timing_dsyrk(int argc, char **argv) { /* Check for number of arguments*/ if ( argc != 8){ USAGE("SYRK", "alpha beta N K LDA LDC bs rep", " - alpha : alpha coefficient\n" " - beta : beta coefficient\n" " - N : number of columns and rows of matrix C and number of row of matrix A\n" " - K : number of columns of matrix A\n" " - LDA : leading dimension of matrix A\n" " - LDC : leading dimension of matrix C\n"); return -1; } double alpha = (double) atol(argv[0]); double beta = (double) atol(argv[1]); int N = atoi(argv[2]); int K = atoi(argv[3]); int LDA = atoi(argv[4]); int LDC = atoi(argv[5]); int bs = atoi(argv[6]); int rep = atoi(argv[7]); int NKmax = max(N, K); size_t LDAxK = LDA*NKmax; size_t LDCxN = LDC*N; double *A = (double *)malloc(LDAxK*sizeof(double)); double *C = (double *)malloc(LDCxN*sizeof(double)); double start, end; double elapsed; int num_threads; /* Check if unable to allocate memory */ if ( (!A) || (!C) ){ printf("Out of Memory \n "); return -2; } LAPACKE_dlarnv_work(IONE, ISEED, LDAxK, A); LAPACKE_dlarnv_work(IONE, ISEED, LDCxN, C); // PLASMA_dplgsy( (double)0., N, C, LDC, 51 ); PLASMA_Set(PLASMA_TILE_SIZE, bs); int i; for ( i = 0; i < rep; ++i ) { start = gtime(); PLASMA_dsyrk(PlasmaUpper, PlasmaNoTrans, N, K, alpha, A, LDA, beta, C, LDC); end = gtime(); elapsed += end - start; } num_threads = omp_get_max_threads(); dump_info("plasma_dsyrk.log", num_threads, elapsed, rep); free(A); free(C); return 0; }
DRB113-default-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
Two-dimensional array computation:
default(none) to enforce explicitly listing all variables in data-sharing
attribute clauses; default(shared) to cover another option.
*/

/* NOTE(review): printf is called below but no #include <stdio.h> is visible
   in this file; that relies on an implicit declaration (invalid in modern C)
   -- confirm and add the header at file scope. */
int a[100][100];
int b[100][100];

int main()
{
  int i, j;

  /* Initialize a and b.  The nested "parallel for" spawns an inner parallel
     region per outer iteration; i is private to the outer loop, j is
     explicitly private in both. */
  #pragma omp parallel for shared(a) private(i, j)
  for (i = 0; i < 100; i++)
  {
    #pragma omp parallel for shared(a) private(j)
    for (j = 0; j < 100; j++)
    {
      a[i][j] = i;
      b[i][j] = i;   /* b is shared implicitly (only a is listed) */
    }
  }

  /* Increment a with explicit shared(a) clauses. */
  #pragma omp parallel for shared(a) private(i, j)
  for (i = 0; i < 100; i++)
    #pragma omp parallel for shared(a) private(j)
    for (j = 0; j < 100; j++)
      a[i][j] = a[i][j] + 1;

  /* Increment b relying on the default (shared) data-sharing attribute. */
  #pragma omp parallel for private(i, j)
  for (i = 0; i < 100; i++)
    #pragma omp parallel for private(j)
    for (j = 0; j < 100; j++)
      b[i][j] = b[i][j] + 1;

  /* Serial readback of the results. */
  for (i = 0; i < 100; i++)
    for (j = 0; j < 100; j++)
      printf("%d %d\n", a[i][j], b[i][j]);

  return 0;
}
ClassicWots.h
#ifndef CLASSIC_WOTS #define CLASSIC_WOTS #include "primitives/AbstractDigest.h" #include "utils/ByteArray.hpp" #include <sstream> #include <iostream> #include <math.h> template<class D, int W, class Enable = void> class ClassicWots; template <class D, int W> class ClassicWots <D, W, typename std::enable_if<std::is_base_of<AbstractDigest, D>::value>::type> : protected std::decay<D>::type { public: ClassicWots() { paramCheck(); this->current_state = ClassicWots::INITIALIZED; block_size = std::ceil(log2(W)); if(private_seed.size()==0) private_seed = hstoba("01020304FFFF"); }; ClassicWots(const ByteArray& seed) : ClassicWots() { private_seed = seed; }; //constexpr?? virtual const unsigned int t() const noexcept { return t1()+t2(); }; virtual const unsigned int t1() const noexcept { float u = (float)this->bitLen()/(float)this->block_size; return std::ceil(u); }; virtual const unsigned int t2() const noexcept { float u = (log2(t1()*(W-1)))/(float)this->block_size; return (const unsigned int) std::floor(u) + 1; }; virtual const unsigned int w() const noexcept { return W; }; virtual const unsigned int n() const noexcept { return this->len(); }; virtual const ByteArray publicKey(){ loadPublicKey(); return this->public_key; }; virtual const std::vector<ByteArray> privateKey() { loadPrivateKey(); return this->private_key; }; virtual void loadPrivateKey() { if(not this->privKeyIsLoaded()) { this->genPrivateKey(); this->current_state += ClassicWots::PRIV_KEY_LOADED; } }; virtual void loadPublicKey() { if(not this->pubKeyIsLoaded()) { this->genPublicKey(); this->current_state += ClassicWots::PUB_KEY_LOADED; } }; virtual void loadKeys() { loadPrivateKey(); loadPublicKey(); }; virtual void clearPrivateKey() { if(this->privKeyIsLoaded()) { this->private_key.clear(); this->current_state -= ClassicWots::PRIV_KEY_LOADED; } }; virtual void clearPublicKey() { if(this->pubKeyIsLoaded()) { this->public_key = ByteArray(); this->current_state -= ClassicWots::PUB_KEY_LOADED; } }; virtual 
void clearKeys() { this->private_key.clear(); this->public_key = ByteArray(); this->current_state = ClassicWots::INITIALIZED; }; virtual const std::vector<ByteArray> sign(ByteArray& data) { std::vector<unsigned int> blocks = this->genFingerprint(data); std::vector<unsigned int> cs = checksum(blocks); blocks.insert(blocks.end(), cs.begin(), cs.end()); std::vector<ByteArray> signature(blocks.size() + cs.size()); //#pragma omp parallel for for(long unsigned int i = 0; i < blocks.size(); i++){ signature[i] = this->digestChain(this->private_key[i], W - 1 - blocks[i]); } return signature; }; virtual bool verify(ByteArray& data, std::vector<ByteArray>& signature) { if(not this->pubKeyIsLoaded()) return false; std::vector<unsigned int> blocks = this->genFingerprint(data); std::vector<unsigned int> cs = checksum(blocks); blocks.insert(blocks.end(), cs.begin(), cs.end()); ByteArray check; //#pragma omp parallel for for(long unsigned int i = 0; i < blocks.size(); i++) { check += this->digestChain(signature[i], blocks[i]); } check = this->digest(check); //TODO( We can improve this using xor and vactor iterator) if( std::to_string(this->public_key).compare(std::to_string(check)) == 0 ) return true; return false; }; virtual const std::vector<unsigned int> checksum(std::vector<unsigned int>& blocks) { std::vector<unsigned int> checksum; int sum = 0; for(auto &b : blocks) sum += W -1 - b; std::stringstream ss; ss << std::hex << sum; ByteArray aux = hstoba(ss.str()); std::vector<unsigned int> ret = this->toBaseW(aux); int rm = ret.size() - this->t2(); if(rm > 0) { ret.erase(ret.begin(), ret.begin()+rm); } if(rm < 0) { std::vector<unsigned int> aux(abs(rm), 0); ret.insert(ret.begin(), aux.begin(), aux.end()); } return ret; }; virtual std::vector<unsigned int> genFingerprint(ByteArray& data) { ByteArray fingerprint = this->digest(data); return this->toBaseW(fingerprint); }; protected: virtual void paramCheck() { //static_assert( W == 4 || W == 16 || W == 256 || W == 65536, 
"Winternitz Parameter W not supported."); }; virtual void genPrivateKey() { const unsigned int key_len = this->t(); //TODO(Perin): Use PRF and SEED; for(unsigned int i = 0; i < key_len; i++) { this->private_key.push_back(this->digest(this->private_seed)); } }; virtual void genPublicKey() { this->loadPrivateKey(); ByteArray pub; const unsigned int S = W - 1; for(long unsigned int i = 0; i < this->private_key.size(); i++) pub += this->digestChain(this->private_key[i], S); this->public_key = this->digest(pub); }; virtual bool privKeyIsLoaded() { return (current_state & ClassicWots::PRIV_KEY_LOADED) > 0; }; virtual bool pubKeyIsLoaded() { return (current_state & ClassicWots::PUB_KEY_LOADED) > 0; }; enum State { INITIALIZED = 1, PRIV_KEY_LOADED = 2, PUB_KEY_LOADED = 4, }; //TODO: trocar parametro por template. usar SFINAE para avaliar em tempo de compilação no lugar do switch? std::vector<unsigned int> toBaseW(ByteArray& data) { if (W > 256) { return toBaseWBig(data); } return toBaseWSmall(data); }; std::vector<unsigned int> toBaseWBig(ByteArray& data) { //TODO REVIEW, does this work? 
const unsigned int bytes_per_block = block_size/8; std::vector<unsigned int> ret; unsigned int total = 0; unsigned int s = 0; for(unsigned int i=0; i < data.size(); i++) { s = (bytes_per_block-1) - (i%bytes_per_block); //total += (data.at(i)<< s) & ( (1<<block_size)-1); total += (std::to_integer<unsigned int>(data[i])<< s) & ( (1<<block_size)-1); if( (i+1)%bytes_per_block == 0){ ret.push_back(total); total = 0; } } return ret; }; std::vector<unsigned int> toBaseWSmall(ByteArray& data) { unsigned int in = 0; unsigned int total = 0; unsigned int bits = 0; unsigned int consumed; std::vector<unsigned int> ret; unsigned int out_len = data.size()*8 / block_size; for ( consumed = 0; consumed < out_len; consumed++ ) { if ( bits == 0 ) { total = std::to_integer<unsigned int>(data[in]); in++; bits += 8; } bits -= block_size; ret.push_back((total >> bits) & ( (1<<(block_size)) -1)); } return ret; }; //Attributes unsigned int current_state; ByteArray public_key; std::vector<ByteArray> private_key; unsigned int block_size; ByteArray private_seed; }; #endif
factorgraph.h
// This file is part of sibilla : inference in epidemics with Belief Propagation // Author: Alfredo Braunstein // Author: Alessandro Ingrosso // Author: Anna Paola Muntoni #ifndef FACTORGRAPH_H #define FACTORGRAPH_H #include <vector> #include <iostream> #include <memory> #include <omp.h> #include "params.h" extern int const Tinf; template<class TMes> struct NeighType { NeighType(int index, int pos) : index(index), pos(pos), t(1, Tinf), lambdas(1, 0.0), msg(1, 1.0) { omp_init_lock(&lock_); } int index; // index of the node int pos; // position of the node in neighbors list std::vector<int> t; // time index of contacts std::vector<real_t> lambdas; // transmission probability TMes msg; // BP msg nij^2 or void lock() const { omp_set_lock(&lock_); } void unlock() const { omp_unset_lock(&lock_); } mutable omp_lock_t lock_; }; template<class TMes> struct NodeType { NodeType(std::shared_ptr<Proba> prob_i, std::shared_ptr<Proba> prob_r, int index) : prob_i(prob_i), prob_r(prob_r), prob_i0(prob_i), prob_r0(prob_r), f_(0), df_i(RealParams(0.0, prob_i->theta.size())), df_r(RealParams(0.0, prob_r->theta.size())), index(index) { times.push_back(-1); times.push_back(Tinf); for (int t = 0; t < 2; ++t) { bt.push_back(1); ht.push_back(1); bg.push_back(1); hg.push_back(1); } } void push_back_time(times_t t) { times.back() = t; times.push_back(Tinf); ht.push_back(ht.back()); hg.push_back(hg.back()); bt.push_back(bt.back()); bg.push_back(bg.back()); } static char const * name(); std::shared_ptr<Proba> prob_i; std::shared_ptr<Proba> prob_r; std::shared_ptr<Proba> prob_i0; std::shared_ptr<Proba> prob_r0; std::vector<times_t> times; std::vector<real_t> bt; // marginals infection times T[ni+2] std::vector<real_t> bg; // marginals recovery times G[ni+2] std::vector<real_t> ht; // message infection times T[ni+2] std::vector<real_t> hg; // message recovery times G[ni+2] std::vector<NeighType<TMes>> neighs; // list of neighbors real_t f_; real_t err_; RealParams df_i; RealParams df_r; int 
index; }; template<class TMes> class FactorGraph { public: typedef TMes Mes; typedef NodeType<Mes> Node; typedef NeighType<Mes> Neigh; static char const * name(); std::vector<Node> nodes; FactorGraph(Params const & params, std::vector<std::tuple<int,int,times_t,real_t> > const & contacts, std::vector<std::tuple<int,int,times_t> > const & obs, std::vector<std::tuple<int, std::shared_ptr<Proba>, std::shared_ptr<Proba>, std::shared_ptr<Proba>, std::shared_ptr<Proba>> > const & individuals = std::vector<std::tuple<int, std::shared_ptr<Proba>, std::shared_ptr<Proba>, std::shared_ptr<Proba>, std::shared_ptr<Proba>>>()); int find_neighbor(int i, int j) const; void append_contact(int i, int j, times_t t, real_t lambdaij, real_t lambdaji = DO_NOT_OVERWRITE); void drop_contacts(times_t t); void append_observation(int i, int s, times_t t); void append_time(int i, times_t t); void add_node(int i); void init(); void set_fields(int i, std::vector<int> const & sobs, std::vector<times_t> const & tobs); void set_field(int i, int s, int t); void reset_observations(std::vector<std::tuple<int, int, times_t> > const & obs); real_t update(int i, real_t damping, bool learn = false); void show_graph() const; void show_beliefs(std::ostream &) const; void show_msg(std::ostream &) const; real_t iterate(int maxit, real_t tol, real_t damping, bool learn = false); real_t iteration(real_t damping, bool learn = false); real_t loglikelihood() const; Params params; enum ARRAY_ENUM { DO_NOT_OVERWRITE = -1 }; }; template<class TMes> void FactorGraph<TMes>::append_contact(int i, int j, times_t t, real_t lambdaij, real_t lambdaji) { if (i == j) throw std::invalid_argument("self loops are not allowed"); add_node(i); add_node(j); Node & fi = nodes[i]; Node & fj = nodes[j]; int qi = fi.times.size(); int qj = fj.times.size(); if (fi.times[qi - 2] > t || fj.times[qj - 2] > t) throw std::invalid_argument("time of contacts should be ordered"); int ki = find_neighbor(i, j); int kj = find_neighbor(j, i); if (ki 
== int(fi.neighs.size())) { assert(kj == int(fj.neighs.size())); fi.neighs.push_back(Neigh(j, kj)); fj.neighs.push_back(Neigh(i, ki)); } Neigh & ni = fi.neighs[ki]; Neigh & nj = fj.neighs[kj]; if (fi.times[qi - 2] < t) { fi.push_back_time(t); ++qi; } if (fj.times[qj - 2] < t) { fj.push_back_time(t); ++qj; } if (ni.t.size() < 2 || ni.t[ni.t.size() - 2] < qi - 2) { ni.t.back() = qi - 2; nj.t.back() = qj - 2; ni.t.push_back(qi - 1); nj.t.push_back(qj - 1); if (lambdaij != DO_NOT_OVERWRITE) ni.lambdas.back() = lambdaij; if (lambdaji != DO_NOT_OVERWRITE) nj.lambdas.back() = lambdaji; ni.lambdas.push_back(0.0); nj.lambdas.push_back(0.0); ++ni.msg; ++nj.msg; } else if (ni.t[ni.t.size() - 2] == qi - 2) { if (lambdaij != DO_NOT_OVERWRITE) ni.lambdas[ni.t.size() - 2] = lambdaij; if (lambdaji != DO_NOT_OVERWRITE) nj.lambdas[nj.t.size() - 2] = lambdaji; } else { throw std::invalid_argument("time of contacts should be ordered"); } // adjust infinite times for (int k = 0; k < int(fi.neighs.size()); ++k) { fi.neighs[k].t.back() = qi - 1; } for (int k = 0; k < int(fj.neighs.size()); ++k) { fj.neighs[k].t.back() = qj - 1; } } template<class TMes> FactorGraph<TMes>::FactorGraph(Params const & params, std::vector<std::tuple<int, int, times_t, real_t> > const & contacts, std::vector<std::tuple<int, int, times_t> > const & obs, std::vector<std::tuple<int, std::shared_ptr<Proba>, std::shared_ptr<Proba>, std::shared_ptr<Proba>, std::shared_ptr<Proba>>> const & individuals) : params(params) { for (auto it = individuals.begin(); it != individuals.end(); ++it) { if (!std::get<1>(*it) || !std::get<1>(*it) || !std::get<1>(*it)|| !std::get<1>(*it)) throw std::invalid_argument("invalid individual definition"); add_node(std::get<0>(*it)); Node & n = nodes[std::get<0>(*it)]; n.prob_i = std::get<1>(*it); n.prob_r = std::get<2>(*it); n.prob_i0 = std::get<3>(*it); n.prob_r0 = std::get<4>(*it); n.df_i = RealParams(n.prob_i->theta.size()); n.df_r = RealParams(n.prob_r->theta.size()); } auto ic = 
contacts.begin(), ec = contacts.end(); auto io = obs.begin(), eo = obs.end(); while (ic != ec || io != eo) { int tc = ic == ec ? Tinf : std::get<2>(*ic); int to = io == eo ? Tinf : std::get<2>(*io); if (tc < to) { // cerr << "appending contact" << get<0>(*ic) << " " << get<1>(*ic)<< " " << get<2>(*ic) << " " << get<3>(*ic) << endl; append_contact(std::get<0>(*ic), std::get<1>(*ic), std::get<2>(*ic), std::get<3>(*ic)); ic++; } else { // cerr << "appending obs" << get<0>(*io) << " " << get<1>(*io)<< " " << get<2>(*io) << endl; append_time(std::get<0>(*io), std::get<2>(*io)); io++; } } reset_observations(obs); } template<class TMes> int FactorGraph<TMes>::find_neighbor(int i, int j) const { int k = 0; for (; k < int(nodes[i].neighs.size()); ++k) if (j == nodes[i].neighs[k].index) break; return k; } template<class TMes> void norm_msg(TMes & msg) { real_t S = 0; for(int n = 0; n < int(msg.size()); ++n) S += msg[n]; if (!(S > 0)) throw std::domain_error("singularity error"); for(int n = 0; n < int(msg.size()); ++n) msg[n] /= S; } template<class TMes> real_t setmes(TMes & from, TMes & to, real_t damp) { int n = from.size(); real_t s = 0; for (int i = 0; i < n; ++i) { s += from[i]; } real_t err = 0; for (int i = 0; i < n; ++i) { if (!(s > 0)){ from[i] = 1./n; err = std::numeric_limits<real_t>::infinity(); } else { from[i] /= s; err = std::max(err, std::abs(from[i] - to[i])); } to[i] = damp*to[i] + (1-damp)*from[i]; } return err; } template<class TMes> std::ostream & operator<<(std::ostream & ost, FactorGraph<TMes> const & f) { int nasym = 0; int nedge = 0; int ncont = 0; for(int i = 0; i < int(f.nodes.size()); ++i) { for (auto vit = f.nodes[i].neighs.begin(), vend = f.nodes[i].neighs.end(); vit != vend; ++vit) { if (vit->index < i) continue; ++nedge; ncont += vit->lambdas.size() - 1; if (vit->lambdas != f.nodes[vit->index].neighs[vit->pos].lambdas) ++nasym; } } return ost << "FactorGraph\n" << " nodes: " << f.nodes.size() << "\n" << " edges: " << nedge << " (" << nasym << 
" asymmetric)\n" << " time contacts: " << ncont; } template<class TMes> void FactorGraph<TMes>::add_node(int i) { for (int j = nodes.size(); j < i + 1; ++j) nodes.push_back(Node(params.prob_i, params.prob_r, j)); } template<class TMes> void FactorGraph<TMes>::show_graph() const { std::cerr << "Number of nodes " << int(nodes.size()) << std::endl; for(int i = 0; i < int(nodes.size()); i++) { std::cerr << "### index " << i << "###" << std::endl; std::cerr << "### in contact with " << int(nodes[i].neighs.size()) << "nodes" << std::endl; std::vector<Neigh> const & aux = nodes[i].neighs; for (int j = 0; j < int(aux.size()); j++) { std::cerr << "# neighbor " << aux[j].index << std::endl; std::cerr << "# in position " << aux[j].pos << std::endl; std::cerr << "# in contact " << int(aux[j].t.size()) << " times, in t: "; for (int s = 0; s < int(aux[j].t.size()); s++) std::cerr << aux[j].t[s] << " "; std::cerr << " " << std::endl; } } } template<class TMes> void FactorGraph<TMes>::show_beliefs(std::ostream & ofs) const { for(int i = 0; i < int(nodes.size()); ++i) { Node const & f = nodes[i]; ofs << "node " << i << ":" << std::endl; for (int t = 0; t < int(f.bt.size()); ++t) { ofs << " " << f.times[t] << " " << f.bt[t] << " (" << f.ht[t] << ") " << f.bg[t] << " (" << f.hg[t] << ")" << std::endl; } } } template<class TMes> void FactorGraph<TMes>::show_msg(std::ostream & o) const { for (int i = 0; i < int(nodes.size()); ++i) { auto & n = nodes[i]; for(int j = 0; j < int(n.neighs.size()); ++j) { Neigh const & v = n.neighs[j]; o << i << " <- " << v.index << " : " << std::endl; o << v.msg << std::endl; } } } template<class TMes> real_t FactorGraph<TMes>::iteration(real_t damping, bool learn) { int const N = nodes.size(); real_t err = 0.0; std::vector<int> perm(N); for(int i = 0; i < N; ++i) perm[i] = i; random_shuffle(perm.begin(), perm.end()); #pragma omp parallel for reduction(max:err) for(int i = 0; i < N; ++i) err = std::max(err, update(perm[i], damping, learn)); return err; } 
template<class TMes> real_t FactorGraph<TMes>::iterate(int maxit, real_t tol, real_t damping, bool learn) { real_t err = std::numeric_limits<real_t>::infinity(); for (int it = 1; it <= maxit; ++it) { err = iteration(damping, learn); std::cout << "it: " << it << " err: " << err << std::endl; if (err < tol) break; } return err; } template<class TMes> void drop_time(FactorGraph<TMes> & fg, int t) { fg.drop_contacts(t); int n = fg.nodes.size(); for (int i = 0; i < n; ++i) { NodeType<TMes> & f = fg.nodes[i]; if (t == f.times[1]) { f.bt.erase(f.bt.begin()); f.bg.erase(f.bg.begin()); f.ht.erase(f.ht.begin()); f.hg.erase(f.hg.begin()); f.times.erase(f.times.begin() + 1); int m = f.neighs.size(); for (int j = 0; j < m; ++j) { NeighType<TMes> & v = f.neighs[j]; for (int k = 0; k < int(v.t.size()); ++k) { --v.t[k]; } } } f.times[0] = t; } } template<class TMes> void FactorGraph<TMes>::set_field(int i, int s, int tobs) { Node & n = nodes[i]; int qi = n.times.size(); switch (s) { case 0: for (int t = 0; t < qi; ++t) n.ht[t] *= params.fn_rate * (n.times[t] < tobs) + (1 - params.fn_rate) * (n.times[t] >= tobs); break; case 1: for (int t = 0; t < qi; ++t) { n.ht[t] *= (1 - params.fp_rate) * (n.times[t] < tobs) + params.fp_rate * (n.times[t] >= tobs); n.hg[t] *= (n.times[t] >= tobs); } break; case 2: for (int t = 0; t < qi; ++t) { n.ht[t] *= (n.times[t] < tobs); n.hg[t] *= (n.times[t] < tobs); } break; } } template<class TMes> void FactorGraph<TMes>::append_time(int i, times_t t) { add_node(i); Node & n = nodes[i]; // most common case if (t == n.times[n.times.size() - 2] || t == *lower_bound(n.times.begin(), n.times.end(), t)) return; if (t > n.times[n.times.size() - 2]) { n.push_back_time(t); // adjust infinite times for (int j = 0; j < int(n.neighs.size()); ++j) { n.neighs[j].t.back() = n.times.size() - 1; } return; } std::cerr << t << " < " << n.times[n.times.size() - 2] << std::endl; throw std::invalid_argument("observation time unexistent and too small"); } template<class 
TMes> void FactorGraph<TMes>::append_observation(int i, int s, times_t t) { append_time(i, t); set_field(i, s, t); } template<class TMes> void FactorGraph<TMes>::reset_observations(std::vector<std::tuple<int, int, times_t> > const & obs) { std::vector<std::vector<times_t>> tobs(nodes.size()); std::vector<std::vector<int>> sobs(nodes.size()); for (auto it = obs.begin(); it != obs.end(); ++it) { sobs[std::get<0>(*it)].push_back(std::get<1>(*it)); tobs[std::get<0>(*it)].push_back(std::get<2>(*it)); } int largeT = 0; for (int i = 0; i < int(nodes.size()); ++i) { largeT = std::max(largeT, int(nodes[i].times.size())); } std::vector<int> FS(largeT), FI(largeT), TS(largeT), TI(largeT), TR(largeT); std::vector<real_t> pFS(largeT, 1.0), pFI(largeT, 1.0), pTS(largeT, 1.0), pTI(largeT, 1.0); for (int t = 1; t < largeT; ++t) { pTI[t] = pTI[t-1] * (1-params.fp_rate); pFI[t] = pFI[t-1] * params.fp_rate; pTS[t] = pTS[t-1] * (1-params.fn_rate); pFS[t] = pFS[t-1] * params.fn_rate; } for (int i = 0; i < int(nodes.size()); ++i) { fill(TS.begin(), TS.end(), 0); fill(FS.begin(), FS.end(), 0); fill(TI.begin(), TI.end(), 0); fill(FI.begin(), FI.end(), 0); fill(TR.begin(), TR.end(), 0); // this assumes ordered observation times int T = nodes[i].times.size(); int t = 0; for (int k = 0; k < int(tobs[i].size()); ++k) { int state = sobs[i][k]; int to = tobs[i][k]; while (nodes[i].times[t] != to && t < T) t++; if (nodes[i].times[t] != to) throw std::invalid_argument(("this is a bad time: node" + std::to_string(i) + " time " + std::to_string(t)).c_str()); switch(state) { case 0: FS[0]++; FS[t]--; TS[t]++; break; case 1: TI[0]++; TI[t]--; FI[t]++; TR[0]++; TR[t]--; break; case 2: TR[t]++; TI[t]++; break; } } int fs = 0, fi = 0, ts = 0, ti = 0, tr = 0; for (int t = 0; t < T; ++t) { fs += FS[t]; fi += FI[t]; ts += TS[t]; ti += TI[t]; tr += TR[t]; nodes[i].ht[t] = pFS[fs] * pTS[ts] * pFI[fi] * pTI[ti]; nodes[i].hg[t] = tr == 0; } } } template<class TMes> void FactorGraph<TMes>::drop_contacts(times_t 
t) { for (size_t i = 0; i < nodes.size(); ++i) { Node & fi = nodes[i]; for (size_t k = 0; k < fi.neighs.size(); ++k) { if (fi.times[fi.neighs[k].t[0]] < t) throw std::invalid_argument("can only drop first contact"); else if (fi.times[fi.neighs[k].t[0]] == t) { fi.neighs[k].t.erase(fi.neighs[k].t.begin(), fi.neighs[k].t.begin() + 1); fi.neighs[k].lambdas.erase(fi.neighs[k].lambdas.begin(), fi.neighs[k].lambdas.begin() + 1); --fi.neighs[k].msg; } } } } #endif
graph.h
// Copyright (c) 2015, The Regents of the University of California (Regents) // See LICENSE.txt for license details #ifndef GRAPH_H_ #define GRAPH_H_ #include <algorithm> #include <cinttypes> #include <cstddef> #include <iostream> #include <type_traits> #include "pvector.h" #include "util.h" #ifdef ZSIM #include "zsimhooks.h" #endif /* GAP Benchmark Suite Class: CSRGraph Author: Scott Beamer Simple container for graph in CSR format - Intended to be constructed by a Builder - To make weighted, set DestID_ template type to NodeWeight - MakeInverse parameter controls whether graph stores its inverse */ // Used to hold node & weight, with another node it makes a weighted edge template <typename NodeID_, typename WeightT_> struct NodeWeight { NodeID_ v; WeightT_ w; NodeWeight() {} NodeWeight(NodeID_ v) : v(v), w(1) {} NodeWeight(NodeID_ v, WeightT_ w) : v(v), w(w) {} bool operator< (const NodeWeight& rhs) const { return v == rhs.v ? w < rhs.w : v < rhs.v; } // doesn't check WeightT_s, needed to remove duplicate edges bool operator== (const NodeWeight& rhs) const { return v == rhs.v; } // doesn't check WeightT_s, needed to remove self edges bool operator== (const NodeID_& rhs) const { return v == rhs; } operator NodeID_() { return v; } }; template <typename NodeID_, typename WeightT_> std::ostream& operator<<(std::ostream& os, const NodeWeight<NodeID_, WeightT_>& nw) { os << nw.v << " " << nw.w; return os; } template <typename NodeID_, typename WeightT_> std::istream& operator>>(std::istream& is, NodeWeight<NodeID_, WeightT_>& nw) { is >> nw.v >> nw.w; return is; } // Syntatic sugar for an edge template <typename SrcT, typename DstT = SrcT> struct EdgePair { SrcT u; DstT v; EdgePair() {} EdgePair(SrcT u, DstT v) : u(u), v(v) {} }; // SG = serialized graph, these types are for writing graph to file typedef int32_t SGID; typedef EdgePair<SGID> SGEdge; typedef int64_t SGOffset; template <class NodeID_, class DestID_ = NodeID_, bool MakeInverse = true> class CSRGraph { // 
Used for *non-negative* offsets within a neighborhood typedef std::make_unsigned<std::ptrdiff_t>::type OffsetT; // Used to access neighbors of vertex, basically sugar for iterators class Neighborhood { NodeID_ n_; DestID_** g_index_; OffsetT start_offset_; public: Neighborhood(NodeID_ n, DestID_** g_index, OffsetT start_offset) : n_(n), g_index_(g_index), start_offset_(0) { OffsetT max_offset = end() - begin(); start_offset_ = std::min(start_offset, max_offset); } typedef DestID_* iterator; iterator begin() { return g_index_[n_] + start_offset_; } iterator end() { return g_index_[n_+1]; } }; void ReleaseResources() { if (out_index_ != nullptr) delete[] out_index_; if (out_neighbors_ != nullptr) delete[] out_neighbors_; if (directed_) { if (in_index_ != nullptr) delete[] in_index_; if (in_neighbors_ != nullptr) delete[] in_neighbors_; } } public: CSRGraph() : directed_(false), num_nodes_(-1), num_edges_(-1), out_index_(nullptr), out_neighbors_(nullptr), in_index_(nullptr), in_neighbors_(nullptr) {} CSRGraph(int64_t num_nodes, DestID_** index, DestID_* neighs) : directed_(false), num_nodes_(num_nodes), out_index_(index), out_neighbors_(neighs), in_index_(index), in_neighbors_(neighs) { num_edges_ = (out_index_[num_nodes_] - out_index_[0]) / 2; } CSRGraph(int64_t num_nodes, DestID_** out_index, DestID_* out_neighs, DestID_** in_index, DestID_* in_neighs) : directed_(true), num_nodes_(num_nodes), out_index_(out_index), out_neighbors_(out_neighs), in_index_(in_index), in_neighbors_(in_neighs) { num_edges_ = out_index_[num_nodes_] - out_index_[0]; } CSRGraph(CSRGraph&& other) : directed_(other.directed_), num_nodes_(other.num_nodes_), num_edges_(other.num_edges_), out_index_(other.out_index_), out_neighbors_(other.out_neighbors_), in_index_(other.in_index_), in_neighbors_(other.in_neighbors_) { other.num_edges_ = -1; other.num_nodes_ = -1; other.out_index_ = nullptr; other.out_neighbors_ = nullptr; other.in_index_ = nullptr; other.in_neighbors_ = nullptr; } ~CSRGraph() { 
ReleaseResources(); } CSRGraph& operator=(CSRGraph&& other) { if (this != &other) { ReleaseResources(); directed_ = other.directed_; num_edges_ = other.num_edges_; num_nodes_ = other.num_nodes_; out_index_ = other.out_index_; out_neighbors_ = other.out_neighbors_; in_index_ = other.in_index_; in_neighbors_ = other.in_neighbors_; other.num_edges_ = -1; other.num_nodes_ = -1; other.out_index_ = nullptr; other.out_neighbors_ = nullptr; other.in_index_ = nullptr; other.in_neighbors_ = nullptr; } return *this; } bool directed() const { return directed_; } int64_t num_nodes() const { return num_nodes_; } int64_t num_edges() const { return num_edges_; } int64_t num_edges_directed() const { return directed_ ? num_edges_ : 2*num_edges_; } int64_t out_degree(NodeID_ v) const { return out_index_[v+1] - out_index_[v]; } int64_t in_degree(NodeID_ v) const { static_assert(MakeInverse, "Graph inversion disabled but reading inverse"); return in_index_[v+1] - in_index_[v]; } Neighborhood out_neigh(NodeID_ n, OffsetT start_offset = 0) const { return Neighborhood(n, out_index_, start_offset); } Neighborhood in_neigh(NodeID_ n, OffsetT start_offset = 0) const { static_assert(MakeInverse, "Graph inversion disabled but reading inverse"); return Neighborhood(n, in_index_, start_offset); } void PrintStats() const { std::cout << "Graph has " << num_nodes_ << " nodes and " << num_edges_ << " "; if (!directed_) std::cout << "un"; std::cout << "directed edges for degree: "; std::cout << num_edges_/num_nodes_ << std::endl; } void PrintTopology() const { for (NodeID_ i=0; i < num_nodes_; i++) { std::cout << i << ": "; for (DestID_ j : out_neigh(i)) { std::cout << j << " "; } std::cout << std::endl; } } static DestID_** GenIndex(const pvector<SGOffset> &offsets, DestID_* neighs) { NodeID_ length = offsets.size(); DestID_** index = new DestID_*[length]; #pragma omp parallel for for (NodeID_ n=0; n < length; n++) { #ifdef ZSIM PIMPROF_BEGIN_REG_PARALLEL #endif index[n] = neighs + offsets[n]; 
#ifdef ZSIM PIMPROF_END_REG_PARALLEL #endif } return index; } pvector<SGOffset> VertexOffsets(bool in_graph = false) const { pvector<SGOffset> offsets(num_nodes_+1); for (NodeID_ n=0; n < num_nodes_+1; n++) if (in_graph) offsets[n] = in_index_[n] - in_index_[0]; else offsets[n] = out_index_[n] - out_index_[0]; return offsets; } Range<NodeID_> vertices() const { return Range<NodeID_>(num_nodes()); } private: bool directed_; int64_t num_nodes_; int64_t num_edges_; DestID_** out_index_; DestID_* out_neighbors_; DestID_** in_index_; DestID_* in_neighbors_; }; #endif // GRAPH_H_
ofmo-oneint-gen.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "ofmo-index.h"
#ifdef _OPENMP
#include <omp.h>
#else
#include "omp-dummy.h"
#endif

/* incomplete gamma (Boys) function evaluator, defined elsewhere */
extern void fmt( double F[], const int m, const double T,
        const double cssss );

#ifndef false
#define false 0
#endif
#ifndef true
#define true 1
#endif

#define HALF    0.5e0
#define ZERO    0.e0
#define EPS_PS_PAIR 1.e-32

/* Allocate a 2D integer array (na x nb) backed by one contiguous block. */
static int** ofmo_alloc_imatrix( const int na, const int nb ) {
    int **ip, i;
    ip = (int**)malloc( sizeof(int*) * na );
    ip[0] = (int*)malloc( sizeof(int) * na * nb );
    for (i=1; i<na; i++ ) ip[i] = ip[i-1] + nb;
    return ip;
}

/* Free a 2D integer array allocated by ofmo_alloc_imatrix. */
static void ofmo_free_imatrix( int** ip ) {
    if ( ip ) {
        if ( ip[0] ) free( ip[0] );
        free( ip );
    }
}

/* Lookup tables produced by ofmo-index.c (shared, read-only after init) */
static int *NNAO;
static int *LAOT;
static int *INDX;
static int **ANGM;
static int **NAM;
static double *DFACT;

/* Per-thread scratch needed by the VRR of the one-electron integrals */
static int ***V_OVIADD = NULL;
static int ***V_NAIADD = NULL;
static int ***V_MINDEX = NULL;
static double **V_OVI = NULL;
static double **V_KEI = NULL;
static double **V_NAI = NULL;

/* Per-thread storage for the contracted integrals */
static double **OVI_MASTER = NULL;
static double **HCORE_MASTER = NULL;

/* Constants etc. */
static double _PI_32_;
static double _2PI_;
static double EPS_PS_PAIR_NAI;

/* Compute the scratch-array offsets (OVIADD/NAIADD) and block sizes
 * (MINDEX) for every (ma, mb) angular-momentum class of an (La|Lb) shell
 * pair, and return the total scratch sizes through OVI_MEM / NAI_MEM. */
static int ofmo_oneint_gen_make_add( const int mythread,
        const int La, const int Lb, int *OVI_MEM, int *NAI_MEM ) {
    int ovi_mem, nai_mem;
    int Lab, ma, mb, na, nb, nab, lmin, mmax;
    int **OVIADD, **NAIADD, **MINDEX;
    Lab = La + Lb;
    OVIADD = V_OVIADD[mythread];
    NAIADD = V_NAIADD[mythread];
    MINDEX = V_MINDEX[mythread];
    ovi_mem = nai_mem = 0;
    /* (a|s) type: NAI also needs one block per auxiliary index m */
    for ( ma=0; ma<=La; ma++ ) {
        na = NNAO[ma];
        OVIADD[ma][0] = ovi_mem;
        NAIADD[ma][0] = nai_mem;
        MINDEX[ma][0] = na;
        ovi_mem += na;
        nai_mem += (Lab-ma+1) * na;
    }
    /* (a|b) type */
    for ( mb=1; mb<=Lb; mb++ ) {
        lmin = La - Lb + mb;
        if ( lmin < 0 ) lmin = 0;
        mmax = Lb - mb;
        nb = NNAO[mb];
        for ( ma=lmin; ma<=La; ma++ ) {
            nab = nb * NNAO[ma];
            OVIADD[ma][mb] = ovi_mem;
            NAIADD[ma][mb] = nai_mem;
            MINDEX[ma][mb] = nab;
            ovi_mem += nab;
            nai_mem += (mmax+1)*nab;
        }
    }
    *OVI_MEM = ovi_mem;
    *NAI_MEM = nai_mem;
    return 0;
}

/* Release every per-thread scratch buffer (registered with atexit). */
static void ofmo_oneint_gen_finalize() {
    int i, nthreads;
    nthreads = omp_get_max_threads();
    for ( i=0; i<nthreads; i++ ) {
        if ( V_OVIADD[i] ) ofmo_free_imatrix( V_OVIADD[i] );
        if ( V_NAIADD[i] ) ofmo_free_imatrix( V_NAIADD[i] );
        if ( V_MINDEX[i] ) ofmo_free_imatrix( V_MINDEX[i] );
        if ( V_OVI[i] ) free( V_OVI[i] );
        if ( V_KEI[i] ) free( V_KEI[i] );
        if ( V_NAI[i] ) free( V_NAI[i] );
        if ( OVI_MASTER[i] ) free( OVI_MASTER[i] );
        if ( HCORE_MASTER[i] ) free( HCORE_MASTER[i] );
    }
    free( V_OVIADD );
    free( V_NAIADD );
    free( V_MINDEX );
    free( V_OVI );
    free( V_KEI );
    free( V_NAI );
    free( OVI_MASTER );
    free( HCORE_MASTER );
    V_OVIADD = NULL;
    V_NAIADD = NULL;
    V_MINDEX = NULL;
    V_OVI = NULL;
    V_KEI = NULL;
    V_NAI = NULL;
    OVI_MASTER = NULL;
    HCORE_MASTER = NULL;
}

/* One-time initialization: constants, index tables from ofmo-index, and
 * per-thread scratch sized for the maximum angular momentum maxlqn.
 * Idempotent (guarded by `called`). NOTE(review): malloc results are not
 * checked — assumes allocation cannot fail in this environment. */
int ofmo_oneint_gen_init( const int maxlqn ) {
    int nthreads;
    double pi;
    static int called = false;
    if ( called ) return 0;
    pi = 4.e0 * atan( 1.e0 );
    _PI_32_ = pi * sqrt(pi);
    _2PI_ = 2.e0 * pi;
    EPS_PS_PAIR_NAI = EPS_PS_PAIR;
    ofmo_index_init( 2*maxlqn );
    NNAO = ofmo_getadd_nnao();
    LAOT = ofmo_getadd_laot();
    ANGM = ofmo_getadd_angm();
    INDX = ofmo_getadd_indx();
    NAM = ofmo_getadd_nam();
    DFACT = ofmo_getadd_dfact();
    nthreads = omp_get_max_threads();
    V_OVIADD = (int***)malloc( sizeof(int**) * nthreads );
    V_NAIADD = (int***)malloc( sizeof(int**) * nthreads );
    V_MINDEX = (int***)malloc( sizeof(int**) * nthreads );
    V_OVI = (double**)malloc( sizeof(double*) * nthreads );
    V_KEI = (double**)malloc( sizeof(double*) * nthreads );
    V_NAI = (double**)malloc( sizeof(double*) * nthreads );
    OVI_MASTER = (double**)malloc( sizeof(double*) * nthreads );
    HCORE_MASTER = (double**)malloc( sizeof(double*) * nthreads );
#pragma omp parallel
    {
        int mythread;
        int ovi_mem, nai_mem;
        int n, n2;
        n = NNAO[maxlqn];
        n2 = n*n;
        mythread = omp_get_thread_num();
        V_OVIADD[mythread] = ofmo_alloc_imatrix( maxlqn+1, maxlqn+1 );
        V_NAIADD[mythread] = ofmo_alloc_imatrix( maxlqn+1, maxlqn+1 );
        V_MINDEX[mythread] = ofmo_alloc_imatrix( maxlqn+1, maxlqn+1 );
        /* sized for the worst case (maxlqn|maxlqn) */
        ofmo_oneint_gen_make_add( mythread, maxlqn, maxlqn,
                &ovi_mem, &nai_mem );
        V_OVI[mythread] = (double*)malloc( sizeof(double) * ovi_mem );
        V_KEI[mythread] = (double*)malloc( sizeof(double) * ovi_mem );
        V_NAI[mythread] = (double*)malloc( sizeof(double) * nai_mem );
        OVI_MASTER[mythread] = (double*)malloc( sizeof(double) * n2 );
        HCORE_MASTER[mythread] = (double*)malloc( sizeof(double) * n2 );
    }
    atexit( ofmo_oneint_gen_finalize );
    called = true;
    return 0;
}

/* Core routine: contracted overlap (OVI) and core-Hamiltonian (HCORE =
 * kinetic + nuclear attraction) integrals for one contracted shell pair
 * (La at center A, Lb at center B), built by vertical recurrence relations
 * over all primitive pairs. Results are written to OVI[] / HCORE[]
 * (na*nb entries each, row-major over (iao, jao)). */
static int oneint_core_xx( const int mythread,
        const int *pLa, const int *pLb,
        const int *ips0, const int *nps_i, const double A[3],
        const int *jps0, const int *nps_j, const double B[3],
        const double prim_exp[], const double prim_coe[],
        const int *nat, const double atom_x[], const double atom_y[],
        const double atom_z[], const int atomic_number[],
        double OVI[], double HCORE[] ) {
    int i, ij, m, ips, jps, kat, ips1, jps1;
    double zeta_a, zeta_b, zeta, coef_a, coef_b, coef;
    double sqrzi, zi, xiza, xizb, xi, xiab2, xi2, exp_ab, css, zeta2;
    double oviss, keiss;
    double BA[3], P[3], PA[3], PC[3], PB[3], AB2;
    double PC2, dq, U;
    double _pi32_ = _PI_32_, _2pi_ = _2PI_;
    double coe_a, coe;
    int La=*pLa, Lb=*pLb, Lab, nab, na, nb, ma, mb;
    int **OVIADD, **NAIADD, **MINDEX;
    double *ovi, *kei, *nai, *p, *ov, *ke;
    int ip0, i0p, i00, i10, i01, ix, nia, nib, lmin, mmax;
    int ip00, i0p0, i000, i001, i100, i101, i010, i011;
    int iaop, iaop0, iaop1, iao, iao0, iao1, iaom, iaom0;
    int jaop, jaop0, jaop1, jao, jao0, jao1, jaom, jaom0;
    /* per-thread scratch and offset tables */
    ovi = V_OVI[mythread];
    kei = V_KEI[mythread];
    nai = V_NAI[mythread];
    OVIADD = V_OVIADD[mythread];
    NAIADD = V_NAIADD[mythread];
    MINDEX = V_MINDEX[mythread];
    na = NNAO[La];
    nb = NNAO[Lb];
    nab = na * nb;
    Lab = La + Lb;
    for ( i=0; i<nab; i++ ) OVI[i] = HCORE[i] = ZERO;
    AB2 = ZERO;
    for ( i=0; i<3; i++ ) {
        BA[i] = B[i]-A[i];
        AB2 += BA[i]*BA[i];
    }
    ips1 = *ips0 + (*nps_i);
    jps1 = *jps0 + (*nps_j);
    /* loop over primitive pairs */
    for ( ips=(*ips0); ips<ips1; ips++ ) {
        zeta_a = prim_exp[ips];
        coef_a = prim_coe[ips];
        for ( jps=(*jps0); jps<jps1; jps++ ) {
            zeta_b = prim_exp[jps];
            coef_b = prim_coe[jps];
            /* Gaussian product theorem quantities */
            zeta = zeta_a + zeta_b;
            sqrzi = sqrt( 1.e0 / zeta );
            zi = sqrzi * sqrzi;
            coef = coef_a * coef_b;
            xiza = zeta_b * zi;
            xizb = zeta_a * zi;
            xi = xiza * zeta_a;
            xiab2 = xi * AB2;
            xi2 = 2.e0 * xi;
            zeta2 = HALF * zi;
            exp_ab = coef * exp( -xiab2 );
            oviss = _pi32_ * zi * sqrzi * exp_ab;   /* (s|s) overlap */
            keiss = xi * ( 3.e0 - 2.e0 * xiab2 ) * oviss; /* (s|T|s) */
            css = _2pi_ * zi * exp_ab;
            /* screen out negligible primitive pairs */
            if ( fabs(css) < EPS_PS_PAIR_NAI ) continue;
            for ( i=0; i<3; i++ ) {
                P[i] = xiza*BA[i] + A[i];
                PA[i] = xiza*BA[i];
                PB[i] = xiza*BA[i] - BA[i];
            }
            // OVI and KEI
            ovi[0] = oviss;
            kei[0] = keiss;
            // (x||s) and (x|T|s)
            for ( ma=1; ma<=La; ma++ ) {
                iaop0 = LAOT[ma];
                iaop1 = iaop0 + NNAO[ma];
                iao0 = LAOT[ma-1];
                if ( ma>=2 ) iaom0 = LAOT[ma-2];
                for ( iaop=iaop0; iaop<iaop1; iaop++ ) {
                    ix = INDX[iaop];
                    iao = NAM[iaop][ix];
                    nia = ANGM[iao][ix];
                    ip0 = OVIADD[ma ][0] + (iaop - iaop0);
                    i00 = OVIADD[ma-1][0] + (iao - iao0 );
                    ovi[ip0] = PA[ix]*ovi[i00];
                    kei[ip0] = PA[ix]*kei[i00];
                    if ( nia > 0 ) {
                        iaom = NAM[iao][ix];
                        i10 = OVIADD[ma-2][0] + (iaom - iaom0);
                        ovi[ip0] += nia * zeta2 * ovi[i10];
                        kei[ip0] += nia * (zeta2*kei[i10]-xiza*ovi[i10]);
                    }
                    kei[ip0] += xi2 * ovi[ip0];
                }
            }   // for ( ma )
            // (x||y) and (x|T|y)
            for ( mb=1; mb<=Lb; mb++ ) {
                lmin = La - Lb + mb;
                jaop0 = LAOT[mb];
                jao0 = LAOT[mb-1];
                jaop1 = jaop0 + NNAO[mb];
                if ( lmin < 0 ) lmin = 0;
                if ( mb>=2 ) jaom0 = LAOT[mb-2];
                for ( ma=lmin; ma<=La; ma++ ) {
                    iao0 = LAOT[ma];
                    iao1 = iao0 + NNAO[ma];
                    if ( ma>=1 ) iaom0 = LAOT[ma-1];
                    for ( iao=iao0; iao<iao1; iao++ ) {
                        for ( jaop=jaop0; jaop<jaop1; jaop++ ) {
                            ix = INDX[jaop];
                            jao = NAM[jaop][ix];
                            nib = ANGM[jao][ix];
                            nia = ANGM[iao][ix];
                            i0p = OVIADD[ma][mb]
                                + (iao-iao0)*NNAO[mb]+(jaop-jaop0);
                            i00 = OVIADD[ma][mb-1]
                                + (iao-iao0)*NNAO[mb-1]+(jao-jao0);
                            ovi[i0p] = PB[ix]*ovi[i00];
                            kei[i0p] = PB[ix]*kei[i00];
                            if ( nib > 0 ) {
                                jaom = NAM[jao][ix];
                                i01 = OVIADD[ma][mb-2]
                                    + (iao-iao0)*NNAO[mb-2] + (jaom-jaom0);
                                ovi[i0p] += nib * zeta2 * ovi[i01];
                                kei[i0p] += nib * (zeta2*kei[i01]-xizb*ovi[i01]);
                            }
                            if ( nia > 0 ) {
                                iaom = NAM[iao][ix];
                                i10 = OVIADD[ma-1][mb-1]
                                    + (iaom-iaom0)*NNAO[mb-1]+(jao-jao0);
                                ovi[i0p] += nia * zeta2 * ovi[i10];
                                kei[i0p] += nia * zeta2 * kei[i10];
                            }
                            kei[i0p] += xi2 * ovi[i0p];
                        }   // for (jao)
                    }   // for ( iao )
                }   // for ( ma )
            }   // for ( mb );
            // NAI (nuclear attraction: one pass per nucleus)
            for ( kat=0; kat<(*nat); kat++ ) {
                dq = -(double)atomic_number[kat];
                PC[0] = P[0]-atom_x[kat];
                PC[1] = P[1]-atom_y[kat];
                PC[2] = P[2]-atom_z[kat];
                PC2 = PC[0]*PC[0] + PC[1]*PC[1] + PC[2]*PC[2];
                U = zeta * PC2;
                /* seeds nai[0..Lab] with Boys-function values */
                fmt( &nai[0], Lab, U, dq*css );
                // (x|A|s) type
                for ( ma=1; ma<=La; ma++ ) {
                    mmax = Lab - ma;
                    iaop0 = LAOT[ma];
                    iaop1 = iaop0 + NNAO[ma];
                    iao0 = LAOT[ma-1];
                    if ( ma>=2 ) iaom0 = LAOT[ma-2];
                    for ( m=0; m<=mmax; m++ ) {
                        for ( iaop=iaop0; iaop<iaop1; iaop++ ) {
                            ix = INDX[iaop];
                            iao = NAM[iaop][ix];
                            nia = ANGM[iao][ix];
                            ip00 = NAIADD[ma][0] + m*NNAO[ma] + (iaop-iaop0);
                            i000 = NAIADD[ma-1][0] + m*NNAO[ma-1]
                                + (iao - iao0);
                            i001 = i000 + NNAO[ma-1];
                            nai[ip00] = PA[ix]*nai[i000]-PC[ix]*nai[i001];
                            if ( nia > 0 ) {
                                iaom = NAM[iao][ix];
                                i100 = NAIADD[ma-2][0] + m*NNAO[ma-2]
                                    + (iaom-iaom0);
                                i101 = i100 + NNAO[ma-2];
                                nai[ip00] += nia * zeta2 * (nai[i100]-nai[i101]);
                            }
                        }   // for (iaop)
                    }   // for (m)
                }   // for (ma)
                // (x|A|y) type
                for ( mb=1; mb<=Lb; mb++ ) {
                    lmin = La - Lb + mb;
                    if ( lmin < 0 ) lmin = 0;
                    mmax = Lb - mb;
                    jaop0 = LAOT[mb];
                    jaop1 = jaop0 + NNAO[mb];
                    jao0 = LAOT[mb-1];
                    if ( mb >= 2 ) jaom0 = LAOT[mb-2];
                    for ( ma=lmin; ma<=La; ma++ ) {
                        iao0 = LAOT[ma];
                        iao1 = iao0 + NNAO[ma];
                        if ( ma > 0 ) iaom0 = LAOT[ma-1];
                        for ( m=0; m<=mmax; m++ ) {
                            for ( iao=iao0; iao<iao1; iao++ ) {
                                for ( jaop=jaop0; jaop<jaop1; jaop++ ) {
                                    ix = INDX[jaop];
                                    jao = NAM[jaop][ix];
                                    nib = ANGM[jao][ix];
                                    nia = ANGM[iao][ix];
                                    i0p0 = NAIADD[ma][mb]+m*MINDEX[ma][mb]
                                        + (iao-iao0)*NNAO[mb] + (jaop-jaop0);
                                    i000 = NAIADD[ma][mb-1]+m*MINDEX[ma][mb-1]
                                        + (iao-iao0)*NNAO[mb-1] + (jao-jao0);
                                    i001 = i000 + MINDEX[ma][mb-1];
                                    nai[i0p0] = PB[ix]*nai[i000]
                                        - PC[ix]*nai[i001];
                                    if ( nib > 0 ) {
                                        jaom = NAM[jao][ix];
                                        i010 = NAIADD[ma][mb-2]
                                            + m*MINDEX[ma][mb-2]
                                            + (iao-iao0)*NNAO[mb-2]
                                            + (jaom-jaom0);
                                        i011 = i010 + MINDEX[ma][mb-2];
                                        nai[i0p0] += nib * zeta2
                                            * ( nai[i010] - nai[i011] );
                                    }
                                    if ( nia > 0 ) {
                                        iaom = NAM[iao][ix];
                                        i100 = NAIADD[ma-1][mb-1]
                                            + m * MINDEX[ma-1][mb-1]
                                            + (iaom-iaom0)*NNAO[mb-1]
                                            + (jao-jao0);
                                        i101 = i100 + MINDEX[ma-1][mb-1];
                                        nai[i0p0] += nia * zeta2
                                            * ( nai[i100] - nai[i101] );
                                    }
                                }   // for (jaop)
                            }   // for (iao)
                        }   // for (m)
                    }   // for (ma)
                }   // for (mb)
                // contraction of NAI
                p = &nai[NAIADD[La][Lb]];
                for ( i=0; i<nab; i++ ) HCORE[i] += p[i];
            }   // for (kat)
            // contraction of OVI and KEI
            ov = &ovi[OVIADD[La][Lb]];
            ke = &kei[OVIADD[La][Lb]];
            for ( i=0; i<nab; i++ ) {
                OVI[i] += ov[i];
                HCORE[i] += ke[i];
            }
        }   // for ( jps )
    }   // for ( ips )
    // multiply coefficients (normalization factors per AO)
    iao0 = LAOT[La];
    iao1 = iao0 + na;
    jao0 = LAOT[Lb];
    jao1 = jao0 + nb;
    ij = 0;
    //for ( iao=iao0, ij=0; iao<iao1; iao++ ) {
    for ( iao=iao0; iao<iao1; iao++ ) {
        coe_a = DFACT[iao];
        for ( jao=jao0; jao<jao1; jao++ ) {
            coe = coe_a * DFACT[jao];
            // debug
            //if ( coe != 1.e0 ) printf("coe= %10.5f\n", coe );
            OVI[ij] *= coe;
            HCORE[ij] *= coe;
            ij++;
        }
    }
    return 0;
}

/* Driver: compute the (La|Lb) blocks of the overlap matrix S and the core
 * Hamiltonian H (both stored as packed lower triangles). Work is cyclically
 * distributed over `nworkers` workers by contracted-shell index; each call
 * handles the shells assigned to `workerid`. */
int ofmo_oneint_xx( const int *pnworkers, const int *pworkerid,
        const int *pLa, const int *pLb, const int leading_cs[],
        const int shel_tem[], const int shel_atm[], const int shel_add[],
        const int shel_ini[], const double atom_x[], const double atom_y[],
        const double atom_z[], const double prim_exp[],
        const double prim_coe[], const int *pnat,
        const int atomic_number[], double S[], double H[] ) {
    int ij, iao2, ijao;
    int ips0, ics, ics0, ics1, iat, iao, iao0, iao1;
    int jps0, jcs, jcs0, jcs1, jat, jao, jao0, jao1, jcs_max;
    int nps_i, nps_j;
    double A[3], B[3];
    double *ovixx, *hcorexx;
    //
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, nat=*pnat, dum1, dum2;
    //
    int na, nb, mythread;
    mythread = omp_get_thread_num();
    ovixx = OVI_MASTER[mythread];
    hcorexx = HCORE_MASTER[mythread];
    /* (re)compute scratch offsets for this (La, Lb) pair */
    ofmo_oneint_gen_make_add( mythread, La, Lb, &dum1, &dum2 );
    na = NNAO[La];
    nb = NNAO[Lb];
    ics0 = leading_cs[La];
    jcs0 = leading_cs[Lb];
    ics1 = leading_cs[La+1];
    jcs1 = leading_cs[Lb+1];
    for ( ics=ics0+workerid; ics<ics1; ics+=nworkers ) {
        /* for a diagonal (La==Lb) block only the lower triangle is needed */
        jcs_max = ( La==Lb ? ics+1 : jcs1 );
        ips0 = shel_add[ics];
        iat = shel_atm[ics];
        iao0 = shel_ini[ics];
        nps_i = shel_tem[ics];
        A[0]=atom_x[ iat ];
        A[1]=atom_y[ iat ];
        A[2]=atom_z[ iat ];
        for ( jcs=jcs0; jcs<jcs_max; jcs++ ) {
            jps0 = shel_add[jcs];
            jat = shel_atm[jcs];
            jao0 = shel_ini[jcs];
            nps_j = shel_tem[jcs];
            B[0]=atom_x[ jat ];
            B[1]=atom_y[ jat ];
            B[2]=atom_z[ jat ];
            oneint_core_xx( mythread, &La, &Lb,
                    &ips0, &nps_i, A, &jps0, &nps_j, B,
                    prim_exp, prim_coe,
                    &nat, atom_x, atom_y, atom_z, atomic_number,
                    ovixx, hcorexx );
            /* scatter the shell-pair block into the packed triangles */
            iao1 = iao0 + na;
            jao1 = jao0 + nb;
            ij = -1;
            for ( iao=iao0; iao<iao1; iao++ ) {
                iao2 = (iao*iao+iao)>>1;    /* row offset iao*(iao+1)/2 */
                for ( jao=jao0; jao<jao1; jao++ ) {
                    ij++;
                    if ( jao>iao ) continue;
                    ijao = iao2 + jao;
                    S[ijao] = ovixx[ij];
                    H[ijao] = hcorexx[ij];
                }
            }
        }   // for (jcs)
    }   // for (ics)
    return 0;
}
openmp-ex14b.c
#include <stdio.h>
#include <omp.h>

/*
 * Demo of schedule(runtime): the schedule is taken from OMP_SCHEDULE at
 * run time.  Scheduler behaviour is easiest to observe when iterations
 * print in order, so the loop carries an `ordered` clause and the body
 * wraps its printf in an `ordered` region.
 */
int main(void) {
    const int iterations = 10;

    #pragma omp parallel for schedule(runtime) ordered
    for (int iter = 0; iter < iterations; iter++) {
        int tid = omp_get_thread_num();

        /* Only this region executes in iteration order; the rest of the
         * loop body still runs concurrently. */
        #pragma omp ordered
        {
            printf("iteration %d, thread %d\n", iter, tid);
        }
    }

    return 0;
}
draw.c
#include <xcb/xcb.h> #include <xcb/xinput.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include "image.h" #include <tiffio.h> int min(int x, int y) { return x <= y ? x : y; } int max(int x, int y) { return x >= y ? x : y; } typedef struct FloatingDrawing FloatingDrawing; typedef struct FloatingLayer FloatingLayer; typedef struct Brush Brush; typedef struct rect rect; struct rect { int x, y; int width, height; }; struct FloatingLayer { image_t *image; FloatingLayer *next; double alpha; }; struct Brush { double radius; double hardness; double density; double smudge; int is_drawing; int is_erasing; int is_picking; int is_smudging; color color; color medium_color; Brush *next; }; struct FloatingDrawing { uint32_t *image; double x, y; int is_drawing; FloatingLayer *bottom; FloatingLayer *current; Brush *stored_brushes; /* List of all brushes. */ Brush *active_brushes; /* List of active ones. */ color color; color medium_color; char *filename; }; static double blend (double t, double x, double y) { return (1.0 - t) * x + t * y; } static FloatingLayer * floating_layer_new (int width, int height) { FloatingLayer *layer = malloc (sizeof (FloatingLayer)); layer->alpha = 1.0; layer->image = image_new (width, height); layer->next = NULL; return layer; } static void floating_layer_del (FloatingLayer *layer) { image_del (layer->image); free (layer); } static void add_top_layer (FloatingDrawing *drawing, int width, int height) { FloatingLayer *current = drawing->current; if (current != NULL) { current->next = floating_layer_new (current->image->width, current->image->height); drawing->current = current->next; } else { drawing->current = floating_layer_new (width, height); drawing->bottom = drawing->current; } } static void del_top_layer (FloatingDrawing *drawing) { FloatingLayer *current = drawing->current; if (current != NULL) { FloatingLayer *down = drawing->bottom; while (down->next != NULL && down->next != current) { down = down->next; } 
down->next = NULL; drawing->current = down; floating_layer_del (current); if (current == drawing->bottom) { drawing->bottom = NULL; drawing->current = NULL; } } } void update(FloatingDrawing *drawing, rect invalid_area, int image_width, int image_height, uint32_t *image, xcb_connection_t *connection, xcb_window_t window, xcb_gcontext_t draw, xcb_pixmap_t pixmap, uint8_t background) { /* Draw to screen. */ if (invalid_area.x < image_width && invalid_area.y < image_height && invalid_area.width && invalid_area.height) { const int width = invalid_area.width; const int height = invalid_area.height; const int x = invalid_area.x; const int y = invalid_area.y; image_t *scratch = image_new(width, height); FloatingLayer *current = drawing->bottom; while (current != NULL) { int i; #pragma omp parallel for for (i = 0; i < height; ++i) { int j; #pragma omp parallel for for (j = 0; j < width; ++j) { unsigned int scratch_index = i * width + j; unsigned int current_index = (y + i) * current->image->width + x + j; if (x + j >= 0 && x + j < image_width && y + i < image_height && y + i >= 0) { color final_color = scratch->data[scratch_index]; color current_color = current->image->data[current_index]; if (current != drawing->bottom) { if (current->alpha > 0 && current_color.alpha > 0) { const float alpha = final_color.alpha; const float current_alpha = current_color.alpha; const float inv_alpha = 1 / (alpha * (1 - current_alpha) + current->alpha * current_alpha); color_multiply_single_struct (final_color.alpha, final_color.vector, &final_color); color_multiply_single_struct (current->alpha, current_color.vector, &current_color); color_blend_single_struct (current_alpha, final_color.vector, current_color.vector, &final_color); color_multiply_single_struct (inv_alpha, final_color.vector, &final_color); final_color.alpha = fmin (1.0, alpha + current->alpha * current_alpha); } } else { final_color = current_color; final_color.alpha = current->alpha * final_color.alpha; } 
scratch->data[scratch_index] = final_color; } } } current = current->next; } uint8_t *tmp_data = malloc(sizeof(uint32_t) * width * height); unsigned char *surface_data = (void *) image; int i; #pragma omp parallel for for (i = 0; i < height; ++i) { int j; #pragma omp parallel for for (j = 0; j < width; ++j) { int scratch_index = i * width + j; int surface_index = 4*((y + i) * image_width + x + j); if (x + j >= 0 && x + j < image_width && y + i < image_height && y + i >= 0) { const double f = scratch->data[scratch_index].alpha; surface_data[surface_index + 0] = scratch->data[scratch_index].red * 255; surface_data[surface_index + 1] = scratch->data[scratch_index].green * 255; surface_data[surface_index + 2] = scratch->data[scratch_index].blue * 255; surface_data[surface_index + 3] = scratch->data[scratch_index].alpha * 255; tmp_data[4*scratch_index + 0] = blend(f, background, surface_data[surface_index + 2]); tmp_data[4*scratch_index + 1] = blend(f, background, surface_data[surface_index + 1]); tmp_data[4*scratch_index + 2] = blend(f, background, surface_data[surface_index + 0]); tmp_data[4*scratch_index + 3] = blend(f, background, surface_data[surface_index + 3]); } } } image_del(scratch); xcb_put_image(connection, XCB_IMAGE_FORMAT_Z_PIXMAP, pixmap, draw, width, height, x, y, 0, 24, (4 * width * height), (void *) tmp_data); xcb_copy_area(connection, pixmap, window, draw, x, y, x, y, width, height); xcb_flush(connection); free(tmp_data); } } #define BRUSH_SIZE_MAX 64 #define BRUSH_SIZE_DEFAULT 20 #define BACKGROUND 0x40 const color Red = {{1.0, 0.0, 0.0, 1.0}}; const color Green = {{0.0, 1.0, 0.0, 1.0}}; const color Blue = {{0.0, 0.0, 1.0, 1.0}}; const color White = {{1.0, 1.0, 1.0, 1.0}}; const color Black = {{0.0, 0.0, 0.0, 1.0}}; int main(int argc, char **args) { const color *Colors[] ={ &Red, &Green, &Blue, &White, &Black }; const int colors = 5; /*length of Colors*/ int image_width = 400; int image_height = 400; char *image_file_name = 0; if (argc > 3) { 
image_width = atoi(args[1]); image_height = atoi(args[2]); image_file_name = args[3]; } FloatingDrawing drawing_obj; Brush default_brush; color default_color = {{1, 0.1, 0.25, 0.8}}; color default_medium_color = {{0.9, 0.9, 0.75, 0.0}}; default_brush.is_drawing = 0; default_brush.is_picking = 0; default_brush.is_erasing = 0; default_brush.is_smudging = 0; default_brush.color = default_color; default_brush.medium_color = default_medium_color; default_brush.radius = BRUSH_SIZE_DEFAULT; default_brush.hardness = 0.4; default_brush.density = 2.5; default_brush.smudge = 0.5; default_brush.next = NULL; drawing_obj.image = NULL; drawing_obj.is_drawing = 0; drawing_obj.bottom = floating_layer_new (image_width, image_height); drawing_obj.current = drawing_obj.bottom; drawing_obj.stored_brushes = &default_brush; drawing_obj.active_brushes = &default_brush; drawing_obj.filename = image_file_name; drawing_obj.color = default_color; drawing_obj.medium_color = default_medium_color; FloatingDrawing *drawing = &drawing_obj; const char vendor_name[] = "Wacom", stylus[] = "Pen stylus", finger[] = "Finger"; uint8_t graphics_tablet_stylus_device_id = 0; /* Will find what the correct id is later. 
*/ const int graphics_tablet_stylus_pressure_axis_number = 3; double graphics_tablet_stylus_pressure_axis_min = 0; double graphics_tablet_stylus_pressure_axis_max = 1; uint32_t graphics_tablet_stylus_pressure_axis_resolution = 1; const int graphics_tablet_stylus_x_axis_number = 1; double graphics_tablet_stylus_x_axis_min = 0; double graphics_tablet_stylus_x_axis_max = 1; uint32_t graphics_tablet_stylus_x_axis_resolution = 1; const int graphics_tablet_stylus_y_axis_number = 2; double graphics_tablet_stylus_y_axis_min = 0; double graphics_tablet_stylus_y_axis_max = 1; uint32_t graphics_tablet_stylus_y_axis_resolution = 1; xcb_connection_t *connection = xcb_connect(NULL, NULL); xcb_screen_t *screen = xcb_setup_roots_iterator(xcb_get_setup(connection)).data; xcb_drawable_t window; xcb_gcontext_t draw = xcb_generate_id(connection); uint32_t mask; uint32_t values[2]; window = xcb_generate_id(connection); mask = XCB_CW_BACK_PIXEL | XCB_CW_EVENT_MASK; values[0] = 0x808080; values[1] = XCB_EVENT_MASK_EXPOSURE |\ XCB_EVENT_MASK_PROPERTY_CHANGE |\ XCB_EVENT_MASK_STRUCTURE_NOTIFY |\ XCB_EVENT_MASK_SUBSTRUCTURE_NOTIFY |\ XCB_EVENT_MASK_POINTER_MOTION |\ XCB_EVENT_MASK_BUTTON_PRESS |\ XCB_EVENT_MASK_BUTTON_RELEASE |\ XCB_EVENT_MASK_KEY_PRESS; xcb_create_window(connection, XCB_COPY_FROM_PARENT, window, screen->root, 0, 0, image_width, image_height, 0, XCB_WINDOW_CLASS_INPUT_OUTPUT, screen->root_visual, mask, values); xcb_map_window(connection, window); uint32_t *image = malloc(sizeof(uint32_t) * image_width * image_height); memset((void *) image, BACKGROUND, sizeof(uint32_t) * image_width * image_height); int i; for (i = 0; i < image_width * image_height; ++i) { *((unsigned char *)(image + i) + 3) = 0x00; } drawing->image = image; xcb_pixmap_t pixmap = xcb_generate_id(connection); xcb_create_pixmap(connection, 24, pixmap, window, image_width, image_height); mask = XCB_GC_GRAPHICS_EXPOSURES; values[0] = 0; values[1] = 0; xcb_create_gc(connection, draw, window, mask, values); 
printf("Screen depth: %d\n", screen->root_depth); xcb_put_image(connection, XCB_IMAGE_FORMAT_Z_PIXMAP, pixmap, draw, image_width, image_height, 0, 0, 0, 24, (4 * image_width * image_height), (void *) image); xcb_flush(connection); const double divider = (double) (1ull << 32); xcb_input_xi_query_device_reply_t *devices_reply; xcb_input_xi_query_device_cookie_t devices_cookie; devices_cookie = xcb_input_xi_query_device(connection, XCB_INPUT_DEVICE_ALL); devices_reply = xcb_input_xi_query_device_reply(connection, devices_cookie, NULL); xcb_input_xi_device_info_iterator_t devices_iterator = \ xcb_input_xi_query_device_infos_iterator(devices_reply); for (; devices_iterator.rem; xcb_input_xi_device_info_next(&devices_iterator)) { xcb_input_xi_device_info_t *info = devices_iterator.data; char *name = xcb_input_xi_device_info_name(info); printf("%d %d %s %d %d\n", info->deviceid, info->type, name, info->attachment, info->num_classes); if (strstr(name, vendor_name) && strstr(name, stylus)) { graphics_tablet_stylus_device_id = info->deviceid; printf("Found %s graphics tablet stylus with device id = %d\n", vendor_name, graphics_tablet_stylus_device_id); xcb_input_device_class_iterator_t class_iterator = \ xcb_input_xi_device_info_classes_iterator(info); for (; class_iterator.rem; xcb_input_device_class_next(&class_iterator)) { xcb_input_device_class_t * class = class_iterator.data; if (class->type == XCB_INPUT_DEVICE_CLASS_TYPE_VALUATOR) { xcb_input_valuator_class_t *class_data = (void *) class; if (class_data) { { double min, max, range; uint32_t res = class_data->resolution; min = (((double) class_data->min.integral) + ((double) class_data->min.frac) / divider); max = (((double) class_data->max.integral) + ((double) class_data->max.frac) / divider); range = max - min; printf("%d res = %d min = %f max = %f range = %f\n", class_data->number, res, min, max, range); } if (class_data->number + 1 == graphics_tablet_stylus_pressure_axis_number) { 
graphics_tablet_stylus_pressure_axis_resolution = class_data->resolution; graphics_tablet_stylus_pressure_axis_min = (((double) class_data->min.integral) + ((double) class_data->min.frac) / divider); graphics_tablet_stylus_pressure_axis_max = (((double) class_data->max.integral) + ((double) class_data->max.frac) / divider); printf("Pressure axis number %d with min = %f, max = %f, and resolution = %d, range = %f\n", graphics_tablet_stylus_pressure_axis_number, graphics_tablet_stylus_pressure_axis_min, graphics_tablet_stylus_pressure_axis_max, graphics_tablet_stylus_pressure_axis_resolution, graphics_tablet_stylus_pressure_axis_max - graphics_tablet_stylus_pressure_axis_min); } if (class_data->number + 1 == graphics_tablet_stylus_x_axis_number) { graphics_tablet_stylus_x_axis_resolution = class_data->resolution; graphics_tablet_stylus_x_axis_min = (((double) class_data->min.integral) + ((double) class_data->min.frac) / divider); graphics_tablet_stylus_x_axis_max = (((double) class_data->max.integral) + ((double) class_data->max.frac) / divider); printf("X axis number %d with min = %f, max = %f, and resolution = %d, range = %f\n", graphics_tablet_stylus_x_axis_number, graphics_tablet_stylus_x_axis_min, graphics_tablet_stylus_x_axis_max, graphics_tablet_stylus_x_axis_resolution, graphics_tablet_stylus_x_axis_max - graphics_tablet_stylus_x_axis_min); } if (class_data->number + 1 == graphics_tablet_stylus_y_axis_number) { graphics_tablet_stylus_y_axis_resolution = class_data->resolution; graphics_tablet_stylus_y_axis_min = (((double) class_data->min.integral) + ((double) class_data->min.frac) / divider); graphics_tablet_stylus_y_axis_max = (((double) class_data->max.integral) + ((double) class_data->max.frac) / divider); printf("Y axis number %d with min = %f, max = %f, and resolution = %d, range = %f\n", graphics_tablet_stylus_y_axis_number, graphics_tablet_stylus_y_axis_min, graphics_tablet_stylus_y_axis_max, graphics_tablet_stylus_y_axis_resolution, 
graphics_tablet_stylus_y_axis_max - graphics_tablet_stylus_y_axis_min); } } } } } } struct { xcb_input_event_mask_t iem; xcb_input_xi_event_mask_t xiem; } se_mask; se_mask.iem.deviceid = XCB_INPUT_DEVICE_ALL; se_mask.iem.mask_len = 1; se_mask.xiem = XCB_INPUT_XI_EVENT_MASK_MOTION | XCB_INPUT_XI_EVENT_MASK_BUTTON_PRESS | XCB_INPUT_XI_EVENT_MASK_BUTTON_RELEASE; xcb_input_xi_select_events(connection, window, 1, &se_mask.iem); xcb_flush(connection); if (graphics_tablet_stylus_device_id) { xcb_input_xi_passive_grab_device_cookie_t cookie; xcb_input_xi_passive_grab_device_reply_t *reply; const uint32_t mask[] = { XCB_EVENT_MASK_POINTER_MOTION | XCB_EVENT_MASK_BUTTON_PRESS | XCB_EVENT_MASK_BUTTON_RELEASE }; const uint32_t mods[] = { XCB_INPUT_MODIFIER_MASK_ANY }; cookie = xcb_input_xi_passive_grab_device(connection, XCB_CURRENT_TIME, screen->root, XCB_CURSOR_NONE, 0, graphics_tablet_stylus_device_id, 1, 1, XCB_INPUT_GRAB_TYPE_FOCUS_IN, XCB_INPUT_GRAB_MODE_22_ASYNC, XCB_INPUT_GRAB_MODE_22_ASYNC, XCB_INPUT_GRAB_OWNER_NO_OWNER, mask, mods); reply = xcb_input_xi_passive_grab_device_reply(connection, cookie, NULL); free(reply); struct { xcb_input_event_mask_t iem; xcb_input_xi_event_mask_t xiem; } se_mask; se_mask.iem.deviceid = graphics_tablet_stylus_device_id; se_mask.iem.mask_len = 1; se_mask.xiem = XCB_INPUT_XI_EVENT_MASK_MOTION | XCB_INPUT_XI_EVENT_MASK_RAW_MOTION | XCB_INPUT_XI_EVENT_MASK_BUTTON_PRESS | XCB_INPUT_XI_EVENT_MASK_BUTTON_RELEASE | XCB_INPUT_XI_EVENT_MASK_RAW_BUTTON_PRESS | XCB_INPUT_XI_EVENT_MASK_RAW_BUTTON_RELEASE ; xcb_input_xi_select_events(connection, screen->root, 1, &se_mask.iem); xcb_flush(connection); } uint16_t root_width, root_height; root_width = screen->width_in_pixels; root_height = screen->height_in_pixels; xcb_generic_event_t *event; uint16_t win_original_conf_x = 0, win_original_conf_y = 0; uint16_t win_pos_x = 0, win_pos_y = 0; float pressure = 0.0f; int colors_index = -1; while ((event = xcb_wait_for_event(connection))) { switch 
(event->response_type & ~0x80) { case XCB_CONFIGURE_NOTIFY: { xcb_configure_notify_event_t *nt_event = (void *) event; if (!win_original_conf_x) { win_original_conf_x = nt_event->x; } if (!win_original_conf_y) { win_original_conf_y = nt_event->y; } if (win_original_conf_x != nt_event->x) { win_pos_x = nt_event->x; } if (win_original_conf_y != nt_event->y) { win_pos_y = nt_event->y; } break; } case XCB_GE_GENERIC: { const double prev_x = drawing->x; const double prev_y = drawing->y; xcb_ge_generic_event_t * ge_event = (void *) event; switch (ge_event->event_type) { case XCB_INPUT_RAW_BUTTON_PRESS: { xcb_input_raw_button_press_event_t * rbt_event = (void *) event; if (rbt_event->deviceid > 3 && rbt_event->deviceid == graphics_tablet_stylus_device_id) { if (rbt_event->detail == 1) { drawing->is_drawing = 1; pressure = 1.0; int axis_len = xcb_input_raw_button_press_axisvalues_length(rbt_event); if (axis_len) { xcb_input_fp3232_t *axisvalues = xcb_input_raw_button_press_axisvalues_raw(rbt_event); int i = 0; for (; i < axis_len; ++i) { xcb_input_fp3232_t value = axisvalues[i]; double dbl_value = ((double) value.integral + (double) value.frac / divider); if (i == graphics_tablet_stylus_x_axis_number - 1) { long pos_x = root_width * dbl_value / (graphics_tablet_stylus_x_axis_max - graphics_tablet_stylus_x_axis_min); drawing->x = pos_x - win_pos_x; } if (i == graphics_tablet_stylus_y_axis_number - 1) { long pos_y = root_height * dbl_value / (graphics_tablet_stylus_y_axis_max - graphics_tablet_stylus_y_axis_min); drawing->y = pos_y - win_pos_y; } if (i == graphics_tablet_stylus_pressure_axis_number - 1) { pressure = pressure * (float) (dbl_value / graphics_tablet_stylus_pressure_axis_max); } } } } } break; } case XCB_INPUT_RAW_BUTTON_RELEASE: { xcb_input_raw_button_release_event_t * rbt_event = (void *) event; if (rbt_event->deviceid > 3 && rbt_event->deviceid == graphics_tablet_stylus_device_id) { if (rbt_event->detail == 1) { drawing->is_drawing = 0; pressure = 0.0; } } 
break; } case XCB_INPUT_RAW_MOTION: { xcb_input_raw_motion_event_t *rmt_event = (void *) event; if (rmt_event->deviceid > 3 && rmt_event->deviceid == graphics_tablet_stylus_device_id) { int axis_len = xcb_input_raw_button_press_axisvalues_length(rmt_event); if (axis_len) { xcb_input_fp3232_t *axisvalues = xcb_input_raw_button_press_axisvalues_raw(rmt_event); int i = 0; for (; i < axis_len; ++i) { xcb_input_fp3232_t value = axisvalues[i]; double dbl_value = ((double) value.integral + (double) value.frac / divider); if (i == graphics_tablet_stylus_x_axis_number - 1) { long pos_x = root_width * dbl_value / (graphics_tablet_stylus_x_axis_max - graphics_tablet_stylus_x_axis_min); drawing->x = pos_x - win_pos_x; } if (i == graphics_tablet_stylus_y_axis_number - 1) { long pos_y = root_height * dbl_value / (graphics_tablet_stylus_y_axis_max - graphics_tablet_stylus_y_axis_min); drawing->y = pos_y - win_pos_y; } if (i == graphics_tablet_stylus_pressure_axis_number - 1) { pressure = (float) (dbl_value / graphics_tablet_stylus_pressure_axis_max); } } } } break; } case XCB_INPUT_BUTTON_PRESS: { xcb_input_button_press_event_t *bt_event = (void *) event; if (bt_event->deviceid != graphics_tablet_stylus_device_id) { uint32_t *button_mask = xcb_input_button_press_button_mask(bt_event); int button_len = xcb_input_button_press_button_mask_length(bt_event); if (bt_event->deviceid > 3 && button_len && bt_event->detail == 1 && !button_mask[0]) { drawing->is_drawing = 1; pressure = 1.0; win_pos_x = (bt_event->root_x >> 16) - (bt_event->event_x >> 16); win_pos_y = (bt_event->root_y >> 16) - (bt_event->event_y >> 16); drawing->x = (bt_event->event_x >> 16); drawing->y = (bt_event->event_y >> 16); } } break; } case XCB_INPUT_BUTTON_RELEASE: { xcb_input_button_release_event_t * bt_event = (void *) event; if (bt_event->deviceid != graphics_tablet_stylus_device_id) { uint32_t *button_mask = xcb_input_button_press_button_mask(bt_event); int button_len = 
xcb_input_button_press_button_mask_length(bt_event); if (bt_event->deviceid > 3 && button_len && bt_event->detail == 1 && button_mask[0]) { win_pos_x = (bt_event->root_x >> 16) - (bt_event->event_x >> 16); win_pos_y = (bt_event->root_y >> 16) - (bt_event->event_y >> 16); drawing->is_drawing = 0; pressure = 0.0; } } break; } case XCB_INPUT_MOTION: { xcb_input_motion_event_t *mt_event = (void *) event; if (mt_event->deviceid > 3 && mt_event->deviceid != graphics_tablet_stylus_device_id) { drawing->x = (mt_event->root_x >> 16) - win_pos_x; drawing->y = (mt_event->root_y >> 16) - win_pos_y; } break; } default: break; } if (!drawing->is_drawing || drawing->current == NULL || drawing->current->image == NULL) { break; } if (pressure <= 0.0) { break; } /* Draw to image buffer. */ const int width = drawing->current->image->width; const int height = drawing->current->image->height; color *pix = drawing->current->image->data; Brush *brush = drawing->active_brushes; rect invalid_area; invalid_area.x = image_width; invalid_area.y = image_height; invalid_area.width = 0; invalid_area.height = 0; while (brush != NULL) { double brush_density = brush->density; double brush_radius = brush->radius * pressure; double brush_hardness = brush->hardness; double brush_alpha = 1.0; double brush_smudge = brush->smudge * pressure; if (brush->is_drawing && brush_density > 0 && brush_radius > 0 && brush_hardness > 0) { double t; for (t = 0.0; t < 1.0 + brush_density; t += brush_density) { const double x = t * drawing->x + (1 - t) * prev_x; const double y = t * drawing->y + (1 - t) * prev_y; const int xi = x, yi = y; const double brush_radius_sq = brush_radius * brush_radius; unsigned int total_pixels = 0; color total_color = {{0, 0, 0, 0}}; /* Draw circular brush mark. 
*/ const int brush_bounding_size = 2 * ceil(brush_radius) + 1; int i; #pragma omp parallel for for (i = xi - ceil(brush_radius); i <= xi + brush_bounding_size; ++i) { int j; #pragma omp parallel for for (j = yi - ceil(brush_radius); j <= yi + brush_bounding_size; ++j) { double blend_factor = 0.0; double alpha = 1.0; double distance_sq = (i - x) * (i - x) + (j - y) * (j - y); if (i >= 0 && j >= 0 && i < width && j < height && distance_sq <= brush_radius_sq) { unsigned int index = (j * width + i); color final_color = pix[index]; color brush_color; color_blend_single_struct (brush->medium_color.alpha, brush->color.vector, brush->medium_color.vector, &brush_color); if (distance_sq / brush_radius_sq >= brush_hardness) { alpha = brush_hardness * brush_hardness * brush_radius_sq / distance_sq; } alpha *= brush_alpha; if (brush->is_erasing) { if (final_color.alpha > 0) { blend_factor = alpha * brush_color.alpha; if (final_color.alpha > 0) { final_color.alpha = blend (blend_factor, final_color.alpha, 0); } } } else { if (final_color.alpha > 0) { blend_factor = alpha * brush_color.alpha; color_blend_single_struct (blend_factor, final_color.vector, brush_color.vector, &final_color); } else { final_color = brush_color; final_color.alpha = alpha * final_color.alpha; } #pragma omp critical { color_add_struct (total_color.vector, final_color.vector, &total_color); total_pixels += 1; } } if (!brush->is_picking) { pix[index].red = final_color.red; pix[index].green = final_color.green; pix[index].blue = final_color.blue; pix[index].alpha = final_color.alpha; } } } } if (total_pixels > 0) { total_color.red /= total_pixels; total_color.green /= total_pixels; total_color.blue /= total_pixels; total_color.alpha /= total_pixels; if (brush->is_smudging) { color_blend_single_struct (brush_smudge, brush->color.vector, total_color.vector, &brush->color); } if (brush->is_picking) { drawing->color = total_color; brush->color = drawing->color; } } int invalid_area_x = xi - brush_radius; int 
invalid_area_y = yi - brush_radius; while (width - invalid_area_x < brush_bounding_size) { --invalid_area_x; } while (height - invalid_area_y < brush_bounding_size) { --invalid_area_y; } int invalid_area_width = brush_bounding_size; int invalid_area_height = brush_bounding_size; invalid_area.x = min(invalid_area.x, invalid_area_x); invalid_area.y = min(invalid_area.y, invalid_area_y); invalid_area.width = max(invalid_area.width, invalid_area_width); invalid_area.height = max(invalid_area.height, invalid_area_height); } } brush = brush->next; } /* Draw to screen and update image file buffer. */ update(drawing, invalid_area, image_width, image_height, image, connection, window, draw, pixmap, BACKGROUND); break; } case XCB_EXPOSE: { xcb_copy_area(connection, pixmap, window, draw, 0, 0, 0, 0, image_width, image_height); xcb_flush(connection); break; } case XCB_KEY_PRESS: { xcb_key_press_event_t *key_event = (void *) event; printf("Keycode: %d, %d\n", key_event->detail, key_event->state); switch (key_event->detail) { case 31: { /*key: i; increase brush size*/ if (key_event->state & XCB_MOD_MASK_SHIFT) { /*key: shift-i; decrease brush size*/ Brush *brush = drawing->active_brushes; while (brush != NULL) { brush->radius -= 1; if (brush->radius < 0) { brush->radius = 0; } brush = brush->next; } /*printf("Maybe decreased brush size to %d\n", brush->size);*/ } else { Brush *brush = drawing->active_brushes; while (brush != NULL) { brush->radius += 1; if (brush->radius > BRUSH_SIZE_MAX) { brush->radius = BRUSH_SIZE_MAX; } brush = brush->next; } /*printf("Maybe increased brush size to %d\n", brush->size);*/ } break; } case 32: { /*key: o; increase brush alpha*/ if (key_event->state & XCB_MOD_MASK_SHIFT) { /*key: shift-o; decrease brush alpha*/ Brush *brush = drawing->active_brushes; while (brush != NULL) { brush->color.alpha -= 0.01; if (brush->color.alpha < 0) { brush->color.alpha = 0; } brush = brush->next; } /*printf("Maybe decreased brush alpha to %d\n", 
brush->color.alpha);*/ } else { Brush *brush = drawing->active_brushes; while (brush != NULL) { brush->color.alpha += 0.01; if (brush->color.alpha > 1) { brush->color.alpha = 1; } brush = brush->next; } /*printf("Maybe increased brush alpha to %d\n", brush->color.alpha);*/ } break; } case 39: { /*key: s; maybe save image file*/ if (key_event->state & XCB_MOD_MASK_SHIFT) { /*shift-s saves image data to file*/ if (image_file_name) { TIFF *tif = TIFFOpen(image_file_name, "w"); if (tif) { TIFFSetField(tif, TIFFTAG_IMAGEWIDTH, image_width); TIFFSetField(tif, TIFFTAG_IMAGELENGTH, image_height); TIFFSetField(tif, TIFFTAG_SAMPLEFORMAT, SAMPLEFORMAT_UINT); TIFFSetField(tif, TIFFTAG_SAMPLESPERPIXEL, 4); TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 8); TIFFSetField(tif, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG); TIFFSetField(tif, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_RGB); TIFFSetField(tif, TIFFTAG_TILEWIDTH, image_width); TIFFSetField(tif, TIFFTAG_TILELENGTH, image_height); TIFFWriteTile(tif, image, 0, 0, 0, 0); TIFFClose(tif); printf("Saved image to file %s\n", image_file_name); } } } else { /*key: s; toggle smudge on / off*/ Brush *brush = drawing->active_brushes; while (brush != NULL) { brush->is_smudging = brush->is_smudging ? 0 : 1; if (!brush->is_smudging) { brush->color = drawing->color; } brush = brush->next; } } break; } case 56: { /*key: b; toggle paint on / off*/ drawing->is_drawing = drawing->is_drawing ? 0 : 1; Brush *brush = drawing->active_brushes; while (brush != NULL) { brush->is_drawing = brush->is_drawing ? 0 : 1; brush = brush->next; } break; } case 26: { /*key: e; toggle erase on / off*/ Brush *brush = drawing->active_brushes; while (brush != NULL) { brush->is_erasing = brush->is_erasing ? 0 : 1; brush = brush->next; } break; } case 33: { /*key: p; toggle pick on / off*/ Brush *brush = drawing->active_brushes; while (brush != NULL) { brush->is_picking = brush->is_picking ? 
0 : 1; brush = brush->next; } break; } case 46: { /*key: l; layer management*/ if (key_event->state & XCB_MOD_MASK_SHIFT) { /*shift-l deletes topmost layer*/ del_top_layer(drawing); if (drawing->current == NULL) { memset((void *) image, 0x0, sizeof(uint32_t) * image_width * image_height); } rect invalid_area = { 0, 0, image_width, image_height }; update(drawing, invalid_area, image_width, image_height, image, connection, window, draw, pixmap, BACKGROUND); } else { add_top_layer(drawing, image_width, image_height); } break; } case 10: { /*key: 1; color number 1*/ colors_index = 0; drawing->color = *Colors[colors_index]; Brush *brush = drawing->active_brushes; while (brush != NULL) { brush->color = *Colors[colors_index]; brush = brush->next; } break; } case 11: { /*key: 2; color number 2*/ colors_index = 1; drawing->color = *Colors[colors_index]; Brush *brush = drawing->active_brushes; while (brush != NULL) { brush->color = *Colors[colors_index]; brush = brush->next; } break; } case 12: { /*key: 3; color number 3*/ colors_index = 2; drawing->color = *Colors[colors_index]; Brush *brush = drawing->active_brushes; while (brush != NULL) { brush->color = *Colors[colors_index]; brush = brush->next; } break; } case 13: { /*key: 4; color number 4*/ colors_index = 3; drawing->color = *Colors[colors_index]; Brush *brush = drawing->active_brushes; while (brush != NULL) { brush->color = *Colors[colors_index]; brush = brush->next; } break; } case 14: { /*key: 5; color number 5*/ colors_index = 4; drawing->color = *Colors[colors_index]; Brush *brush = drawing->active_brushes; while (brush != NULL) { brush->color = *Colors[colors_index]; brush = brush->next; } break; } case 54: { /*key: c; next color*/ if (colors_index >= 0) { if (key_event->state & XCB_MOD_MASK_SHIFT) { /*shift-c previous color*/ colors_index -= 1; if (colors_index < 0) { colors_index = colors - 1; } } else { colors_index += 1; if (colors_index >= colors) { colors_index = 0; } } drawing->color = 
*Colors[colors_index]; Brush *brush = drawing->active_brushes; while (brush != NULL) { brush->color = *Colors[colors_index]; brush = brush->next; } } break; } default: break; } } default: break; } free(event); } free(devices_reply); xcb_free_pixmap(connection, pixmap); xcb_disconnect(connection); free(image); while (drawing->bottom != NULL) { FloatingLayer *current = drawing->bottom; drawing->bottom = drawing->bottom->next; floating_layer_del(current); } return 0; }
tanh.c
#include <cdnn/activations.h>
#include <cdnn/model.h>
#include <math.h>
#include <omp.h>

/* Global model handle owned by the cdnn framework. */
extern __Model__ * m;

/*
 * Forward pass: element-wise tanh of the current dense layer's pre-activation
 * cache.  Returns a freshly allocated dARRAY (caller owns and must free it).
 *
 * Uses tanhf() instead of the hand-rolled (e^x - e^-x)/(e^x + e^-x): the
 * explicit-exp form overflows to inf/inf = NaN once |x| exceeds ~88 for
 * float exponents, while tanhf saturates correctly to +/-1.
 */
dARRAY * forward_pass_tanh(){
  dARRAY * tanh_out = (dARRAY*)malloc(sizeof(dARRAY));
  int total = m->current_layer->DENSE->cache->shape[0]
            * m->current_layer->DENSE->cache->shape[1];
  tanh_out->matrix = (float*)calloc(total,sizeof(float));
  omp_set_num_threads(8);
  #pragma omp parallel for num_threads(8) shared(m,tanh_out) schedule(static)
  for(int i=0;i<total;i++){
    tanh_out->matrix[i] = tanhf(m->current_layer->DENSE->cache->matrix[i]);
  }
  tanh_out->shape[0] = m->current_layer->DENSE->cache->shape[0];
  tanh_out->shape[1] = m->current_layer->DENSE->cache->shape[1];
  return tanh_out;
}

/*
 * Backward pass: element-wise gradient of tanh evaluated on the layer's
 * activation input A, using g'(z) = 1 - tanh(z)^2.  Returns a freshly
 * allocated dARRAY (caller owns and must free it).
 */
dARRAY * backward_pass_tanh(){
  dARRAY * tanh_out = (dARRAY*)malloc(sizeof(dARRAY));
  int total = m->current_layer->DENSE->A->shape[0]
            * m->current_layer->DENSE->A->shape[1];
  tanh_out->matrix = (float*)calloc(total,sizeof(float));
  omp_set_num_threads(8);
  #pragma omp parallel for num_threads(8) shared(m,tanh_out) schedule(static)
  for(int i=0;i<total;i++){
    float t = tanhf(m->current_layer->DENSE->A->matrix[i]);
    /* plain multiply instead of pow(t, 2.0f): same value, no libm call */
    tanh_out->matrix[i] = 1.0f - t * t;
  }
  tanh_out->shape[0] = m->current_layer->DENSE->A->shape[0];
  tanh_out->shape[1] = m->current_layer->DENSE->A->shape[1];
  return tanh_out;
}

/*
 * Allocate a Tanh activation descriptor wired to the forward/backward
 * kernels above, recording the dimensions of the given linear output.
 * Caller owns the returned struct.
 */
Tanh * Tanh__init__(dARRAY * linear_matrix){
  Tanh * tanh = (Tanh*)malloc(sizeof(Tanh));
  tanh->forward = forward_pass_tanh;
  tanh->backward = backward_pass_tanh;
  tanh->in_dims[0] = tanh->out_dims[0] = linear_matrix->shape[0];
  tanh->in_dims[1] = tanh->out_dims[1] = linear_matrix->shape[1];
  return tanh;
}

/*
 * Dispatch entry point: runs the forward pass when args.status is 0,
 * otherwise the backward pass.
 *
 * Bug fix: the original returned directly from both branches, so its
 * trailing free(t) was unreachable and every call leaked one Tanh struct.
 * The result is now captured first so the descriptor can be released
 * before returning.
 */
dARRAY * (TanH)(Tanh_args args){
  Tanh * t = Tanh__init__(args.input);
  dARRAY * result = (!args.status) ? t->forward() : t->backward();
  free(t);
  return result;
}
resource_manager.h
// ----------------------------------------------------------------------------- // // Copyright (C) The BioDynaMo Project. // All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // // See the LICENSE file distributed with this work for details. // See the NOTICE file distributed with this work for additional information // regarding copyright ownership. // // ----------------------------------------------------------------------------- #ifndef CORE_RESOURCE_MANAGER_H_ #define CORE_RESOURCE_MANAGER_H_ #include <omp.h> #include <sched.h> #include <algorithm> #include <cmath> #include <limits> #include <memory> #include <ostream> #include <set> #include <string> #include <unordered_map> #include <utility> #include <vector> #if defined(USE_OPENCL) && !defined(__ROOTCLING__) #ifdef __APPLE__ #define CL_HPP_ENABLE_EXCEPTIONS #define CL_HPP_ENABLE_PROGRAM_CONSTRUCTION_FROM_ARRAY_COMPATIBILITY #define CL_HPP_MINIMUM_OPENCL_VERSION 120 #define CL_HPP_TARGET_OPENCL_VERSION 120 #include "cl2.hpp" #else #define __CL_ENABLE_EXCEPTIONS #include <CL/cl2.hpp> #endif #endif #include "core/agent/agent.h" #include "core/agent/agent_handle.h" #include "core/agent/agent_uid.h" #include "core/agent/agent_uid_generator.h" #include "core/container/agent_uid_map.h" #include "core/diffusion_grid.h" #include "core/operation/operation.h" #include "core/simulation.h" #include "core/type_index.h" #include "core/util/numa.h" #include "core/util/root.h" #include "core/util/thread_info.h" #include "core/util/type.h" namespace bdm { /// ResourceManager stores agents and diffusion grids and provides /// methods to add, remove, and access them. Agents are uniquely identified /// by their AgentUid, and AgentHandle. An AgentHandle might change during the /// simulation. 
class ResourceManager { public: explicit ResourceManager(TRootIOCtor* r) {} ResourceManager(); virtual ~ResourceManager() { for (auto& el : diffusion_grids_) { delete el.second; } for (auto& numa_agents : agents_) { for (auto* agent : numa_agents) { delete agent; } } if (type_index_) { delete type_index_; } } ResourceManager& operator=(ResourceManager&& other) { if (agents_.size() != other.agents_.size()) { Log::Fatal( "Restored ResourceManager has different number of NUMA nodes."); } for (auto& el : diffusion_grids_) { delete el.second; } for (auto& numa_agents : agents_) { for (auto* agent : numa_agents) { delete agent; } } agents_ = std::move(other.agents_); diffusion_grids_ = std::move(other.diffusion_grids_); RebuildAgentUidMap(); // restore type_index_ if (type_index_) { for (auto& numa_agents : agents_) { for (auto* agent : numa_agents) { type_index_->Add(agent); } } } return *this; } void RebuildAgentUidMap() { // rebuild uid_ah_map_ uid_ah_map_.clear(); auto* agent_uid_generator = Simulation::GetActive()->GetAgentUidGenerator(); uid_ah_map_.resize(agent_uid_generator->GetHighestIndex() + 1); for (unsigned n = 0; n < agents_.size(); ++n) { for (unsigned i = 0; i < agents_[n].size(); ++i) { auto* agent = agents_[n][i]; this->uid_ah_map_.Insert(agent->GetUid(), AgentHandle(n, i)); } } } Agent* GetAgent(const AgentUid& uid) { if (!uid_ah_map_.Contains(uid)) { return nullptr; } auto& ah = uid_ah_map_[uid]; return agents_[ah.GetNumaNode()][ah.GetElementIdx()]; } Agent* GetAgent(AgentHandle ah) { return agents_[ah.GetNumaNode()][ah.GetElementIdx()]; } AgentHandle GetAgentHandle(const AgentUid& uid) { return uid_ah_map_[uid]; } void AddDiffusionGrid(DiffusionGrid* dgrid) { uint64_t substance_id = dgrid->GetSubstanceId(); auto search = diffusion_grids_.find(substance_id); if (search != diffusion_grids_.end()) { Log::Fatal("ResourceManager::AddDiffusionGrid", "You tried to add a diffusion grid with an already existing " "substance id. 
Please choose a different substance id."); } else { diffusion_grids_[substance_id] = dgrid; } } void RemoveDiffusionGrid(size_t substance_id) { auto search = diffusion_grids_.find(substance_id); if (search != diffusion_grids_.end()) { delete search->second; diffusion_grids_.erase(search); } else { Log::Fatal("ResourceManager::AddDiffusionGrid", "You tried to remove a diffusion grid that does not exist."); } } /// Return the diffusion grid which holds the substance of specified id DiffusionGrid* GetDiffusionGrid(size_t substance_id) const { assert(substance_id < diffusion_grids_.size() && "You tried to access a diffusion grid that does not exist!"); return diffusion_grids_.at(substance_id); } /// Return the diffusion grid which holds the substance of specified name /// Caution: using this function in a tight loop will result in a slow /// simulation. Use `GetDiffusionGrid(size_t)` in those cases. DiffusionGrid* GetDiffusionGrid(std::string substance_name) const { for (auto& el : diffusion_grids_) { auto& dg = el.second; if (dg->GetSubstanceName() == substance_name) { return dg; } } assert(false && "You tried to access a diffusion grid that does not exist! " "Did you specify the correct substance name?"); return nullptr; } /// Execute the given functor for all diffusion grids /// rm->ForEachDiffusionGrid([](DiffusionGrid* dgrid) { /// ... 
/// }); template <typename TFunctor> void ForEachDiffusionGrid(TFunctor&& f) const { for (auto& el : diffusion_grids_) { f(el.second); } } /// Returns the total number of agents if numa_node == -1 /// Otherwise the number of agents in the specific numa node size_t GetNumAgents(int numa_node = -1) const { if (numa_node == -1) { size_t num_agents = 0; for (auto& numa_agents : agents_) { num_agents += numa_agents.size(); } return num_agents; } else { return agents_[numa_node].size(); } } /// Apply a function on all elements in every container /// @param function that will be called with each container as a parameter /// /// rm->ForEachAgent([](Agent* element) { /// std::cout << *element << std::endl; /// }); virtual void ForEachAgent(const std::function<void(Agent*)>& function) { for (auto& numa_agents : agents_) { for (auto* agent : numa_agents) { function(agent); } } } virtual void ForEachAgent( const std::function<void(Agent*, AgentHandle)>& function) { for (uint64_t n = 0; n < agents_.size(); ++n) { auto& numa_agents = agents_[n]; for (uint64_t i = 0; i < numa_agents.size(); ++i) { function(numa_agents[i], AgentHandle(n, i)); } } } /// Apply a function on all elements.\n /// Function invocations are parallelized.\n /// Uses static scheduling. /// \see ForEachAgent virtual void ForEachAgentParallel(Functor<void, Agent*>& function); /// Apply an operation on all elements.\n /// Function invocations are parallelized.\n /// Uses static scheduling. /// \see ForEachAgent virtual void ForEachAgentParallel(Operation& op); virtual void ForEachAgentParallel( Functor<void, Agent*, AgentHandle>& function); /// Apply a function on all elements.\n /// Function invocations are parallelized.\n /// Uses dynamic scheduling and work stealing. Batch size controlled by /// `chunk`. 
/// \param chunk number of agents that are assigned to a thread (batch /// size) /// \see ForEachAgent virtual void ForEachAgentParallel( uint64_t chunk, Functor<void, Agent*, AgentHandle>& function); /// Reserves enough memory to hold `capacity` number of agents for /// each numa domain. void Reserve(size_t capacity) { for (auto& numa_agents : agents_) { numa_agents.reserve(capacity); } if (type_index_) { type_index_->Reserve(capacity); } } /// Resize `agents_[numa_node]` such that it holds `current + additional` /// elements after this call. /// Returns the size after uint64_t GrowAgentContainer(size_t additional, size_t numa_node) { if (additional == 0) { return agents_[numa_node].size(); } auto current = agents_[numa_node].size(); if (current + additional < agents_[numa_node].size()) { agents_[numa_node].reserve((current + additional) * 1.5); } agents_[numa_node].resize(current + additional); return current; } /// Returns true if an agent with the given uid is stored in this /// ResourceManager. bool ContainsAgent(const AgentUid& uid) const { return uid_ah_map_.Contains(uid); } /// Remove all agents /// NB: This method is not thread-safe! This function invalidates /// agent references pointing into the ResourceManager. AgentPointer are /// not affected. void ClearAgents() { uid_ah_map_.clear(); for (auto& numa_agents : agents_) { for (auto* agent : numa_agents) { delete agent; } numa_agents.clear(); } if (type_index_) { type_index_->Clear(); } } /// Reorder agents such that, agents are distributed to NUMA /// nodes. Nearby agents will be moved to the same NUMA node. virtual void LoadBalance(); void DebugNuma() const; /// NB: This method is not thread-safe! This function might invalidate /// agent references pointing into the ResourceManager. AgentPointer are /// not affected. 
void AddAgent(Agent* agent, // NOLINT typename AgentHandle::NumaNode_t numa_node = 0) { auto uid = agent->GetUid(); if (uid.GetIndex() >= uid_ah_map_.size()) { uid_ah_map_.resize(uid.GetIndex() + 1); } agents_[numa_node].push_back(agent); uid_ah_map_.Insert(uid, AgentHandle(numa_node, agents_[numa_node].size() - 1)); if (type_index_) { type_index_->Add(agent); } } void ResizeAgentUidMap() { auto* agent_uid_generator = Simulation::GetActive()->GetAgentUidGenerator(); auto highest_idx = agent_uid_generator->GetHighestIndex(); auto new_size = highest_idx * 1.5 + 1; if (highest_idx >= uid_ah_map_.size()) { uid_ah_map_.resize(new_size); } if (type_index_) { type_index_->Reserve(new_size); } } void EndOfIteration() { // Check if SoUiD defragmentation should be turned on or off double utilization = static_cast<double>(GetNumAgents()) / static_cast<double>(uid_ah_map_.size()); auto* sim = Simulation::GetActive(); auto* param = sim->GetParam(); if (utilization < param->agent_uid_defragmentation_low_watermark) { sim->GetAgentUidGenerator()->EnableDefragmentation(&uid_ah_map_); } else if (utilization > param->agent_uid_defragmentation_high_watermark) { sim->GetAgentUidGenerator()->DisableDefragmentation(); } } /// Adds `new_agents` to `agents_[numa_node]`. `offset` specifies /// the index at which the first element is inserted. Agents are inserted /// consecutively. This methos is thread safe only if insertion intervals do /// not overlap! virtual void AddAgents(typename AgentHandle::NumaNode_t numa_node, uint64_t offset, const std::vector<Agent*>& new_agents) { uint64_t i = 0; for (auto* agent : new_agents) { auto uid = agent->GetUid(); uid_ah_map_.Insert(uid, AgentHandle(numa_node, offset + i)); agents_[numa_node][offset + i] = agent; i++; } if (type_index_) { #pragma omp critical for (auto* agent : new_agents) { type_index_->Add(agent); } } } /// Removes the agent with the given uid.\n /// NB: This method is not thread-safe! 
This function invalidates /// agent references pointing into the ResourceManager. AgentPointer are /// not affected. void RemoveAgent(const AgentUid& uid) { // remove from map if (uid_ah_map_.Contains(uid)) { auto ah = uid_ah_map_[uid]; uid_ah_map_.Remove(uid); // remove from vector auto& numa_agents = agents_[ah.GetNumaNode()]; Agent* agent = nullptr; if (ah.GetElementIdx() == numa_agents.size() - 1) { agent = numa_agents.back(); numa_agents.pop_back(); } else { // swap agent = numa_agents[ah.GetElementIdx()]; auto* reordered = numa_agents.back(); numa_agents[ah.GetElementIdx()] = reordered; numa_agents.pop_back(); uid_ah_map_.Insert(reordered->GetUid(), ah); } if (type_index_) { type_index_->Remove(agent); } delete agent; } } const TypeIndex* GetTypeIndex() const { return type_index_; } protected: /// Maps an AgentUid to its storage location in `agents_` \n AgentUidMap<AgentHandle> uid_ah_map_ = AgentUidMap<AgentHandle>(100u); //! /// Pointer container for all agents std::vector<std::vector<Agent*>> agents_; /// Maps a diffusion grid ID to the pointer to the diffusion grid std::unordered_map<uint64_t, DiffusionGrid*> diffusion_grids_; ThreadInfo* thread_info_ = ThreadInfo::GetInstance(); //! TypeIndex* type_index_ = nullptr; friend class SimulationBackup; friend std::ostream& operator<<(std::ostream& os, const ResourceManager& rm); BDM_CLASS_DEF_NV(ResourceManager, 1); }; inline std::ostream& operator<<(std::ostream& os, const ResourceManager& rm) { os << "\033[1mAgents per numa node\033[0m" << std::endl; uint64_t cnt = 0; for (auto& numa_agents : rm.agents_) { os << "numa node " << cnt++ << " -> size: " << numa_agents.size() << std::endl; } return os; } } // namespace bdm #endif // CORE_RESOURCE_MANAGER_H_
GB_unop__identity_fc64_int8.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_fc64_int8)
// op(A') function:  GB (_unop_tran__identity_fc64_int8)

// C type:   GxB_FC64_t
// A type:   int8_t
// cast:     GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop:  cij = aij

#define GB_ATYPE \
    int8_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: output equals the already-cast input)
#define GB_OP(z, x) \
    z = x ;

// casting: widen int8 to a double-complex value with zero imaginary part
#define GB_CAST(z, aij) \
    GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int8_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_fc64_int8)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const int8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries to process
    int nthreads                // OpenMP thread count for the parallel loops
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every position holds an entry; each iteration is
        // independent, so a static schedule gives balanced, uniform work
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int8_t aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions whose bitmap bit says "no entry here"
            if (!Ab [p]) continue ;
            int8_t aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_fc64_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,   // per-workspace scratch for the transpose
    const int64_t *restrict A_slice, // partition of A's entries across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template expands using the GB_* macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
test.c
#include <stdio.h>
#include <omp.h>
#include <math.h>
#include "../utilities/check.h"
#include "../utilities/utilities.h"

// number of times each TESTD region is repeated (the macro exposes `trial`)
#define TRIALS (1)

// array length for all test vectors
#define N (992)

// initialize test data: C[i]=1, D[i]=i, E[i]=-i
#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;})

#define ZERO(X) ZERO_ARRAY(N, X)

// Test driver for `omp target parallel`: exercises num_threads, the
// combined/split `if` clauses, proc_bind, and data-sharing clauses
// (map/private/firstprivate) on the offload device.
// NOTE(review): TESTD and VERIFY come from ../utilities/utilities.h (not
// visible here); TESTD appears to run the region TRIALS times and VERIFY
// checks element i of the given expression over [lo, hi) — confirm there.
int main(void) {
  check_offloading();

  double A[N], B[N], C[N], D[N], E[N];
  INIT();
  int cpuExec = 0;
  // Probe whether the target region actually offloads or falls back to host.
  #pragma omp target map(tofrom: cpuExec)
  {
    cpuExec = omp_is_initial_device();
  }
  int gpu_threads = 128;
  int cpu_threads = 32;
  // expected thread count depends on where the target region executes
  int max_threads = cpuExec ? cpu_threads : gpu_threads;

  //
  // Test: omp_get_thread_num()
  //
  ZERO(A);
  TESTD("omp target parallel num_threads(max_threads)", {
    int tid = omp_get_thread_num();
    A[tid] += tid;
  }, VERIFY(0, max_threads, A[i], i*(trial+1)));

  //
  // Test: Execute parallel on device
  //
  TESTD("omp target parallel num_threads(max_threads)", {
    int i = omp_get_thread_num()*4;
    for (int j = i; j < i + 4; j++) {
      B[j] = D[j] + E[j];  // D[j] + E[j] == j + (-j) == 0
    }
  }, VERIFY(0, max_threads*4, B[i], (double)0));

  //
  // Test: if clause serial execution of parallel region on host
  //
  ZERO(A);
  TESTD("omp target parallel num_threads(max_threads) if(0)", {
    int tid = omp_get_thread_num();
    A[tid] = tid;  // serialized: only thread 0 runs, writes A[0]=0
  }, VERIFY(0, max_threads, A[i], 0));

  //
  // Test: if clause parallel execution of parallel region on device
  //
  ZERO(A);
  TESTD("omp target parallel num_threads(max_threads) if(A[0] == 0)", {
    int tid = omp_get_thread_num();
    A[tid] = tid + omp_is_initial_device();
  }, VERIFY(0, max_threads, A[i], i + cpuExec));

  //
  // Test: if clause serial execution of parallel region on device
  //
  ZERO(A);
  TESTD("omp target parallel num_threads(max_threads) if(parallel: 0)", {
    int tid = omp_get_thread_num();
    // serialized on the device: only A[0] is written, with 1 when offloaded
    A[tid] = !omp_is_initial_device();
  }, VERIFY(0, max_threads, A[i], i == 0 ? 1 - cpuExec : 0));

  //
  // Test: if clause parallel execution of parallel region on host
  //
  ZERO(A);
  TESTD("omp target parallel num_threads(max_threads) if(target: 0) if(parallel: A[0] == 0)", {
    int tid = omp_get_thread_num();
    // if(target: 0) forces host execution, so omp_is_initial_device() == 1
    A[tid] = tid + omp_is_initial_device();
  }, VERIFY(0, /* bound to */ cpu_threads, A[i], i+1));

  //
  // Test: if clause serial execution of parallel region on device without num_threads clause
  //
  ZERO(A);
  TESTD("omp target parallel if(parallel: A[0] > 0)", {
    int tid = omp_get_thread_num();
    A[tid] = omp_get_num_threads();  // serialized -> team size is 1
  }, VERIFY(0, 1, A[0], 1));

  //
  // Test: if clause parallel execution of parallel region on device without num_threads clause
  // The testcase should be launched with the default number of threads.
  //
  ZERO(A);
  #pragma omp target parallel
  {
    // Get default number of threads launched by this runtime.
    B[0] = omp_get_num_threads();
  }
  TESTD("omp target parallel if(parallel: A[0] == 0)", {
    int tid = omp_get_thread_num();
    A[tid] = omp_get_num_threads();
  }, VERIFY(0, 1, A[0], B[0]));

  //
  // Test: if clause parallel execution of parallel region on device with num_threads clause
  //
  ZERO(A);
  TESTD("omp target parallel num_threads(2) if(parallel: A[0] == 0)", {
    int tid = omp_get_thread_num();
    A[tid] = omp_get_num_threads();
  }, VERIFY(0, 1, A[0], 2));

  //
  // Test: proc_bind clause
  // (binding must not change results: B[j] = 1 + j + (-j) == 1)
  //
  TESTD("omp target parallel num_threads(max_threads) proc_bind(master)", {
    int i = omp_get_thread_num()*4;
    for (int j = i; j < i + 4; j++) {
      B[j] = 1 + D[j] + E[j];
    }
  }, VERIFY(0, max_threads*4, B[i], 1));
  TESTD("omp target parallel num_threads(max_threads) proc_bind(close)", {
    int i = omp_get_thread_num()*4;
    for (int j = i; j < i + 4; j++) {
      B[j] = 1 + D[j] + E[j];
    }
  }, VERIFY(0, max_threads*4, B[i], 1));
  TESTD("omp target parallel num_threads(max_threads) proc_bind(spread)", {
    int i = omp_get_thread_num()*4;
    for (int j = i; j < i + 4; j++) {
      B[j] = 1 + D[j] + E[j];
    }
  }, VERIFY(0, max_threads*4, B[i], 1));

  //
  // Test: num_threads on parallel.
  // Sweep every team size 1..max_threads; only the first t slots get 99.
  //
  for (int t = 1; t <= max_threads; t++) {
    ZERO(A);
    int threads[1];
    threads[0] = t;
    TESTD("omp target parallel num_threads(threads[0])", {
      int tid = omp_get_thread_num();
      A[tid] = 99;
    }, VERIFY(0, 128, A[i], 99*(i < t)));
  }
  // keep the success count consistent when max_threads < gpu_threads
  DUMP_SUCCESS(gpu_threads-max_threads);

  //
  // Test: sharing of variables from host to parallel region.
  //
  ZERO(A);
  {
    double tmp = 1;
    A[0] = tmp;
    TESTD("omp target parallel map(tofrom: tmp) num_threads(1)", {
      tmp = 2;
      A[0] += tmp;
    }, VERIFY(0, 1, A[i]+tmp, (1+trial)*2+1+2));
  }

  //
  // Test: private clause on target parallel region.
  // (host copies of p and q must be untouched by the region)
  //
  ZERO(A);
  {
    double p[1], q = 99;
    p[0] = 1;
    A[0] = p[0];
    TESTD("omp target parallel private(p, q) num_threads(1)", {
      p[0] = 2;
      q = 0;
      A[0] += p[0];
    }, VERIFY(0, 1, A[i]+p[0]+q, (1+trial)*2+2+99));
  }

  //
  // Test: firstprivate clause on parallel region.
  // (region sees initial values; host copies stay unchanged)
  //
  ZERO(A);
  {
    double p[1], q = 99;
    p[0] = 5;
    A[0] = p[0];
    TESTD("omp target parallel firstprivate(p, q) num_threads(1)", {
      A[0] += p[0] + q;
      p[0] = 2;
      q = 0;
    }, VERIFY(0, 1, A[i]+p[0]+q, (1+trial)*(99+5)+5+5+99));
  }

#if 0
  INCORRECT CODEGEN
  //
  // Test: shared clause on parallel region.
  //
  ZERO(A);
  {
    double p[1], q;
    p[0] = 5;
    A[0] = p[0];
    q = -7;
    TESTD("omp target parallel num_threads(2) shared(p, q)", {
      if (omp_get_thread_num() == 1) {
        p[0] = 99;
        q = 2;
      }
      _Pragma("omp barrier")
      if (omp_get_thread_num() == 0)
        A[0] += p[0] + q;
      _Pragma("omp barrier")
      p[0] = 1;
      q = -100;
    }, VERIFY(0, 1, A[i]+p[0]+q, (1+trial)*(99+2)+5+-7));
  }
#endif

  return 0;
}
GB_unop__abs_int64_int64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__abs_int64_int64) // op(A') function: GB (_unop_tran__abs_int64_int64) // C type: int64_t // A type: int64_t // cast: int64_t cij = aij // unaryop: cij = GB_IABS (aij) #define GB_ATYPE \ int64_t #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IABS (x) ; // casting #define GB_CAST(z, aij) \ int64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int64_t z = aij ; \ Cx [pC] = GB_IABS (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__abs_int64_int64) ( int64_t *Cx, // Cx and Ax may be aliased const int64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t 
p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int64_t aij = Ax [p] ; int64_t z = aij ; Cx [p] = GB_IABS (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int64_t aij = Ax [p] ; int64_t z = aij ; Cx [p] = GB_IABS (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__abs_int64_int64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
quantum.h
#pragma once #include <global.h> #include <randomutil.h> #include <mathutil.h> #include <testutil.h> #include <iomanip> #include "state_manipulator.h" ns_easyquantum #define TO_STRING_CASE(gatetype) case GateType::gatetype: #define TO_STRING_DAG if (dag) {ss<<"dag ";} /* gate type definition */ enum class GateType { null, CNOT, RX, RY, RZ, H, DIAGONAL, I, }; constexpr double quantum_default_threshold = 1e-8; // forward decl template<typename T> class Circuit; template<typename T> class Gate; template<typename qtraits = default_qtraits> class Gate { public: using fp_t = typename qtraits::value_t; using qid = typename qtraits::qidx_t; std::vector<qid> qubits; fp_t argument; GateType type = GateType::null; std::vector<qid> controllers; bool dag = false; Gate() {} Gate(GateType type_, std::vector<qid> qs, fp_t arg = 0) : type(type_), qubits(qs.begin(), qs.end()), argument(arg) {} Gate(const Gate<qtraits>& g) { qubits.assign(g.qubits.begin(), g.qubits.end()); argument = g.argument; type = g.type; controllers.assign(g.controllers.begin(), g.controllers.end()); dag = g.dag; } Gate<qtraits> valuecopy() const { Gate<qtraits> g; g.qubits = qubits; g.argument = argument; g.type = type; g.controllers = controllers; g.dag = dag; return g; } void dagger() { dag = !dag; } Gate<qtraits> get_dagger() const { Gate<qtraits> g(*this); g.dag = !g.dag; return g; } void control(std::vector<qid> new_controllers) { controllers.insert(g.controllers.end(), new_controllers.begin(), new_controllers.end()); } Gate<qtraits> get_control(std::vector<qid> new_controllers) const { Gate<qtraits> g(*this); g.controllers.insert(g.controllers.end(), new_controllers.begin(), new_controllers.end()); return g; } static std::string qasm_style_qubit(qid i) { std::stringstream ss; ss << "q[" << i << "]"; return ss.str(); } static std::string controller_to_string(std::vector<qid> controllers) { if (controllers.size() == 0) return std::string(); std::stringstream ss; ss << "ctrl:{" << vec2str(controllers) << 
"}"; return ss.str(); } static std::string params_to_string(std::vector<fp_t> args) { if (args.size() == 0) return std::string(); std::stringstream ss; ss << std::showpoint; ss << "(" << vec2str(args, ", ", SHOWPOINT) << ")"; return ss.str(); } std::string to_string() const { std::stringstream ss; switch (type) { TO_STRING_CASE(CNOT) { ss << "CNOT "; ss << qubits[0] << ", " << qubits[1] << " "; ss << controller_to_string(controllers); return ss.str(); } TO_STRING_CASE(RX) { ss << "RX "; TO_STRING_DAG; ss << qubits[0] << " "; ss << params_to_string({ argument }); ss << " "; ss << controller_to_string(controllers); return ss.str(); } TO_STRING_CASE(RY) { ss << "RY "; TO_STRING_DAG; ss << qubits[0] << " "; ss << params_to_string({ argument }); ss << " "; ss << controller_to_string(controllers); return ss.str(); } TO_STRING_CASE(RZ) { ss << "RZ "; TO_STRING_DAG; ss << qubits[0] << " "; ss << params_to_string({ argument }); ss << " "; ss << controller_to_string(controllers); return ss.str(); } TO_STRING_CASE(H) { ss << "H "; ss << qubits[0] << " "; ss << controller_to_string(controllers); return ss.str(); } TO_STRING_CASE(DIAGONAL) { ss << "Diagonal "; ss << 0 << ";"; return ss.str(); } TO_STRING_CASE(I) { ss << "I "; ss << qubits[0] << " "; ss << params_to_string({ argument }); return ss.str(); } default: { assert(false, "Bad Type."); } } } std::string to_qasm() const { assert(controllers.size() == 0); assert(dag == false); std::stringstream ss; switch (type) { TO_STRING_CASE(CNOT) { ss << "cx "; ss << qasm_style_qubit(qubits[0]) << ", " << qasm_style_qubit(qubits[1]); return ss.str(); } TO_STRING_CASE(RX) { ss << "rx "; ss << params_to_string({ argument }) << " "; ss << qasm_style_qubit(qubits[0]); return ss.str(); } TO_STRING_CASE(RY) { ss << "ry "; ss << params_to_string({ argument }) << " "; ss << qasm_style_qubit(qubits[0]); return ss.str(); } TO_STRING_CASE(RZ) { ss << "rz "; ss << params_to_string({ argument }) << " "; ss << qasm_style_qubit(qubits[0]); return 
ss.str(); } TO_STRING_CASE(H) { ss << "h "; ss << qasm_style_qubit(qubits[0]); return ss.str(); } TO_STRING_CASE(DIAGONAL) { assert(false, "Bad Type."); } TO_STRING_CASE(I) { ss << "Id "; ss << qubits[0] << " "; return ss.str(); } default: assert(false, "Bad Type."); } return ""; } ~Gate() { } }; template<typename qtraits = default_qtraits> Gate<qtraits> CNOT(typename qtraits::qidx_t controller, typename qtraits::qidx_t target) { return Gate(GateType::CNOT, { controller, target }); } template<typename qtraits = default_qtraits> Gate<qtraits> RX(typename qtraits::qidx_t q, typename qtraits::value_t arg = 0) { return Gate<qtraits>(GateType::RX, { q }, arg); } template<typename qtraits = default_qtraits> Gate<qtraits> RY(typename qtraits::qidx_t q, typename qtraits::value_t arg) { return Gate(GateType::RY, { q }, arg); } template<typename qtraits = default_qtraits> Gate<qtraits> RZ(typename qtraits::qidx_t q, typename qtraits::value_t arg) { return Gate(GateType::RZ, { q }, arg); } template<typename qtraits = default_qtraits> Gate<qtraits> H(typename qtraits::qidx_t q) { return Gate(GateType::H, { q }); } template<typename qtraits = default_qtraits> Gate<qtraits> I(typename qtraits::qidx_t q, size_t time) { using fp_t = typename qtraits::value_t; return Gate(GateType::I, { q }, (fp_t)time); } class flatten {}; template<typename qtraits = default_qtraits> class Circuit { public: using fp_t = typename qtraits::value_t; using qid = typename qtraits::qidx_t; qid max_qubit = 0; std::list<Gate<qtraits>> gates; Circuit() {} void _circuit_append(const Circuit<qtraits>& c) { if (c.max_qubit > max_qubit) max_qubit = c.max_qubit; for (auto gate : c.gates) { gates.push_back(gate); } } Circuit(const Circuit<qtraits>& c) { _circuit_append(c); } Circuit valuecopy() const { Circuit c; c.max_qubit = max_qubit; for (const auto& g : gates) { c.gates.push_back(g.valuecopy()); } return c; } Circuit& operator-(Gate<qtraits> g) { if (g.type != GateType::DIAGONAL) { for (auto qubit : 
g.qubits) { if (max_qubit - 1 < qubit) max_qubit = qubit + 1; } } gates.push_back(g); return *this; } Circuit& operator-(flatten flatten) { for (auto &gate : gates) { if (gate.dag) { gate.dag = false; switch (gate.type) { case GateType::RX: case GateType::RY: case GateType::RZ: gate.argument *= -1; break; default: break; } } } return *this; } Circuit& operator-(const Circuit<qtraits> &c) { _circuit_append(c); return *this; } Circuit& dagger() { gates.reverse(); for (auto& gate : gates) { gate.dagger(); } return newc; } Circuit control(std::vector<qid> controllers) { Circuit newc(*this); for (auto& gate : newc.gates) { gate.control(controllers); } return newc; } std::string to_string() const { std::stringstream ss_stat; std::stringstream ss_header; std::stringstream ss_groupdefs; /* header*/ ss_header << "qubit : " << max_qubit << ";" << std::endl; /* stat */ for (auto gate : gates) { ss_stat << gate.to_string() << ";" << std::endl; } return ss_header.str() + "\n" + ss_stat.str() + "\n"; } std::string to_qasm() const { std::stringstream ss; ss << "OPENQASM 2.0;" << std::endl << "include \"qelib1.inc\";" << std::endl << "qreg q[" << max_qubit << "];" << std::endl; for (const auto &gate : gates) { ss << gate.to_qasm() << ";" << std::endl; } return ss.str(); } ~Circuit() {} }; void get_damping_kraus_op( double* k0_real, double* k0_imag, double* k1_real, double* k1_imag, const int T1, const int T_gate); void get_dephasing_kraus_op( double* k0_real, double* k0_imag, double* k1_real, double* k1_imag, const int T1, const int T2, const int T_gate); template<typename qtraits = default_qtraits> class RealCircuit { public: using fp_t = typename qtraits::value_t; using qid = typename qtraits::qidx_t; struct Kraus { fp_t *kraus0_real; fp_t *kraus0_imag; fp_t *kraus1_real; fp_t *kraus1_imag; }; std::vector<Kraus> one_qubit_damping_kraus; std::vector<Kraus> one_qubit_dephasing_kraus; std::vector<Kraus> two_qubit_damping_kraus; std::vector<Kraus> two_qubit_dephasing_kraus; 
Circuit<qtraits> c; Circuit<qtraits> real_circuit; int one_qubit_gate_time; int two_qubit_gate_time; std::vector<int> T1; std::vector<int> T2; std::vector<fp_t> one_qubit_gate_error; std::vector<std::vector<fp_t>> two_qubit_gate_error; std::vector<std::vector<GateType>> clock_cycle; bool pre_gen_used = false; static struct Config { int one_qubit_gate_time = 1; int two_qubit_gate_time = 2; int global_T1 = 1000; int global_T2 = 1000; fp_t one_qubit_gate_error = 0.01; fp_t two_qubit_gate_error = 0.02; } default_config; RealCircuit() {} void assign_circuit(const Circuit<qtraits>& c_) { one_qubit_gate_error.assign(c_.max_qubit, default_config.one_qubit_gate_error); T1.assign(c_.max_qubit, default_config.global_T1); T2.assign(c_.max_qubit, default_config.global_T2); one_qubit_gate_time = default_config.one_qubit_gate_time; two_qubit_gate_time = default_config.two_qubit_gate_time; two_qubit_gate_error.resize(c_.max_qubit); for (auto& e : two_qubit_gate_error) { e.resize(c_.max_qubit, default_config.two_qubit_gate_error); } clock_cycle.resize(c_.max_qubit); } explicit RealCircuit(const Circuit<qtraits>& c_) : c(c_.valuecopy()) { assign_circuit(c_); } RealCircuit(const RealCircuit<qtraits>&) = delete; //RealCircuit(RealCircuit<qtraits>&& rc) { // c = std::move(rc.c); // real_circuit = std::move(rc.real_circuit); // one_qubit_gate_time = rc.one_qubit_gate_time; // two_qubit_gate_time = rc.two_qubit_gate_time; // T1 = std::move(rc.T1); // T2 = std::move(rc.T2); // one_qubit_gate_error = std::move(rc.one_qubit_gate_error); // two_qubit_gate_error = std::move(rc.two_qubit_gate_error); // clock_cycle = std::move(rc.clock_cycle); // pre_gen_used = rc.pre_gen_used; //} RealCircuit<qtraits>&& valuecopy() { RealCircuit<qtraits> rc; rc.c = c.valuecopy(); rc.real_circuit = real_circuit.valuecopy(); rc.one_qubit_gate_time = one_qubit_gate_time; rc.two_qubit_gate_time = two_qubit_gate_time; rc.T1 = T1; rc.T2 = T2; rc.one_qubit_gate_error = one_qubit_gate_error; rc.two_qubit_gate_error = 
two_qubit_gate_error; rc.clock_cycle = clock_cycle; rc.pre_gen_used = pre_gen_used; return rc; } void analyze_clock() { for (const auto &gate : c.gates) { assert(gate.controllers.size() == 0, "No controller is allowed."); assert(gate.dag == false, "Flatten first."); switch (gate.type) { case GateType::RX: case GateType::RY: case GateType::RZ: case GateType::H: add_single_qubit(gate, gate.qubits[0]); break; case GateType::CNOT: add_two_qubit(gate, gate.qubits[0], gate.qubits[1]); break; case GateType::I: add_I(gate, gate.qubits[0]); break; default: assert(false, "Not supported gate type."); } } } void generate_kraus_op() { qid qsize = c.max_qubit; /* one_qubit_damping_kraus */ for (qid i = 0; i < qsize; ++i) { fp_t *kraus0_real = (fp_t*)malloc(4 * sizeof(fp_t)); fp_t *kraus0_imag = (fp_t*)malloc(4 * sizeof(fp_t)); fp_t *kraus1_real = (fp_t*)malloc(4 * sizeof(fp_t)); fp_t *kraus1_imag = (fp_t*)malloc(4 * sizeof(fp_t)); get_damping_kraus_op(kraus0_real, kraus0_imag, kraus1_real, kraus1_imag, T1[i], one_qubit_gate_time); Kraus one_damping; one_damping.kraus0_real = kraus0_real; one_damping.kraus0_imag = kraus0_imag; one_damping.kraus1_real = kraus1_real; one_damping.kraus1_imag = kraus1_imag; one_qubit_damping_kraus.push_back(one_damping); } /* one_qubit_dephasing_kraus */ for (qid i = 0; i < qsize; ++i) { fp_t *kraus0_real = (fp_t*)malloc(4 * sizeof(fp_t)); fp_t *kraus0_imag = (fp_t*)malloc(4 * sizeof(fp_t)); fp_t *kraus1_real = (fp_t*)malloc(4 * sizeof(fp_t)); fp_t *kraus1_imag = (fp_t*)malloc(4 * sizeof(fp_t)); get_dephasing_kraus_op(kraus0_real, kraus0_imag, kraus1_real, kraus1_imag, T1[i], T2[i], one_qubit_gate_time); Kraus one_dephasing; one_dephasing.kraus0_real = kraus0_real; one_dephasing.kraus0_imag = kraus0_imag; one_dephasing.kraus1_real = kraus1_real; one_dephasing.kraus1_imag = kraus1_imag; one_qubit_dephasing_kraus.push_back(one_dephasing); } /* two_qubit_damping_kraus */ for (qid i = 0; i < qsize; ++i) { fp_t *kraus0_real = (fp_t*)malloc(4 * 
sizeof(fp_t)); fp_t *kraus0_imag = (fp_t*)malloc(4 * sizeof(fp_t)); fp_t *kraus1_real = (fp_t*)malloc(4 * sizeof(fp_t)); fp_t *kraus1_imag = (fp_t*)malloc(4 * sizeof(fp_t)); get_damping_kraus_op(kraus0_real, kraus0_imag, kraus1_real, kraus1_imag, T1[i], two_qubit_gate_time); Kraus two_damping; two_damping.kraus0_real = kraus0_real; two_damping.kraus0_imag = kraus0_imag; two_damping.kraus1_real = kraus1_real; two_damping.kraus1_imag = kraus1_imag; two_qubit_damping_kraus.push_back(two_damping); } /* two_qubit_dephasing_kraus */ for (qid i = 0; i < qsize; ++i) { fp_t *kraus0_real = (fp_t*)malloc(4 * sizeof(fp_t)); fp_t *kraus0_imag = (fp_t*)malloc(4 * sizeof(fp_t)); fp_t *kraus1_real = (fp_t*)malloc(4 * sizeof(fp_t)); fp_t *kraus1_imag = (fp_t*)malloc(4 * sizeof(fp_t)); get_dephasing_kraus_op(kraus0_real, kraus0_imag, kraus1_real, kraus1_imag, T1[i], T2[i], two_qubit_gate_time); Kraus two_dephasing; two_dephasing.kraus0_real = kraus0_real; two_dephasing.kraus0_imag = kraus0_imag; two_dephasing.kraus1_real = kraus1_real; two_dephasing.kraus1_imag = kraus1_imag; two_qubit_dephasing_kraus.push_back(two_dephasing); } } void add_single_qubit(Gate<qtraits> g, qid q) { clock_cycle[q].push_back(g.type); for (int i = 1; i < one_qubit_gate_time; ++i) clock_cycle[q].push_back(GateType(-1)); real_circuit - g; } void add_two_qubit(Gate<qtraits> g, qid q1, qid q2) { size_t front1 = clock_cycle[q1].size(); size_t front2 = clock_cycle[q2].size(); if (front1 > front2) { clock_cycle[q2].push_back(GateType::I); for (int i = 1; i < front1 - front2; ++i) clock_cycle[q2].push_back(GateType(-1)); real_circuit - I(q2, front1 - front2); } else if (front2 > front1) { clock_cycle[q1].push_back(GateType::I); for (int i = 1; i < front2 - front1; ++i) clock_cycle[q1].push_back(GateType(-1)); real_circuit - I(q1, front2 - front1); } clock_cycle[q1].push_back(g.type); clock_cycle[q2].push_back(g.type); for (int i = 1; i < two_qubit_gate_time; ++i) { clock_cycle[q1].push_back(GateType(-1)); 
clock_cycle[q2].push_back(GateType(-1)); } real_circuit - g; } void add_I(Gate<qtraits> g, qid q) { clock_cycle[q].push_back(g.type); for (int i = 1; i < int(g.argument); ++i) clock_cycle[q].push_back(GateType(-1)); real_circuit - g; } void ready() { analyze_clock(); if (use_pre_gen && !pre_gen_used) { pre_gen_used = true; one_qubit_damping_kraus.assign(pre_gen_1qdam.begin(), pre_gen_1qdam.end()); one_qubit_dephasing_kraus.assign(pre_gen_1qdep.begin(), pre_gen_1qdep.end()); two_qubit_damping_kraus.assign(pre_gen_2qdam.begin(), pre_gen_2qdam.end()); two_qubit_dephasing_kraus.assign(pre_gen_2qdep.begin(), pre_gen_2qdep.end()); } else { generate_kraus_op(); } } std::string to_string() const { std::stringstream ss; for (auto cc : clock_cycle) { for (auto p : cc) { if ((int)p == -1) ss << std::setw(3) << " "; else ss << std::setw(3) << (int)p; } ss << std::endl; } return ss.str(); } ~RealCircuit() { if (pre_gen_used) return; for (auto p : one_qubit_damping_kraus) { free(p.kraus0_real); free(p.kraus0_imag); free(p.kraus1_real); free(p.kraus1_imag); } for (auto p : one_qubit_dephasing_kraus) { free(p.kraus0_real); free(p.kraus0_imag); free(p.kraus1_real); free(p.kraus1_imag); } for (auto p : two_qubit_damping_kraus) { free(p.kraus0_real); free(p.kraus0_imag); free(p.kraus1_real); free(p.kraus1_imag); } for (auto p : two_qubit_dephasing_kraus) { free(p.kraus0_real); free(p.kraus0_imag); free(p.kraus1_real); free(p.kraus1_imag); } } static std::vector<Kraus> pre_gen_1qdam; static std::vector<Kraus> pre_gen_1qdep; static std::vector<Kraus> pre_gen_2qdam; static std::vector<Kraus> pre_gen_2qdep; static void pre_gen_kraus(size_t qsize) { use_pre_gen = true; auto& T1 = decltype(*this)::default_config.global_T1; auto& T2 = decltype(*this)::default_config.global_T2; auto& one_qubit_gate_time = decltype(*this)::default_config.one_qubit_gate_time; auto& two_qubit_gate_time = decltype(*this)::default_config.two_qubit_gate_time; /* one_qubit_damping_kraus */ for (qid i = 0; i < 
qsize; ++i) { fp_t* kraus0_real = (fp_t*)malloc(4 * sizeof(fp_t)); fp_t* kraus0_imag = (fp_t*)malloc(4 * sizeof(fp_t)); fp_t* kraus1_real = (fp_t*)malloc(4 * sizeof(fp_t)); fp_t* kraus1_imag = (fp_t*)malloc(4 * sizeof(fp_t)); get_damping_kraus_op(kraus0_real, kraus0_imag, kraus1_real, kraus1_imag, T1, one_qubit_gate_time); Kraus one_damping; one_damping.kraus0_real = kraus0_real; one_damping.kraus0_imag = kraus0_imag; one_damping.kraus1_real = kraus1_real; one_damping.kraus1_imag = kraus1_imag; pre_gen_1qdam.push_back(one_damping); } /* one_qubit_dephasing_kraus */ for (qid i = 0; i < qsize; ++i) { fp_t* kraus0_real = (fp_t*)malloc(4 * sizeof(fp_t)); fp_t* kraus0_imag = (fp_t*)malloc(4 * sizeof(fp_t)); fp_t* kraus1_real = (fp_t*)malloc(4 * sizeof(fp_t)); fp_t* kraus1_imag = (fp_t*)malloc(4 * sizeof(fp_t)); get_dephasing_kraus_op(kraus0_real, kraus0_imag, kraus1_real, kraus1_imag, T1, T2, one_qubit_gate_time); Kraus one_dephasing; one_dephasing.kraus0_real = kraus0_real; one_dephasing.kraus0_imag = kraus0_imag; one_dephasing.kraus1_real = kraus1_real; one_dephasing.kraus1_imag = kraus1_imag; pre_gen_1qdep.push_back(one_dephasing); } /* two_qubit_damping_kraus */ for (qid i = 0; i < qsize; ++i) { fp_t* kraus0_real = (fp_t*)malloc(4 * sizeof(fp_t)); fp_t* kraus0_imag = (fp_t*)malloc(4 * sizeof(fp_t)); fp_t* kraus1_real = (fp_t*)malloc(4 * sizeof(fp_t)); fp_t* kraus1_imag = (fp_t*)malloc(4 * sizeof(fp_t)); get_damping_kraus_op(kraus0_real, kraus0_imag, kraus1_real, kraus1_imag, T1, two_qubit_gate_time); Kraus two_damping; two_damping.kraus0_real = kraus0_real; two_damping.kraus0_imag = kraus0_imag; two_damping.kraus1_real = kraus1_real; two_damping.kraus1_imag = kraus1_imag; pre_gen_2qdam.push_back(two_damping); } /* two_qubit_dephasing_kraus */ for (qid i = 0; i < qsize; ++i) { fp_t* kraus0_real = (fp_t*)malloc(4 * sizeof(fp_t)); fp_t* kraus0_imag = (fp_t*)malloc(4 * sizeof(fp_t)); fp_t* kraus1_real = (fp_t*)malloc(4 * sizeof(fp_t)); fp_t* kraus1_imag = 
(fp_t*)malloc(4 * sizeof(fp_t)); get_dephasing_kraus_op(kraus0_real, kraus0_imag, kraus1_real, kraus1_imag, T1, T2, two_qubit_gate_time); Kraus two_dephasing; two_dephasing.kraus0_real = kraus0_real; two_dephasing.kraus0_imag = kraus0_imag; two_dephasing.kraus1_real = kraus1_real; two_dephasing.kraus1_imag = kraus1_imag; pre_gen_2qdep.push_back(two_dephasing); } } static void set_use_pre_gen(bool use) { use_pre_gen = use; } static bool use_pre_gen; }; template<typename qtraits> bool RealCircuit<qtraits>::use_pre_gen = false; template<typename qtraits> typename RealCircuit<qtraits>::Config RealCircuit<qtraits>::default_config; template<typename qtraits> std::vector<typename RealCircuit<qtraits>::Kraus> RealCircuit<qtraits>::pre_gen_1qdam; template<typename qtraits> std::vector<typename RealCircuit<qtraits>::Kraus> RealCircuit<qtraits>::pre_gen_1qdep; template<typename qtraits> std::vector<typename RealCircuit<qtraits>::Kraus> RealCircuit<qtraits>::pre_gen_2qdam; template<typename qtraits> std::vector<typename RealCircuit<qtraits>::Kraus> RealCircuit<qtraits>::pre_gen_2qdep; template<typename qtraits_t = default_qtraits> struct result_analyzer { using qtraits = qtraits_t; using qid = typename qtraits::qidx_t; using fp_t = typename qtraits::value_t; using uint_t = typename qtraits::idx_t; static size_t count_zero(uint_t* result, size_t shots) { size_t zeros = 0; for (size_t i = 0; i < shots; ++i) { if (result[i] == 0) zeros++; } return zeros; } static size_t count_n(uint_t* result, size_t shots, uint_t n) { size_t count = 0; for (size_t i = 0; i < shots; ++i) { if (result[i] == n) count++; } return count; } static std::vector<qid> generate_qubit_idxes(qid max_qubit) { std::vector<qid> q(max_qubit, 0); for (qid i = 0; i < max_qubit; ++i) { q[i] = i; } return q; } static std::pair<uint_t, size_t> find_max(uint_t* result, size_t shots) { std::map<uint_t, size_t> max_counter; std::pair<uint_t, size_t> max = { 0,0 }; for (size_t i = 0; i < shots; ++i) { if 
(max_counter.find(result[i]) == max_counter.end()) { max_counter[result[i]] = 0; } max_counter[result[i]]++; size_t &p = max_counter[result[i]]; if (p > max.second) { max = { result[i], p }; } } return max; } static fp_t get_expectation_from_amplitude(fp_t *real, fp_t *imag, size_t size, fp_t *diag_h) { fp_t expectation = 0; for (size_t i = 0; i < size; ++i) { fp_t prob = real[i] * real[i] + imag[i] * imag[i]; expectation += prob * diag_h[i]; } return expectation; } static double get_expectation(const uint_t *res, const uint_t size, const std::vector<double> &diag) { double exp = 0; for (uint_t i = 0; i < size; ++i) { exp += diag[res[i]]; } return exp / size; } static std::vector<fp_t> get_meas_probs(const uint_t *res, const uint_t shots, const uint_t size) { std::vector<fp_t> probs(size, 0); for (uint_t i = 0; i < shots; ++i) { probs[res[i]] += (1.0 / shots); } return probs; } static std::vector<uint_t> get_meas_count(const uint_t *res, const uint_t shots, const uint_t size) { std::vector<uint_t> count(size, 0); for (uint_t i = 0; i < shots; ++i) { count[res[i]]++; } return count; } static bool check_error(fp_t *prob1, fp_t *prob2, uint_t size, fp_t error_bound) { for (uint_t i = 0; i < size; ++i) { if (abs(prob1[i] - prob2[i]) > error_bound) { return false; } } return true; } static fp_t get_norm_2(const fp_t *prob1, const fp_t *prob2, const uint_t size) { fp_t norm2 = 0; for (uint_t i = 0; i < size; ++i) { norm2 += (prob1[i] - prob2[i])*(prob1[i] - prob2[i]); } norm2 = sqrt(norm2); return norm2; } static fp_t get_norm_inf(const fp_t *prob1, const fp_t *prob2, const uint_t size) { fp_t norminf = 0; for (uint_t i = 0; i < size; ++i) { fp_t ninf = abs(prob1[i] - prob2[i]); if (ninf > norminf) norminf = ninf; } return norminf; } }; template<typename qtraits = default_qtraits> struct simulator_v1 { using fp_t = typename qtraits::value_t; using uint_t = typename qtraits::idx_t; using qid = typename qtraits::qidx_t; static void run_circuit(const Circuit<qtraits> &c, 
fp_t *real, fp_t *imag, uint_t size) { const qid &qn = c.max_qubit; // auto &groupdefs = c.groupdefs; size_t groupdef_iter = 0; for (const Gate<qtraits> &gate : c.gates) { switch (gate.type) { case GateType::RX: state_manipulator<qtraits>::rx(real, imag, size, gate.argument, gate.qubits[0]); break; case GateType::RY: state_manipulator<qtraits>::ry(real, imag, size, gate.argument, gate.qubits[0]); break; case GateType::RZ: state_manipulator<qtraits>::rz(real, imag, size, gate.argument, gate.qubits[0]); break; case GateType::H: state_manipulator<qtraits>::h(real, imag, size, gate.qubits[0]); break; case GateType::CNOT: state_manipulator<qtraits>::cnot(real, imag, size, gate.qubits[0], gate.qubits[1]); break; case GateType::I: break; default: assert(false, "Bad Type."); } } } static uint_t *simulate_N(size_t n, const Circuit<qtraits> &c, RandomEngine *rng) { // first allocate n times memory uint_t size = pow2(c.max_qubit); fp_t *real_n = (fp_t*)malloc(n * size * sizeof(fp_t)); fp_t *imag_n = (fp_t*)malloc(n * size * sizeof(fp_t)); memset(real_n, 0, n * size * sizeof(fp_t)); memset(imag_n, 0, n * size * sizeof(fp_t)); uint_t *result = (uint_t*)malloc(n * sizeof(uint_t)); memset(result, 0, sizeof(uint_t)*n); for (int i = 0; i < n; ++i) { fp_t* real = &real_n[i * size]; fp_t* imag = &imag_n[i * size]; real[0] = 1; run_circuit(c, real, imag, size); fp_t randnum = (fp_t)(*rng)(); fp_t total_prob = 0; for (uint_t j = 0; j < size; ++j) { fp_t pi = real[j] * real[j] + imag[j] * imag[j]; total_prob += pi; if (randnum < total_prob) { result[i] = j; break; } } } free(real_n); free(imag_n); return result; } /* This version will assume the real, imag is refreshed to zero state. and result has enough space. 
*/ static void simulate_N(size_t n, const Circuit<qtraits> &c, RandomEngine** rng, fp_t *real_n, fp_t *imag_n, uint_t *result) { uint_t size = pow2(c.max_qubit); memset(real_n, 0, n * size * sizeof(fp_t)); memset(imag_n, 0, n * size * sizeof(fp_t)); for (int i = 0; i < n; ++i) { fp_t* real = &real_n[i * size]; fp_t* imag = &imag_n[i * size]; real[0] = 1; run_circuit(c, real, imag, size); fp_t randnum = (fp_t)(*rng[i])(); fp_t total_prob = 0; for (uint_t j = 0; j < size; ++j) { fp_t pi = real[j] * real[j] + imag[j] * imag[j]; total_prob += pi; if (randnum < total_prob) { result[i] = j; break; } } } } static uint_t* simulate_N_threads(size_t n, const Circuit<qtraits>& c, RandomEngine** rng) { // first allocate n times memory uint_t size = pow2(c.max_qubit); fp_t* real_n = (fp_t*)malloc(n * size * sizeof(fp_t)); fp_t* imag_n = (fp_t*)malloc(n * size * sizeof(fp_t)); memset(real_n, 0, n * size * sizeof(fp_t)); memset(imag_n, 0, n * size * sizeof(fp_t)); uint_t* result = (uint_t*)malloc(n * sizeof(uint_t)); memset(result, 0, sizeof(uint_t) * n); #pragma omp parallel for for (int i = 0; i < n; ++i) { fp_t* real = &real_n[i * size]; fp_t* imag = &imag_n[i * size]; real[0] = 1; run_circuit(c, real, imag, size); fp_t randnum = (fp_t)(*rng[i])(); fp_t total_prob = 0; for (uint_t j = 0; j < size; ++j) { fp_t pi = real[j] * real[j] + imag[j] * imag[j]; total_prob += pi; if (randnum < total_prob) { result[i] = j; break; } } } free(real_n); free(imag_n); return result; } static void simulate_N_threads(size_t n, const Circuit<qtraits> &c, RandomEngine** rng, fp_t* real_n, fp_t* imag_n, uint_t* result) { uint_t size = pow2(c.max_qubit); memset(real_n, 0, n * size * sizeof(fp_t)); memset(imag_n, 0, n * size * sizeof(fp_t)); #pragma omp parallel for for (int i = 0; i < n; ++i) { fp_t* real = &real_n[i * size]; fp_t* imag = &imag_n[i * size]; real[0] = 1; run_circuit(c, real, imag, size); fp_t randnum = (fp_t)(*rng[i])(); fp_t total_prob = 0; for (uint_t j = 0; j < size; ++j) { fp_t 
pi = real[j] * real[j] + imag[j] * imag[j]; total_prob += pi; if (randnum < total_prob) { result[i] = j; break; } } } } static void run_real_circuit(const RealCircuit<qtraits> &rc, fp_t *real, fp_t *imag, uint_t size, RandomEngine* rng) { auto &p1 = rc.one_qubit_gate_error; auto &p2 = rc.two_qubit_gate_error; auto &T1 = rc.T1; auto &T2 = rc.T2; auto &one_qubit = rc.one_qubit_gate_time; auto &two_qubit = rc.two_qubit_gate_time; const Circuit<qtraits>& c = rc.real_circuit; const qid &qn = c.max_qubit; /*debug_info_s(Circuit:); debug_print_circuit;*/ for (auto &gate : c.gates) { // incoherent error // for first qubit fp_t *damp0_real = nullptr, *damp0_imag = nullptr, *damp1_real = nullptr, *damp1_imag = nullptr; fp_t *dephase0_real = nullptr, *dephase0_imag = nullptr, *dephase1_real = nullptr, *dephase1_imag = nullptr; // for second qubit fp_t *damp0_real2 = nullptr, *damp0_imag2 = nullptr, *damp1_real2 = nullptr, *damp1_imag2 = nullptr; fp_t *dephase0_real2 = nullptr, *dephase0_imag2 = nullptr, *dephase1_real2 = nullptr, *dephase1_imag2 = nullptr; if (gate.type == GateType::I) { damp0_real = (fp_t*)malloc(4 * sizeof(fp_t)); damp0_imag = (fp_t*)malloc(4 * sizeof(fp_t)); damp1_real = (fp_t*)malloc(4 * sizeof(fp_t)); damp1_imag = (fp_t*)malloc(4 * sizeof(fp_t)); dephase0_real = (fp_t*)malloc(4 * sizeof(fp_t)); dephase0_imag = (fp_t*)malloc(4 * sizeof(fp_t)); dephase1_real = (fp_t*)malloc(4 * sizeof(fp_t)); dephase1_imag = (fp_t*)malloc(4 * sizeof(fp_t)); get_damping_kraus_op( damp0_real, damp0_imag, damp1_real, damp1_imag, T1[gate.qubits[0]], (int)gate.argument); get_dephasing_kraus_op( dephase0_real, dephase0_imag, dephase1_real, dephase1_imag, T1[gate.qubits[0]], T2[gate.qubits[0]], (int)gate.argument); } else if (gate.qubits.size() == 1) { auto &damp = rc.one_qubit_damping_kraus[gate.qubits[0]]; damp0_real = damp.kraus0_real; damp0_imag = damp.kraus0_imag; damp1_real = damp.kraus1_real; damp1_imag = damp.kraus1_imag; auto &dephase = 
rc.one_qubit_dephasing_kraus[gate.qubits[0]]; dephase0_real = dephase.kraus0_real; dephase0_imag = dephase.kraus0_imag; dephase1_real = dephase.kraus1_real; dephase1_imag = dephase.kraus1_imag; } else if (gate.qubits.size() == 2) { auto &damp = rc.two_qubit_damping_kraus[gate.qubits[0]]; damp0_real = damp.kraus0_real; damp0_imag = damp.kraus0_imag; damp1_real = damp.kraus1_real; damp1_imag = damp.kraus1_imag; auto &dephase = rc.two_qubit_dephasing_kraus[gate.qubits[0]]; dephase0_real = dephase.kraus0_real; dephase0_imag = dephase.kraus0_imag; dephase1_real = dephase.kraus1_real; dephase1_imag = dephase.kraus1_imag; auto &damp2 = rc.two_qubit_damping_kraus[gate.qubits[1]]; damp0_real2 = damp.kraus0_real; damp0_imag2 = damp.kraus0_imag; damp1_real2 = damp.kraus1_real; damp1_imag2 = damp.kraus1_imag; auto &dephase2 = rc.two_qubit_dephasing_kraus[gate.qubits[1]]; dephase0_real2 = dephase.kraus0_real; dephase0_imag2 = dephase.kraus0_imag; dephase1_real2 = dephase.kraus1_real; dephase1_imag2 = dephase.kraus1_imag; } else assert(false, "Bad Gate Number."); double r0 = (*rng)(); double r1 = (*rng)(); /* debug_info_s(Before Kraus); debug_output_state; debug_display(r0); debug_display(r1);*/ state_manipulator<qtraits>::perform_kraus(real, imag, size, damp0_real, damp0_imag, damp1_real, damp1_imag, gate.qubits[0], r0); state_manipulator<qtraits>::perform_kraus(real, imag, size, dephase0_real, dephase0_imag, dephase1_real, dephase1_imag, gate.qubits[0], r1); if (gate.type == GateType::I) { free(damp0_real); free(damp0_imag); free(damp1_real); free(damp1_imag); free(dephase0_real); free(dephase0_imag); free(dephase1_real); free(dephase1_imag); } if (gate.qubits.size() == 2) { double p0 = (*rng)(); double p1 = (*rng)(); state_manipulator<qtraits>::perform_kraus(real, imag, size, damp0_real2, damp0_imag2, damp1_real2, damp1_imag2, gate.qubits[1], p0); state_manipulator<qtraits>::perform_kraus(real, imag, size, dephase0_real2, dephase0_imag2, dephase1_real2, dephase1_imag2, 
gate.qubits[1], p1); } // coherent error double p_bad_gate = (*rng)(); if (gate.type != GateType::I) { if (gate.qubits.size() == 1) { if (p_bad_gate < p1[gate.qubits[0]]) { goto SkipGate; } } else if (gate.qubits.size() == 2) { if (p_bad_gate < p2[gate.qubits[0]][gate.qubits[1]]) { goto SkipGate; } } else assert(false, "Bad Gate Number."); } /*debug_info_s(After Kraus); debug_output_state;*/ switch (gate.type) { case GateType::RX: state_manipulator<qtraits>::rx(real, imag, size, gate.argument, gate.qubits[0]); break; case GateType::RY: state_manipulator<qtraits>::ry(real, imag, size, gate.argument, gate.qubits[0]); break; case GateType::RZ: state_manipulator<qtraits>::rz(real, imag, size, gate.argument, gate.qubits[0]); break; case GateType::H: state_manipulator<qtraits>::h(real, imag, size, gate.qubits[0]); break; case GateType::CNOT: state_manipulator<qtraits>::cnot(real, imag, size, gate.qubits[0], gate.qubits[1]); break; case GateType::I: break; default: assert(false, "Bad Type."); } SkipGate:; /*debug_info_s(Last); debug_output_state; debug_pause;*/ } } static uint_t *simulate_N_noisy(size_t n, const RealCircuit<qtraits> &c, RandomEngine **rng) { // first allocate n times memory uint_t size = pow2(c.real_circuit.max_qubit); fp_t *real_n = (fp_t*)malloc(n * size * sizeof(fp_t)); fp_t *imag_n = (fp_t*)malloc(n * size * sizeof(fp_t)); memset(real_n, 0, n * size * sizeof(fp_t)); memset(imag_n, 0, n * size * sizeof(fp_t)); uint_t *result = (uint_t*)malloc(n * sizeof(uint_t)); memset(result, 0, sizeof(uint_t)*n); for (int i = 0; i < n; ++i) { fp_t* real = &real_n[i * size]; fp_t* imag = &imag_n[i * size]; real[0] = 1; run_real_circuit(c, real, imag, size, rng[i]); fp_t randnum = (fp_t)(*rng[i])(); fp_t total_prob = 0; for (uint_t j = 0; j < size; ++j) { fp_t pi = real[j] * real[j] + imag[j] * imag[j]; total_prob += pi; if (randnum < total_prob) { result[i] = j; break; } } } free(real_n); free(imag_n); return result; } static void simulate_N_noisy(size_t n, const 
RealCircuit<qtraits>& c, RandomEngine** rng, fp_t *real_n, fp_t *imag_n, uint_t* result) { // first allocate n times memory uint_t size = pow2(c.real_circuit.max_qubit); memset(real_n, 0, n * size * sizeof(fp_t)); memset(imag_n, 0, n * size * sizeof(fp_t)); for (int i = 0; i < n; ++i) { fp_t* real = &real_n[i * size]; fp_t* imag = &imag_n[i * size]; real[0] = 1; run_real_circuit(c, real, imag, size, rng[i]); fp_t randnum = (fp_t)(*rng[i])(); fp_t total_prob = 0; for (uint_t j = 0; j < size; ++j) { fp_t pi = real[j] * real[j] + imag[j] * imag[j]; total_prob += pi; if (randnum < total_prob) { result[i] = j; break; } } } } static uint_t *simulate_N_threads_noisy(size_t n, const RealCircuit<qtraits> &c, RandomEngine **rng) { // first allocate n times memory uint_t size = pow2(c.real_circuit.max_qubit); fp_t *real_n = (fp_t*)malloc(n * size * sizeof(fp_t)); fp_t *imag_n = (fp_t*)malloc(n * size * sizeof(fp_t)); memset(real_n, 0, n * size * sizeof(fp_t)); memset(imag_n, 0, n * size * sizeof(fp_t)); uint_t *result = (uint_t*)malloc(n * sizeof(uint_t)); // memset(result, 0, sizeof(uint_t)*n); #pragma omp parallel for for (int i = 0; i < n; ++i) { fp_t* real = &real_n[i * size]; fp_t* imag = &imag_n[i * size]; real[0] = 1; run_real_circuit(c, real, imag, size, rng[i]); fp_t randnum = (fp_t)(*rng[i])(); fp_t total_prob = 0; for (uint_t j = 0; j < size; ++j) { fp_t pi = real[j] * real[j] + imag[j] * imag[j]; total_prob += pi; if (randnum < total_prob) { result[i] = j; break; } } } free(real_n); free(imag_n); return result; } static void simulate_N_threads_noisy(size_t n, const RealCircuit<qtraits>& c, RandomEngine** rng, fp_t* real_n, fp_t* imag_n, uint_t *result) { // first allocate n times memory uint_t size = pow2(c.real_circuit.max_qubit); memset(real_n, 0, n * size * sizeof(fp_t)); memset(imag_n, 0, n * size * sizeof(fp_t)); #pragma omp parallel for for (int i = 0; i < n; ++i) { fp_t* real = &real_n[i * size]; fp_t* imag = &imag_n[i * size]; real[0] = 1; 
run_real_circuit(c, real, imag, size, rng[i]); fp_t randnum = (fp_t)(*rng[i])(); fp_t total_prob = 0; for (uint_t j = 0; j < size; ++j) { fp_t pi = real[j] * real[j] + imag[j] * imag[j]; total_prob += pi; if (randnum < total_prob) { result[i] = j; break; } } } } }; /* simulator modes */ constexpr bool noisy = true; constexpr bool noise_free = false; constexpr bool single_thread = false; constexpr bool multi_threads = true; ns_end
symm_x_coo_n_hi_row.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include <alphasparse.h>

#define CACHELINE 64

/*
 * y := alpha * A * x + beta * y for a symmetric COO matrix whose stored
 * entries lie in the upper ("hi") triangle, applied to a dense multi-vector
 * x with `columns` columns (row-major, leading dimensions ldx / ldy).
 *
 * Parallelization: columns are split into cacheline-sized blocks and each
 * thread owns a contiguous band of columns.  Every thread scans all
 * nonzeros but writes only inside its own band, so the mirrored update of
 * a symmetric entry never races with another thread.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
    const ALPHA_INT nrows = mat->rows;
    ALPHA_INT nthreads = alpha_get_thread_num();

    /* First scale the entire output by beta. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(nthreads)
#endif
    for (ALPHA_INT r = 0; r < nrows; r++)
    {
        for (ALPHA_INT c = 0; c < columns; c++)
        {
            alpha_mul(y[r * ldy + c], y[r * ldy + c], beta);
        }
    }

    /* Partition columns into cacheline-sized blocks; cap the thread count
     * so every thread gets at least one block. */
    const ALPHA_INT block_size = CACHELINE / sizeof(ALPHA_Number);
    const ALPHA_INT block_num = (columns + block_size - 1) / block_size;
    if (nthreads > block_num)
        nthreads = block_num;

#ifdef _OPENMP
#pragma omp parallel num_threads(nthreads)
#endif
    {
        const ALPHA_INT tid = alpha_get_thread_id();
        const ALPHA_INT col_lo = cross_block_low(tid, nthreads, block_num) * block_size;
        ALPHA_INT col_hi = cross_block_high(tid, nthreads, block_num) * block_size;
        if (col_hi > columns)
            col_hi = columns;

        for (ALPHA_INT ai = 0; ai < mat->nnz; ai++)
        {
            const ALPHA_INT col = mat->col_indx[ai];
            const ALPHA_INT row = mat->row_indx[ai];
            if (col > row)
            {
                /* Strictly-upper entry: update row `row` as stored and
                 * row `col` as its symmetric mirror. */
                ALPHA_Number scaled;
                alpha_mul(scaled, alpha, mat->values[ai]);
                for (ALPHA_INT c = col_lo; c < col_hi; ++c)
                    alpha_madde(y[index2(row, c, ldy)], scaled, x[index2(col, c, ldx)]);
                for (ALPHA_INT c = col_lo; c < col_hi; ++c)
                    alpha_madde(y[index2(col, c, ldy)], scaled, x[index2(row, c, ldx)]);
            }
            else if (col == row)
            {
                /* Diagonal entry contributes exactly once. */
                ALPHA_Number scaled;
                alpha_mul(scaled, alpha, mat->values[ai]);
                for (ALPHA_INT c = col_lo; c < col_hi; ++c)
                {
                    alpha_madde(y[index2(row, c, ldy)], scaled, x[index2(col, c, ldx)]);
                }
            }
            /* Strictly-lower entries (col < row) are ignored: the matrix is
             * stored upper-triangular. */
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
GB_unaryop__abs_bool_int16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__abs_bool_int16
// op(A') function: GB_tran__abs_bool_int16

// C type: bool
// A type: int16_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
// (abs of a bool is the identity, so the op is a plain assignment)
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, aij) \
    bool z = (bool) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_BOOL || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise: Cx [p] = (bool) Ax [p] for p in [0, anz).
// Returns GrB_NO_VALUE when this specialization is compiled out
// (GB_DISABLE), signalling the caller to fall back to the generic kernel.
GrB_Info GB_unop__abs_bool_int16
(
    bool *Cx,           // Cx and Ax may be aliased
    int16_t *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // OpenMP thread count chosen by the caller
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // embarrassingly parallel: each entry is independent
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in the shared template GB_unaryop_transpose.c, which
// expands using the GB_* macros defined above.
GrB_Info GB_tran__abs_bool_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
convolution_3x3_pack8to1_int8.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Transform a 3x3 int8 convolution kernel into the winograd42 (6x6) domain
// and interleave it for the pack8-input / pack1-output int8 kernels.
//   kernel             : raw weights, indexed as p * inch * 9 + q * 9 signed chars
//   kernel_tm_pack8to1 : output, transformed int16 weights, repacked
//                        (layout described at the create() call below)
// The #if blocks at the top runtime-dispatch to AVX512-VNNI / AVX-VNNI / XOP
// builds when the CPU supports an ISA this translation unit was not
// compiled with.
static void conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt)
{
#if NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__
    if (ncnn::cpu_support_x86_avx512_vnni())
    {
        extern void conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_avx512vnni(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt);
        conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_avx512vnni(kernel, kernel_tm_pack8to1, inch, outch, opt);
        return;
    }
#endif

#if NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__
    if (ncnn::cpu_support_x86_avx_vnni())
    {
        extern void conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_avxvnni(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt);
        conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_avxvnni(kernel, kernel_tm_pack8to1, inch, outch, opt);
        return;
    }
#endif

#if NCNN_XOP && __SSE2__ && !__XOP__
    if (ncnn::cpu_support_x86_xop())
    {
        extern void conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_xop(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt);
        conv3x3s1_winograd42_transform_kernel_pack8to1_int8_sse_xop(kernel, kernel_tm_pack8to1, inch, outch, opt);
        return;
    }
#endif

    // winograd42 transform kernel
    // intermediate: one 6x6 int16 tile per (outch, inch) pair
    Mat kernel_tm(6 * 6, inch, outch, (size_t)2u);

    // integer-scaled transform matrix applied on both sides of each 3x3 tile
    const short ktm[6][3] = {
        {6, 0, 0},
        {-4, -4, -4},
        {-4, 4, -4},
        {1, 2, 4},
        {1, -2, 4},
        {0, 0, 6}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
            short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);

            // transform kernel: k0/k1/k2 are the three rows of the 3x3 tile
            const signed char* k0 = kernel0;
            const signed char* k1 = kernel0 + 3;
            const signed char* k2 = kernel0 + 6;

            // h : multiply each kernel row by ktm -> 6x3 intermediate
            short tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U : apply ktm to the intermediate -> 6x6 tile, row-major
            for (int j = 0; j < 6; j++)
            {
                short* tmpp = &tmp[j][0];

                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 36-inch-outch
    // dst = 4b-8a-inch/8a-36-outch/4b
    // NOTE(review): the q-loops below only cover full groups of 8 input
    // channels, so this appears to assume inch % 8 == 0 (pack8 input) --
    // confirm against the caller.
    kernel_tm_pack8to1.create(8 * inch / 8, 36, outch / 4 + outch % 4, (size_t)2u * 4, 4);

    int p = 0;
    // pack 4 output channels per destination channel
    for (; p + 3 < outch; p += 4)
    {
        const Mat k0 = kernel_tm.channel(p);
        const Mat k1 = kernel_tm.channel(p + 1);
        const Mat k2 = kernel_tm.channel(p + 2);
        const Mat k3 = kernel_tm.channel(p + 3);

        Mat g0 = kernel_tm_pack8to1.channel(p / 4);

        for (int k = 0; k < 36; k++)
        {
            short* g00 = g0.row<short>(k);

            for (int q = 0; q + 7 < inch; q += 8)
            {
#if __AVXVNNI__ || __AVX512VNNI__ || __XOP__
                // dot-product ISAs: interleave adjacent input-channel pairs
                // so each 32-bit lane holds two int16 weights
                for (int i = 0; i < 4; i++)
                {
                    const short* k00 = k0.row<const short>(q + i * 2);
                    const short* k10 = k1.row<const short>(q + i * 2);
                    const short* k20 = k2.row<const short>(q + i * 2);
                    const short* k30 = k3.row<const short>(q + i * 2);
                    const short* k01 = k0.row<const short>(q + i * 2 + 1);
                    const short* k11 = k1.row<const short>(q + i * 2 + 1);
                    const short* k21 = k2.row<const short>(q + i * 2 + 1);
                    const short* k31 = k3.row<const short>(q + i * 2 + 1);

                    g00[0] = k00[k];
                    g00[1] = k01[k];
                    g00[2] = k10[k];
                    g00[3] = k11[k];
                    g00[4] = k20[k];
                    g00[5] = k21[k];
                    g00[6] = k30[k];
                    g00[7] = k31[k];

                    g00 += 8;
                }
#else
                // generic layout: 4 output channels side by side per input channel
                for (int i = 0; i < 8; i++)
                {
                    g00[0] = k0.row<const short>(q + i)[k];
                    g00[1] = k1.row<const short>(q + i)[k];
                    g00[2] = k2.row<const short>(q + i)[k];
                    g00[3] = k3.row<const short>(q + i)[k];
                    g00 += 4;
                }
#endif
            }
        }
    }
    // leftover output channels, one per destination channel
    for (; p < outch; p++)
    {
        const Mat k0 = kernel_tm.channel(p);

        Mat g0 = kernel_tm_pack8to1.channel(p / 4 + p % 4);

        for (int k = 0; k < 36; k++)
        {
            short* g00 = g0.row<short>(k);

            for (int q = 0; q + 7 < inch; q += 8)
            {
                for (int i = 0; i < 8; i++)
                {
                    g00[0] = k0.row<const short>(q + i)[k];
                    g00 += 1;
                }
            }
        }
    }
}

// winograd42 int8 convolution, pack8 input -> pack1 output.
// (definition continues past this chunk)
static void conv3x3s1_winograd42_pack8to1_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
#if NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__
    if (ncnn::cpu_support_x86_avx512_vnni())
    {
        extern void conv3x3s1_winograd42_pack8to1_int8_sse_avx512vnni(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt);
        conv3x3s1_winograd42_pack8to1_int8_sse_avx512vnni(bottom_blob, top_blob, kernel_tm, opt);
        return;
    }
#endif

#if NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__
    if (ncnn::cpu_support_x86_avx_vnni())
    {
        extern void conv3x3s1_winograd42_pack8to1_int8_sse_avxvnni(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt);
        conv3x3s1_winograd42_pack8to1_int8_sse_avxvnni(bottom_blob, top_blob, kernel_tm, opt);
        return;
    }
#endif

#if NCNN_XOP && __SSE2__ && !__XOP__
    if (ncnn::cpu_support_x86_xop())
    {
        extern void conv3x3s1_winograd42_pack8to1_int8_sse_xop(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt);
        conv3x3s1_winograd42_pack8to1_int8_sse_xop(bottom_blob, top_blob, kernel_tm, opt);
        return;
    }
#endif

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    // size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int
outch = top_blob.c; // pad to 4n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 3) / 4 * 4; outh = (outh + 3) / 4 * 4; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; bottom_blob_tm.create(tiles, 36, inch, 2u * elempack, elempack, opt.workspace_allocator); // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r04 + r03 // 2 = 4 * (r01 - r02) + r04 - r03 // 3 = -2 * (r01 - r03) + r04 - r02 // 4 = 2 * (r01 - r03) + r04 - r02 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); short tmp[6][6][8]; // tile for (int i = 0; i < h_tm / 6; i++) { for (int j = 0; j < w_tm / 6; j++) { const signed char* r0 = img0.row<const signed char>(i * 4) + (j * 4) * 8; for (int m = 0; m < 6; m++) { // TODO use _mm_cvtepi8_epi16 on sse4.1 __m128i _r00_01 = _mm_loadu_si128((const __m128i*)r0); __m128i _r02_03 = _mm_loadu_si128((const __m128i*)(r0 + 16)); __m128i _r04_05 = _mm_loadu_si128((const __m128i*)(r0 + 32)); __m128i _extr0001 = _mm_cmpgt_epi8(_mm_setzero_si128(), _r00_01); __m128i _extr0203 = _mm_cmpgt_epi8(_mm_setzero_si128(), _r02_03); __m128i _extr0405 = _mm_cmpgt_epi8(_mm_setzero_si128(), _r04_05); __m128i _r00 = _mm_unpacklo_epi8(_r00_01, _extr0001); __m128i _r01 = _mm_unpackhi_epi8(_r00_01, _extr0001); __m128i _r02 = _mm_unpacklo_epi8(_r02_03, _extr0203); __m128i _r03 = 
_mm_unpackhi_epi8(_r02_03, _extr0203); __m128i _r04 = _mm_unpacklo_epi8(_r04_05, _extr0405); __m128i _r05 = _mm_unpackhi_epi8(_r04_05, _extr0405); __m128i _v5 = _mm_set1_epi16(5); __m128i _tmp0m = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_r00, 2), _r04), _mm_mullo_epi16(_r02, _v5)); __m128i _tmp1m = _mm_sub_epi16(_mm_add_epi16(_r04, _r03), _mm_slli_epi16(_mm_add_epi16(_r01, _r02), 2)); __m128i _tmp2m = _mm_add_epi16(_mm_sub_epi16(_r04, _r03), _mm_slli_epi16(_mm_sub_epi16(_r01, _r02), 2)); __m128i _tmp3m = _mm_sub_epi16(_mm_sub_epi16(_r04, _r02), _mm_slli_epi16(_mm_sub_epi16(_r01, _r03), 1)); __m128i _tmp4m = _mm_add_epi16(_mm_sub_epi16(_r04, _r02), _mm_slli_epi16(_mm_sub_epi16(_r01, _r03), 1)); __m128i _tmp5m = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_r01, 2), _r05), _mm_mullo_epi16(_r03, _v5)); _mm_storeu_si128((__m128i*)tmp[0][m], _tmp0m); _mm_storeu_si128((__m128i*)tmp[1][m], _tmp1m); _mm_storeu_si128((__m128i*)tmp[2][m], _tmp2m); _mm_storeu_si128((__m128i*)tmp[3][m], _tmp3m); _mm_storeu_si128((__m128i*)tmp[4][m], _tmp4m); _mm_storeu_si128((__m128i*)tmp[5][m], _tmp5m); r0 += w * 8; } short* r0_tm_0 = (short*)img0_tm + (i * w_tm / 6 + j) * 8; short* r0_tm_1 = r0_tm_0 + tiles * 8; short* r0_tm_2 = r0_tm_0 + tiles * 16; short* r0_tm_3 = r0_tm_0 + tiles * 24; short* r0_tm_4 = r0_tm_0 + tiles * 32; short* r0_tm_5 = r0_tm_0 + tiles * 40; for (int m = 0; m < 6; m++) { __m128i _tmp00 = _mm_loadu_si128((const __m128i*)tmp[m][0]); __m128i _tmp01 = _mm_loadu_si128((const __m128i*)tmp[m][1]); __m128i _tmp02 = _mm_loadu_si128((const __m128i*)tmp[m][2]); __m128i _tmp03 = _mm_loadu_si128((const __m128i*)tmp[m][3]); __m128i _tmp04 = _mm_loadu_si128((const __m128i*)tmp[m][4]); __m128i _tmp05 = _mm_loadu_si128((const __m128i*)tmp[m][5]); __m128i _v5 = _mm_set1_epi16(5); __m128i _r0tm0 = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_tmp00, 2), _tmp04), _mm_mullo_epi16(_tmp02, _v5)); __m128i _r0tm1 = _mm_sub_epi16(_mm_add_epi16(_tmp04, _tmp03), 
_mm_slli_epi16(_mm_add_epi16(_tmp01, _tmp02), 2)); __m128i _r0tm2 = _mm_add_epi16(_mm_sub_epi16(_tmp04, _tmp03), _mm_slli_epi16(_mm_sub_epi16(_tmp01, _tmp02), 2)); __m128i _r0tm3 = _mm_sub_epi16(_mm_sub_epi16(_tmp04, _tmp02), _mm_slli_epi16(_mm_sub_epi16(_tmp01, _tmp03), 1)); __m128i _r0tm4 = _mm_add_epi16(_mm_sub_epi16(_tmp04, _tmp02), _mm_slli_epi16(_mm_sub_epi16(_tmp01, _tmp03), 1)); __m128i _r0tm5 = _mm_sub_epi16(_mm_add_epi16(_mm_slli_epi16(_tmp01, 2), _tmp05), _mm_mullo_epi16(_tmp03, _v5)); _mm_storeu_si128((__m128i*)r0_tm_0, _r0tm0); _mm_storeu_si128((__m128i*)r0_tm_1, _r0tm1); _mm_storeu_si128((__m128i*)r0_tm_2, _r0tm2); _mm_storeu_si128((__m128i*)r0_tm_3, _r0tm3); _mm_storeu_si128((__m128i*)r0_tm_4, _r0tm4); _mm_storeu_si128((__m128i*)r0_tm_5, _r0tm5); r0_tm_0 += tiles * 48; r0_tm_1 += tiles * 48; r0_tm_2 += tiles * 48; r0_tm_3 += tiles * 48; r0_tm_4 += tiles * 48; r0_tm_5 += tiles * 48; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = h_tm / 6 * w_tm / 6; // permute // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; #if __AVX2__ if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator); #else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 36; r++) { Mat tm2 = 
bottom_blob_tm2.channel(r); // tile int i = 0; #if __AVX2__ for (; i + 3 < tiles; i += 4) { short* tmpptr = tm2.row<short>(i / 4); const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m256i _r0 = _mm256_loadu_si256((const __m256i*)r0); __m256i _r1 = _mm256_loadu_si256((const __m256i*)(r0 + 16)); _mm256_storeu_si256((__m256i*)tmpptr, _r0); _mm256_storeu_si256((__m256i*)(tmpptr + 16), _r1); r0 += bottom_blob_tm.cstep * 8; tmpptr += 32; } } #endif for (; i + 1 < tiles; i += 2) { #if __AVX2__ short* tmpptr = tm2.row<short>(i / 4 + (i % 4) / 2); #else short* tmpptr = tm2.row<short>(i / 2); #endif const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m128i _r0 = _mm_loadu_si128((const __m128i*)r0); __m128i _r1 = _mm_loadu_si128((const __m128i*)(r0 + 8)); _mm_storeu_si128((__m128i*)tmpptr, _r0); _mm_storeu_si128((__m128i*)(tmpptr + 8), _r1); r0 += bottom_blob_tm.cstep * 8; tmpptr += 16; } } for (; i < tiles; i++) { #if __AVX2__ short* tmpptr = tm2.row<short>(i / 4 + (i % 4) / 2 + i % 2); #else short* tmpptr = tm2.row<short>(i / 2 + i % 2); #endif const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { __m128i _r0 = _mm_loadu_si128((const __m128i*)r0); _mm_storeu_si128((__m128i*)tmpptr, _r0); r0 += bottom_blob_tm.cstep * 8; tmpptr += 8; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 36, outch, 4u, 1, opt.workspace_allocator); int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p + 1); int* output2_tm = top_blob_tm.channel(p + 2); int* output3_tm = top_blob_tm.channel(p + 3); const Mat kernel0_tm = kernel_tm.channel(p / 4); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __AVX2__ for 
(; i + 3 < tiles; i += 4) { const short* r0 = bb2.row<const short>(i / 4); const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 __m256i _sum0_1 = _mm256_setzero_si256(); __m256i _sum2_3 = _mm256_setzero_si256(); __m256i _sum4_5 = _mm256_setzero_si256(); __m256i _sum6_7 = _mm256_setzero_si256(); for (int j = 0; j < nn; j++) { // 0 1 2 3 4 5 6 7 8 9 a b c d e f __m256i _val0 = _mm256_loadu_si256((const __m256i*)r0); __m256i _w01 = _mm256_loadu_si256((const __m256i*)k0); __m256i _w23 = _mm256_loadu_si256((const __m256i*)(k0 + 16)); #if __AVXVNNI__ || __AVX512VNNI__ __m256i _val0_0123 = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0)); __m256i _val0_4567 = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2)); __m256i _val0_89ab = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(5, 5, 5, 5, 4, 4, 4, 4)); __m256i _val0_cdef = _mm256_permutevar8x32_epi32(_val0, _mm256_set_epi32(7, 7, 7, 7, 6, 6, 6, 6)); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w01, _val0_0123); _sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w01, _val0_89ab); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w23, _val0_4567); _sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w23, _val0_cdef); #else // 0 0 1 1 2 2 3 3 8 8 9 9 a a b b // 4 4 5 5 6 6 7 7 c c d d e e f f __m256i _val0_0123_89ab = _mm256_unpacklo_epi16(_val0, _val0); __m256i _val0_4567_cdef = _mm256_unpackhi_epi16(_val0, _val0); __m256i _val0_0123 = _mm256_permutevar8x32_epi32(_val0_0123_89ab, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val0_4567 = _mm256_permutevar8x32_epi32(_val0_4567_cdef, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val0_89ab = _mm256_permutevar8x32_epi32(_val0_0123_89ab, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _val0_cdef = _mm256_permutevar8x32_epi32(_val0_4567_cdef, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _sl00_01 = _mm256_mullo_epi16(_w01, _val0_0123); __m256i _sh00_01 = _mm256_mulhi_epi16(_w01, _val0_0123); __m256i _sl10_11 = 
_mm256_mullo_epi16(_w01, _val0_89ab); __m256i _sh10_11 = _mm256_mulhi_epi16(_w01, _val0_89ab); __m256i _sl02_03 = _mm256_mullo_epi16(_w23, _val0_4567); __m256i _sh02_03 = _mm256_mulhi_epi16(_w23, _val0_4567); __m256i _sl12_13 = _mm256_mullo_epi16(_w23, _val0_cdef); __m256i _sh12_13 = _mm256_mulhi_epi16(_w23, _val0_cdef); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl00_01, _sh00_01)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl10_11, _sh10_11)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl02_03, _sh02_03)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl12_13, _sh12_13)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl00_01, _sh00_01)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl10_11, _sh10_11)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl02_03, _sh02_03)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl12_13, _sh12_13)); #endif __m256i _val1 = _mm256_loadu_si256((const __m256i*)(r0 + 16)); #if __AVXVNNI__ || __AVX512VNNI__ __m256i _val1_0123 = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0)); __m256i _val1_4567 = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2)); __m256i _val1_89ab = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(5, 5, 5, 5, 4, 4, 4, 4)); __m256i _val1_cdef = _mm256_permutevar8x32_epi32(_val1, _mm256_set_epi32(7, 7, 7, 7, 6, 6, 6, 6)); _sum4_5 = _mm256_dpwssd_epi32(_sum4_5, _w01, _val1_0123); _sum6_7 = _mm256_dpwssd_epi32(_sum6_7, _w01, _val1_89ab); _sum4_5 = _mm256_dpwssd_epi32(_sum4_5, _w23, _val1_4567); _sum6_7 = _mm256_dpwssd_epi32(_sum6_7, _w23, _val1_cdef); #else __m256i _val1_0123_89ab = _mm256_unpacklo_epi16(_val1, _val1); __m256i _val1_4567_cdef = _mm256_unpackhi_epi16(_val1, _val1); __m256i _val1_0123 = _mm256_permutevar8x32_epi32(_val1_0123_89ab, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val1_4567 = 
_mm256_permutevar8x32_epi32(_val1_4567_cdef, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val1_89ab = _mm256_permutevar8x32_epi32(_val1_0123_89ab, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _val1_cdef = _mm256_permutevar8x32_epi32(_val1_4567_cdef, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _sl04_05 = _mm256_mullo_epi16(_w01, _val1_0123); __m256i _sh04_05 = _mm256_mulhi_epi16(_w01, _val1_0123); __m256i _sl14_15 = _mm256_mullo_epi16(_w01, _val1_89ab); __m256i _sh14_15 = _mm256_mulhi_epi16(_w01, _val1_89ab); __m256i _sl06_07 = _mm256_mullo_epi16(_w23, _val1_4567); __m256i _sh06_07 = _mm256_mulhi_epi16(_w23, _val1_4567); __m256i _sl16_17 = _mm256_mullo_epi16(_w23, _val1_cdef); __m256i _sh16_17 = _mm256_mulhi_epi16(_w23, _val1_cdef); _sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpacklo_epi16(_sl04_05, _sh04_05)); _sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpacklo_epi16(_sl14_15, _sh14_15)); _sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpacklo_epi16(_sl06_07, _sh06_07)); _sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpacklo_epi16(_sl16_17, _sh16_17)); _sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpackhi_epi16(_sl04_05, _sh04_05)); _sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpackhi_epi16(_sl14_15, _sh14_15)); _sum4_5 = _mm256_add_epi32(_sum4_5, _mm256_unpackhi_epi16(_sl06_07, _sh06_07)); _sum6_7 = _mm256_add_epi32(_sum6_7, _mm256_unpackhi_epi16(_sl16_17, _sh16_17)); #endif r0 += 32; k0 += 32; } __m256i _sum0_2 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 2, 0, 0)); __m256i _sum1_3 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 3, 0, 1)); _sum0_2 = _mm256_add_epi32(_sum0_2, _sum1_3); __m256i _sum4_6 = _mm256_permute2x128_si256(_sum4_5, _sum6_7, _MM_SHUFFLE(0, 2, 0, 0)); __m256i _sum5_7 = _mm256_permute2x128_si256(_sum4_5, _sum6_7, _MM_SHUFFLE(0, 3, 0, 1)); _sum4_6 = _mm256_add_epi32(_sum4_6, _sum5_7); int sum[16]; _mm256_storeu_si256((__m256i*)sum, _sum0_2); _mm256_storeu_si256((__m256i*)(sum + 8), _sum4_6); output0_tm[0] = 
sum[0]; output1_tm[0] = sum[1]; output2_tm[0] = sum[2]; output3_tm[0] = sum[3]; output0_tm[1] = sum[4]; output1_tm[1] = sum[5]; output2_tm[1] = sum[6]; output3_tm[1] = sum[7]; output0_tm[2] = sum[8]; output1_tm[2] = sum[9]; output2_tm[2] = sum[10]; output3_tm[2] = sum[11]; output0_tm[3] = sum[12]; output1_tm[3] = sum[13]; output2_tm[3] = sum[14]; output3_tm[3] = sum[15]; output0_tm += 4; output1_tm += 4; output2_tm += 4; output3_tm += 4; } #endif for (; i + 1 < tiles; i += 2) { #if __AVX2__ const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2); #else const short* r0 = bb2.row<const short>(i / 2); #endif const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 #if __AVX2__ __m256i _sum0_1 = _mm256_setzero_si256(); __m256i _sum2_3 = _mm256_setzero_si256(); #else __m128i _sum0 = _mm_setzero_si128(); __m128i _sum1 = _mm_setzero_si128(); __m128i _sum2 = _mm_setzero_si128(); __m128i _sum3 = _mm_setzero_si128(); #endif for (int j = 0; j < nn; j++) { #if __AVX2__ // 0 1 2 3 4 5 6 7 8 9 a b c d e f __m256i _val = _mm256_loadu_si256((const __m256i*)r0); __m256i _w01 = _mm256_loadu_si256((const __m256i*)k0); __m256i _w23 = _mm256_loadu_si256((const __m256i*)(k0 + 16)); #if __AVXVNNI__ || __AVX512VNNI__ __m256i _val_0123 = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(1, 1, 1, 1, 0, 0, 0, 0)); __m256i _val_4567 = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(3, 3, 3, 3, 2, 2, 2, 2)); __m256i _val_89ab = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(5, 5, 5, 5, 4, 4, 4, 4)); __m256i _val_cdef = _mm256_permutevar8x32_epi32(_val, _mm256_set_epi32(7, 7, 7, 7, 6, 6, 6, 6)); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w01, _val_0123); _sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w01, _val_89ab); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w23, _val_4567); _sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _w23, _val_cdef); #else __m256i _val_0123_89ab = _mm256_unpacklo_epi16(_val, _val); __m256i _val_4567_cdef = _mm256_unpackhi_epi16(_val, _val); __m256i 
_val_0123 = _mm256_permutevar8x32_epi32(_val_0123_89ab, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val_4567 = _mm256_permutevar8x32_epi32(_val_4567_cdef, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _val_89ab = _mm256_permutevar8x32_epi32(_val_0123_89ab, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _val_cdef = _mm256_permutevar8x32_epi32(_val_4567_cdef, _mm256_set_epi32(7, 7, 6, 6, 5, 5, 4, 4)); __m256i _sl00_01 = _mm256_mullo_epi16(_w01, _val_0123); __m256i _sh00_01 = _mm256_mulhi_epi16(_w01, _val_0123); __m256i _sl10_11 = _mm256_mullo_epi16(_w01, _val_89ab); __m256i _sh10_11 = _mm256_mulhi_epi16(_w01, _val_89ab); __m256i _sl02_03 = _mm256_mullo_epi16(_w23, _val_4567); __m256i _sh02_03 = _mm256_mulhi_epi16(_w23, _val_4567); __m256i _sl12_13 = _mm256_mullo_epi16(_w23, _val_cdef); __m256i _sh12_13 = _mm256_mulhi_epi16(_w23, _val_cdef); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl00_01, _sh00_01)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl10_11, _sh10_11)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl02_03, _sh02_03)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl12_13, _sh12_13)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl00_01, _sh00_01)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl10_11, _sh10_11)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl02_03, _sh02_03)); _sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl12_13, _sh12_13)); #endif #else // 0 1 2 3 4 5 6 7 __m128i _val0 = _mm_loadu_si128((const __m128i*)r0); __m128i _val1 = _mm_loadu_si128((const __m128i*)(r0 + 8)); __m128i _w0 = _mm_loadu_si128((const __m128i*)k0); __m128i _w1 = _mm_loadu_si128((const __m128i*)(k0 + 8)); __m128i _w2 = _mm_loadu_si128((const __m128i*)(k0 + 16)); __m128i _w3 = _mm_loadu_si128((const __m128i*)(k0 + 24)); #if __XOP__ __m128i _val0_01 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(0, 0, 0, 0)); __m128i _val0_23 = 
_mm_shuffle_epi32(_val0, _MM_SHUFFLE(1, 1, 1, 1)); __m128i _val0_45 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(2, 2, 2, 2)); __m128i _val0_67 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(3, 3, 3, 3)); __m128i _val1_01 = _mm_shuffle_epi32(_val1, _MM_SHUFFLE(0, 0, 0, 0)); __m128i _val1_23 = _mm_shuffle_epi32(_val1, _MM_SHUFFLE(1, 1, 1, 1)); __m128i _val1_45 = _mm_shuffle_epi32(_val1, _MM_SHUFFLE(2, 2, 2, 2)); __m128i _val1_67 = _mm_shuffle_epi32(_val1, _MM_SHUFFLE(3, 3, 3, 3)); _sum0 = _mm_maddd_epi16(_val0_01, _w0, _sum0); _sum1 = _mm_maddd_epi16(_val0_23, _w1, _sum1); _sum2 = _mm_maddd_epi16(_val1_01, _w0, _sum2); _sum3 = _mm_maddd_epi16(_val1_23, _w1, _sum3); _sum0 = _mm_maddd_epi16(_val0_45, _w2, _sum0); _sum1 = _mm_maddd_epi16(_val0_67, _w3, _sum1); _sum2 = _mm_maddd_epi16(_val1_45, _w2, _sum2); _sum3 = _mm_maddd_epi16(_val1_67, _w3, _sum3); #else // 0 0 1 1 2 2 3 3 // 4 4 5 5 6 6 7 7 __m128i _val0_0123 = _mm_unpacklo_epi16(_val0, _val0); __m128i _val0_4567 = _mm_unpackhi_epi16(_val0, _val0); __m128i _val1_0123 = _mm_unpacklo_epi16(_val1, _val1); __m128i _val1_4567 = _mm_unpackhi_epi16(_val1, _val1); __m128i _val0_01 = _mm_unpacklo_epi32(_val0_0123, _val0_0123); __m128i _val0_23 = _mm_unpackhi_epi32(_val0_0123, _val0_0123); __m128i _val0_45 = _mm_unpacklo_epi32(_val0_4567, _val0_4567); __m128i _val0_67 = _mm_unpackhi_epi32(_val0_4567, _val0_4567); __m128i _val1_01 = _mm_unpacklo_epi32(_val1_0123, _val1_0123); __m128i _val1_23 = _mm_unpackhi_epi32(_val1_0123, _val1_0123); __m128i _val1_45 = _mm_unpacklo_epi32(_val1_4567, _val1_4567); __m128i _val1_67 = _mm_unpackhi_epi32(_val1_4567, _val1_4567); __m128i _sl00 = _mm_mullo_epi16(_w0, _val0_01); __m128i _sh00 = _mm_mulhi_epi16(_w0, _val0_01); __m128i _sl10 = _mm_mullo_epi16(_w0, _val1_01); __m128i _sh10 = _mm_mulhi_epi16(_w0, _val1_01); __m128i _sl01 = _mm_mullo_epi16(_w1, _val0_23); __m128i _sh01 = _mm_mulhi_epi16(_w1, _val0_23); __m128i _sl11 = _mm_mullo_epi16(_w1, _val1_23); __m128i _sh11 = _mm_mulhi_epi16(_w1, _val1_23); 
__m128i _sl02 = _mm_mullo_epi16(_w2, _val0_45); __m128i _sh02 = _mm_mulhi_epi16(_w2, _val0_45); __m128i _sl12 = _mm_mullo_epi16(_w2, _val1_45); __m128i _sh12 = _mm_mulhi_epi16(_w2, _val1_45); __m128i _sl03 = _mm_mullo_epi16(_w3, _val0_67); __m128i _sh03 = _mm_mulhi_epi16(_w3, _val0_67); __m128i _sl13 = _mm_mullo_epi16(_w3, _val1_67); __m128i _sh13 = _mm_mulhi_epi16(_w3, _val1_67); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl00, _sh00)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl10, _sh10)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl10, _sh10)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl01, _sh01)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl01, _sh01)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl11, _sh11)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl11, _sh11)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl02, _sh02)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl02, _sh02)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl12, _sh12)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl12, _sh12)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl03, _sh03)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl03, _sh03)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl13, _sh13)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl13, _sh13)); #endif #endif r0 += 16; k0 += 32; } #if __AVX2__ __m256i _sum0_2 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 2, 0, 0)); __m256i _sum1_3 = _mm256_permute2x128_si256(_sum0_1, _sum2_3, _MM_SHUFFLE(0, 3, 0, 1)); _sum0_2 = _mm256_add_epi32(_sum0_2, _sum1_3); int sum[8]; _mm256_storeu_si256((__m256i*)sum, _sum0_2); #else _sum0 = _mm_add_epi32(_sum0, _sum1); _sum2 = _mm_add_epi32(_sum2, _sum3); int sum[8]; _mm_storeu_si128((__m128i*)sum, _sum0); _mm_storeu_si128((__m128i*)(sum + 4), _sum2); #endif output0_tm[0] = sum[0]; output1_tm[0] = sum[1]; output2_tm[0] = sum[2]; 
output3_tm[0] = sum[3]; output0_tm[1] = sum[4]; output1_tm[1] = sum[5]; output2_tm[1] = sum[6]; output3_tm[1] = sum[7]; output0_tm += 2; output1_tm += 2; output2_tm += 2; output3_tm += 2; } for (; i < tiles; i++) { #if __AVX2__ const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2 + i % 2); #else const short* r0 = bb2.row<const short>(i / 2 + i % 2); #endif const short* k0 = kernel0_tm.row<const short>(r); int nn = inch; // inch always > 0 #if __AVX2__ __m256i _sum0_1 = _mm256_setzero_si256(); #else __m128i _sum0 = _mm_setzero_si128(); __m128i _sum1 = _mm_setzero_si128(); #endif for (int j = 0; j < nn; j++) { // 0 1 2 3 4 5 6 7 __m128i _val = _mm_loadu_si128((const __m128i*)r0); #if __AVX2__ __m256i _w01 = _mm256_loadu_si256((const __m256i*)k0); __m256i _w23 = _mm256_loadu_si256((const __m256i*)(k0 + 16)); #if __AVXVNNI__ || __AVX512VNNI__ // 0 1 0 1 x x x x // 0 1 0 1 0 1 0 1 __m128i _val_01 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(0, 0, 0, 0)); __m128i _val_23 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(1, 1, 1, 1)); __m128i _val_45 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(2, 2, 2, 2)); __m128i _val_67 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(3, 3, 3, 3)); __m256i _val_0123 = _mm256_inserti128_si256(_mm256_castsi128_si256(_val_01), _val_23, 1); __m256i _val_4567 = _mm256_inserti128_si256(_mm256_castsi128_si256(_val_45), _val_67, 1); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w01, _val_0123); _sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _w23, _val_4567); #else // 0 0 1 1 2 2 3 3 // 4 4 5 5 6 6 7 7 __m256i _val_0123 = _mm256_castsi128_si256(_mm_unpacklo_epi16(_val, _val)); __m256i _val_4567 = _mm256_castsi128_si256(_mm_unpackhi_epi16(_val, _val)); _val_0123 = _mm256_permutevar8x32_epi32(_val_0123, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); _val_4567 = _mm256_permutevar8x32_epi32(_val_4567, _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0)); __m256i _sl00_01 = _mm256_mullo_epi16(_w01, _val_0123); __m256i _sh00_01 = _mm256_mulhi_epi16(_w01, _val_0123); __m256i _sl02_03 = 
_mm256_mullo_epi16(_w23, _val_4567); __m256i _sh02_03 = _mm256_mulhi_epi16(_w23, _val_4567); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl00_01, _sh00_01)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl02_03, _sh02_03)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl00_01, _sh00_01)); _sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl02_03, _sh02_03)); #endif #else __m128i _w0 = _mm_loadu_si128((const __m128i*)k0); __m128i _w1 = _mm_loadu_si128((const __m128i*)(k0 + 8)); __m128i _w2 = _mm_loadu_si128((const __m128i*)(k0 + 16)); __m128i _w3 = _mm_loadu_si128((const __m128i*)(k0 + 24)); #if __XOP__ __m128i _val01 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(0, 0, 0, 0)); __m128i _val23 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(1, 1, 1, 1)); __m128i _val45 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(2, 2, 2, 2)); __m128i _val67 = _mm_shuffle_epi32(_val, _MM_SHUFFLE(3, 3, 3, 3)); _sum0 = _mm_maddd_epi16(_val01, _w0, _sum0); _sum1 = _mm_maddd_epi16(_val23, _w1, _sum1); _sum0 = _mm_maddd_epi16(_val45, _w2, _sum0); _sum1 = _mm_maddd_epi16(_val67, _w3, _sum1); #else // 0 0 1 1 2 2 3 3 // 4 4 5 5 6 6 7 7 __m128i _val_0123 = _mm_unpacklo_epi16(_val, _val); __m128i _val_4567 = _mm_unpackhi_epi16(_val, _val); __m128i _val01 = _mm_unpacklo_epi32(_val_0123, _val_0123); __m128i _val23 = _mm_unpackhi_epi32(_val_0123, _val_0123); __m128i _val45 = _mm_unpacklo_epi32(_val_4567, _val_4567); __m128i _val67 = _mm_unpackhi_epi32(_val_4567, _val_4567); __m128i _sl0 = _mm_mullo_epi16(_w0, _val01); __m128i _sh0 = _mm_mulhi_epi16(_w0, _val01); __m128i _sl1 = _mm_mullo_epi16(_w1, _val23); __m128i _sh1 = _mm_mulhi_epi16(_w1, _val23); __m128i _sl2 = _mm_mullo_epi16(_w2, _val45); __m128i _sh2 = _mm_mulhi_epi16(_w2, _val45); __m128i _sl3 = _mm_mullo_epi16(_w3, _val67); __m128i _sh3 = _mm_mulhi_epi16(_w3, _val67); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl0, _sh0)); _sum0 = 
_mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl1, _sh1)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl1, _sh1)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl2, _sh2)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl2, _sh2)); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl3, _sh3)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl3, _sh3)); #endif #endif r0 += 8; k0 += 32; } #if __AVX2__ __m128i _sum0 = _mm256_extracti128_si256(_sum0_1, 0); __m128i _sum1 = _mm256_extracti128_si256(_sum0_1, 1); #endif _sum0 = _mm_add_epi32(_sum0, _sum1); int sum[4]; _mm_storeu_si128((__m128i*)sum, _sum0); output0_tm[0] = sum[0]; output1_tm[0] = sum[1]; output2_tm[0] = sum[2]; output3_tm[0] = sum[3]; output0_tm += 1; output1_tm += 1; output2_tm += 1; output3_tm += 1; } } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { int* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p / 4 + p % 4); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __AVX2__ for (; i + 3 < tiles; i += 4) { const short* r0 = bb2.row<const short>(i / 4); const short* k0 = kernel0_tm.row<const short>(r); __m128i _sum0 = _mm_setzero_si128(); __m128i _sum1 = _mm_setzero_si128(); __m128i _sum2 = _mm_setzero_si128(); __m128i _sum3 = _mm_setzero_si128(); __m128i _sum4 = _mm_setzero_si128(); __m128i _sum5 = _mm_setzero_si128(); __m128i _sum6 = _mm_setzero_si128(); __m128i _sum7 = _mm_setzero_si128(); for (int q = 0; q < inch; q++) { __m128i _val0 = _mm_loadu_si128((const __m128i*)r0); __m128i _val1 = _mm_loadu_si128((const __m128i*)(r0 + 8)); __m128i _val2 = _mm_loadu_si128((const __m128i*)(r0 + 16)); __m128i _val3 = _mm_loadu_si128((const __m128i*)(r0 + 24)); __m128i _w0 = _mm_loadu_si128((const __m128i*)k0); __m128i _sl0 = _mm_mullo_epi16(_val0, _w0); __m128i _sh0 = _mm_mulhi_epi16(_val0, _w0); __m128i _sl1 = _mm_mullo_epi16(_val1, _w0); 
__m128i _sh1 = _mm_mulhi_epi16(_val1, _w0); __m128i _sl2 = _mm_mullo_epi16(_val2, _w0); __m128i _sh2 = _mm_mulhi_epi16(_val2, _w0); __m128i _sl3 = _mm_mullo_epi16(_val3, _w0); __m128i _sh3 = _mm_mulhi_epi16(_val3, _w0); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl0, _sh0)); _sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl1, _sh1)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl1, _sh1)); _sum4 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl2, _sh2)); _sum5 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl2, _sh2)); _sum6 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl3, _sh3)); _sum7 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl3, _sh3)); k0 += 8; r0 += 16; } _sum0 = _mm_add_epi32(_sum0, _sum1); _sum2 = _mm_add_epi32(_sum2, _sum3); _sum4 = _mm_add_epi32(_sum4, _sum5); _sum6 = _mm_add_epi32(_sum6, _sum7); output0_tm[0] = _mm_reduce_add_epi32(_sum0); output0_tm[1] = _mm_reduce_add_epi32(_sum2); output0_tm[2] = _mm_reduce_add_epi32(_sum4); output0_tm[3] = _mm_reduce_add_epi32(_sum6); output0_tm += 4; } #endif for (; i + 1 < tiles; i += 2) { #if __AVX2__ const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2); #else const short* r0 = bb2.row<const short>(i / 2); #endif const short* k0 = kernel0_tm.row<const short>(r); __m128i _sum0 = _mm_setzero_si128(); __m128i _sum1 = _mm_setzero_si128(); __m128i _sum2 = _mm_setzero_si128(); __m128i _sum3 = _mm_setzero_si128(); for (int q = 0; q < inch; q++) { __m128i _val0 = _mm_loadu_si128((const __m128i*)r0); __m128i _val1 = _mm_loadu_si128((const __m128i*)(r0 + 8)); __m128i _w0 = _mm_loadu_si128((const __m128i*)k0); __m128i _sl0 = _mm_mullo_epi16(_val0, _w0); __m128i _sh0 = _mm_mulhi_epi16(_val0, _w0); __m128i _sl1 = _mm_mullo_epi16(_val1, _w0); __m128i _sh1 = _mm_mulhi_epi16(_val1, _w0); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl0, _sh0)); _sum2 = _mm_add_epi32(_sum2, 
_mm_unpacklo_epi16(_sl1, _sh1)); _sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl1, _sh1)); k0 += 8; r0 += 16; } _sum0 = _mm_add_epi32(_sum0, _sum1); _sum2 = _mm_add_epi32(_sum2, _sum3); output0_tm[0] = _mm_reduce_add_epi32(_sum0); output0_tm[1] = _mm_reduce_add_epi32(_sum2); output0_tm += 2; } for (; i < tiles; i++) { #if __AVX2__ const short* r0 = bb2.row<const short>(i / 4 + (i % 4) / 2 + i % 2); #else const short* r0 = bb2.row<const short>(i / 2 + i % 2); #endif const short* k0 = kernel0_tm.row<const short>(r); __m128i _sum0 = _mm_setzero_si128(); __m128i _sum1 = _mm_setzero_si128(); for (int q = 0; q < inch; q++) { __m128i _val = _mm_loadu_si128((const __m128i*)r0); __m128i _w0 = _mm_loadu_si128((const __m128i*)k0); __m128i _sl0 = _mm_mullo_epi16(_val, _w0); __m128i _sh0 = _mm_mulhi_epi16(_val, _w0); _sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0)); _sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl0, _sh0)); k0 += 8; r0 += 8; } _sum0 = _mm_add_epi32(_sum0, _sum1); output0_tm[0] = _mm_reduce_add_epi32(_sum0); output0_tm++; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 4u, 1, opt.workspace_allocator); } { // const float otm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); int tmp[4][6]; // tile for (int i = 0; i < outh / 4; i++) { 
for (int j = 0; j < outw / 4; j++) { // top_blob_tm.create(tiles, 36, outch, 4u, 1, opt.workspace_allocator); const int* output0_tm_0 = (const int*)out0_tm + (i * w_tm / 6 + j) * 1; const int* output0_tm_1 = output0_tm_0 + tiles * 1; const int* output0_tm_2 = output0_tm_0 + tiles * 2; const int* output0_tm_3 = output0_tm_0 + tiles * 3; const int* output0_tm_4 = output0_tm_0 + tiles * 4; const int* output0_tm_5 = output0_tm_0 + tiles * 5; int* output0 = out0.row<int>(i * 4) + j * 4; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 // TODO sse optimize for (int m = 0; m < 5; m++) { int tmp02a = output0_tm_1[0] + output0_tm_2[0]; int tmp13a = output0_tm_1[0] - output0_tm_2[0]; int tmp02b = output0_tm_3[0] + output0_tm_4[0]; int tmp13b = output0_tm_3[0] - output0_tm_4[0]; tmp[0][m] = output0_tm_0[0] + tmp02a + tmp02b; tmp[1][m] = tmp13a + tmp13b * 2; tmp[2][m] = tmp02a + tmp02b * 4; tmp[3][m] = output0_tm_5[0] * 4 + tmp13a + tmp13b * 8; output0_tm_0 += tiles * 6; output0_tm_1 += tiles * 6; output0_tm_2 += tiles * 6; output0_tm_3 += tiles * 6; output0_tm_4 += tiles * 6; output0_tm_5 += tiles * 6; } for (int m = 5; m < 6; m++) { int tmp02a = output0_tm_1[0] + output0_tm_2[0]; int tmp13a = output0_tm_1[0] - output0_tm_2[0]; int tmp02b = output0_tm_3[0] + output0_tm_4[0]; int tmp13b = output0_tm_3[0] - output0_tm_4[0]; tmp[0][m] = (output0_tm_0[0] + tmp02a + tmp02b) * 4; tmp[1][m] = (tmp13a + tmp13b * 2) * 4; tmp[2][m] = (tmp02a + tmp02b * 4) * 4; tmp[3][m] = (output0_tm_5[0] * 4 + tmp13a + tmp13b * 8) * 4; output0_tm_0 += tiles * 6; output0_tm_1 += tiles * 6; output0_tm_2 += tiles * 6; output0_tm_3 += tiles * 6; output0_tm_4 += tiles * 6; output0_tm_5 += tiles * 6; } for (int m = 0; m < 4; m++) { const int* tmp0 = tmp[m]; int tmp02a = tmp0[1] + tmp0[2]; int tmp13a = tmp0[1] - tmp0[2]; int tmp02b = tmp0[3] + tmp0[4]; int tmp13b = tmp0[3] - tmp0[4]; output0[0] = 
(tmp0[0] + tmp02a + tmp02b) / 576; output0[1] = (tmp13a + tmp13b * 2) / 576; output0[2] = (tmp02a + tmp02b * 4) / 576; output0[3] = (tmp0[5] + tmp13a + tmp13b * 8) / 576; output0 += outw; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
valid.mob2.src.h
#pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_64_56_56_64_3_3.h"
#include "gen_ukr_A4B2gemm_1_64_56_56_64_3_3.h"

// Auto-generated ("push button") tiled convolution driver.
// Phase 1: repack the original filter tensor oriB into the microkernel
// layout B using 8x8 AVX transposes. Phase 2: a generated 15-deep tiled
// loop nest that invokes 6x2-vector / 4x2-vector scatter GEMM microkernels.
//
// NOTE(review): this function calls omp_get_thread_num() and contains a
// bare `#pragma omp barrier`, so it is presumably invoked from inside an
// enclosing `#pragma omp parallel` region — confirm at the call site.
// uNf/uNc/uNw/uNh and the tile sizes Tf2/Tc1/Txy3 come from ukr.h.
void testrun(float* A ,float*B, float*C, float*oriB ){
int tid = omp_get_thread_num();
// Nx/Ny/Nh are unused in this body (generator leftovers).
int Nx = 56;
int Ny = 56;
int Nh = 3;
// Row strides (in elements) fed to the scatter microkernel; temporarily
// bumped by +116 below for rows that wrap past the image edge.
long long Astrides[6] = {0,2,4,6,8,10};
int b1 = 0;
// Phase 1: pack filters. With the (tid%1)/(tid/1) factors every thread
// currently covers the full range (single-thread partitioning).
for (int fpck = (tid%1)*16; fpck < uNf; fpck+=1*16){
for(int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh+=8*1){
transpose8x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
transpose8x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
}
}
#pragma omp barrier // begin push button generated block
// Phase 2: generated loop nest. Loop order (outer to inner):
// xy5 f5 c5 c4 f4 xy4 c3 f3 xy3 xy2 f2 c2 c1 xy1 f1.
// Several levels have trip count 1 at this problem size (56x56, 64ch).
for(int xy5=0;xy5<3136+0;xy5+=3136) {
for(int f5=0;f5<64+0;f5+=64) {
for(int c5=0;c5<64+0;c5+=64) {
for(int c4=c5;c4<min(64, 64+c5);c4+=32) {
for(int f4=f5;f4<min(64, 64+f5);f4+=Tf2) {
for(int xy4=xy5;xy4<min(3136, 3136+xy5);xy4+=3136) {
for(int c3=c4;c3<min(64, 32+c4);c3+=Tc1) {
for(int f3=f4;f3<min(64, Tf2+f4);f3+=Tf2) {
for(int xy3=xy4;xy3<min(3136, 3136+xy4);xy3+=Txy3) {
for(int xy2=xy3;xy2<min(3136, Txy3+xy3);xy2+=6) {
for(int f2=f3;f2<min(64, Tf2+f3);f2+=16) {
for(int c2=c3;c2<min(64, Tc1+c3);c2+=Tc1) {
for(int c1=c2;c1<min(64, Tc1+c2);c1+=Tc1) {
for(int xy1=xy2;xy1<min(3136, 6+xy2);xy1+=6) {
for(int f1=f2;f1<min(64, 16+f2);f1+=16) {
// Microkernel operand offsets; xy1 decodes to (x1, y1) in a
// 56-wide output image, f1 to filter-block indices.
int ctile=min(Tc1, 64-c1);
int x1=xy1/56;
int y1=xy1%56/1;
int c1_1=c1/1;
int c1_2=c1%1/1;
int kf1_1=f1/16;
int kf1_2=f1%16/1;
int of1_1=f1/1;
int of1_2=f1%1/1;
int offsetA=0+b1*831744+c1_1*12996+2*x1*114+2*y1*1+c1_2*1;
int offsetB=0+kf1_1*9216+c1*144+0*48+0*16+kf1_2*1;
int offsetC=0+b1*200704+of1_1*3136+x1*56+y1*1+of1_2*1;
if(56-y1>=6){
// Fast path: 6 full output columns remain in this row.
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
else if(56*56-xy1>=6){
// Row-wrap path: bump strides for the entries that spill into
// the next image row, run the 6-wide kernel, then restore.
for(int sti=56-y1;sti<6;sti+=1)
{
Astrides[sti]+=116;
}
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
for(int sti=56-y1;sti<6;sti+=1)
{
Astrides[sti]-=116;
}
}
else{
// Tail path: fewer than 6 outputs left — use the 4-wide kernel.
cnn_ukr_float_scatter_4x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
// end push button generated block
}
backprop.c
/* ****************************************************************** * HISTORY * 15-Oct-94 Jeff Shufelt (js), Carnegie Mellon University * Prepared for 15-681, Fall 1994. * Modified by Shuai Che ****************************************************************** */ /* Copyright (c)2008-2011 University of Virginia All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted without royalty fees or other restrictions, provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the University of Virginia, the Dept. of Computer Science, nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF VIRGINIA OR THE SOFTWARE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include "backprop.h"
#include <math.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

#define ABS(x)          (((x) > 0.0) ? (x) : (-(x)))

/* Byte-wise copy of `len` bytes from `from` to `to` (memcpy substitute). */
#define fastcopy(to,from,len)\
{\
  register char *_to,*_from;\
  register int _i,_l;\
  _to = (char *)(to);\
  _from = (char *)(from);\
  _l = (len);\
  for (_i = 0; _i < _l; _i++) *_to++ = *_from++;\
}

/*** Return random number between 0.0 and 1.0 ***/
/* BIGRND is defined in backprop.h — presumably RAND_MAX-like; confirm. */
float drnd()
{
  return ((float) rand() / (float) BIGRND);
}

/*** Return random number between -1.0 and 1.0 ***/
float dpn1()
{
  return ((drnd() * 2.0) - 1.0);
}

/*** The squashing function.  Currently, it's a sigmoid. ***/
/* `m` and the commented-out code are a leftover polynomial approximation. */
float squash(float x)
{
  float m;
  //x = -x;
  //m = 1 + x + x*x/2 + x*x*x/6 + x*x*x*x/24 + x*x*x*x*x/120;
  //return(1.0 / (1.0 + m));
  return (1.0 / (1.0 + exp(-x)));
}

/*** Allocate 1d array of floats ***/
/* Returns NULL (after logging to stderr) on allocation failure. */
float *alloc_1d_dbl(int n)
{
  float *new_t;

  new_t = (float *) malloc ((unsigned) (n * sizeof (float)));
  if (new_t == NULL) {
    fprintf(stderr, "ALLOC_1D_DBL: Couldn't allocate array of floats\n");
    return (NULL);
  }
  return (new_t);
}

/*** Allocate 2d array of floats ***/
/* Row-pointer array of m rows, n floats each.  NOTE(review): failure of
   an individual row allocation is not checked here — a NULL row slips
   through silently; callers dereference rows unconditionally. */
float **alloc_2d_dbl(int m, int n)
{
  int i;
  float **new_t;

  new_t = (float **) malloc ((unsigned) (m * sizeof (float *)));
  if (new_t == NULL) {
    fprintf(stderr, "ALLOC_2D_DBL: Couldn't allocate array of dbl ptrs\n");
    return (NULL);
  }
  for (i = 0; i < m; i++) {
    new_t[i] = alloc_1d_dbl(n);
  }
  return (new_t);
}

/* Fill weight matrix w with uniform random values in [0,1].
   Indices run 0..m and 0..n INCLUSIVE — row/column 0 is the threshold
   (bias) unit, so the matrix holds (m+1)x(n+1) entries. */
void bpnn_randomize_weights(float **w, int m, int n)
{
  int i, j;

  for (i = 0; i <= m; i++) {
    for (j = 0; j <= n; j++) {
      w[i][j] = (float) rand()/RAND_MAX;
      // w[i][j] = dpn1();
    }
  }
}

/* Fill a row vector (m+1 entries, inclusive) with the constant 0.1. */
void bpnn_randomize_row(float *w, int m)
{
  int i;
  for (i = 0; i <= m; i++) {
    //w[i] = (float) rand()/RAND_MAX;
    w[i] = 0.1;
  }
}

/* Zero an (m+1)x(n+1) weight matrix (inclusive bounds, see above). */
void bpnn_zero_weights(float **w, int m, int n)
{
  int i, j;

  for (i = 0; i <= m; i++) {
    for (j = 0; j <= n; j++) {
      w[i][j] = 0.0;
    }
  }
}

/* Seed the C library PRNG used by drnd()/bpnn_randomize_weights(). */
void bpnn_initialize(int seed)
{
  fprintf(stderr, "Random number generator seed: %d\n",
seed);
  srand(seed);
}

/* Allocate a BPNN and all of its unit/weight/delta storage for an
   n_in x n_hidden x n_out network.  All arrays reserve one extra slot
   (index 0) for the threshold unit.  Weights are NOT initialized here.
   NOTE(review): allocation failures of the member arrays are not
   checked — only the BPNN struct itself is. */
BPNN *bpnn_internal_create(int n_in, int n_hidden, int n_out)
{
  BPNN *newnet;

  newnet = (BPNN *) malloc (sizeof (BPNN));
  if (newnet == NULL) {
    fprintf(stderr, "BPNN_CREATE: Couldn't allocate neural network\n");
    return (NULL);
  }

  newnet->input_n = n_in;
  newnet->hidden_n = n_hidden;
  newnet->output_n = n_out;
  newnet->input_units = alloc_1d_dbl(n_in + 1);
  newnet->hidden_units = alloc_1d_dbl(n_hidden + 1);
  newnet->output_units = alloc_1d_dbl(n_out + 1);

  newnet->hidden_delta = alloc_1d_dbl(n_hidden + 1);
  newnet->output_delta = alloc_1d_dbl(n_out + 1);
  newnet->target = alloc_1d_dbl(n_out + 1);

  newnet->input_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1);
  newnet->hidden_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1);

  newnet->input_prev_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1);
  newnet->hidden_prev_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1);

  return (newnet);
}

/* Free every array allocated by bpnn_internal_create, then the struct. */
void bpnn_free(BPNN *net)
{
  int n1, n2, i;

  n1 = net->input_n;
  n2 = net->hidden_n;

  free((char *) net->input_units);
  free((char *) net->hidden_units);
  free((char *) net->output_units);

  free((char *) net->hidden_delta);
  free((char *) net->output_delta);
  free((char *) net->target);

  for (i = 0; i <= n1; i++) {
    free((char *) net->input_weights[i]);
    free((char *) net->input_prev_weights[i]);
  }
  free((char *) net->input_weights);
  free((char *) net->input_prev_weights);

  for (i = 0; i <= n2; i++) {
    free((char *) net->hidden_weights[i]);
    free((char *) net->hidden_prev_weights[i]);
  }
  free((char *) net->hidden_weights);
  free((char *) net->hidden_prev_weights);

  free((char *) net);
}

/*** Creates a new fully-connected network from scratch,
     with the given numbers of input, hidden, and output units.
     Threshold units are automatically included.  All weights are
     randomly initialized.

     Space is also allocated for temporary storage (momentum weights,
     error computations, etc).
***/ BPNN *bpnn_create(int n_in, int n_hidden, int n_out) { BPNN *newnet; newnet = bpnn_internal_create(n_in, n_hidden, n_out); #ifdef INITZERO bpnn_zero_weights(newnet->input_weights, n_in, n_hidden); #else bpnn_randomize_weights(newnet->input_weights, n_in, n_hidden); #endif bpnn_randomize_weights(newnet->hidden_weights, n_hidden, n_out); bpnn_zero_weights(newnet->input_prev_weights, n_in, n_hidden); bpnn_zero_weights(newnet->hidden_prev_weights, n_hidden, n_out); bpnn_randomize_row(newnet->target, n_out); return (newnet); } void bpnn_layerforward(float *l1, float *l2, float **conn, int n1, int n2) { float sum; int j, k; /*** Set up thresholding unit ***/ l1[0] = 1.0; #ifdef OPEN omp_set_num_threads(NUM_THREAD); #pragma omp parallel for shared(conn, n1, n2, l1) private(k, j) reduction(+: sum) schedule(static) #endif /*** For each unit in second layer ***/ for (j = 1; j <= n2; j++) { /*** Compute weighted sum of its inputs ***/ sum = 0.0; for (k = 0; k <= n1; k++) { sum += conn[k][j] * l1[k]; } l2[j] = squash(sum); } } void bpnn_output_error(float *delta, float *target, float *output, int nj, float *err) { int j; float o, t, errsum; errsum = 0.0; for (j = 1; j <= nj; j++) { o = output[j]; t = target[j]; delta[j] = o * (1.0 - o) * (t - o); errsum += ABS(delta[j]); } *err = errsum; } void bpnn_hidden_error(float *delta_h, int nh, float *delta_o, int no, float **who, float *hidden, float *err) { int j, k; float h, sum, errsum; errsum = 0.0; for (j = 1; j <= nh; j++) { h = hidden[j]; sum = 0.0; for (k = 1; k <= no; k++) { sum += delta_o[k] * who[j][k]; } delta_h[j] = h * (1.0 - h) * sum; errsum += ABS(delta_h[j]); } *err = errsum; } void bpnn_adjust_weights(float *delta, int ndelta, float *ly, int nly, float **w, float **oldw) { float new_dw; int k, j; ly[0] = 1.0; #ifdef OPEN omp_set_num_threads(NUM_THREAD); #pragma omp parallel for \ shared(oldw, w, delta) \ private(j, k, new_dw) \ firstprivate(ndelta, nly, momentum) #endif for (j = 1; j <= ndelta; j++) { for (k = 
0; k <= nly; k++) {
      /* ETA = learning rate, MOMENTUM = momentum factor (backprop.h). */
      new_dw = ((ETA * delta[j] * ly[k]) + (MOMENTUM * oldw[k][j]));
      w[k][j] += new_dw;
      oldw[k][j] = new_dw;
    }
  }
}

/* One forward pass: input -> hidden -> output activations. */
void bpnn_feedforward(BPNN *net)
{
  int in, hid, out;

  in = net->input_n;
  hid = net->hidden_n;
  out = net->output_n;

  /*** Feed forward input activations. ***/
  bpnn_layerforward(net->input_units, net->hidden_units,
      net->input_weights, in, hid);
  bpnn_layerforward(net->hidden_units, net->output_units,
      net->hidden_weights, hid, out);

}

/* One full training step: forward pass, error/delta computation for
   both layers, then weight updates (output layer first, so the hidden
   deltas already computed from the OLD weights are applied next).
   Returns the output-layer error in *eo and hidden-layer error in *eh. */
void bpnn_train(BPNN *net, float *eo, float *eh)
{
  int in, hid, out;
  float out_err, hid_err;

  in = net->input_n;
  hid = net->hidden_n;
  out = net->output_n;

  /*** Feed forward input activations. ***/
  bpnn_layerforward(net->input_units, net->hidden_units,
      net->input_weights, in, hid);
  bpnn_layerforward(net->hidden_units, net->output_units,
      net->hidden_weights, hid, out);

  /*** Compute error on output and hidden units. ***/
  bpnn_output_error(net->output_delta, net->target, net->output_units,
      out, &out_err);
  bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out,
      net->hidden_weights, net->hidden_units, &hid_err);
  *eo = out_err;
  *eh = hid_err;

  /*** Adjust input and hidden weights.
***/ bpnn_adjust_weights(net->output_delta, out, net->hidden_units, hid, net->hidden_weights, net->hidden_prev_weights); bpnn_adjust_weights(net->hidden_delta, hid, net->input_units, in, net->input_weights, net->input_prev_weights); } void bpnn_save(BPNN *net, char *filename) { int n1, n2, n3, i, j, memcnt; float dvalue, **w; char *mem; ///add// FILE *pFile; pFile = fopen( filename, "w+" ); /////// /* if ((fd = creat(filename, 0644)) == -1) { fprintf(stderr, "BPNN_SAVE: Cannot create '%s'\n", filename); return; } */ n1 = net->input_n; n2 = net->hidden_n; n3 = net->output_n; fprintf(stderr, "Saving %dx%dx%d network to '%s'\n", n1, n2, n3, filename); //fflush(stdout); //write(fd, (char *) &n1, sizeof(int)); //write(fd, (char *) &n2, sizeof(int)); //write(fd, (char *) &n3, sizeof(int)); fwrite( (char *) &n1 , sizeof(char), sizeof(char), pFile); fwrite( (char *) &n2 , sizeof(char), sizeof(char), pFile); fwrite( (char *) &n3 , sizeof(char), sizeof(char), pFile); memcnt = 0; w = net->input_weights; mem = (char *) malloc ((unsigned) ((n1+1) * (n2+1) * sizeof(float))); for (i = 0; i <= n1; i++) { for (j = 0; j <= n2; j++) { dvalue = w[i][j]; fastcopy(&mem[memcnt], &dvalue, sizeof(float)); memcnt += sizeof(float); } } //write(fd, mem, (n1+1) * (n2+1) * sizeof(float)); fwrite( mem , (unsigned)(sizeof(float)), (unsigned) ((n1+1) * (n2+1) * sizeof(float)) , pFile); free(mem); memcnt = 0; w = net->hidden_weights; mem = (char *) malloc ((unsigned) ((n2+1) * (n3+1) * sizeof(float))); for (i = 0; i <= n2; i++) { for (j = 0; j <= n3; j++) { dvalue = w[i][j]; fastcopy(&mem[memcnt], &dvalue, sizeof(float)); memcnt += sizeof(float); } } //write(fd, mem, (n2+1) * (n3+1) * sizeof(float)); fwrite( mem , sizeof(float), (unsigned) ((n2+1) * (n3+1) * sizeof(float)) , pFile); free(mem); fclose(pFile); return; } BPNN *bpnn_read(char *filename) { char *mem; BPNN *new_t; int fd, n1, n2, n3, i, j, memcnt; if ((fd = open(filename, 0, 0644)) == -1) { return (NULL); } fprintf(stderr, "Reading 
'%s'\n", filename);
  //fflush(stdout);

  /* Header: three ints (input, hidden, output layer sizes).
     NOTE(review): read() return values are unchecked throughout this
     function — a short or corrupt file yields garbage sizes/weights. */
  read(fd, (char *) &n1, sizeof(int));
  read(fd, (char *) &n2, sizeof(int));
  read(fd, (char *) &n3, sizeof(int));
  new_t = bpnn_internal_create(n1, n2, n3);

  fprintf(stderr, "'%s' contains a %dx%dx%d network\n", filename, n1, n2, n3);
  fprintf(stderr, "Reading input weights...");
  //fflush(stdout);

  /* Bulk-read the flattened input-weight block, then scatter it into
     the row-pointer matrix via fastcopy (byte-wise memcpy macro). */
  memcnt = 0;
  mem = (char *) malloc ((unsigned) ((n1+1) * (n2+1) * sizeof(float)));
  read(fd, mem, (n1+1) * (n2+1) * sizeof(float));
  for (i = 0; i <= n1; i++) {
    for (j = 0; j <= n2; j++) {
      fastcopy(&(new_t->input_weights[i][j]), &mem[memcnt], sizeof(float));
      memcnt += sizeof(float);
    }
  }
  free(mem);

  fprintf(stderr, "Done\nReading hidden weights...");
  //fflush(stdout);

  /* Same for the hidden-weight block. */
  memcnt = 0;
  mem = (char *) malloc ((unsigned) ((n2+1) * (n3+1) * sizeof(float)));
  read(fd, mem, (n2+1) * (n3+1) * sizeof(float));
  for (i = 0; i <= n2; i++) {
    for (j = 0; j <= n3; j++) {
      fastcopy(&(new_t->hidden_weights[i][j]), &mem[memcnt], sizeof(float));
      memcnt += sizeof(float);
    }
  }
  free(mem);
  close(fd);

  fprintf(stderr, "Done\n");
  //fflush(stdout);

  /* Momentum history starts from zero for a freshly loaded network. */
  bpnn_zero_weights(new_t->input_prev_weights, n1, n2);
  bpnn_zero_weights(new_t->hidden_prev_weights, n2, n3);

  return (new_t);
}
GB_binop__iseq_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__iseq_uint8) // A.*B function (eWiseMult): GB (_AemultB_08__iseq_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__iseq_uint8) // A.*B function (eWiseMult): GB (_AemultB_04__iseq_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_uint8) // A*D function (colscale): GB (_AxD__iseq_uint8) // D*A function (rowscale): GB (_DxB__iseq_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__iseq_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__iseq_uint8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_uint8) // C=scalar+B GB (_bind1st__iseq_uint8) // C=scalar+B' GB (_bind1st_tran__iseq_uint8) // C=A+scalar GB (_bind2nd__iseq_uint8) // C=A'+scalar GB (_bind2nd_tran__iseq_uint8) // C type: uint8_t // A type: uint8_t // A pattern? 0 // B type: uint8_t // B pattern? 
0 // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISEQ || GxB_NO_UINT8 || GxB_NO_ISEQ_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__iseq_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__iseq_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__iseq_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__iseq_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t 
*restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__iseq_uint8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__iseq_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint8_t alpha_scalar ; uint8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ; beta_scalar = (*((uint8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__iseq_uint8) ( GrB_Matrix C, const int C_sparsity, const 
int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__iseq_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
//------------------------------------------------------------------------------
// Generated kernels for the ISEQ binary operator, z = (x == y), on uint8_t
// data.  Each wrapper below #includes a shared template file that expands
// using GB_* macros defined earlier in this (machine-generated) file.
//------------------------------------------------------------------------------

// NOTE(review): this chunk begins inside a function whose opening lines are
// not visible here; the tokens below are preserved exactly as found.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

// Masked element-wise multiply with the ISEQ operator; the kernel body comes
// from the included template.  Returns GrB_NO_VALUE when compiled out.
GrB_Info GB (_AemultB_04__iseq_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,                 // mask, sparse or hypersparse
    const bool Mask_struct,             // if true, use only the pattern of M
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,        // task partition of M
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__iseq_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,               // if true, the mask is complemented
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = (x == Bx [p]) for every entry present per the bitmap Bb.
GrB_Info GB (_bind1st__iseq_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B; NULL means all entries present
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip positions not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = (Ax [p] == y) for every entry present per the bitmap Ab.
GrB_Info GB (_bind2nd__iseq_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A; NULL means all entries present
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    uint8_t aij = GBX (Ax, pA, false) ;     \
    Cx [pC] = (x == aij) ;                  \
}

GrB_Info GB (_bind1st_tran__iseq_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // re-establish GB_ATYPE for any code that follows
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    uint8_t aij = GBX (Ax, pA, false) ;     \
    Cx [pC] = (aij == y) ;                  \
}

GrB_Info GB (_bind2nd_tran__iseq_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
parallel_for_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp=libiomp5 -verify %s

// clang -verify test: each 'expected-*@+N' comment pins a diagnostic to a line
// N lines below it, so relative comment/pragma positions must not change.

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for'}}
#pragma omp parallel for

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for'}}
#pragma omp parallel for foo

// A clause-less 'parallel for' must be followed by a for statement.
void test_no_clause() {
  int i;
#pragma omp parallel for
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+2 {{statement after '#pragma omp parallel for' must be a for loop}}
#pragma omp parallel for
  ++i;
}

// Labels and returns may not escape the OpenMP region.
void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;

  int x[24];

#pragma omp parallel for
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }

  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}

// Unknown trailing tokens are warned about and ignored.
void test_invalid_clause() {
  int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
#pragma omp parallel for foo bar
  for (i = 0; i < 16; ++i)
    ;
}

// Punctuation and unsupported clauses after the directive name.
void test_non_identifiers() {
  int i, x;

// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
#pragma omp parallel for;
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{unexpected OpenMP clause 'linear' in directive '#pragma omp parallel for'}}
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
#pragma omp parallel for linear(x);
  for (i = 0; i < 16; ++i)
    ;

// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
#pragma omp parallel for private(x);
  for (i = 0; i < 16; ++i)
    ;

// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
#pragma omp parallel for, private(x);
  for (i = 0; i < 16; ++i)
    ;
}

extern int foo();

// Malformed, non-constant, and out-of-range 'collapse' arguments.
void test_collapse() {
  int i;
// expected-error@+1 {{expected '('}}
#pragma omp parallel for collapse
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for collapse(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for collapse()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for collapse(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for collapse(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp parallel for collapse 4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4,
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4, )
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4, , 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
#pragma omp parallel for collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel for collapse(4, 8)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel for', but found only 1}}
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp parallel for collapse(2.5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp parallel for collapse(foo())
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}}
#pragma omp parallel for collapse(-5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}}
#pragma omp parallel for collapse(0)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}}
#pragma omp parallel for collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
// expected-note@+1 {{defined as firstprivate}}
#pragma omp parallel for collapse(2) firstprivate(i)
  for (i = 0; i < 16; ++i)
// expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}}
    for (int j = 0; j < 16; ++j)
// expected-error@+3 {{reduction variable must be shared}}
// expected-error@+2 {{private variable cannot be reduction}}
// expected-error@+1 {{region cannot be closely nested inside 'parallel for' region; perhaps you forget to enclose 'omp for' directive into a parallel region?}}
#pragma omp for reduction(+ : i, j)
      for (int k = 0; k < 16; ++k)
        i += j;
}

// Malformed argument lists for the 'private' clause.
void test_private() {
  int i;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel for private(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for private(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for private(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for private()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for private(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for private(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp parallel for private(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel for private(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel for private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}

// Malformed argument lists for the 'lastprivate' clause.
void test_lastprivate() {
  int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel for lastprivate(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for lastprivate(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for lastprivate(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for lastprivate()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for lastprivate(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for lastprivate(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp parallel for lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel for lastprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel for lastprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}

// Malformed argument lists for the 'firstprivate' clause, and combined
// lastprivate/firstprivate usage.
void test_firstprivate() {
  int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel for firstprivate(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for firstprivate(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel for firstprivate(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for firstprivate()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp parallel for firstprivate(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp parallel for firstprivate(0)
  for (i = 0; i < 16; ++i)
    ;

  int x, y, z;
#pragma omp parallel for lastprivate(x) firstprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel for lastprivate(x, y) firstprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel for lastprivate(x, y, z) firstprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}

// Loop induction variables must have integer or pointer type.
void test_loop_messages() {
  float a[100], b[100], c[100];
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp parallel for
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp parallel for
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
}
ams.c
/*BHEADER********************************************************************** * Copyright (c) 2008, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision$ ***********************************************************************EHEADER*/ #include "_hypre_parcsr_ls.h" #include "float.h" #include "ams.h" /*-------------------------------------------------------------------------- * hypre_ParCSRRelax * * Relaxation on the ParCSR matrix A with right-hand side f and * initial guess u. Possible values for relax_type are: * * 1 = l1-scaled (or weighted) Jacobi * 2 = l1-scaled block Gauss-Seidel/SSOR * 3 = Kaczmarz * 4 = truncated version of 2 (Remark 6.2 in smoothers paper) * x = BoomerAMG relaxation with relax_type = |x| * (16 = Cheby) * * The default value of relax_type is 2. 
 *--------------------------------------------------------------------------*/

/* Apply 'relax_times' sweeps of the smoother selected by relax_type to
 * A u = f, updating u in place.  v and z are caller-owned scratch vectors.
 * l1_norms holds one scaling value per local row; its exact meaning depends
 * on how it was computed (see hypre_ParCSRComputeL1Norms). */
HYPRE_Int hypre_ParCSRRelax(/* matrix to relax with */
                            hypre_ParCSRMatrix *A,
                            /* right-hand side */
                            hypre_ParVector *f,
                            /* relaxation type */
                            HYPRE_Int relax_type,
                            /* number of sweeps */
                            HYPRE_Int relax_times,
                            /* l1 norms of the rows of A */
                            HYPRE_Real *l1_norms,
                            /* damping coefficient (usually <= 1) */
                            HYPRE_Real relax_weight,
                            /* SOR parameter (usually in (0,2) */
                            HYPRE_Real omega,
                            /* for cheby smoothers */
                            HYPRE_Real max_eig_est,
                            HYPRE_Real min_eig_est,
                            HYPRE_Int cheby_order,
                            HYPRE_Real cheby_fraction,
                            /* initial/updated approximation */
                            hypre_ParVector *u,
                            /* temporary vector */
                            hypre_ParVector *v,
                            /* temporary vector */
                            hypre_ParVector *z)
{
   HYPRE_Int sweep;

   HYPRE_Real *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
   HYPRE_Real *f_data = hypre_VectorData(hypre_ParVectorLocalVector(f));
   HYPRE_Real *v_data = hypre_VectorData(hypre_ParVectorLocalVector(v));
   //printRC(hypre_ParVectorLocalVector(u),"STarting....");
   for (sweep = 0; sweep < relax_times; sweep++)
   {
      if (relax_type == 1) /* l1-scaled Jacobi */
      {
         PUSH_RANGE_PAYLOAD("RELAX",4,sweep);
         HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
#ifdef HYPRE_USING_UNIFIED_MEMORY
         /* prefetch once, before the first sweep touches the data */
         if (sweep==0)
         {
            hypre_SeqVectorPrefetchToDevice(hypre_ParVectorLocalVector(v));
            hypre_SeqVectorPrefetchToDevice(hypre_ParVectorLocalVector(f));
         }
#endif
         //SyncVectorToHost(hypre_ParVectorLocalVector(v));
         //SyncVectorToHost(hypre_ParVectorLocalVector(f));
#if defined(HYPRE_USING_GPU) && defined(HYPRE_USING_UNIFIED_MEMORY)
         /* v = f (device copy) */
         VecCopy(v_data,f_data,hypre_VectorSize(hypre_ParVectorLocalVector(v)),HYPRE_STREAM(4));
#else
         //printRC(hypre_ParVectorLocalVector(v),"Pre-COPY V");
         //printRC(hypre_ParVectorLocalVector(f),"Pre-COPY F");
         hypre_ParVectorCopy(f,v);
#endif
#ifdef HYPRE_USING_MAPPED_OPENMP_OFFLOAD
         SyncVectorToDevice(hypre_ParVectorLocalVector(v));
#endif
         hypre_ParCSRMatrixMatvec(-relax_weight, A, u, relax_weight, v);
         //SyncVectorToHost(hypre_ParVectorLocalVector(v));
         //SyncVectorToHost(hypre_ParVectorLocalVector(u));
         PUSH_RANGE_PAYLOAD("VECSCALE-RELAX",5,num_rows);
#if defined(HYPRE_USING_GPU) && defined(HYPRE_USING_UNIFIED_MEMORY)
         VecScale(u_data,v_data,l1_norms,num_rows,HYPRE_STREAM(4));
#else
         HYPRE_Int i;
         /* u += w D^{-1}(f - A u), where D_ii = ||A(i,:)||_1 */
#if defined(HYPRE_USING_OPENMP_OFFLOAD)
         HYPRE_Int num_teams = (num_rows+num_rows%1024)/1024;
         //printf("AMS.C %d = %d \n",num_rows,num_teams*1024);
         //printf("Ptypes %d %d %d \n",PointerAttributes(u_data),PointerAttributes(v_data),PointerAttributes(l1_norms));
#pragma omp target teams distribute parallel for private(i) num_teams(num_teams) thread_limit(1024) is_device_ptr(u_data,v_data,l1_norms)
#elif defined(HYPRE_USING_MAPPED_OPENMP_OFFLOAD)
         HYPRE_Int num_teams = (num_rows+num_rows%1024)/1024;
#pragma omp target teams distribute parallel for private(i) num_teams(num_teams) thread_limit(1024)
#endif
         for (i = 0; i < num_rows; i++)
         {
            u_data[i] += v_data[i] / l1_norms[i];
         }
#endif
#ifdef HYPRE_USING_MAPPED_OPENMP_OFFLOAD
         UpdateDRC(hypre_ParVectorLocalVector(u));
#endif
         //printf("AMS.C DONE %d = %d \n",num_rows,num_teams*1024);
         POP_RANGE;
         POP_RANGE;
      }
      else if (relax_type == 2 || relax_type == 4) /* offd-l1-scaled block GS */
      {
         hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
         HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
         HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
         HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);

         hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
         HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
         HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
         HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);

         HYPRE_Int i, j;
         HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
         HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
         HYPRE_Real *u_offd_data = hypre_TAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);

         HYPRE_Real res;

         HYPRE_Int num_procs;
         hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &num_procs);

         /* Copy off-diagonal values of u to the current processor */
         if (num_procs > 1)
         {
            hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
            HYPRE_Int num_sends;
            HYPRE_Real *u_buf_data;
            hypre_ParCSRCommHandle *comm_handle;

            HYPRE_Int index = 0, start;

            if (!comm_pkg)
            {
               hypre_MatvecCommPkgCreate(A);
               comm_pkg = hypre_ParCSRMatrixCommPkg(A);
            }

            num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
            u_buf_data = hypre_TAlloc(HYPRE_Real,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                      HYPRE_MEMORY_HOST);

            for (i = 0; i < num_sends; i++)
            {
               start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
               for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
                  u_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
            }

            comm_handle = hypre_ParCSRCommHandleCreate(1,comm_pkg,u_buf_data,u_offd_data);
            hypre_ParCSRCommHandleDestroy(comm_handle);
            hypre_TFree(u_buf_data, HYPRE_MEMORY_HOST);
         }

         if (relax_weight == 1.0 && omega == 1.0) /* symmetric Gauss-Seidel */
         {
            /* Forward local pass */
            for (i = 0; i < num_rows; i++)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += res / l1_norms[i];
            }
            /* Backward local pass */
            for (i = num_rows-1; i > -1; i--)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += res / l1_norms[i];
            }
         }
         else if (relax_weight == 1.0) /* SSOR */
         {
            /* Forward local pass */
            for (i = 0; i < num_rows; i++)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += omega * res / l1_norms[i];
            }
            /* Backward local pass */
            for (i = num_rows-1; i > -1; i--)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += omega * res / l1_norms[i];
            }
         }
         else /* scaled SSOR */
         {
            HYPRE_Real dif;
            HYPRE_Real c1 = omega * relax_weight;
            HYPRE_Real c2 = omega * (1.0 - relax_weight);

            /* Forward local pass (save initial guess in v_data) */
            for (i = 0; i < num_rows; i++)
            {
               dif = 0.0;
               v_data[i] = u_data[i];
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
                  if (A_diag_J[j] < i)
                     dif += A_diag_data[j] * (v_data[A_diag_J[j]] - u_data[A_diag_J[j]]);
               }
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += (c1 * res + c2 * dif) / l1_norms[i];
            }
            /* Backward local pass */
            for (i = num_rows-1; i > -1; i--)
            {
               dif = 0.0;
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
                  if (A_diag_J[j] > i)
                     dif += A_diag_data[j] * (v_data[A_diag_J[j]] - u_data[A_diag_J[j]]);
               }
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += (c1 * res + c2 * dif) / l1_norms[i];
            }
         }

         hypre_TFree(u_offd_data, HYPRE_MEMORY_HOST);
      }
      else if (relax_type == 3) /* Kaczmarz */
      {
         hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
         HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
         HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
         HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);

         hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
         HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
         HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
         HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);

         HYPRE_Int i, j;
         HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
         HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
         HYPRE_Real *u_offd_data = hypre_TAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);

         HYPRE_Real res;

         HYPRE_Int num_procs;
         hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &num_procs);

         /* Copy off-diagonal values of u to the current processor */
         if (num_procs > 1)
         {
            hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
            HYPRE_Int num_sends;
            HYPRE_Real *u_buf_data;
            hypre_ParCSRCommHandle *comm_handle;

            HYPRE_Int index = 0, start;

            if (!comm_pkg)
            {
               hypre_MatvecCommPkgCreate(A);
               comm_pkg = hypre_ParCSRMatrixCommPkg(A);
            }

            num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
            u_buf_data = hypre_TAlloc(HYPRE_Real,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                      HYPRE_MEMORY_HOST);

            for (i = 0; i < num_sends; i++)
            {
               start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
               for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
                  u_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
            }

            comm_handle = hypre_ParCSRCommHandleCreate(1,comm_pkg,u_buf_data,u_offd_data);
            hypre_ParCSRCommHandleDestroy(comm_handle);
            hypre_TFree(u_buf_data, HYPRE_MEMORY_HOST);
         }

         /* Forward local pass */
         for (i = 0; i < num_rows; i++)
         {
            res = f_data[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               res -= A_diag_data[j] * u_data[A_diag_J[j]];
            if (num_cols_offd)
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
            res /= l1_norms[i];
            /* project the residual back along row i */
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               u_data[A_diag_J[j]] += omega * res * A_diag_data[j];
         }

         /* Backward local pass */
         for (i = num_rows-1; i > -1; i--)
         {
            res = f_data[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               res -= A_diag_data[j] * u_data[A_diag_J[j]];
            if (num_cols_offd)
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
            res /= l1_norms[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               u_data[A_diag_J[j]] += omega * res * A_diag_data[j];
         }

         hypre_TFree(u_offd_data, HYPRE_MEMORY_HOST);
      }
      else /* call BoomerAMG relaxation */
      {
         if (relax_type == 16)
         {
            hypre_ParCSRRelax_Cheby(A, f, max_eig_est, min_eig_est, cheby_fraction, cheby_order,
                                    1, 0, u, v, z);
         }
         else
            hypre_BoomerAMGRelax(A, f, NULL, hypre_abs(relax_type), 0, relax_weight,
                                 omega, l1_norms, u, v, z);
      }
   }
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorInRangeOf
 *
 * Return a vector that belongs to the range of a given matrix.
 *--------------------------------------------------------------------------*/

/* The returned vector owns its data but not its partitioning; the caller
 * must destroy it with hypre_ParVectorDestroy. */
hypre_ParVector *hypre_ParVectorInRangeOf(hypre_ParCSRMatrix *A)
{
   hypre_ParVector *x;

   x = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                             hypre_ParCSRMatrixGlobalNumRows(A),
                             hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(x);
   hypre_ParVectorOwnsData(x) = 1;
   hypre_ParVectorOwnsPartitioning(x) = 0;

   return x;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorInDomainOf
 *
 * Return a vector that belongs to the domain of a given matrix.
 *--------------------------------------------------------------------------*/

/* Same ownership semantics as hypre_ParVectorInRangeOf, but sized by the
 * matrix columns. */
hypre_ParVector *hypre_ParVectorInDomainOf(hypre_ParCSRMatrix *A)
{
   hypre_ParVector *x;

   x = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                             hypre_ParCSRMatrixGlobalNumCols(A),
                             hypre_ParCSRMatrixColStarts(A));
   hypre_ParVectorInitialize(x);
   hypre_ParVectorOwnsData(x) = 1;
   hypre_ParVectorOwnsPartitioning(x) = 0;

   return x;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorBlockSplit
 *
 * Extract the dim sub-vectors x_0,...,x_{dim-1} composing a parallel
 * block vector x. It is assumed that &x[i] = [x_0[i],...,x_{dim-1}[i]].
 *--------------------------------------------------------------------------*/

/* De-interleave: copies x (stride dim) into the dim contiguous vectors x_.
 * All vectors are assumed to have matching local sizes; dim <= 3. */
HYPRE_Int hypre_ParVectorBlockSplit(hypre_ParVector *x,
                                    hypre_ParVector *x_[3],
                                    HYPRE_Int dim)
{
   HYPRE_Int i, d, size_;
   HYPRE_Real *x_data, *x_data_[3];

   size_ = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0]));

   x_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
   for (d = 0; d < dim; d++)
      x_data_[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d]));

   for (i = 0; i < size_; i++)
      for (d = 0; d < dim; d++)
         x_data_[d][i] = x_data[dim*i+d];

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorBlockGather
 *
 * Compose a parallel block vector x from dim given sub-vectors
 * x_0,...,x_{dim-1}, such that &x[i] = [x_0[i],...,x_{dim-1}[i]].
 *--------------------------------------------------------------------------*/

/* Inverse of hypre_ParVectorBlockSplit: interleaves x_ back into x. */
HYPRE_Int hypre_ParVectorBlockGather(hypre_ParVector *x,
                                     hypre_ParVector *x_[3],
                                     HYPRE_Int dim)
{
   HYPRE_Int i, d, size_;
   HYPRE_Real *x_data, *x_data_[3];

   size_ = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0]));

   x_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
   for (d = 0; d < dim; d++)
      x_data_[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d]));

   for (i = 0; i < size_; i++)
      for (d = 0; d < dim; d++)
         x_data[dim*i+d] = x_data_[d][i];

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGBlockSolve
 *
 * Apply the block-diagonal solver diag(B) to the system diag(A) x = b.
 * Here B is a given BoomerAMG solver for A, while x and b are "block"
 * parallel vectors.
 *--------------------------------------------------------------------------*/

/* Solve one independent system per block component.  dim is deduced from
 * the vector/matrix size ratio; when dim == 1 this reduces to a single
 * BoomerAMG solve.  Temporary per-component vectors are created, solved,
 * gathered back into x, and destroyed. */
HYPRE_Int hypre_BoomerAMGBlockSolve(void *B,
                                    hypre_ParCSRMatrix *A,
                                    hypre_ParVector *b,
                                    hypre_ParVector *x)
{
   HYPRE_Int d, dim = 1;

   hypre_ParVector *b_[3];
   hypre_ParVector *x_[3];

   dim = hypre_ParVectorGlobalSize(x) / hypre_ParCSRMatrixGlobalNumRows(A);

   if (dim == 1)
   {
      hypre_BoomerAMGSolve(B, A, b, x);
      return hypre_error_flag;
   }

   for (d = 0; d < dim; d++)
   {
      b_[d] = hypre_ParVectorInRangeOf(A);
      x_[d] = hypre_ParVectorInRangeOf(A);
   }

   hypre_ParVectorBlockSplit(b, b_, dim);
   hypre_ParVectorBlockSplit(x, x_, dim);

   for (d = 0; d < dim; d++)
      hypre_BoomerAMGSolve(B, A, b_[d], x_[d]);

   hypre_ParVectorBlockGather(x, x_, dim);

   for (d = 0; d < dim; d++)
   {
      hypre_ParVectorDestroy(b_[d]);
      hypre_ParVectorDestroy(x_[d]);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixFixZeroRows
 *
 * For every zero row in the matrix: set the diagonal element to 1.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_ParCSRMatrixFixZeroRows(hypre_ParCSRMatrix *A)
{
   HYPRE_Int i, j;
   HYPRE_Real l1_norm;

   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   /* a row will be considered zero if its l1 norm is less than eps */
   HYPRE_Real eps = 0.0; /* DBL_EPSILON * 1e+4; */

   for (i = 0; i < num_rows; i++)
   {
      l1_norm = 0.0;
      for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
         l1_norm += fabs(A_diag_data[j]);
      if (num_cols_offd)
         for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
            l1_norm += fabs(A_offd_data[j]);

      if (l1_norm <= eps)
      {
         /* zero row: keep a unit diagonal, clear everything else */
         for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
            if (A_diag_J[j] == i)
               A_diag_data[j] = 1.0;
            else
               A_diag_data[j] = 0.0;
         if (num_cols_offd)
            for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
               A_offd_data[j] = 0.0;
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRComputeL1Norms
 *
 * Compute the l1 norms of the rows of a given matrix, depending on
 * the option parameter:
 *
 * option 1 = Compute the l1 norm of the rows
 * option 2 = Compute the l1 norm of the (processor) off-diagonal
 *            part of the rows plus the diagonal of A
 * option 3 = Compute the l2 norm^2 of the rows
 * option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
 *            Smoothers for Ultra-Parallel Computing"
 *
 * The above computations are done in a CF manner, whenever the provided
 * cf_marker is not NULL.
 *--------------------------------------------------------------------------*/

/* Output: *l1_norm_ptr receives a newly allocated array of num_rows values
 * (HYPRE_MEMORY_SHARED).  The caller owns and must free it.  Norms are
 * negated for rows with a negative diagonal, and a zero norm raises a
 * hypre error.
 * NOTE(review): options 2, 4 and 5 read A_diag_data[A_diag_I[i]] as the
 * diagonal entry, i.e. they assume the diagonal is stored first in each
 * local row — confirm this hypre CSR convention holds for the callers. */
HYPRE_Int hypre_ParCSRComputeL1Norms(hypre_ParCSRMatrix *A,
                                     HYPRE_Int option,
                                     HYPRE_Int *cf_marker,
                                     HYPRE_Real **l1_norm_ptr)
{
   HYPRE_Int i, j;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   HYPRE_Real diag;
   HYPRE_Real *l1_norm = hypre_TAlloc(HYPRE_Real, num_rows, HYPRE_MEMORY_SHARED);

   HYPRE_Int *cf_marker_offd = NULL;
   HYPRE_Int cf_diag;

   /* collect the cf marker data from other procs */
   if (cf_marker != NULL)
   {
      HYPRE_Int index;
      HYPRE_Int num_sends;
      HYPRE_Int start;
      HYPRE_Int *int_buf_data = NULL;

      hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      hypre_ParCSRCommHandle *comm_handle;

      if (num_cols_offd)
         cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends))
         int_buf_data = hypre_CTAlloc(HYPRE_Int,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                      HYPRE_MEMORY_HOST);
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, cf_marker_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   }

   if (option == 1)
   {
      for (i = 0; i < num_rows; i++)
      {
         l1_norm[i] = 0.0;
         if (cf_marker == NULL)
         {
            /* Add the l1 norm of the diag part of the ith row */
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               l1_norm[i] += fabs(A_diag_data[j]);
            /* Add the l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += fabs(A_offd_data[j]);
            }
         }
         else
         {
            cf_diag = cf_marker[i];
            /* Add the CF l1 norm of the diag part of the ith row */
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               if (cf_diag == cf_marker[A_diag_J[j]])
                  l1_norm[i] += fabs(A_diag_data[j]);
            /* Add the CF l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  if (cf_diag == cf_marker_offd[A_offd_J[j]])
                     l1_norm[i] += fabs(A_offd_data[j]);
            }
         }
      }
   }
   else if (option == 2)
   {
      for (i = 0; i < num_rows; i++)
      {
         /* Add the diag element of the ith row */
         l1_norm[i] = fabs(A_diag_data[A_diag_I[i]]);
         if (cf_marker == NULL)
         {
            /* Add the l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += fabs(A_offd_data[j]);
            }
         }
         else
         {
            cf_diag = cf_marker[i];
            /* Add the CF l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  if (cf_diag == cf_marker_offd[A_offd_J[j]])
                     l1_norm[i] += fabs(A_offd_data[j]);
            }
         }
      }
   }
   else if (option == 3)
   {
      for (i = 0; i < num_rows; i++)
      {
         l1_norm[i] = 0.0;
         for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
            l1_norm[i] += A_diag_data[j] * A_diag_data[j];
         if (num_cols_offd)
            for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
               l1_norm[i] += A_offd_data[j] * A_offd_data[j];
      }
   }
   else if (option == 4)
   {
      for (i = 0; i < num_rows; i++)
      {
         /* Add the diag element of the ith row */
         diag = l1_norm[i] = fabs(A_diag_data[A_diag_I[i]]);
         if (cf_marker == NULL)
         {
            /* Add the scaled l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += 0.5*fabs(A_offd_data[j]);
            }
         }
         else
         {
            cf_diag = cf_marker[i];
            /* Add the scaled CF l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  if (cf_diag == cf_marker_offd[A_offd_J[j]])
                     l1_norm[i] += 0.5*fabs(A_offd_data[j]);
            }
         }

         /* Truncate according to Remark 6.2 */
         if (l1_norm[i] <= 4.0/3.0*diag)
            l1_norm[i] = diag;
      }
   }
   else if (option == 5) /*stores diagonal of A for Jacobi using matvec, rlx 7 */
   {
      /* Early return: option 5 skips the sign/zero post-processing below. */
      for (i = 0; i < num_rows; i++)
      {
         diag = A_diag_data[A_diag_I[i]];
         if (diag != 0.0) l1_norm[i] = diag;
         else l1_norm[i] = 1.0;
      }
      *l1_norm_ptr = l1_norm;
      return hypre_error_flag;
   }

   /* Handle negative definite matrices */
   for (i = 0; i < num_rows; i++)
      if (A_diag_data[A_diag_I[i]] < 0)
         l1_norm[i] = -l1_norm[i];

   for (i = 0; i < num_rows; i++)
      /* if (fabs(l1_norm[i]) < DBL_EPSILON) */
      if (fabs(l1_norm[i]) == 0.0)
      {
         hypre_error_in_arg(1);
         break;
      }

   //for (i = 0; i < num_rows; i++) l1_norm[i]=1.0/l1_norm[i];
   hypre_TFree(cf_marker_offd, HYPRE_MEMORY_HOST);

   *l1_norm_ptr = l1_norm;
#ifdef HYPRE_USING_MAPPED_OPENMP_OFFLOAD
#pragma omp target enter data map(to:l1_norm[0:num_rows]) if (num_rows>0)
#endif
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixSetDiagRows
 *
 * For every row containing only a diagonal element: set it to d.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_ParCSRMatrixSetDiagRows(hypre_ParCSRMatrix *A, HYPRE_Real d)
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int  *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int  *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int  *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Int   num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Int   num_rows = hypre_ParCSRMatrixNumRows(A);
   HYPRE_Int   row, first;

   for (row = 0; row < num_rows; row++)
   {
      first = A_diag_I[row];

      /* A row qualifies only if its diag block holds exactly one entry... */
      if (A_diag_I[row+1] != first + 1)
         continue;
      /* ...that entry sits on the diagonal... */
      if (A_diag_J[first] != row)
         continue;
      /* ...and the offd block (when it exists) contributes nothing. */
      if (num_cols_offd && A_offd_I[row+1] != A_offd_I[row])
         continue;

      A_diag_data[first] = d;
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSCreate
 *
 * Allocate the AMS solver structure.
 *--------------------------------------------------------------------------*/

void * hypre_AMSCreate()
{
   hypre_AMSData *ams_data;

   /* Zero-initialized allocation: every pointer field starts NULL. */
   ams_data = hypre_CTAlloc(hypre_AMSData, 1, HYPRE_MEMORY_HOST);

   /* Default parameters */

   ams_data -> dim = 3;                /* 3D problem */
   ams_data -> maxit = 20;             /* perform at most 20 iterations */
   ams_data -> tol = 1e-6;             /* convergence tolerance */
   ams_data -> print_level = 1;        /* print residual norm at each step */
   ams_data -> cycle_type = 1;         /* a 3-level multiplicative solver */
   ams_data -> A_relax_type = 2;       /* offd-l1-scaled GS */
   ams_data -> A_relax_times = 1;      /* one relaxation sweep */
   ams_data -> A_relax_weight = 1.0;   /* damping parameter */
   ams_data -> A_omega = 1.0;          /* SSOR coefficient */
   ams_data -> A_cheby_order = 2;      /* Cheby: order (1-4 are valid) */
   ams_data -> A_cheby_fraction = .3;  /* Cheby: fraction of spectrum to smooth */

   /* AMG defaults for the scalar (beta/G) subspace solver B_G */
   ams_data -> B_G_coarsen_type = 10;  /* HMIS coarsening */
   ams_data -> B_G_agg_levels = 1;     /* Levels of aggressive coarsening */
   ams_data -> B_G_relax_type = 3;     /* hybrid G-S/Jacobi */
   ams_data -> B_G_theta = 0.25;       /* strength threshold */
   ams_data -> B_G_interp_type = 0;    /* interpolation type */
   ams_data -> B_G_Pmax = 0;           /* max nonzero elements in interp. rows */

   /* AMG defaults for the vector (alpha/Pi) subspace solver B_Pi */
   ams_data -> B_Pi_coarsen_type = 10; /* HMIS coarsening */
   ams_data -> B_Pi_agg_levels = 1;    /* Levels of aggressive coarsening */
   ams_data -> B_Pi_relax_type = 3;    /* hybrid G-S/Jacobi */
   ams_data -> B_Pi_theta = 0.25;      /* strength threshold */
   ams_data -> B_Pi_interp_type = 0;   /* interpolation type */
   ams_data -> B_Pi_Pmax = 0;          /* max nonzero elements in interp. rows */

   ams_data -> beta_is_zero = 0;       /* the problem has a mass term */

   /* By default, do l1-GS smoothing on the coarsest grid */
   ams_data -> B_G_coarse_relax_type  = 8;
   ams_data -> B_Pi_coarse_relax_type = 8;

   /* The rest of the fields are initialized using the Set functions */

   ams_data -> A    = NULL;
   ams_data -> G    = NULL;
   ams_data -> A_G  = NULL;
   ams_data -> B_G  = 0;
   ams_data -> Pi   = NULL;
   ams_data -> A_Pi = NULL;
   ams_data -> B_Pi = 0;

   ams_data -> x = NULL;
   ams_data -> y = NULL;
   ams_data -> z = NULL;

   ams_data -> Gx = NULL;
   ams_data -> Gy = NULL;
   ams_data -> Gz = NULL;

   /* Work vectors (allocated lazily in Setup/Solve) */
   ams_data -> r0 = NULL;
   ams_data -> g0 = NULL;
   ams_data -> r1 = NULL;
   ams_data -> g1 = NULL;
   ams_data -> r2 = NULL;
   ams_data -> g2 = NULL;

   ams_data -> Pix = NULL;
   ams_data -> Piy = NULL;
   ams_data -> Piz = NULL;

   ams_data -> A_Pix = NULL;
   ams_data -> A_Piy = NULL;
   ams_data -> A_Piz = NULL;

   ams_data -> B_Pix = 0;
   ams_data -> B_Piy = 0;
   ams_data -> B_Piz = 0;

   /* Zero-conductivity-region support */
   ams_data -> interior_nodes = NULL;
   ams_data -> G0 = NULL;
   ams_data -> A_G0 = NULL;
   ams_data -> B_G0 = 0;
   ams_data -> projection_frequency = 5;

   ams_data -> A_l1_norms = NULL;
   ams_data -> A_max_eig_est = 0;
   ams_data -> A_min_eig_est = 0;

   /* Ownership flags: decide what hypre_AMSDestroy is allowed to free. */
   ams_data -> owns_Pi   = 1;
   ams_data -> owns_A_G  = 0;
   ams_data -> owns_A_Pi = 0;

   return (void *) ams_data;
}

/*--------------------------------------------------------------------------
 * hypre_AMSDestroy
 *
 * Deallocate the AMS solver structure. Note that the input data (given
 * through the Set functions) is not destroyed.
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSDestroy(void *solver) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; if (!ams_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (ams_data -> owns_A_G) if (ams_data -> A_G) hypre_ParCSRMatrixDestroy(ams_data -> A_G); if (!ams_data -> beta_is_zero) if (ams_data -> B_G) HYPRE_BoomerAMGDestroy(ams_data -> B_G); if (ams_data -> owns_Pi && ams_data -> Pi) hypre_ParCSRMatrixDestroy(ams_data -> Pi); if (ams_data -> owns_A_Pi) if (ams_data -> A_Pi) hypre_ParCSRMatrixDestroy(ams_data -> A_Pi); if (ams_data -> B_Pi) HYPRE_BoomerAMGDestroy(ams_data -> B_Pi); if (ams_data -> owns_Pi && ams_data -> Pix) hypre_ParCSRMatrixDestroy(ams_data -> Pix); if (ams_data -> A_Pix) hypre_ParCSRMatrixDestroy(ams_data -> A_Pix); if (ams_data -> B_Pix) HYPRE_BoomerAMGDestroy(ams_data -> B_Pix); if (ams_data -> owns_Pi && ams_data -> Piy) hypre_ParCSRMatrixDestroy(ams_data -> Piy); if (ams_data -> A_Piy) hypre_ParCSRMatrixDestroy(ams_data -> A_Piy); if (ams_data -> B_Piy) HYPRE_BoomerAMGDestroy(ams_data -> B_Piy); if (ams_data -> owns_Pi && ams_data -> Piz) hypre_ParCSRMatrixDestroy(ams_data -> Piz); if (ams_data -> A_Piz) hypre_ParCSRMatrixDestroy(ams_data -> A_Piz); if (ams_data -> B_Piz) HYPRE_BoomerAMGDestroy(ams_data -> B_Piz); if (ams_data -> r0) hypre_ParVectorDestroy(ams_data -> r0); if (ams_data -> g0) hypre_ParVectorDestroy(ams_data -> g0); if (ams_data -> r1) hypre_ParVectorDestroy(ams_data -> r1); if (ams_data -> g1) hypre_ParVectorDestroy(ams_data -> g1); if (ams_data -> r2) hypre_ParVectorDestroy(ams_data -> r2); if (ams_data -> g2) hypre_ParVectorDestroy(ams_data -> g2); if (ams_data -> G0) hypre_ParCSRMatrixDestroy(ams_data -> A); if (ams_data -> G0) hypre_ParCSRMatrixDestroy(ams_data -> G0); if (ams_data -> A_G0) hypre_ParCSRMatrixDestroy(ams_data -> A_G0); if (ams_data -> B_G0) HYPRE_BoomerAMGDestroy(ams_data -> B_G0); if (ams_data -> A_l1_norms) 
hypre_TFree(ams_data -> A_l1_norms, HYPRE_MEMORY_SHARED); /* G, x, y ,z, Gx, Gy and Gz are not destroyed */ if (ams_data) hypre_TFree(ams_data, HYPRE_MEMORY_HOST); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetDimension * * Set problem dimension (2 or 3). By default we assume dim = 3. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetDimension(void *solver, HYPRE_Int dim) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; if (dim != 2 && dim != 3) hypre_error_in_arg(2); ams_data -> dim = dim; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetDiscreteGradient * * Set the discrete gradient matrix G. * This function should be called before hypre_AMSSetup()! *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetDiscreteGradient(void *solver, hypre_ParCSRMatrix *G) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> G = G; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetCoordinateVectors * * Set the x, y and z coordinates of the vertices in the mesh. * * Either SetCoordinateVectors or SetEdgeConstantVectors should be * called before hypre_AMSSetup()! *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetCoordinateVectors(void *solver, hypre_ParVector *x, hypre_ParVector *y, hypre_ParVector *z) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> x = x; ams_data -> y = y; ams_data -> z = z; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetEdgeConstantVectors * * Set the vectors Gx, Gy and Gz which give the representations of * the constant vector fields (1,0,0), (0,1,0) and (0,0,1) in the * edge element basis. 
 *
 * Either SetCoordinateVectors or SetEdgeConstantVectors should be
 * called before hypre_AMSSetup()!
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetEdgeConstantVectors(void *solver,
                                          hypre_ParVector *Gx,
                                          hypre_ParVector *Gy,
                                          hypre_ParVector *Gz)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   /* Stored by reference; the caller keeps ownership of the vectors. */
   ams_data -> Gx = Gx;
   ams_data -> Gy = Gy;
   ams_data -> Gz = Gz;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetInterpolations
 *
 * Set the (components of) the Nedelec interpolation matrix Pi=[Pix,Piy,Piz].
 *
 * This function is generally intended to be used only for high-order Nedelec
 * discretizations (in the lowest order case, Pi is constructed internally in
 * AMS from the discrete gradient matrix and the coordinates of the vertices),
 * though it can also be used in the lowest-order case or for other types of
 * discretizations (e.g. ones based on the second family of Nedelec elements).
 *
 * By definition, Pi is the matrix representation of the linear operator that
 * interpolates (high-order) vector nodal finite elements into the (high-order)
 * Nedelec space. The component matrices are defined as Pix phi = Pi (phi,0,0)
 * and similarly for Piy and Piz. Note that all these operators depend on the
 * choice of the basis and degrees of freedom in the high-order spaces.
 *
 * The column numbering of Pi should be node-based, i.e. the x/y/z components of
 * the first node (vertex or high-order dof) should be listed first, followed by
 * the x/y/z components of the second node and so on (see the documentation of
 * HYPRE_BoomerAMGSetDofFunc).
 *
 * If used, this function should be called before hypre_AMSSetup() and there is
 * no need to provide the vertex coordinates. Furthermore, only one of the sets
 * {Pi} and {Pix,Piy,Piz} needs to be specified (though it is OK to provide
 * both). If Pix is NULL, then scalar Pi-based AMS cycles, i.e.
those with
 * cycle_type > 10, will be unavailable. Similarly, AMS cycles based on
 * monolithic Pi (cycle_type < 10) require that Pi is not NULL.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetInterpolations(void *solver,
                                     hypre_ParCSRMatrix *Pi,
                                     hypre_ParCSRMatrix *Pix,
                                     hypre_ParCSRMatrix *Piy,
                                     hypre_ParCSRMatrix *Piz)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   /* Store the caller-supplied interpolation matrices (any may be NULL,
      subject to the cycle_type restrictions documented above). */
   ams_data -> Pi = Pi;
   ams_data -> Pix = Pix;
   ams_data -> Piy = Piy;
   ams_data -> Piz = Piz;
   /* The matrices remain owned by the caller: AMSDestroy will not free them. */
   ams_data -> owns_Pi = 0;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetAlphaPoissonMatrix
 *
 * Set the matrix corresponding to the Poisson problem with coefficient
 * alpha (the curl-curl term coefficient in the Maxwell problem).
 *
 * If this function is called, the coarse space solver on the range
 * of Pi^T is a block-diagonal version of A_Pi. If this function is not
 * called, the coarse space solver on the range of Pi^T is constructed
 * as Pi^T A Pi in hypre_AMSSetup().
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetAlphaPoissonMatrix(void *solver,
                                         hypre_ParCSRMatrix *A_Pi)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   ams_data -> A_Pi = A_Pi;

   /* Penalize the eliminated degrees of freedom.  Note: this MODIFIES the
      caller's matrix (diagonal-only rows are set to HYPRE_REAL_MAX). */
   hypre_ParCSRMatrixSetDiagRows(A_Pi, HYPRE_REAL_MAX);

   /* Make sure that the first entry in each row is the diagonal one. */
   /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_Pi)); */

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetBetaPoissonMatrix
 *
 * Set the matrix corresponding to the Poisson problem with coefficient
 * beta (the mass term coefficient in the Maxwell problem).
 *
 * This function call is optional - if not given, the Poisson matrix will
 * be computed in hypre_AMSSetup().
If the given matrix is NULL, we assume * that beta is 0 and use two-level (instead of three-level) methods. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetBetaPoissonMatrix(void *solver, hypre_ParCSRMatrix *A_G) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> A_G = A_G; if (!A_G) ams_data -> beta_is_zero = 1; else { /* Penalize the eliminated degrees of freedom */ hypre_ParCSRMatrixSetDiagRows(A_G, HYPRE_REAL_MAX); /* Make sure that the first entry in each row is the diagonal one. */ /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_G)); */ } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetInteriorNodes * * Set the list of nodes which are interior to the zero-conductivity region. * A node is interior if interior_nodes[i] == 1.0. * * Should be called before hypre_AMSSetup()! *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetInteriorNodes(void *solver, hypre_ParVector *interior_nodes) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> interior_nodes = interior_nodes; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetProjectionFrequency * * How often to project the r.h.s. onto the compatible sub-space Ker(G0^T), * when iterating with the solver. * * The default value is every 5th iteration. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetProjectionFrequency(void *solver, HYPRE_Int projection_frequency) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; ams_data -> projection_frequency = projection_frequency; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetMaxIter * * Set the maximum number of iterations in the three-level method. * The default value is 20. 
To use the AMS solver as a preconditioner,
 * set maxit to 1, tol to 0.0 and print_level to 0.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetMaxIter(void *solver, HYPRE_Int maxit)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   /* Overrides the default of 20 set in hypre_AMSCreate. */
   ams_data -> maxit = maxit;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetTol
 *
 * Set the convergence tolerance (if the method is used as a solver).
 * The default value is 1e-6.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetTol(void *solver, HYPRE_Real tol)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   /* Overrides the default of 1e-6 set in hypre_AMSCreate. */
   ams_data -> tol = tol;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetCycleType
 *
 * Choose which three-level solver to use. Possible values are:
 *
 *   1 = 3-level multipl. solver (01210)      <-- small solution time
 *   2 = 3-level additive solver (0+1+2)
 *   3 = 3-level multipl. solver (02120)
 *   4 = 3-level additive solver (010+2)
 *   5 = 3-level multipl. solver (0102010)    <-- small solution time
 *   6 = 3-level additive solver (1+020)
 *   7 = 3-level multipl. solver (0201020)    <-- small number of iterations
 *   8 = 3-level additive solver (0(1+2)0)    <-- small solution time
 *   9 = 3-level multipl. solver (01210) with discrete divergence
 *  11 = 5-level multipl. solver (013454310)  <-- small solution time, memory
 *  12 = 5-level additive solver (0+1+3+4+5)
 *  13 = 5-level multipl. solver (034515430)  <-- small solution time, memory
 *  14 = 5-level additive solver (01(3+4+5)10)
 *  20 = 2-level multipl. solver (0[12]0)
 *
 *   0 = a Hiptmair-like smoother (010)
 *
 * The default value is 1.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetCycleType(void *solver, HYPRE_Int cycle_type)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   /* See the table above for the meaning of each cycle_type value.
      No validation is performed here. */
   ams_data -> cycle_type = cycle_type;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetPrintLevel
 *
 * Control how much information is printed during the solution iterations.
 * The default value is 1 (print residual norm at each step).
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetPrintLevel(void *solver, HYPRE_Int print_level)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   /* 0 = silent; 1 = residual norm at each iteration. */
   ams_data -> print_level = print_level;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetSmoothingOptions
 *
 * Set relaxation parameters for A. Default values: 2, 1, 1.0, 1.0.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetSmoothingOptions(void *solver,
                                       HYPRE_Int A_relax_type,
                                       HYPRE_Int A_relax_times,
                                       HYPRE_Real A_relax_weight,
                                       HYPRE_Real A_omega)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   /* Relaxation scheme applied to the edge-space matrix A:
      type, number of sweeps, damping weight and SSOR coefficient. */
   ams_data -> A_relax_type = A_relax_type;
   ams_data -> A_relax_times = A_relax_times;
   ams_data -> A_relax_weight = A_relax_weight;
   ams_data -> A_omega = A_omega;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetChebySmoothingOptions
 *  AB: note: this could be added to the above,
 *      but I didn't want to change parameter list)
 * Set parameters for chebyshev smoother for A. Default values: 2,.3.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetChebySmoothingOptions(void *solver,
                                            HYPRE_Int A_cheby_order,
                                            HYPRE_Int A_cheby_fraction)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   ams_data -> A_cheby_order =  A_cheby_order;
   /* NOTE(review): the parameter is declared HYPRE_Int, yet the field's
      default in hypre_AMSCreate is the real value .3 — an integer argument
      here can only express 0, 1, 2, ... and truncates any fraction.
      Confirm whether this should be HYPRE_Real (changing it would alter the
      public prototype, so it is only flagged here). */
   ams_data -> A_cheby_fraction =  A_cheby_fraction;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetAlphaAMGOptions
 *
 * Set AMG parameters for B_Pi. Default values: 10, 1, 3, 0.25, 0, 0.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetAlphaAMGOptions(void *solver,
                                      HYPRE_Int B_Pi_coarsen_type,
                                      HYPRE_Int B_Pi_agg_levels,
                                      HYPRE_Int B_Pi_relax_type,
                                      HYPRE_Real B_Pi_theta,
                                      HYPRE_Int B_Pi_interp_type,
                                      HYPRE_Int B_Pi_Pmax)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   /* BoomerAMG options for the vector-space (alpha) solver B_Pi:
      coarsening, aggressive-coarsening levels, relaxation, strength
      threshold, interpolation type, and interpolation truncation. */
   ams_data -> B_Pi_coarsen_type = B_Pi_coarsen_type;
   ams_data -> B_Pi_agg_levels = B_Pi_agg_levels;
   ams_data -> B_Pi_relax_type = B_Pi_relax_type;
   ams_data -> B_Pi_theta = B_Pi_theta;
   ams_data -> B_Pi_interp_type = B_Pi_interp_type;
   ams_data -> B_Pi_Pmax = B_Pi_Pmax;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetAlphaAMGCoarseRelaxType
 *
 * Set the AMG coarsest level relaxation for B_Pi. Default value: 8.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetAlphaAMGCoarseRelaxType(void *solver,
                                              HYPRE_Int B_Pi_coarse_relax_type)
{
   hypre_AMSData *ams_data = (hypre_AMSData *)solver;
   /* Overrides the l1-GS (8) default chosen in hypre_AMSCreate. */
   ams_data -> B_Pi_coarse_relax_type = B_Pi_coarse_relax_type;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetBetaAMGOptions
 *
 * Set AMG parameters for B_G. Default values: 10, 1, 3, 0.25, 0, 0.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetBetaAMGOptions(void *solver,
                                     HYPRE_Int B_G_coarsen_type,
                                     HYPRE_Int B_G_agg_levels,
                                     HYPRE_Int B_G_relax_type,
                                     HYPRE_Real B_G_theta,
                                     HYPRE_Int B_G_interp_type,
                                     HYPRE_Int B_G_Pmax)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   /* BoomerAMG options for the scalar-space (beta) solver B_G; the
      parameters mirror hypre_AMSSetAlphaAMGOptions. */
   ams_data -> B_G_coarsen_type = B_G_coarsen_type;
   ams_data -> B_G_agg_levels = B_G_agg_levels;
   ams_data -> B_G_relax_type = B_G_relax_type;
   ams_data -> B_G_theta = B_G_theta;
   ams_data -> B_G_interp_type = B_G_interp_type;
   ams_data -> B_G_Pmax = B_G_Pmax;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSSetBetaAMGCoarseRelaxType
 *
 * Set the AMG coarsest level relaxation for B_G. Default value: 8.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSSetBetaAMGCoarseRelaxType(void *solver,
                                             HYPRE_Int B_G_coarse_relax_type)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   /* Overrides the l1-GS (8) default chosen in hypre_AMSCreate. */
   ams_data -> B_G_coarse_relax_type = B_G_coarse_relax_type;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSComputePi
 *
 * Construct the Pi interpolation matrix, which maps the space of vector
 * linear finite elements to the space of edge finite elements.
 *
 * The construction is based on the fact that Pi = [Pi_x, Pi_y, Pi_z],
 * where each block has the same sparsity structure as G, and the entries
 * can be computed from the vectors Gx, Gy, Gz.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSComputePi(hypre_ParCSRMatrix *A,
                             hypre_ParCSRMatrix *G,
                             hypre_ParVector *Gx,
                             hypre_ParVector *Gy,
                             hypre_ParVector *Gz,
                             HYPRE_Int dim,
                             hypre_ParCSRMatrix **Pi_ptr)
{
   hypre_ParCSRMatrix *Pi;

   /* Compute Pi = [Pi_x, Pi_y, Pi_z] */
   {
      HYPRE_Int i, j, d;

      HYPRE_Real *Gx_data, *Gy_data, *Gz_data;

      MPI_Comm comm = hypre_ParCSRMatrixComm(G);
      /* Pi has the rows of G and dim interleaved copies of G's columns. */
      HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
      HYPRE_BigInt global_num_cols = dim*hypre_ParCSRMatrixGlobalNumCols(G);
      HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
      HYPRE_BigInt *col_starts;
      HYPRE_Int col_starts_size;
      HYPRE_Int num_cols_offd = dim*hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
      HYPRE_Int num_nonzeros_diag = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
      HYPRE_Int num_nonzeros_offd = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
      HYPRE_BigInt *col_starts_G = hypre_ParCSRMatrixColStarts(G);
#ifdef HYPRE_NO_GLOBAL_PARTITION
      col_starts_size = 2;
#else
      HYPRE_Int num_procs;
      hypre_MPI_Comm_size(comm, &num_procs);
      col_starts_size = num_procs+1;
#endif
      /* Column partition of Pi = dim * the column partition of G.
         The array is owned by Pi (OwnsColStarts set below). */
      col_starts = hypre_TAlloc(HYPRE_BigInt, col_starts_size, HYPRE_MEMORY_HOST);
      for (i = 0; i < col_starts_size; i++)
         col_starts[i] = (HYPRE_BigInt)dim * col_starts_G[i];

      Pi = hypre_ParCSRMatrixCreate(comm,
                                    global_num_rows,
                                    global_num_cols,
                                    row_starts,
                                    col_starts,
                                    num_cols_offd,
                                    num_nonzeros_diag,
                                    num_nonzeros_offd);

      hypre_ParCSRMatrixOwnsData(Pi) = 1;
      hypre_ParCSRMatrixOwnsRowStarts(Pi) = 0;
      hypre_ParCSRMatrixOwnsColStarts(Pi) = 1;
      hypre_ParCSRMatrixInitialize(Pi);

      Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
      Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
      /* Gz is only dereferenced in 3D; in 2D, Gz_data stays unset but unused. */
      if (dim == 3)
         Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));

      /* Fill-in the diagonal part */
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);

         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pi_diag = hypre_ParCSRMatrixDiag(Pi);
         HYPRE_Int *Pi_diag_I = hypre_CSRMatrixI(Pi_diag);
         HYPRE_Int *Pi_diag_J = hypre_CSRMatrixJ(Pi_diag);
         HYPRE_Real *Pi_diag_data = hypre_CSRMatrixData(Pi_diag);

         /* Each G entry expands into dim consecutive Pi entries, so row
            offsets and column indices scale by dim. */
         for (i = 0; i < G_diag_nrows+1; i++)
            Pi_diag_I[i] = dim * G_diag_I[i];

         for (i = 0; i < G_diag_nnz; i++)
            for (d = 0; d < dim; d++)
               Pi_diag_J[dim*i+d] = dim*G_diag_J[i]+d;

         /* Pi_diag_data is written sequentially: x, y (and z) values per
            G entry, matching the interleaved column numbering above. */
         for (i = 0; i < G_diag_nrows; i++)
            for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
            {
               *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
               *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
               if (dim == 3)
                  *Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
            }
      }

      /* Fill-in the off-diagonal part */
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);

         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pi_offd = hypre_ParCSRMatrixOffd(Pi);
         HYPRE_Int *Pi_offd_I = hypre_CSRMatrixI(Pi_offd);
         HYPRE_Int *Pi_offd_J = hypre_CSRMatrixJ(Pi_offd);
         HYPRE_Real *Pi_offd_data = hypre_CSRMatrixData(Pi_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *Pi_cmap = hypre_ParCSRMatrixColMapOffd(Pi);

         /* The I array is only meaningful when the offd block is non-empty. */
         if (G_offd_ncols)
            for (i = 0; i < G_offd_nrows+1; i++)
               Pi_offd_I[i] = dim * G_offd_I[i];

         for (i = 0; i < G_offd_nnz; i++)
            for (d = 0; d < dim; d++)
               Pi_offd_J[dim*i+d] = dim*G_offd_J[i]+d;

         for (i = 0; i < G_offd_nrows; i++)
            for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
            {
               *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
               *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
               if (dim == 3)
                  *Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
            }

         /* Global off-processor column map, interleaved the same way. */
         for (i = 0; i < G_offd_ncols; i++)
            for (d = 0; d < dim; d++)
               Pi_cmap[dim*i+d] = (HYPRE_BigInt)dim*G_cmap[i]+(HYPRE_BigInt)d;
      }
   }

   *Pi_ptr = Pi;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSComputePixyz
 *
 * Construct the components Pix, Piy, Piz of the interpolation matrix Pi,
 * which maps the space of vector linear finite elements to the space of
 * edge finite elements.
 *
 * The construction is based on the fact that each component has the same
 * sparsity structure as G, and the entries can be computed from the vectors
 * Gx, Gy, Gz.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSComputePixyz(hypre_ParCSRMatrix *A,
                                hypre_ParCSRMatrix *G,
                                hypre_ParVector *Gx,
                                hypre_ParVector *Gy,
                                hypre_ParVector *Gz,
                                HYPRE_Int dim,
                                hypre_ParCSRMatrix **Pix_ptr,
                                hypre_ParCSRMatrix **Piy_ptr,
                                hypre_ParCSRMatrix **Piz_ptr)
{
   hypre_ParCSRMatrix *Pix, *Piy, *Piz;

   /* Compute Pix, Piy, Piz */
   {
      HYPRE_Int i, j;

      HYPRE_Real *Gx_data, *Gy_data, *Gz_data;

      MPI_Comm comm = hypre_ParCSRMatrixComm(G);
      /* Each component matrix has exactly the dimensions and sparsity of G;
         the partitioning arrays are shared with G (Owns*Starts = 0). */
      HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
      HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(G);
      HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G);
      HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(G);
      HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
      HYPRE_Int num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
      HYPRE_Int num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));

      Pix = hypre_ParCSRMatrixCreate(comm,
                                     global_num_rows,
                                     global_num_cols,
                                     row_starts,
                                     col_starts,
                                     num_cols_offd,
                                     num_nonzeros_diag,
                                     num_nonzeros_offd);
      hypre_ParCSRMatrixOwnsData(Pix) = 1;
      hypre_ParCSRMatrixOwnsRowStarts(Pix) = 0;
      hypre_ParCSRMatrixOwnsColStarts(Pix) = 0;
      hypre_ParCSRMatrixInitialize(Pix);

      Piy = hypre_ParCSRMatrixCreate(comm,
                                     global_num_rows,
                                     global_num_cols,
                                     row_starts,
                                     col_starts,
                                     num_cols_offd,
                                     num_nonzeros_diag,
                                     num_nonzeros_offd);
      hypre_ParCSRMatrixOwnsData(Piy) = 1;
      hypre_ParCSRMatrixOwnsRowStarts(Piy) = 0;
      hypre_ParCSRMatrixOwnsColStarts(Piy) = 0;
      hypre_ParCSRMatrixInitialize(Piy);

      /* Piz exists only in 3D; it is neither created nor returned in 2D. */
      if (dim == 3)
      {
         Piz = hypre_ParCSRMatrixCreate(comm,
                                        global_num_rows,
                                        global_num_cols,
                                        row_starts,
                                        col_starts,
                                        num_cols_offd,
                                        num_nonzeros_diag,
                                        num_nonzeros_offd);
         hypre_ParCSRMatrixOwnsData(Piz) = 1;
         hypre_ParCSRMatrixOwnsRowStarts(Piz) = 0;
         hypre_ParCSRMatrixOwnsColStarts(Piz) = 0;
         hypre_ParCSRMatrixInitialize(Piz);
      }

      Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
      Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
      if (dim == 3)
         Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));

      /* Fill-in the diagonal part */
      if (dim == 3)
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);

         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
         HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
         HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
         HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);

         hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy);
         HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag);
         HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag);
         HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag);

         hypre_CSRMatrix *Piz_diag = hypre_ParCSRMatrixDiag(Piz);
         HYPRE_Int *Piz_diag_I = hypre_CSRMatrixI(Piz_diag);
         HYPRE_Int *Piz_diag_J = hypre_CSRMatrixJ(Piz_diag);
         HYPRE_Real *Piz_diag_data = hypre_CSRMatrixData(Piz_diag);

         /* Copy G's sparsity pattern verbatim into each component. */
         for (i = 0; i < G_diag_nrows+1; i++)
         {
            Pix_diag_I[i] = G_diag_I[i];
            Piy_diag_I[i] = G_diag_I[i];
            Piz_diag_I[i] = G_diag_I[i];
         }

         for (i = 0; i < G_diag_nnz; i++)
         {
            Pix_diag_J[i] = G_diag_J[i];
            Piy_diag_J[i] = G_diag_J[i];
            Piz_diag_J[i] = G_diag_J[i];
         }

         for (i = 0; i < G_diag_nrows; i++)
            for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
            {
               *Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
               *Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
               *Piz_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
            }
      }
      else
      {
         /* 2D: identical to the 3D branch, minus the z component. */
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
         HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
         HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);

         HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
         HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);

         hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
         HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
         HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
         HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);

         hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy);
         HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag);
         HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag);
         HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag);

         for (i = 0; i < G_diag_nrows+1; i++)
         {
            Pix_diag_I[i] = G_diag_I[i];
            Piy_diag_I[i] = G_diag_I[i];
         }

         for (i = 0; i < G_diag_nnz; i++)
         {
            Pix_diag_J[i] = G_diag_J[i];
            Piy_diag_J[i] = G_diag_J[i];
         }

         for (i = 0; i < G_diag_nrows; i++)
            for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
            {
               *Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
               *Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
            }
      }

      /* Fill-in the off-diagonal part */
      if (dim == 3)
      {
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);

         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
         HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
         HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
         HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);

         hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy);
         HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd);
         HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd);
         HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd);

         hypre_CSRMatrix *Piz_offd = hypre_ParCSRMatrixOffd(Piz);
         HYPRE_Int *Piz_offd_I = hypre_CSRMatrixI(Piz_offd);
         HYPRE_Int *Piz_offd_J = hypre_CSRMatrixJ(Piz_offd);
         HYPRE_Real *Piz_offd_data = hypre_CSRMatrixData(Piz_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);
         HYPRE_BigInt *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy);
         HYPRE_BigInt *Piz_cmap = hypre_ParCSRMatrixColMapOffd(Piz);

         /* The I array is only meaningful when the offd block is non-empty. */
         if (G_offd_ncols)
            for (i = 0; i < G_offd_nrows+1; i++)
            {
               Pix_offd_I[i] = G_offd_I[i];
               Piy_offd_I[i] = G_offd_I[i];
               Piz_offd_I[i] = G_offd_I[i];
            }

         for (i = 0; i < G_offd_nnz; i++)
         {
            Pix_offd_J[i] = G_offd_J[i];
            Piy_offd_J[i] = G_offd_J[i];
            Piz_offd_J[i] = G_offd_J[i];
         }

         for (i = 0; i < G_offd_nrows; i++)
            for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
            {
               *Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
               *Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
               *Piz_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
            }

         for (i = 0; i < G_offd_ncols; i++)
         {
            Pix_cmap[i] = G_cmap[i];
            Piy_cmap[i] = G_cmap[i];
            Piz_cmap[i] = G_cmap[i];
         }
      }
      else
      {
         /* 2D: identical to the 3D branch, minus the z component. */
         hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
         HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
         HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
         HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);

         HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
         HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
         HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);

         hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
         HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
         HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
         HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);

         hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy);
         HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd);
         HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd);
         HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd);

         HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
         HYPRE_BigInt *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);
         HYPRE_BigInt *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy);

         if (G_offd_ncols)
            for (i = 0; i < G_offd_nrows+1; i++)
            {
               Pix_offd_I[i] = G_offd_I[i];
               Piy_offd_I[i] = G_offd_I[i];
            }

         for (i = 0; i < G_offd_nnz; i++)
         {
            Pix_offd_J[i] = G_offd_J[i];
            Piy_offd_J[i] = G_offd_J[i];
         }

         for (i = 0; i < G_offd_nrows; i++)
            for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
            {
               *Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
               *Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
            }

         for (i = 0; i < G_offd_ncols; i++)
         {
            Pix_cmap[i] = G_cmap[i];
            Piy_cmap[i] = G_cmap[i];
         }
      }
   }

   *Pix_ptr = Pix;
   *Piy_ptr = Piy;
   /* Piz was only constructed in 3D; *Piz_ptr is left untouched in 2D. */
   if (dim == 3)
      *Piz_ptr = Piz;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSComputeGPi
 *
 * Construct the matrix [G,Pi] which can be considered an interpolation
 * matrix from S_h^4 (4 copies of the scalar linear finite element space)
 * to the edge finite elements space.
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSComputeGPi(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *G, hypre_ParVector *Gx, hypre_ParVector *Gy, hypre_ParVector *Gz, HYPRE_Int dim, hypre_ParCSRMatrix **GPi_ptr) { hypre_ParCSRMatrix *GPi; /* Take into account G */ dim++; /* Compute GPi = [Pi_x, Pi_y, Pi_z, G] */ { HYPRE_Int i, j, d; HYPRE_Real *Gx_data, *Gy_data, *Gz_data; MPI_Comm comm = hypre_ParCSRMatrixComm(G); HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G); HYPRE_BigInt global_num_cols = dim*hypre_ParCSRMatrixGlobalNumCols(G); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(G); HYPRE_BigInt *col_starts; HYPRE_Int col_starts_size; HYPRE_Int num_cols_offd = dim*hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G)); HYPRE_Int num_nonzeros_diag = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G)); HYPRE_Int num_nonzeros_offd = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G)); HYPRE_BigInt *col_starts_G = hypre_ParCSRMatrixColStarts(G); #ifdef HYPRE_NO_GLOBAL_PARTITION col_starts_size = 2; #else HYPRE_Int num_procs; hypre_MPI_Comm_size(comm, &num_procs); col_starts_size = num_procs+1; #endif col_starts = hypre_TAlloc(HYPRE_BigInt, col_starts_size, HYPRE_MEMORY_HOST); for (i = 0; i < col_starts_size; i++) col_starts[i] = (HYPRE_BigInt) dim * col_starts_G[i]; GPi = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols, row_starts, col_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); hypre_ParCSRMatrixOwnsData(GPi) = 1; hypre_ParCSRMatrixOwnsRowStarts(GPi) = 0; hypre_ParCSRMatrixOwnsColStarts(GPi) = 1; hypre_ParCSRMatrixInitialize(GPi); Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx)); Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy)); if (dim == 4) Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz)); /* Fill-in the diagonal part */ { hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G); HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag); 
HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag); HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag); HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag); HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag); hypre_CSRMatrix *GPi_diag = hypre_ParCSRMatrixDiag(GPi); HYPRE_Int *GPi_diag_I = hypre_CSRMatrixI(GPi_diag); HYPRE_Int *GPi_diag_J = hypre_CSRMatrixJ(GPi_diag); HYPRE_Real *GPi_diag_data = hypre_CSRMatrixData(GPi_diag); for (i = 0; i < G_diag_nrows+1; i++) GPi_diag_I[i] = dim * G_diag_I[i]; for (i = 0; i < G_diag_nnz; i++) for (d = 0; d < dim; d++) GPi_diag_J[dim*i+d] = dim*G_diag_J[i]+d; for (i = 0; i < G_diag_nrows; i++) for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++) { *GPi_diag_data++ = G_diag_data[j]; *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i]; *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i]; if (dim == 4) *GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i]; } } /* Fill-in the off-diagonal part */ { hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G); HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd); HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd); HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd); HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd); HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd); HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd); hypre_CSRMatrix *GPi_offd = hypre_ParCSRMatrixOffd(GPi); HYPRE_Int *GPi_offd_I = hypre_CSRMatrixI(GPi_offd); HYPRE_Int *GPi_offd_J = hypre_CSRMatrixJ(GPi_offd); HYPRE_Real *GPi_offd_data = hypre_CSRMatrixData(GPi_offd); HYPRE_BigInt *G_cmap = hypre_ParCSRMatrixColMapOffd(G); HYPRE_BigInt *GPi_cmap = hypre_ParCSRMatrixColMapOffd(GPi); if (G_offd_ncols) for (i = 0; i < G_offd_nrows+1; i++) GPi_offd_I[i] = dim * G_offd_I[i]; for (i = 0; i < G_offd_nnz; i++) for (d = 0; d < dim; d++) GPi_offd_J[dim*i+d] = dim*G_offd_J[i]+d; for (i = 0; i < G_offd_nrows; i++) for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++) { *GPi_offd_data++ = G_offd_data[j]; 
*GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i]; *GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i]; if (dim == 4) *GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i]; } for (i = 0; i < G_offd_ncols; i++) for (d = 0; d < dim; d++) GPi_cmap[dim*i+d] = dim*G_cmap[i]+d; } } *GPi_ptr = GPi; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSetup * * Construct the AMS solver components. * * The following functions need to be called before hypre_AMSSetup(): * - hypre_AMSSetDimension() (if solving a 2D problem) * - hypre_AMSSetDiscreteGradient() * - hypre_AMSSetCoordinateVectors() or hypre_AMSSetEdgeConstantVectors *--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSetup(void *solver, hypre_ParCSRMatrix *A, hypre_ParVector *b, hypre_ParVector *x) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; HYPRE_Int input_info = 0; ams_data -> A = A; /* Modifications for problems with zero-conductivity regions */ if (ams_data -> interior_nodes) { hypre_ParCSRMatrix *G0t, *Aorig = A; /* Make sure that multiple Setup()+Solve() give identical results */ ams_data -> solve_counter = 0; /* Construct the discrete gradient matrix for the zero-conductivity region by eliminating the zero-conductivity nodes from G^t. The range of G0 represents the kernel of A, i.e. the gradients of nodal basis functions supported in zero-conductivity regions. 
*/ hypre_ParCSRMatrixTranspose(ams_data -> G, &G0t, 1); { HYPRE_Int i, j; HYPRE_Int nv = hypre_ParCSRMatrixNumCols(ams_data -> G); hypre_CSRMatrix *G0td = hypre_ParCSRMatrixDiag(G0t); HYPRE_Int *G0tdI = hypre_CSRMatrixI(G0td); HYPRE_Real *G0tdA = hypre_CSRMatrixData(G0td); hypre_CSRMatrix *G0to = hypre_ParCSRMatrixOffd(G0t); HYPRE_Int *G0toI = hypre_CSRMatrixI(G0to); HYPRE_Real *G0toA = hypre_CSRMatrixData(G0to); HYPRE_Real *interior_nodes_data=hypre_VectorData( hypre_ParVectorLocalVector((hypre_ParVector*) ams_data -> interior_nodes)); for (i = 0; i < nv; i++) { if (interior_nodes_data[i] != 1) { for (j = G0tdI[i]; j < G0tdI[i+1]; j++) G0tdA[j] = 0.0; if (G0toI) for (j = G0toI[i]; j < G0toI[i+1]; j++) G0toA[j] = 0.0; } } } hypre_ParCSRMatrixTranspose(G0t, & ams_data -> G0, 1); /* Construct the subspace matrix A_G0 = G0^T G0 */ ams_data -> A_G0 = hypre_ParMatmul(G0t, ams_data -> G0); hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G0); /* Create AMG solver for A_G0 */ HYPRE_BoomerAMGCreate(&ams_data -> B_G0); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G0, ams_data -> B_G_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G0, ams_data -> B_G_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G0, ams_data -> B_G_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G0, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G0, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_G0, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G0, 3); /* use just a few V-cycles */ HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G0, ams_data -> B_G_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_G0, ams_data -> B_G_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G0, ams_data -> B_G_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G0, 2); /* don't coarsen to 0 */ /* Generally, don't use exact solve on the coarsest level (matrix may be singular) */ HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G0, ams_data -> B_G_coarse_relax_type, 3); HYPRE_BoomerAMGSetup(ams_data -> B_G0, 
(HYPRE_ParCSRMatrix)ams_data -> A_G0, 0, 0); /* Construct the preconditioner for ams_data->A = A + G0 G0^T. NOTE: this can be optimized significantly by taking into account that the sparsity pattern of A is subset of the sparsity pattern of G0 G0^T */ { hypre_ParCSRMatrix *A = hypre_ParMatmul(ams_data -> G0, G0t); hypre_ParCSRMatrix *B = Aorig; hypre_ParCSRMatrix **C_ptr = &ams_data -> A; hypre_ParCSRMatrix *C; hypre_CSRMatrix *A_local, *B_local, *C_local, *C_tmp; MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A); HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)); HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A)); HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A)); HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B)); HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B)); HYPRE_Int B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B)); A_local = hypre_MergeDiagAndOffd(A); B_local = hypre_MergeDiagAndOffd(B); /* scale (penalize) G0 G0^T before adding it to the matrix */ { HYPRE_Int i, nnz = hypre_CSRMatrixNumNonzeros(A_local); HYPRE_Real *data = hypre_CSRMatrixData(A_local); HYPRE_Real *dataB = hypre_CSRMatrixData(B_local); HYPRE_Int nnzB = hypre_CSRMatrixNumNonzeros(B_local); HYPRE_Real factor, lfactor; lfactor = -1; for (i = 0; i < nnzB; i++) if (fabs(dataB[i]) > lfactor) lfactor = fabs(dataB[i]); lfactor *= 1e-10; /* scaling factor: max|A_ij|*1e-10 */ hypre_MPI_Allreduce(&lfactor, &factor, 1, HYPRE_MPI_REAL, hypre_MPI_MAX, hypre_ParCSRMatrixComm(A)); for (i = 0; i < nnz; i++) data[i] *= factor; } C_tmp = hypre_CSRMatrixBigAdd(A_local, B_local); 
hypre_CSRMatrixBigJtoJ(C_tmp); C_local = hypre_CSRMatrixDeleteZeros(C_tmp,0.0); if (C_local) hypre_CSRMatrixDestroy(C_tmp); else C_local = C_tmp; C = hypre_ParCSRMatrixCreate (comm, global_num_rows, global_num_cols, row_starts, col_starts, A_num_cols_offd + B_num_cols_offd, A_num_nonzeros_diag + B_num_nonzeros_diag, A_num_nonzeros_offd + B_num_nonzeros_offd); GenerateDiagAndOffd(C_local, C, hypre_ParCSRMatrixFirstColDiag(A), hypre_ParCSRMatrixLastColDiag(A)); hypre_ParCSRMatrixOwnsRowStarts(C) = 0; hypre_ParCSRMatrixOwnsColStarts(C) = 1; hypre_ParCSRMatrixOwnsColStarts(G0t) = 0; hypre_CSRMatrixDestroy(A_local); hypre_CSRMatrixDestroy(B_local); hypre_CSRMatrixDestroy(C_local); hypre_ParCSRMatrixDestroy(A); *C_ptr = C; } hypre_ParCSRMatrixDestroy(G0t); } /* Make sure that the first entry in each row is the diagonal one. */ /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(ams_data -> A)); */ /* Compute the l1 norm of the rows of A */ if (ams_data -> A_relax_type >= 1 && ams_data -> A_relax_type <= 4) hypre_ParCSRComputeL1Norms(ams_data -> A, ams_data -> A_relax_type, NULL, &ams_data -> A_l1_norms); /* Chebyshev? 
*/ if (ams_data -> A_relax_type == 16) { hypre_ParCSRMaxEigEstimateCG(ams_data->A, 1, 10, &ams_data->A_max_eig_est, &ams_data->A_min_eig_est); } /* If not given, compute Gx, Gy and Gz */ { if (ams_data -> x != NULL && ams_data -> y != NULL && (ams_data -> dim == 2 || ams_data -> z != NULL)) input_info = 1; if (ams_data -> Gx != NULL && ams_data -> Gy != NULL && (ams_data -> dim == 2 || ams_data -> Gz != NULL)) input_info = 2; if (input_info == 1) { ams_data -> Gx = hypre_ParVectorInRangeOf(ams_data -> G); hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> x, 0.0, ams_data -> Gx); ams_data -> Gy = hypre_ParVectorInRangeOf(ams_data -> G); hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> y, 0.0, ams_data -> Gy); if (ams_data -> dim == 3) { ams_data -> Gz = hypre_ParVectorInRangeOf(ams_data -> G); hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> z, 0.0, ams_data -> Gz); } } } if (ams_data -> Pi == NULL && ams_data -> Pix == NULL) { if (ams_data -> cycle_type == 20) /* Construct the combined interpolation matrix [G,Pi] */ hypre_AMSComputeGPi(ams_data -> A, ams_data -> G, ams_data -> Gx, ams_data -> Gy, ams_data -> Gz, ams_data -> dim, &ams_data -> Pi); else if (ams_data -> cycle_type > 10) /* Construct Pi{x,y,z} instead of Pi = [Pix,Piy,Piz] */ hypre_AMSComputePixyz(ams_data -> A, ams_data -> G, ams_data -> Gx, ams_data -> Gy, ams_data -> Gz, ams_data -> dim, &ams_data -> Pix, &ams_data -> Piy, &ams_data -> Piz); else /* Construct the Pi interpolation matrix */ hypre_AMSComputePi(ams_data -> A, ams_data -> G, ams_data -> Gx, ams_data -> Gy, ams_data -> Gz, ams_data -> dim, &ams_data -> Pi); } /* Keep Gx, Gy and Gz only if use the method with discrete divergence stabilization (where we use them to compute the local mesh size). 
*/ if (input_info == 1 && ams_data -> cycle_type != 9) { hypre_ParVectorDestroy(ams_data -> Gx); hypre_ParVectorDestroy(ams_data -> Gy); if (ams_data -> dim == 3) hypre_ParVectorDestroy(ams_data -> Gz); } /* Create the AMG solver on the range of G^T */ if (!ams_data -> beta_is_zero && ams_data -> cycle_type != 20) { HYPRE_BoomerAMGCreate(&ams_data -> B_G); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G, ams_data -> B_G_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G, ams_data -> B_G_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G, ams_data -> B_G_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_G, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G, ams_data -> B_G_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_G, ams_data -> B_G_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G, ams_data -> B_G_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G, 2); /* don't coarsen to 0 */ /* Generally, don't use exact solve on the coarsest level (matrix may be singular) */ HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G, ams_data -> B_G_coarse_relax_type, 3); if (ams_data -> cycle_type == 0) HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 2); /* If not given, construct the coarse space matrix by RAP */ if (!ams_data -> A_G) { HYPRE_Int G_owned_col_starts; if (!hypre_ParCSRMatrixCommPkg(ams_data -> G)) hypre_MatvecCommPkgCreate(ams_data -> G); if (!hypre_ParCSRMatrixCommPkg(ams_data -> A)) hypre_MatvecCommPkgCreate(ams_data -> A); G_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> G); hypre_BoomerAMGBuildCoarseOperator(ams_data -> G, ams_data -> A, ams_data -> G, &ams_data -> A_G); /* Make sure that A_G has no zero rows (this can happen if beta is zero in part of the domain). 
*/ hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G); hypre_ParCSRMatrixOwnsColStarts(ams_data -> G) = G_owned_col_starts; hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_G) = 0; ams_data -> owns_A_G = 1; } HYPRE_BoomerAMGSetup(ams_data -> B_G, (HYPRE_ParCSRMatrix)ams_data -> A_G, 0, 0); } if (ams_data -> cycle_type > 10 && ams_data -> cycle_type != 20) /* Create the AMG solvers on the range of Pi{x,y,z}^T */ { HYPRE_Int P_owned_col_starts; HYPRE_BoomerAMGCreate(&ams_data -> B_Pix); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pix, ams_data -> B_Pi_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pix, ams_data -> B_Pi_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pix, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_Pix, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pix, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pix, ams_data -> B_Pi_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pix, ams_data -> B_Pi_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pix, ams_data -> B_Pi_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pix, 2); HYPRE_BoomerAMGCreate(&ams_data -> B_Piy); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piy, ams_data -> B_Pi_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piy, ams_data -> B_Pi_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piy, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_Piy, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piy, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piy, ams_data -> B_Pi_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piy, ams_data -> B_Pi_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piy, ams_data -> B_Pi_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piy, 2); HYPRE_BoomerAMGCreate(&ams_data 
-> B_Piz); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piz, ams_data -> B_Pi_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piz, ams_data -> B_Pi_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piz, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_Piz, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piz, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piz, ams_data -> B_Pi_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piz, ams_data -> B_Pi_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piz, ams_data -> B_Pi_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piz, 2); /* Generally, don't use exact solve on the coarsest level (matrices may be singular) */ HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_coarse_relax_type, 3); HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_coarse_relax_type, 3); HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_coarse_relax_type, 3); if (ams_data -> cycle_type == 0) { HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 2); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 2); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 2); } /* Construct the coarse space matrices by RAP */ if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pix)) hypre_MatvecCommPkgCreate(ams_data -> Pix); P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Pix); hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pix, ams_data -> A, ams_data -> Pix, &ams_data -> A_Pix); if (!P_owned_col_starts) { hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Pix) = 0; hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Pix) = 0; } /* Make sure that A_Pix has no zero rows (this can happen for some kinds of boundary conditions with contact). 
*/ hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pix); HYPRE_BoomerAMGSetup(ams_data -> B_Pix, (HYPRE_ParCSRMatrix)ams_data -> A_Pix, 0, 0); if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piy)) hypre_MatvecCommPkgCreate(ams_data -> Piy); P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Piy); hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piy, ams_data -> A, ams_data -> Piy, &ams_data -> A_Piy); if (!P_owned_col_starts) { hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Piy) = 0; hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Piy) = 0; } /* Make sure that A_Piy has no zero rows (this can happen for some kinds of boundary conditions with contact). */ hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piy); HYPRE_BoomerAMGSetup(ams_data -> B_Piy, (HYPRE_ParCSRMatrix)ams_data -> A_Piy, 0, 0); if (ams_data -> Piz) { if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piz)) hypre_MatvecCommPkgCreate(ams_data -> Piz); P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Piz); hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piz, ams_data -> A, ams_data -> Piz, &ams_data -> A_Piz); if (!P_owned_col_starts) { hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Piz) = 0; hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Piz) = 0; } /* Make sure that A_Piz has no zero rows (this can happen for some kinds of boundary conditions with contact). 
*/ hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piz); HYPRE_BoomerAMGSetup(ams_data -> B_Piz, (HYPRE_ParCSRMatrix)ams_data -> A_Piz, 0, 0); } } else /* Create the AMG solver on the range of Pi^T */ { HYPRE_BoomerAMGCreate(&ams_data -> B_Pi); HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pi, ams_data -> B_Pi_coarsen_type); HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pi, ams_data -> B_Pi_agg_levels); HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pi, ams_data -> B_Pi_relax_type); HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pi, 1); HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 25); HYPRE_BoomerAMGSetTol(ams_data -> B_Pi, 0.0); HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pi, 1); HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pi, ams_data -> B_Pi_theta); HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pi, ams_data -> B_Pi_interp_type); HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pi, ams_data -> B_Pi_Pmax); HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pi, 2); /* don't coarsen to 0 */ /* Generally, don't use exact solve on the coarsest level (matrix may be singular) */ HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Pi, ams_data -> B_Pi_coarse_relax_type, 3); if (ams_data -> cycle_type == 0) HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 2); /* If not given, construct the coarse space matrix by RAP and notify BoomerAMG that this is a dim x dim block system. 
*/ if (!ams_data -> A_Pi) { HYPRE_Int P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Pi); if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pi)) hypre_MatvecCommPkgCreate(ams_data -> Pi); if (!hypre_ParCSRMatrixCommPkg(ams_data -> A)) hypre_MatvecCommPkgCreate(ams_data -> A); if (ams_data -> cycle_type == 9) { /* Add a discrete divergence term to A before computing Pi^t A Pi */ { hypre_ParCSRMatrix *Gt, *GGt, *ApGGt; hypre_ParCSRMatrixTranspose(ams_data -> G, &Gt, 1); hypre_ParCSRMatrixOwnsColStarts(Gt) = 0; hypre_ParCSRMatrixOwnsRowStarts(Gt) = 0; /* scale GGt by h^2 */ { HYPRE_Real h2; HYPRE_Int i, j, k, ne; hypre_CSRMatrix *Gt_diag = hypre_ParCSRMatrixDiag(Gt); HYPRE_Int Gt_num_rows = hypre_CSRMatrixNumRows(Gt_diag); HYPRE_Int *Gt_diag_I = hypre_CSRMatrixI(Gt_diag); HYPRE_Int *Gt_diag_J = hypre_CSRMatrixJ(Gt_diag); HYPRE_Real *Gt_diag_data = hypre_CSRMatrixData(Gt_diag); hypre_CSRMatrix *Gt_offd = hypre_ParCSRMatrixOffd(Gt); HYPRE_Int *Gt_offd_I = hypre_CSRMatrixI(Gt_offd); HYPRE_Real *Gt_offd_data = hypre_CSRMatrixData(Gt_offd); HYPRE_Real *Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gx)); HYPRE_Real *Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gy)); HYPRE_Real *Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gz)); for (i = 0; i < Gt_num_rows; i++) { /* determine the characteristic mesh size for vertex i */ h2 = 0.0; ne = 0; for (j = Gt_diag_I[i]; j < Gt_diag_I[i+1]; j++) { k = Gt_diag_J[j]; h2 += Gx_data[k]*Gx_data[k]+Gy_data[k]*Gy_data[k]+Gz_data[k]*Gz_data[k]; ne++; } if (ne != 0) { h2 /= ne; for (j = Gt_diag_I[i]; j < Gt_diag_I[i+1]; j++) Gt_diag_data[j] *= h2; for (j = Gt_offd_I[i]; j < Gt_offd_I[i+1]; j++) Gt_offd_data[j] *= h2; } } } /* we only needed Gx, Gy and Gz to compute the local mesh size */ if (input_info == 1) { hypre_ParVectorDestroy(ams_data -> Gx); hypre_ParVectorDestroy(ams_data -> Gy); if (ams_data -> dim == 3) hypre_ParVectorDestroy(ams_data -> Gz); } GGt = 
hypre_ParMatmul(ams_data -> G, Gt); hypre_ParCSRMatrixDestroy(Gt); /* hypre_ParCSRMatrixAdd(GGt, A, &ams_data -> A); */ { hypre_ParCSRMatrix *A = GGt; hypre_ParCSRMatrix *B = ams_data -> A; hypre_ParCSRMatrix **C_ptr = &ApGGt; hypre_ParCSRMatrix *C; hypre_CSRMatrix *A_local, *B_local, *C_local; MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A); HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)); HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A)); HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A)); HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B)); HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B)); HYPRE_Int B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B)); A_local = hypre_MergeDiagAndOffd(A); B_local = hypre_MergeDiagAndOffd(B); C_local = hypre_CSRMatrixAdd(A_local, B_local); C = hypre_ParCSRMatrixCreate (comm, global_num_rows, global_num_cols, row_starts, col_starts, A_num_cols_offd + B_num_cols_offd, A_num_nonzeros_diag + B_num_nonzeros_diag, A_num_nonzeros_offd + B_num_nonzeros_offd); GenerateDiagAndOffd(C_local, C, hypre_ParCSRMatrixFirstColDiag(A), hypre_ParCSRMatrixLastColDiag(A)); hypre_ParCSRMatrixOwnsRowStarts(C) = 0; hypre_ParCSRMatrixOwnsColStarts(C) = 0; hypre_CSRMatrixDestroy(A_local); hypre_CSRMatrixDestroy(B_local); hypre_CSRMatrixDestroy(C_local); *C_ptr = C; } hypre_ParCSRMatrixDestroy(GGt); hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi, ApGGt, ams_data -> Pi, &ams_data -> A_Pi); } } else { hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi, ams_data -> A, ams_data -> Pi, &ams_data -> A_Pi); } if (!P_owned_col_starts) { 
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Pi) = 0; hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Pi) = 0; } ams_data -> owns_A_Pi = 1; if (ams_data -> cycle_type != 20) HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim); else HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim + 1); /* HYPRE_BoomerAMGSetNodal(ams_data -> B_Pi, 1); */ } /* Make sure that A_Pi has no zero rows (this can happen for some kinds of boundary conditions with contact). */ hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pi); HYPRE_BoomerAMGSetup(ams_data -> B_Pi, (HYPRE_ParCSRMatrix)ams_data -> A_Pi, 0, 0); } /* Allocate temporary vectors */ ams_data -> r0 = hypre_ParVectorInRangeOf(ams_data -> A); ams_data -> g0 = hypre_ParVectorInRangeOf(ams_data -> A); if (ams_data -> A_G) { ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_G); ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_G); } if (ams_data -> r1 == NULL && ams_data -> A_Pix) { ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix); ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix); } if (ams_data -> Pi) { ams_data -> r2 = hypre_ParVectorInDomainOf(ams_data -> Pi); ams_data -> g2 = hypre_ParVectorInDomainOf(ams_data -> Pi); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSSolve * * Solve the system A x = b. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSSolve(void *solver, hypre_ParCSRMatrix *A, hypre_ParVector *b, hypre_ParVector *x) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; HYPRE_Int i, my_id = -1; HYPRE_Real r0_norm, r_norm, b_norm, relative_resid = 0, old_resid; char cycle[30]; hypre_ParCSRMatrix *Ai[5], *Pi[5]; HYPRE_Solver Bi[5]; HYPRE_PtrToSolverFcn HBi[5]; hypre_ParVector *ri[5], *gi[5]; hypre_ParVector *z = NULL; Ai[0] = ams_data -> A_G; Pi[0] = ams_data -> G; Ai[1] = ams_data -> A_Pi; Pi[1] = ams_data -> Pi; Ai[2] = ams_data -> A_Pix; Pi[2] = ams_data -> Pix; Ai[3] = ams_data -> A_Piy; Pi[3] = ams_data -> Piy; Ai[4] = ams_data -> A_Piz; Pi[4] = ams_data -> Piz; Bi[0] = ams_data -> B_G; HBi[0] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve; Bi[1] = ams_data -> B_Pi; HBi[1] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGBlockSolve; Bi[2] = ams_data -> B_Pix; HBi[2] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve; Bi[3] = ams_data -> B_Piy; HBi[3] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve; Bi[4] = ams_data -> B_Piz; HBi[4] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve; ri[0] = ams_data -> r1; gi[0] = ams_data -> g1; ri[1] = ams_data -> r2; gi[1] = ams_data -> g2; ri[2] = ams_data -> r1; gi[2] = ams_data -> g1; ri[3] = ams_data -> r1; gi[3] = ams_data -> g1; ri[4] = ams_data -> r1; gi[4] = ams_data -> g1; /* may need to create an additional temporary vector for relaxation */ if (hypre_NumThreads() > 1 || ams_data -> A_relax_type == 16) { z = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A), hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixRowStarts(A)); hypre_ParVectorInitialize(z); hypre_ParVectorSetPartitioningOwner(z,0); } if (ams_data -> print_level > 0) hypre_MPI_Comm_rank(hypre_ParCSRMatrixComm(A), &my_id); /* Compatible subspace projection for problems with zero-conductivity regions. Note that this modifies the input (r.h.s.) vector b! 
*/ if ( (ams_data -> B_G0) && (++ams_data->solve_counter % ( ams_data -> projection_frequency ) == 0) ) { /* hypre_printf("Projecting onto the compatible subspace...\n"); */ hypre_AMSProjectOutGradients(ams_data, b); } if (ams_data -> beta_is_zero) { switch (ams_data -> cycle_type) { case 0: hypre_sprintf(cycle,"%s","0"); break; case 1: case 3: case 5: case 7: default: hypre_sprintf(cycle,"%s","020"); break; case 2: case 4: case 6: case 8: hypre_sprintf(cycle,"%s","(0+2)"); break; case 11: case 13: hypre_sprintf(cycle,"%s","0345430"); break; case 12: hypre_sprintf(cycle,"%s","(0+3+4+5)"); break; case 14: hypre_sprintf(cycle,"%s","0(+3+4+5)0"); break; } } else { switch (ams_data -> cycle_type) { case 0: hypre_sprintf(cycle,"%s","010"); break; case 1: default: hypre_sprintf(cycle,"%s","01210"); break; case 2: hypre_sprintf(cycle,"%s","(0+1+2)"); break; case 3: hypre_sprintf(cycle,"%s","02120"); break; case 4: hypre_sprintf(cycle,"%s","(010+2)"); break; case 5: hypre_sprintf(cycle,"%s","0102010"); break; case 6: hypre_sprintf(cycle,"%s","(020+1)"); break; case 7: hypre_sprintf(cycle,"%s","0201020"); break; case 8: hypre_sprintf(cycle,"%s","0(+1+2)0"); break; case 9: hypre_sprintf(cycle,"%s","01210"); break; case 11: hypre_sprintf(cycle,"%s","013454310"); break; case 12: hypre_sprintf(cycle,"%s","(0+1+3+4+5)"); break; case 13: hypre_sprintf(cycle,"%s","034515430"); break; case 14: hypre_sprintf(cycle,"%s","01(+3+4+5)10"); break; case 20: hypre_sprintf(cycle,"%s","020"); break; } } for (i = 0; i < ams_data -> maxit; i++) { /* Compute initial residual norms */ if (ams_data -> maxit > 1 && i == 0) { hypre_ParVectorCopy(b, ams_data -> r0); hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0); r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0,ams_data -> r0)); r0_norm = r_norm; b_norm = sqrt(hypre_ParVectorInnerProd(b, b)); if (b_norm) relative_resid = r_norm / b_norm; else relative_resid = r_norm; if (my_id == 0 && ams_data -> print_level > 0) { 
hypre_printf(" relative\n"); hypre_printf(" residual factor residual\n"); hypre_printf(" -------- ------ --------\n"); hypre_printf(" Initial %e %e\n", r_norm, relative_resid); } } /* Apply the preconditioner */ hypre_ParCSRSubspacePrec(ams_data -> A, ams_data -> A_relax_type, ams_data -> A_relax_times, ams_data -> A_l1_norms, ams_data -> A_relax_weight, ams_data -> A_omega, ams_data -> A_max_eig_est, ams_data -> A_min_eig_est, ams_data -> A_cheby_order, ams_data -> A_cheby_fraction, Ai, Bi, HBi, Pi, ri, gi, b, x, ams_data -> r0, ams_data -> g0, cycle, z); /* Compute new residual norms */ if (ams_data -> maxit > 1) { old_resid = r_norm; hypre_ParVectorCopy(b, ams_data -> r0); hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0); r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0,ams_data -> r0)); if (b_norm) relative_resid = r_norm / b_norm; else relative_resid = r_norm; if (my_id == 0 && ams_data -> print_level > 0) hypre_printf(" Cycle %2d %e %f %e \n", i+1, r_norm, r_norm / old_resid, relative_resid); } if (relative_resid < ams_data -> tol) { i++; break; } } if (my_id == 0 && ams_data -> print_level > 0 && ams_data -> maxit > 1) hypre_printf("\n\n Average Convergence Factor = %f\n\n", pow((r_norm/r0_norm),(1.0/(HYPRE_Real) i))); ams_data -> num_iterations = i; ams_data -> rel_resid_norm = relative_resid; if (ams_data -> num_iterations == ams_data -> maxit && ams_data -> tol > 0.0) hypre_error(HYPRE_ERROR_CONV); if (z) hypre_ParVectorDestroy(z); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRSubspacePrec * * General subspace preconditioner for A0 y = x, based on ParCSR storage. * * P[i] and A[i] are the interpolation and coarse grid matrices for * the (i+1)'th subspace. B[i] is an AMG solver for A[i]. r[i] and g[i] * are temporary vectors. A0_* are the fine grid smoothing parameters. 
* * The default mode is multiplicative, '+' changes the next correction * to additive, based on residual computed at '('. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRSubspacePrec(/* fine space matrix */ hypre_ParCSRMatrix *A0, /* relaxation parameters */ HYPRE_Int A0_relax_type, HYPRE_Int A0_relax_times, HYPRE_Real *A0_l1_norms, HYPRE_Real A0_relax_weight, HYPRE_Real A0_omega, HYPRE_Real A0_max_eig_est, HYPRE_Real A0_min_eig_est, HYPRE_Int A0_cheby_order, HYPRE_Real A0_cheby_fraction, /* subspace matrices */ hypre_ParCSRMatrix **A, /* subspace preconditioners */ HYPRE_Solver *B, /* hypre solver functions for B */ HYPRE_PtrToSolverFcn *HB, /* subspace interpolations */ hypre_ParCSRMatrix **P, /* temporary subspace vectors */ hypre_ParVector **r, hypre_ParVector **g, /* right-hand side */ hypre_ParVector *x, /* current approximation */ hypre_ParVector *y, /* current residual */ hypre_ParVector *r0, /* temporary vector */ hypre_ParVector *g0, char *cycle, /* temporary vector */ hypre_ParVector *z) { char *op; HYPRE_Int use_saved_residual = 0; for (op = cycle; *op != '\0'; op++) { /* do nothing */ if (*op == ')') continue; /* compute the residual: r = x - Ay */ else if (*op == '(') { hypre_ParVectorCopy(x,r0); hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, r0); } /* switch to additive correction */ else if (*op == '+') { use_saved_residual = 1; continue; } /* smooth: y += S (x - Ay) */ else if (*op == '0') { hypre_ParCSRRelax(A0, x, A0_relax_type, A0_relax_times, A0_l1_norms, A0_relax_weight, A0_omega, A0_max_eig_est, A0_min_eig_est, A0_cheby_order, A0_cheby_fraction, y, g0, z); } /* subspace correction: y += P B^{-1} P^t r */ else { HYPRE_Int i = *op - '1'; if (i < 0) hypre_error_in_arg(16); /* skip empty subspaces */ if (!A[i]) continue; /* compute the residual? 
*/
         if (use_saved_residual)
         {
            /* additive correction: restrict the residual saved at '(' */
            use_saved_residual = 0;
            hypre_ParCSRMatrixMatvecT(1.0, P[i], r0, 0.0, r[i]);
         }
         else
         {
            /* multiplicative correction: restrict the current residual,
               r[i] = P[i]^t (x - A0 y) */
            hypre_ParVectorCopy(x,g0);
            hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, g0);
            hypre_ParCSRMatrixMatvecT(1.0, P[i], g0, 0.0, r[i]);
         }

         /* subspace solve with zero initial guess: g[i] ~ A[i]^{-1} r[i] */
         hypre_ParVectorSetConstantValues(g[i], 0.0);

         (*HB[i]) (B[i], (HYPRE_Matrix)A[i],
                   (HYPRE_Vector)r[i], (HYPRE_Vector)g[i]);

         /* prolongate and add the correction: y += P[i] g[i] */
         hypre_ParCSRMatrixMatvec(1.0, P[i], g[i], 0.0, g0);
         hypre_ParVectorAxpy(1.0, g0, y);
      }
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSGetNumIterations
 *
 * Get the number of AMS iterations (set by the last hypre_AMSSolve call).
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSGetNumIterations(void *solver,
                                    HYPRE_Int *num_iterations)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   *num_iterations = ams_data -> num_iterations;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSGetFinalRelativeResidualNorm
 *
 * Get the final relative residual norm in AMS (set by the last solve).
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSGetFinalRelativeResidualNorm(void *solver,
                                                HYPRE_Real *rel_resid_norm)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   *rel_resid_norm = ams_data -> rel_resid_norm;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSProjectOutGradients
 *
 * For problems with zero-conductivity regions, project the vector onto the
 * compatible subspace: x = (I - G0 (G0^t G0)^{-1} G0^T) x, where G0 is the
 * discrete gradient restricted to the interior nodes of the regions with
 * zero conductivity. This ensures that x is orthogonal to the gradients in
 * the range of G0.
 *
 * This function is typically called after the solution iteration is complete,
 * in order to facilitate the visualization of the computed field.
Without it
 * the values in the zero-conductivity regions contain kernel components.
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSProjectOutGradients(void *solver,
                                       hypre_ParVector *x)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   /* No-op unless the zero-conductivity solver B_G0 was set up */
   if (ams_data -> B_G0)
   {
      /* r1 = G0^t x : restriction of x onto the kernel gradient space */
      hypre_ParCSRMatrixMatvecT(1.0, ams_data -> G0, x, 0.0, ams_data -> r1);
      /* g1 ~ (G0^t G0)^{-1} r1, approximated by a BoomerAMG solve with a
         zero initial guess */
      hypre_ParVectorSetConstantValues(ams_data -> g1, 0.0);
      hypre_BoomerAMGSolve(ams_data -> B_G0, ams_data -> A_G0, ams_data -> r1, ams_data -> g1);
      /* x -= G0 g1 : remove the kernel component from x */
      hypre_ParCSRMatrixMatvec(1.0, ams_data -> G0, ams_data -> g1, 0.0, ams_data -> g0);
      hypre_ParVectorAxpy(-1.0, ams_data -> g0, x);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSConstructDiscreteGradient
 *
 * Construct and return the lowest-order discrete gradient matrix G, based on:
 * - a matrix on the edges (e.g. the stiffness matrix A)
 * - a vector on the vertices (e.g. the x coordinates)
 * - the array edge_vertex, which lists the global indexes of the
 *   vertices of the local edges.
 *
 * We assume that edge_vertex lists the edge vertices consecutively,
 * and that the orientation of all edges is consistent. More specifically:
 * If edge_orientation = 1, the edges are already oriented.
 * If edge_orientation = 2, the orientation of edge i depends only on the
 * sign of edge_vertex[2*i+1] - edge_vertex[2*i].
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSConstructDiscreteGradient(hypre_ParCSRMatrix *A, hypre_ParVector *x_coord, HYPRE_Int *edge_vertex, HYPRE_Int edge_orientation, hypre_ParCSRMatrix **G_ptr) { hypre_ParCSRMatrix *G; HYPRE_Int nedges; nedges = hypre_ParCSRMatrixNumRows(A); /* Construct the local part of G based on edge_vertex and the edge and vertex partitionings from A and x_coord */ { HYPRE_Int i, *I = hypre_CTAlloc(HYPRE_Int, nedges+1, HYPRE_MEMORY_HOST); HYPRE_Int part_size; HYPRE_BigInt *row_starts, *col_starts; HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2*nedges, HYPRE_MEMORY_HOST); hypre_CSRMatrix *local = hypre_CSRMatrixCreate (nedges, hypre_ParVectorGlobalSize(x_coord), 2*nedges); for (i = 0; i <= nedges; i++) I[i] = 2*i; if (edge_orientation == 1) { /* Assume that the edges are already oriented */ for (i = 0; i < 2*nedges; i+=2) { data[i] = -1.0; data[i+1] = 1.0; } } else if (edge_orientation == 2) { /* Assume that the edge orientation is based on the vertex indexes */ for (i = 0; i < 2*nedges; i+=2) { if (edge_vertex[i] < edge_vertex[i+1]) { data[i] = -1.0; data[i+1] = 1.0; } else { data[i] = 1.0; data[i+1] = -1.0; } } } else hypre_error_in_arg(4); hypre_CSRMatrixI(local) = I; hypre_CSRMatrixJ(local) = edge_vertex; hypre_CSRMatrixData(local) = data; hypre_CSRMatrixRownnz(local) = NULL; hypre_CSRMatrixOwnsData(local) = 1; hypre_CSRMatrixNumRownnz(local) = nedges; /* Copy partitioning from A and x_coord (previously they were re-used) */ #ifdef HYPRE_NO_GLOBAL_PARTITION part_size = 2; #else hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &part_size); part_size++; #endif row_starts = hypre_TAlloc(HYPRE_BigInt, part_size, HYPRE_MEMORY_HOST); col_starts = hypre_TAlloc(HYPRE_BigInt, part_size, HYPRE_MEMORY_HOST); for (i = 0; i < part_size; i++) { row_starts[i] = hypre_ParCSRMatrixRowStarts(A)[i]; col_starts[i] = hypre_ParVectorPartitioning(x_coord)[i]; } /* Generate the discrete gradient matrix */ G = 
hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A), hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParVectorGlobalSize(x_coord), row_starts, col_starts, 0, 0, 0); hypre_ParCSRMatrixOwnsRowStarts(G) = 1; hypre_ParCSRMatrixOwnsColStarts(G) = 1; GenerateDiagAndOffd(local, G, hypre_ParVectorFirstIndex(x_coord), hypre_ParVectorLastIndex(x_coord)); /* Account for empty rows in G. These may appear when A includes only the interior (non-Dirichlet b.c.) edges. */ { hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G); G_diag->num_cols = hypre_VectorSize(hypre_ParVectorLocalVector(x_coord)); } /* Free the local matrix */ hypre_CSRMatrixJ(local) = NULL; hypre_CSRMatrixDestroy(local); } *G_ptr = G; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_AMSFEISetup * * Construct an AMS solver object based on the following data: * * A - the edge element stiffness matrix * num_vert - number of vertices (nodes) in the processor * num_local_vert - number of vertices owned by the processor * vert_number - global indexes of the vertices in the processor * vert_coord - coordinates of the vertices in the processor * num_edges - number of edges owned by the processor * edge_vertex - the vertices of the edges owned by the processor. * Vertices are in local numbering (the same as in * vert_number), and edge orientation is always from * the first to the second vertex. * * Here we distinguish between vertices that belong to elements in the * current processor, and the subset of these vertices that is owned by * the processor. * * This function is written specifically for input from the FEI and should * be called before hypre_AMSSetup(). 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_AMSFEISetup(void *solver, hypre_ParCSRMatrix *A, hypre_ParVector *b, hypre_ParVector *x, HYPRE_Int num_vert, HYPRE_Int num_local_vert, HYPRE_BigInt *vert_number, HYPRE_Real *vert_coord, HYPRE_Int num_edges, HYPRE_Int *edge_vertex) { hypre_AMSData *ams_data = (hypre_AMSData *) solver; HYPRE_Int i, j; hypre_ParCSRMatrix *G; hypre_ParVector *x_coord, *y_coord, *z_coord; HYPRE_Real *x_data, *y_data, *z_data; MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_BigInt *vert_part, num_global_vert; HYPRE_BigInt vert_start, vert_end; HYPRE_BigInt big_local_vert = (HYPRE_BigInt) num_local_vert; HYPRE_BigInt *big_edge_vertex; /* Find the processor partitioning of the vertices */ #ifdef HYPRE_NO_GLOBAL_PARTITION vert_part = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); hypre_MPI_Scan(&big_local_vert, &vert_part[1], 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); vert_part[0] = vert_part[1] - big_local_vert; hypre_MPI_Allreduce(&big_local_vert, &num_global_vert, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); #else HYPRE_Int num_procs; hypre_MPI_Comm_size(comm, &num_procs); vert_part = hypre_TAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST); hypre_MPI_Allgather(&big_local_vert, 1, HYPRE_MPI_BIG_INT, &vert_part[1], 1, HYPRE_MPI_BIG_INT, comm); vert_part[0] = 0; for (i = 0; i < num_procs; i++) vert_part[i+1] += vert_part[i]; num_global_vert = vert_part[num_procs]; #endif /* Construct hypre parallel vectors for the vertex coordinates */ x_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part); hypre_ParVectorInitialize(x_coord); hypre_ParVectorOwnsData(x_coord) = 1; hypre_ParVectorOwnsPartitioning(x_coord) = 0; x_data = hypre_VectorData(hypre_ParVectorLocalVector(x_coord)); y_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part); hypre_ParVectorInitialize(y_coord); hypre_ParVectorOwnsData(y_coord) = 1; hypre_ParVectorOwnsPartitioning(y_coord) = 0; y_data = 
hypre_VectorData(hypre_ParVectorLocalVector(y_coord)); z_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part); hypre_ParVectorInitialize(z_coord); hypre_ParVectorOwnsData(z_coord) = 1; hypre_ParVectorOwnsPartitioning(z_coord) = 0; z_data = hypre_VectorData(hypre_ParVectorLocalVector(z_coord)); vert_start = hypre_ParVectorFirstIndex(x_coord); vert_end = hypre_ParVectorLastIndex(x_coord); /* Save coordinates of locally owned vertices */ for (i = 0; i < num_vert; i++) { if (vert_number[i] >= vert_start && vert_number[i] <= vert_end) { j = (HYPRE_Int)(vert_number[i] - vert_start); x_data[j] = vert_coord[3*i]; y_data[j] = vert_coord[3*i+1]; z_data[j] = vert_coord[3*i+2]; } } /* Change vertex numbers from local to global */ big_edge_vertex = hypre_CTAlloc(HYPRE_BigInt, 2*num_edges, HYPRE_MEMORY_HOST); for (i = 0; i < 2*num_edges; i++) big_edge_vertex[i] = vert_number[edge_vertex[i]]; /* Construct the local part of G based on edge_vertex */ { /* HYPRE_Int num_edges = hypre_ParCSRMatrixNumRows(A); */ HYPRE_Int *I = hypre_CTAlloc(HYPRE_Int, num_edges+1, HYPRE_MEMORY_HOST); HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2*num_edges, HYPRE_MEMORY_HOST); hypre_CSRMatrix *local = hypre_CSRMatrixCreate (num_edges, num_global_vert, 2*num_edges); for (i = 0; i <= num_edges; i++) I[i] = 2*i; /* Assume that the edge orientation is based on the vertex indexes */ for (i = 0; i < 2*num_edges; i+=2) { data[i] = 1.0; data[i+1] = -1.0; } hypre_CSRMatrixI(local) = I; hypre_CSRMatrixBigJ(local) = big_edge_vertex; hypre_CSRMatrixData(local) = data; hypre_CSRMatrixRownnz(local) = NULL; hypre_CSRMatrixOwnsData(local) = 1; hypre_CSRMatrixNumRownnz(local) = num_edges; G = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), num_global_vert, hypre_ParCSRMatrixRowStarts(A), vert_part, 0, 0, 0); hypre_ParCSRMatrixOwnsRowStarts(G) = 0; hypre_ParCSRMatrixOwnsColStarts(G) = 1; GenerateDiagAndOffd(local, G, vert_start, vert_end); hypre_CSRMatrixJ(local) = NULL; 
hypre_CSRMatrixDestroy(local);
   }

   hypre_TFree(big_edge_vertex, HYPRE_MEMORY_HOST);

   /* Hand the discrete gradient and the vertex coordinate vectors over to
      the AMS object; they are freed in hypre_AMSFEIDestroy(). */
   ams_data -> G = G;
   ams_data -> x = x_coord;
   ams_data -> y = y_coord;
   ams_data -> z = z_coord;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_AMSFEIDestroy
 *
 * Free the additional memory allocated in hypre_AMSFEISetup().
 *
 * This function is written specifically for input from the FEI and should
 * be called before hypre_AMSDestroy().
 *--------------------------------------------------------------------------*/

HYPRE_Int hypre_AMSFEIDestroy(void *solver)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   /* Destroy only the objects created by hypre_AMSFEISetup() */
   if (ams_data -> G)
      hypre_ParCSRMatrixDestroy(ams_data -> G);

   if (ams_data -> x)
      hypre_ParVectorDestroy(ams_data -> x);
   if (ams_data -> y)
      hypre_ParVectorDestroy(ams_data -> y);
   if (ams_data -> z)
      hypre_ParVectorDestroy(ams_data -> z);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRComputeL1Norms Threads
 *
 * Compute the l1 norms of the rows of a given matrix, depending on
 * the option parameter:
 *
 * option 1 = Compute the l1 norm of the rows
 * option 2 = Compute the l1 norm of the (processor) off-diagonal
 *            part of the rows plus the diagonal of A
 * option 3 = Compute the l2 norm^2 of the rows
 * option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
 *            Smoothers for Ultra-Parallel Computing"
 *
 * The above computations are done in a CF manner, whenever the provided
 * cf_marker is not NULL.
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRComputeL1NormsThreads(hypre_ParCSRMatrix *A, HYPRE_Int option, HYPRE_Int num_threads, HYPRE_Int *cf_marker, HYPRE_Real **l1_norm_ptr) { HYPRE_Int i, j, k; HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Real diag; HYPRE_Real *l1_norm = hypre_CTAlloc(HYPRE_Real, num_rows, HYPRE_MEMORY_SHARED); HYPRE_Int ii, ns, ne, rest, size; HYPRE_Int *cf_marker_offd = NULL; HYPRE_Int cf_diag; /* collect the cf marker data from other procs */ if (cf_marker != NULL) { HYPRE_Int index; HYPRE_Int num_sends; HYPRE_Int start; HYPRE_Int *int_buf_data = NULL; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; if (num_cols_offd) cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)) int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, cf_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); } #ifdef HYPRE_USING_OPENMP 
#pragma omp parallel for private(i,ii,j,k,ns,ne,rest,size,diag,cf_diag) HYPRE_SMP_SCHEDULE #endif for (k = 0; k < num_threads; k++) { size = num_rows/num_threads; rest = num_rows - size*num_threads; if (k < rest) { ns = k*size+k; ne = (k+1)*size+k+1; } else { ns = k*size+rest; ne = (k+1)*size+rest; } if (option == 1) { for (i = ns; i < ne; i++) { l1_norm[i] = 0.0; if (cf_marker == NULL) { /* Add the l1 norm of the diag part of the ith row */ for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) l1_norm[i] += fabs(A_diag_data[j]); /* Add the l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) l1_norm[i] += fabs(A_offd_data[j]); } } else { cf_diag = cf_marker[i]; /* Add the CF l1 norm of the diag part of the ith row */ for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) if (cf_diag == cf_marker[A_diag_J[j]]) l1_norm[i] += fabs(A_diag_data[j]); /* Add the CF l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) if (cf_diag == cf_marker_offd[A_offd_J[j]]) l1_norm[i] += fabs(A_offd_data[j]); } } } } else if (option == 2) { for (i = ns; i < ne; i++) { l1_norm[i] = 0.0; if (cf_marker == NULL) { /* Add the diagonal and the local off-thread part of the ith row */ for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) { ii = A_diag_J[j]; if (ii == i || ii < ns || ii >= ne) l1_norm[i] += fabs(A_diag_data[j]); } /* Add the l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) l1_norm[i] += fabs(A_offd_data[j]); } } else { cf_diag = cf_marker[i]; /* Add the diagonal and the local off-thread part of the ith row */ for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) { ii = A_diag_J[j]; if ((ii == i || ii < ns || ii >= ne) && (cf_diag == cf_marker[A_diag_J[j]])) l1_norm[i] += fabs(A_diag_data[j]); } /* Add the CF l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) if (cf_diag == 
cf_marker_offd[A_offd_J[j]]) l1_norm[i] += fabs(A_offd_data[j]); } } } } else if (option == 3) { for (i = ns; i < ne; i++) { l1_norm[i] = 0.0; for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) l1_norm[i] += A_diag_data[j] * A_diag_data[j]; if (num_cols_offd) for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) l1_norm[i] += A_offd_data[j] * A_offd_data[j]; } } else if (option == 4) { for (i = ns; i < ne; i++) { l1_norm[i] = 0.0; if (cf_marker == NULL) { /* Add the diagonal and the local off-thread part of the ith row */ for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) { ii = A_diag_J[j]; if (ii == i || ii < ns || ii >= ne) { if (ii == i) { diag = fabs(A_diag_data[j]); l1_norm[i] += fabs(A_diag_data[j]); } else l1_norm[i] += 0.5*fabs(A_diag_data[j]); } } /* Add the l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) l1_norm[i] += 0.5*fabs(A_offd_data[j]); } } else { cf_diag = cf_marker[i]; /* Add the diagonal and the local off-thread part of the ith row */ for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++) { ii = A_diag_J[j]; if ((ii == i || ii < ns || ii >= ne) && (cf_diag == cf_marker[A_diag_J[j]])) { if (ii == i) { diag = fabs(A_diag_data[j]); l1_norm[i] += fabs(A_diag_data[j]); } else l1_norm[i] += 0.5*fabs(A_diag_data[j]); } } /* Add the CF l1 norm of the offd part of the ith row */ if (num_cols_offd) { for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++) if (cf_diag == cf_marker_offd[A_offd_J[j]]) l1_norm[i] += 0.5*fabs(A_offd_data[j]); } } /* Truncate according to Remark 6.2 */ if (l1_norm[i] <= 4.0/3.0*diag) l1_norm[i] = diag; } } /* Handle negative definite matrices */ for (i = ns; i < ne; i++) if (A_diag_data[A_diag_I[i]] < 0) l1_norm[i] = -l1_norm[i]; for (i = ns; i < ne; i++) /* if (fabs(l1_norm[i]) < DBL_EPSILON) */ if (fabs(l1_norm[i]) == 0.0) { hypre_error_in_arg(1); break; } } hypre_TFree(cf_marker_offd, HYPRE_MEMORY_HOST); *l1_norm_ptr = l1_norm; #ifdef HYPRE_USING_MAPPED_OPENMP_OFFLOAD #pragma omp target enter data 
map(to:l1_norm[0:num_rows]) #endif return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRRelaxThreads * 1 = l1-scaled Jacobi * 2 = l1-scaled block Gauss-Seidel/SSOR *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRRelaxThreads(hypre_ParCSRMatrix *A, hypre_ParVector *f, HYPRE_Int relax_type, HYPRE_Int relax_times, HYPRE_Real *l1_norms, HYPRE_Real relax_weight, HYPRE_Real omega, hypre_ParVector *u, hypre_ParVector *Vtemp, hypre_ParVector *z) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Real *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Real *f_data = hypre_VectorData(f_local); hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp); HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local); HYPRE_Real *Vext_data; HYPRE_Real *v_buf_data; HYPRE_Real *tmp_data; HYPRE_Int i, j; HYPRE_Int ii, jj; HYPRE_Int ns, ne, size, rest; HYPRE_Int relax_error = 0; HYPRE_Int num_sends; HYPRE_Int index, start; HYPRE_Int num_procs, num_threads, my_id; HYPRE_Real zero = 0.0; HYPRE_Real res, res2; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_threads = hypre_NumThreads(); /* only allow jacobi and GS */ if (relax_type > 2) relax_type 
= 2; /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, v_buf_data, Vext_data); /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } if (relax_type == 1) /* Jacobi */ { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; res -= A_diag_data[jj] * Vtemp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += (relax_weight*res)/l1_norms[i]; } } } else if (relax_type == 2) /* GS */ { if (relax_weight == 1 && omega == 1) { tmp_data = hypre_CTAlloc(HYPRE_Real, n, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) tmp_data[i] = u_data[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } for (i = ne-1; i > ns-1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (A_diag_data[A_diag_i[i]] != zero) { res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += res / l1_norms[i]; } } } hypre_TFree(tmp_data, HYPRE_MEMORY_HOST); } else { HYPRE_Real c1 = omega*relax_weight; HYPRE_Real c2 = omega*(1.0-relax_weight); tmp_data = hypre_CTAlloc(HYPRE_Real, n, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n; i++) { tmp_data[i] = u_data[i]; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n/num_threads; rest = n - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. 
*-----------------------------------------------------------*/ if (A_diag_data[A_diag_i[i]] != zero) { res2 = 0.0; res = f_data[i]; Vtemp_data[i] = u_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; if (ii < i) res2 += A_diag_data[jj] * (Vtemp_data[ii] - u_data[ii]); } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += (c1*res + c2*res2) / l1_norms[i]; } } for (i = ne-1; i > ns-1; i--) /* interior points first */ { /*----------------------------------------------------------- * If diagonal is nonzero, relax point i; otherwise, skip it. *-----------------------------------------------------------*/ if (A_diag_data[A_diag_i[i]] != zero) { res2 = 0.0; res = f_data[i]; for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { ii = A_diag_j[jj]; if (ii >= ns && ii < ne) { res -= A_diag_data[jj] * u_data[ii]; if (ii > i) res2 += A_diag_data[jj] * (Vtemp_data[ii] - u_data[ii]); } else res -= A_diag_data[jj] * tmp_data[ii]; } for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { ii = A_offd_j[jj]; res -= A_offd_data[jj] * Vext_data[ii]; } u_data[i] += (c1*res + c2*res2) / l1_norms[i]; } } } hypre_TFree(tmp_data, HYPRE_MEMORY_HOST); } } /* end of Jacobi or G.S. */ if (num_procs > 1) { hypre_TFree(Vext_data, HYPRE_MEMORY_HOST); hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } return(relax_error); }
c_md.c
/* *********************************************************************** This program is part of the OpenMP Source Code Repository http://www.pcg.ull.es/ompscr/ e-mail: ompscr@etsii.ull.es This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License (LICENSE file) along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA FILE: c_md.c VERSION: 1.0 DATE: May 2004 AUTHOR: Bill Magro, Kuck and Associates, Inc. (KAI), 1998 COMMENTS TO: sande@csi.ull.es DESCRIPTION: This program implements a simple molecular dynamics simulation, using the velocity Verlet time integration scheme. The particles interact with a central pair potential. COMMENTS: REFERENCES: W. C. Swope and H. C. Andersen and P. H. Berens and K. R. Wilson A Computer Simulation Method for the Calculation of Equilibrium Constants for the Formation of Physical Clusters of Molecules: Application to Small Water Clusters Journal of Chemical Physics, 1982 vol. 
76 pg 637-649 BASIC PRAGMAS: parallel for USAGE: ./c_md.par 8192 10 INPUT: Number of particles Number of simulation steps OUTPUT: - FILE FORMATS: - RESTRICTIONS: - REVISION HISTORY: **************************************************************************/ //#include "OmpSCR.h" #include <math.h> #include <omp.h> // following added by sfsiegel due to use of "calloc": #include <stdlib.h> // following added by sfsiegel due to use of "printf": #include <stdio.h> #ifndef RAND_MAX #define RAND_MAX 0x7fff #endif #ifndef M_PI_2 #define M_PI_2 1.57079632679489661923 /* pi/2 */ #endif #define NUM_ARGS 2 #define NUM_TIMERS 1 #define DEFAULT_NPARTS 8192 #define DEFAULT_NSTEPS 10 #define USAGE_STR "NPARTS NSTEPS" #define NDIM 3 #define NPARTSINIT 10 #define NSTEPSINIT 4 int NPARTS; /* No. of particles */ int NSTEPS; /* No. of simulation steps */ typedef double vnd_t[NDIM]; /* ----------------------------------------------------------------------- PROTOTYPES * ----------------------------------------------------------------------- */ double v(double x); double dv(double x); void initialize(int np, int nd, vnd_t box, vnd_t *pos, vnd_t *vel, vnd_t *acc); double dist(int nd, vnd_t r1, vnd_t r2, vnd_t dr); double dot_prod(int n, vnd_t x,vnd_t y); void compute(int np, int nd, vnd_t *pos, vnd_t *vel, double mass, vnd_t *f, double *pot_p, double *kin_p); void update(int np, int nd, vnd_t *pos, vnd_t *vel, vnd_t *f, vnd_t *a, double mass, double dt); int main (int argc, char **argv); /* ----------------------------------------------------------------------- IMPLEMENTATION * ----------------------------------------------------------------------- */ /* ----------------------------------------------------------------------- statement function for the pair potential. This potential is a harmonic well which smoothly saturates to a maximum value at PI/2. 
* ----------------------------------------------------------------------- */
double v(double x) {
  /* harmonic well in sin^2(x), saturating at 1 for x >= pi/2 */
  if (x < M_PI_2)
    return pow(sin(x), 2.0);
  else
    return 1.0;
}

/* -----------------------------------------------------------------------
    statement function for the derivative of the pair potential
 * ----------------------------------------------------------------------- */
double dv(double x) {
  /* d/dx sin^2(x) = 2 sin(x) cos(x); zero past the saturation point, so
     the force vanishes where the potential is constant */
  if (x < M_PI_2)
    return 2.0 * sin(x) * cos(x);
  else
    return 0.0;
}

/* -----------------------------------------------------------------------
    Initialize the positions, velocities, and accelerations.

    Positions are spread deterministically inside 'box'; velocities and
    accelerations start at zero.
 * ----------------------------------------------------------------------- */
void initialize(int np, int nd, vnd_t box, vnd_t *pos, vnd_t *vel, vnd_t *acc) {
  int i, j;
  double x;

  //srand(4711L);
  /* deterministic counter replaces rand() so runs are reproducible */
  int r = 42; // REPLACE RANDOM NUMBER GENERATION
  for (i = 0; i < np; i++) {
    for (j = 0; j < nd; j++) {
      /* pseudo-random coordinate in [0,1), scaled to the box extent */
      x = (r++) % 10000 / (double)10000.0;
      pos[i][j] = box[j] * x;
      vel[i][j] = 0.0;
      acc[i][j] = 0.0;
    }
  }
}

/* -----------------------------------------------------------------------
    Compute the displacement vector (and its norm) between two particles.
* ----------------------------------------------------------------------- */ double dist(int nd, vnd_t r1, vnd_t r2, vnd_t dr) { int i; double d; d = 0.0; for (i = 0; i < nd; i++) { dr[i] = r1[i] - r2[i]; d += dr[i] * dr[i]; } return sqrt(d); } /* ----------------------------------------------------------------------- Return the dot product between two vectors of type double and length n * ----------------------------------------------------------------------- */ double dot_prod(int n, vnd_t x, vnd_t y) { int i; double t = 0.0; for (i = 0; i < n; i++) { t += x[i] * y[i]; } return t; } /* ----------------------------------------------------------------------- Compute the forces and energies, given positions, masses, and velocities * ----------------------------------------------------------------------- */ void compute(int np, int nd, vnd_t *pos, vnd_t *vel, double mass, vnd_t *f, double *pot_p, double *kin_p) { int i, j, k; vnd_t rij; double d; double pot, kin; pot = 0.0; kin = 0.0; /* The computation of forces and energies is fully parallel. 
*/ #pragma omp parallel for default(shared) private(i, j, k, rij, d) reduction(+ : pot, kin) for (i = 0; i < np; i++) { /* compute potential energy and forces */ for (j = 0; j < nd; j++) f[i][j] = 0.0; for (j = 0; j < np; j++) { if (i != j) { d = dist(nd, pos[i], pos[j], rij); /* attribute half of the potential energy to particle 'j' */ pot = pot + 0.5 * v(d); for (k = 0; k < nd; k++) { f[i][k] = f[i][k] - rij[k]* dv(d) /d; } } } /* compute kinetic energy */ kin = kin + dot_prod(nd, vel[i], vel[j]); } kin = kin * 0.5 * mass; *pot_p = pot; *kin_p = kin; } /* ----------------------------------------------------------------------- Perform the time integration, using a velocity Verlet algorithm * ----------------------------------------------------------------------- */ void update(int np, int nd, vnd_t *pos, vnd_t *vel, vnd_t *f, vnd_t *a, double mass, double dt) { int i, j; double rmass; rmass = 1.0/mass; /* The time integration is fully parallel */ #pragma omp parallel for default(shared) private(i, j) firstprivate(rmass, dt) for (i = 0; i < np; i++) { for (j = 0; j < nd; j++) { pos[i][j] = pos[i][j] + vel[i][j]*dt + 0.5*dt*dt*a[i][j]; vel[i][j] = vel[i][j] + 0.5*dt*(f[i][j]*rmass + a[i][j]); a[i][j] = f[i][j]*rmass; } } } /* ----------------------------------------------------------------------- */ int main (int argc, char **argv) { /* simulation parameters */ double mass = 1.0; double dt = 1.0e-4; vnd_t box; vnd_t *position; vnd_t *velocity; vnd_t *force; vnd_t *accel; double potential, kinetic, E0; int i; int NUMTHREADS; double total_time; char *PARAM_NAMES[NUM_ARGS] = {"Nparts", "Nsteps"}; char *TIMERS_NAMES[NUM_TIMERS] = {"Total_time" }; char *DEFAULT_VALUES[NUM_ARGS] = {"8192", "10"}; NUMTHREADS = 1; //omp_get_num_threads(); //OSCR_init (NUMTHREADS, "Molecular dynamic simulation", "Use md <Nparts> <Nsteps>", NUM_ARGS, // PARAM_NAMES, DEFAULT_VALUES , NUM_TIMERS, NUM_TIMERS, TIMERS_NAMES, //argc, argv); NPARTS = NPARTSINIT; //OSCR_getarg_int(1); NSTEPS = 
NSTEPSINIT; //OSCR_getarg_int(2); /* Default: DEFAULT_NPARTS, DEFAULT_NSTEPS */ /* Memory allocation */ position = calloc(NPARTS, sizeof(vnd_t)); velocity = calloc(NPARTS, sizeof(vnd_t)); force = calloc(NPARTS, sizeof(vnd_t)); accel = calloc(NPARTS, sizeof(vnd_t)); NUMTHREADS = 1; //omp_get_num_threads(); for (i = 0; i < NDIM; i++) box[i] = 10.0; /* set initial positions, velocities, and accelerations */ initialize(NPARTS, NDIM, box, position, velocity, accel); //OSCR_timer_start(0); /* compute the forces and energies */ compute(NPARTS, NDIM, position, velocity, mass, force, &potential, &kinetic); E0 = potential + kinetic; /* This is the main time stepping loop */ for (i = 0; i < NSTEPS; i++) { compute(NPARTS, NDIM, position, velocity, mass, force, &potential, &kinetic); #if 0 printf("%17.9e %17.9e %17.9e\n", potential, kinetic, (potential + kinetic - E0) / E0); #endif update(NPARTS, NDIM, position, velocity, force, accel, mass, dt); } //OSCR_timer_stop(0); total_time = 1; //OSCR_timer_read(0); //OSCR_report(1, TIMERS_NAMES); printf("\n \t# THREADS \tTIME (secs.) \n"); printf("\t %d \t\t%14.6lf\n", NUMTHREADS, total_time); return 0; } /* * vim:ts=2:sw=2: */
for_schedule_guided.c
/* * Test for guided scheduling * Ensure threads get chunks interleavely first * Then judge the chunk sizes are decreasing until to a stable value * Modifed by Chunhua Liao * For example, 100 iteration on 2 threads, chunksize 7 * one line for each dispatch, 0/1 means thread id *0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 24 *1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 18 *0 0 0 0 0 0 0 0 0 0 0 0 0 0 14 *1 1 1 1 1 1 1 1 1 1 10 *0 0 0 0 0 0 0 0 8 *1 1 1 1 1 1 1 7 *0 0 0 0 0 0 0 7 *1 1 1 1 1 1 1 7 *0 0 0 0 0 5 */ #include <stdio.h> #include <omp.h> #include <unistd.h> #include <stdlib.h> #include "omp_testsuite.h" #include "omp_my_sleep.h" #define CFSMAX_SIZE 150 /*choose small iteration space for small sync. overhead*/ #define MAX_TIME 5 /* #define SLEEPTIME 0.5 */ #define SLEEPTIME 1 int check_for_schedule_guided (FILE * logFile) { int threads; /* const int chunk_size = 7; */ int tids[CFSMAX_SIZE]; int i, *tmp; int flag=0; int result = 0; int notout = 1; int maxiter = 0; int count = 0; int tmp_count = 0; int tid; #pragma omp parallel { #pragma omp single { threads = omp_get_num_threads (); } } if (threads < 2) { printf ("This test only works with at least two threads .\n"); return 0; } /* Now the real parallel work: Each thread will start immediately with the first chunk. */ #pragma omp parallel shared(tids,maxiter) private(tid,count) { tid = omp_get_thread_num (); //#pragma omp for nowait schedule(guided,chunk_size) #pragma omp for nowait schedule(guided,7) for (i = 0; i < CFSMAX_SIZE; ++i) { /*printf(" notout=%d, count= %d\n",notout,count); */ count = 0; #pragma omp flush(maxiter) if (i > maxiter) { #pragma omp critical { maxiter = i; } } /* if it is not our turn we wait a) until another thread executed an iteration with a higher iteration count b) we are at the end of the loop (first thread finished and set notout=0 OR c) timeout arrived */ #pragma omp flush(maxiter,notout) while (notout && (count < MAX_TIME) && (maxiter == i)) { /*printf("Thread Nr. 
%d sleeping\n",tid); */ my_sleep (SLEEPTIME); count += SLEEPTIME; } /*printf("Thread Nr. %d working once\n",tid); */ tids[i] = tid; } /*end omp for */ notout = 0; #pragma omp flush(notout) } /* end omp parallel */ count = 0; /* printf("debug--------\n"); for (i = 0; i < CFSMAX_SIZE; ++i) printf("%d ",tids[i]); printf("\nEnd debug--------\n"); */ /*fprintf(logFile,"# global_chunknr thread local_chunknr chunksize\n"); */ for (i = 0; i < CFSMAX_SIZE - 1; ++i) { if (tids[i] != tids[i + 1]) { count++; } } tmp = (int *) malloc((count + 1)* sizeof (int)); tmp_count=0; tmp[0]=1; /*calculate the chunksize for each dispatch*/ for (i = 0; i < CFSMAX_SIZE - 1; ++i) { if (tids[i] == tids[i + 1]) { tmp[tmp_count]++; } else { tmp_count ++; tmp[tmp_count]=1; } } /* printf("Debug2----\n"); for (i=0;i<=tmp_count;i++) printf("%d ",tmp[i]); printf("\nEndDebug2----\n"); */ /*Check if chunk sizes are decreased until equals to the specified one, ignore the last dispatch for possible smaller remainder*/ flag=0; for (i = 0; i < count-1; i++) { if ((i>0)&&(tmp[i]==tmp[i+1])) flag=1; /*set flag to indicate the Chunk sizes should be the same from now on*/ if(flag==0) { if (tmp[i]<=tmp[i+1]) { result++; fprintf(logFile,"chunk size from %d to %d not decreased.\n", i,i+1); } } else if (tmp[i]!=tmp[i+1]) { result++; fprintf(logFile,"chunk size not maintained.\n"); } } return (result==0); } int crosscheck_for_schedule_guided (FILE * logFile) { int threads; /* const int chunk_size = 7; */ int tids[CFSMAX_SIZE]; int i, *tmp; int flag=0; int result = 0; int notout = 1; int maxiter = 0; int count = 0; int tmp_count = 0; int tid; /* Since it takes quite long to finish the check_x(), I skip the cross_check_X() here. 
Liao */ #pragma omp parallel { #pragma omp single { threads = omp_get_num_threads (); } } if (threads < 2) { printf ("This test only works with at least two threads .\n"); return 0; } #pragma omp parallel shared(tids,maxiter) private(tid,count) { tid = omp_get_thread_num (); //#pragma omp for nowait schedule(static,chunk_size) #pragma omp for nowait schedule(static,7) for (i = 0; i < CFSMAX_SIZE; ++i) { count = 0; #pragma omp flush(maxiter) if (i > maxiter) { #pragma omp critical { maxiter = i; } } #pragma omp flush(maxiter,notout) while (notout && (count < MAX_TIME) && (maxiter == i)) { my_sleep (SLEEPTIME); count += SLEEPTIME; } tids[i] = tid; } notout = 0; #pragma omp flush(notout) } count = 0; for (i = 0; i < CFSMAX_SIZE - 1; ++i) { if (tids[i] != tids[i + 1]) { count++; } } tmp = (int *) malloc((count + 1)* sizeof (int)); tmp_count=0; tmp[0]=1; for (i = 0; i < CFSMAX_SIZE - 1; ++i) { if (tids[i] == tids[i + 1]) { tmp[tmp_count]++; } else { tmp_count ++; tmp[tmp_count]=1; } } flag=0; for (i = 0; i < count-1; i++) { if ((i>0)&&(tmp[i]==tmp[i+1])) flag=1; if(flag==0){ if (tmp[i]<=tmp[i+1]) { result++; fprintf(logFile,"chunk size from %d to %d not decreased.\n", i,i+1); } } else if (tmp[i]!=tmp[i+1]) { result++; fprintf(logFile,"chunk size not maintained.\n"); } } result=1; return (result==0); }
omp_critical.c
/****************************************************************************** * OpenMP Example - Matrix-vector multiplication - C/C++ Version * FILE: omp_matvec.c * DESCRIPTION: * This example multiplies all row i elements of matrix A with vector * element b(i) and stores the summed products in vector c(i). A total is * maintained for the entire matrix. Performed by using the OpenMP loop * work-sharing construct. The update of the shared global total is * serialized by using the OpenMP critical directive. * SOURCE: Blaise Barney 5/99 * LAST REVISED: ******************************************************************************/ #include <stdio.h> #include <omp.h> #define SIZE 10 int main () { float A[SIZE][SIZE], b[SIZE], c[SIZE], total; int i, j, tid; /* Initializations */ total = 0.0; for (i=0; i < SIZE; i++) { for (j=0; j < SIZE; j++) A[i][j] = (j+1) * 1.0; b[i] = 1.0 * (i+1); c[i] = 0.0; } printf("\nStarting values of matrix A and vector b:\n"); for (i=0; i < SIZE; i++) { printf(" A[%d]= ",i); for (j=0; j < SIZE; j++) printf("%.1f ",A[i][j]); printf(" b[%d]= %.1f\n",i,b[i]); } printf("\nResults by thread/row:\n"); /* Create a team of threads and scope variables */ #pragma omp parallel shared(A,b,c,total) private(tid,i) { tid = omp_get_thread_num(); /* Loop work-sharing construct - distribute rows of matrix */ #pragma omp for private(j) for (i=0; i < SIZE; i++) { for (j=0; j < SIZE; j++) c[i] += (A[i][j] * b[i]); /* Update and display of running total must be serialized */ { total = total + c[i]; printf(" thread %d did row %d\t c[%d]=%.2f\t",tid,i,i,c[i]); printf("Running total= %.2f\n",total); } } /* end of parallel i loop */ } /* end of parallel construct */ printf("\nMatrix-vector total - sum of all c[] = %.2f\n\n",total); }
GB_unaryop__abs_uint8_fp64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_uint8_fp64 // op(A') function: GB_tran__abs_uint8_fp64 // C type: uint8_t // A type: double // cast: uint8_t cij ; GB_CAST_UNSIGNED(cij,aij,8) // unaryop: cij = aij #define GB_ATYPE \ double #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ uint8_t z ; GB_CAST_UNSIGNED(z,x,8) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_UINT8 || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_uint8_fp64 ( uint8_t *restrict Cx, const double *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, 
p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_uint8_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
AverageLut.h
// -------------------------------------------------------------------------- // Binary Brain -- binary neural net framework // // Copyright (C) 2018 by Ryuji Fuchikami // https://github.com/ryuz // ryuji.fuchikami@nifty.com // -------------------------------------------------------------------------- #pragma once #include <array> #include <vector> #include "bb/BinaryLutModel.h" namespace bb { // LUT版popcount template <typename BinType = float, typename RealType = float> class AverageLut : public BinaryLutModel { using _super = BinaryLutModel; public: static inline std::string ClassName(void) { return "AverageLut"; } static inline std::string ObjectName(void){ return ClassName() + "_" + DataType<BinType>::Name() + "_" + DataType<RealType>::Name(); } std::string GetModelName(void) const override { return ClassName(); } std::string GetObjectName(void) const override { return ObjectName(); } protected: bool m_host_only = false; bool m_binarize_input = false; bool m_binarize_output = true; std::string m_connection; int m_n = 6; indices_t m_input_shape; indices_t m_output_shape; Tensor_<std::int32_t> m_input_index; std::mt19937_64 m_mt; public: struct create_t { int n = 6; indices_t output_shape; std::string connection = ""; bool binarize_input = false; bool binarize_output = true; std::uint64_t seed = 1; }; protected: AverageLut(create_t const &create) { BB_ASSERT(!create.output_shape.empty()); m_mt.seed(create.seed); m_n = create.n; m_output_shape = create.output_shape; m_connection = create.connection; m_input_index.Resize(CalcShapeSize(m_output_shape), (index_t)m_n); } void CommandProc(std::vector<std::string> args) override { // HostOnlyモード設定 if (args.size() == 2 && args[0] == "host_only") { m_host_only = EvalBool(args[1]); } // バイナリモード設定 if ( args.size() == 2 && args[0] == "binary" ) { m_binarize_input = EvalBool(args[1]); m_binarize_output = EvalBool(args[1]); } if ( args.size() == 2 && args[0] == "binarize_input" ) { m_binarize_input = EvalBool(args[1]); } if ( 
args.size() == 2 && args[0] == "binarize_output" ) { m_binarize_output = EvalBool(args[1]); } } public: ~AverageLut() {} static std::shared_ptr<AverageLut> Create(create_t const &create) { return std::shared_ptr<AverageLut>(new AverageLut(create)); } static std::shared_ptr<AverageLut> Create(int n, indices_t const &output_shape, std::string connection = "", bool binarize = true, bool binarize_input = false, std::uint64_t seed = 1) { create_t create; create.n = n; create.output_shape = output_shape; create.connection = connection; create.binarize_input = binarize_input; create.binarize_output = binarize; create.seed = seed; return Create(create); } static std::shared_ptr<AverageLut> Create(int n, index_t output_node_size, std::string connection = "", bool binarize = true, bool binarize_input = false, std::uint64_t seed = 1) { create_t create; create.n = n; create.output_shape.resize(1); create.output_shape[0] = output_node_size; create.connection = connection; create.binarize_input = binarize_input; create.binarize_output = binarize; create.seed = seed; return Create(create); } static std::shared_ptr<AverageLut> Create(void) { return Create(create_t()); } #ifdef BB_PYBIND11 // python用 static std::shared_ptr<AverageLut> CreatePy( int n, indices_t output_shape, std::string connection="", bool binarize = true, bool binarize_input = false, std::uint64_t seed = 1) { create_t create; create.n = n; create.output_shape = output_shape; create.connection = connection; create.binarize_input = binarize_input; create.binarize_output = binarize; create.seed = seed; return Create(create); } #endif auto lock_InputIndex(void) { return m_input_index.Lock(); } auto lock_InputIndex_const(void) const { return m_input_index.LockConst(); } // 疎結合の管理 index_t GetNodeConnectionSize(index_t node) const override { return m_n; } void SetNodeConnectionIndex(index_t node, index_t input_index, index_t input_node) override { BB_ASSERT(node >= 0 && node < CalcShapeSize(m_output_shape)); 
BB_ASSERT(input_index >= 0 && input_index < m_n); BB_DEBUG_ASSERT(input_node >= 0 && input_node < GetInputNodeSize()); auto ptr = lock_InputIndex(); ptr(node, input_index) = (std::int32_t)input_node; } index_t GetNodeConnectionIndex(index_t node, index_t input_index) const override { BB_ASSERT(node >= 0 && node < CalcShapeSize(m_output_shape)); BB_ASSERT(input_index >= 0 && input_index < m_n); auto ptr = lock_InputIndex_const(); return (index_t)ptr(node, input_index); } // LUT操作の定義 int GetLutTableSize(index_t node) const { return (1 << m_n); } void SetLutTable(index_t node, int bitpos, bool value) override { } bool GetLutTable(index_t node, int bitpos) const override { int count = 0; for ( int i = 0; i < m_n; ++i ) { count += (bitpos & 1) ? +1 : -1; bitpos >>= 1; } return count > 0; } /** * @brief 入力のshape設定 * @detail 入力のshape設定 * @param shape 新しいshape * @return なし */ indices_t SetInputShape(indices_t shape) override { // 設定済みなら何もしない if ( shape == this->GetInputShape() ) { return this->GetOutputShape(); } // 形状設定 m_input_shape = shape; // 接続初期化 this->InitializeNodeInput(m_mt(), m_connection); return m_output_shape; } /** * @brief 出力のshape設定 * @detail 出力のshape設定 * 出力ノード数が変わらない限りshpeは自由 * @param shape 新しいshape * @return なし */ void SetOutputShape(indices_t const &shape) { BB_ASSERT(CalcShapeSize(shape) == this->m_output_node_size); m_output_shape = shape; } /** * @brief 入力形状取得 * @detail 入力形状を取得する * @return 入力形状を返す */ indices_t GetInputShape(void) const override { return m_input_shape; } /** * @brief 出力形状取得 * @detail 出力形状を取得する * @return 出力形状を返す */ indices_t GetOutputShape(void) const override { return m_output_shape; } public: FrameBuffer Forward(FrameBuffer x_buf, bool train = true) override { BB_ASSERT(x_buf.GetType() == DataType<BinType>::type); // SetInputShpaeされていなければ初回に設定 if (x_buf.GetShape() != m_input_shape) { SetInputShape(x_buf.GetShape()); } // 出力を設定 FrameBuffer y_buf(x_buf.GetFrameSize(), m_output_shape, DataType<BinType>::type); #ifdef BB_WITH_CUDA if ( 
DataType<BinType>::type == BB_TYPE_FP32 && !m_host_only && x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) { auto x_ptr = x_buf.LockDeviceMemoryConst(); auto y_ptr = y_buf.LockDeviceMemory(true); auto input_index_ptr = m_input_index.LockDeviceMemoryConst(); bbcu_AverageLut_Forward<float> ( (float const *)x_ptr.GetAddr(), (float *)y_ptr.GetAddr(), (int const *)input_index_ptr.GetAddr(), (int )m_n, (int )y_buf.GetNodeSize(), (int )y_buf.GetFrameSize(), (int )(y_buf.GetFrameStride() / sizeof(float)), (bool )m_binarize_input, (bool )m_binarize_output ); return y_buf; } if ( DataType<BinType>::type == BB_TYPE_BIT && !m_host_only && x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) { auto x_ptr = x_buf.LockDeviceMemoryConst(); auto y_ptr = y_buf.LockDeviceMemory(true); auto input_index_ptr = m_input_index.LockDeviceMemoryConst(); bbcu_bit_AverageLut_Forward ( (int const *)x_ptr.GetAddr(), (int *)y_ptr.GetAddr(), (int const *)input_index_ptr.GetAddr(), (int )m_n, (int )y_buf.GetNodeSize(), (int )y_buf.GetFrameSize(), (int )(y_buf.GetFrameStride() / sizeof(int)) ); return y_buf; } #endif { // 汎用版 auto x_ptr = x_buf.LockConst<BinType>(); auto y_ptr = y_buf.Lock<BinType>(); auto input_index_ptr = m_input_index.LockConst(); index_t frame_size = x_buf.GetFrameSize(); index_t node_size = this->GetOutputNodeSize(); #pragma omp parallel for for (index_t node = 0; node < node_size; ++node) { for (index_t frame = 0; frame < frame_size; ++frame) { RealType sum = 0; for (index_t i = 0; i < m_n; i++) { index_t input_node = input_index_ptr(node, i); RealType val = (RealType)x_ptr.Get(frame, input_node); if (m_binarize_input) { val = (val > 0) ? (RealType)BB_BINARY_HI : (RealType)BB_BINARY_LO; } sum += val; } if (m_binarize_output) { sum = (sum > 0) ? 
(RealType)BB_BINARY_HI : (RealType)BB_BINARY_LO; } y_ptr.Set(frame, node, (BinType)sum); } } return y_buf; } } // Backward FrameBuffer Backward(FrameBuffer dy_buf) override { if (dy_buf.Empty()) { return dy_buf; } BB_ASSERT(dy_buf.GetType() == DataType<RealType>::type); // 出力を設定 FrameBuffer dx_buf(dy_buf.GetFrameSize(), m_input_shape, DataType<RealType>::type); #ifdef BB_WITH_CUDA if ( DataType<RealType>::type == BB_TYPE_FP32 && !m_host_only && dy_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) { auto dy_ptr = dy_buf.LockDeviceMemoryConst(); auto dx_ptr = dx_buf.LockDeviceMemory(true); auto input_index_ptr = m_input_index.LockDeviceMemoryConst(); bbcu_AverageLut_Backward<float> ( (float const *)dy_ptr.GetAddr(), (float *)dx_ptr.GetAddr(), (int const *)input_index_ptr.GetAddr(), (int )m_n, (int )dx_buf.GetNodeSize(), (int )dy_buf.GetNodeSize(), (int )dy_buf.GetFrameSize(), (int )(dy_buf.GetFrameStride() / sizeof(float)) ); return dx_buf; } #endif { // 汎用版 dx_buf.FillZero(); auto dy_ptr = dy_buf.LockConst<RealType>(); auto dx_ptr = dx_buf.Lock<RealType>(); auto input_index_ptr = m_input_index.LockConst(); index_t frame_size = dy_buf.GetFrameSize(); index_t node_size = this->GetOutputNodeSize(); #pragma omp parallel for for (index_t node = 0; node < node_size; ++node) { for (index_t frame = 0; frame < frame_size; ++frame) { auto dx = dy_ptr.Get(frame, node) / m_n; for (index_t i = 0; i < m_n; i++) { index_t input_node = input_index_ptr(node, i); dx_ptr.Add(frame, input_node, dx); } } } return dx_buf; } } // シリアライズ protected: void DumpObjectData(std::ostream &os) const override { // バージョン std::int64_t ver = 1; bb::SaveValue(os, ver); // 親クラス _super::DumpObjectData(os); // メンバ bb::SaveValue(os, m_n); bb::SaveValue(os, m_host_only); bb::SaveValue(os, m_connection); bb::SaveValue(os, m_input_shape); bb::SaveValue(os, m_output_shape); bb::SaveValue(os, m_binarize_input); bb::SaveValue(os, m_binarize_output); 
m_input_index.DumpObject(os); } void LoadObjectData(std::istream &is) override { // バージョン std::int64_t ver; bb::LoadValue(is, ver); BB_ASSERT(ver == 1); // 親クラス _super::LoadObjectData(is); // メンバ bb::LoadValue(is, m_n); bb::LoadValue(is, m_host_only); bb::LoadValue(is, m_connection); bb::LoadValue(is, m_input_shape); bb::LoadValue(is, m_output_shape); bb::LoadValue(is, m_binarize_input); bb::LoadValue(is, m_binarize_output); m_input_index.LoadObject(is); } }; }
diamond_count.h
// This is the implementation for subgraph counting, not listing std::cout << "Running the subgraph counting implementation\n"; #pragma omp parallel for schedule(dynamic,1) reduction(+:counter) for (vidType v0 = 0; v0 < g.V(); v0++) { #if 0 auto tid = omp_get_thread_num(); auto &local_ccodes = ccodes[tid]; for (auto u : g.N(v0)) local_ccodes[u] = 1; for (auto v1 : g.N(v0)) { if (v1 >= v0) break; uint64_t n = 0; for (auto u : g.N(v1)) { if (local_ccodes[u] == 1) n ++; } counter += n * (n-1) / 2; } for (auto u : g.N(v0)) local_ccodes[u] = 0; #else for (auto v1 : g.N(v0)) { if (v1 >= v0) break; uint64_t n = intersect(g, v0, v1); counter += n * (n-1) / 2; } #endif }
val_omp.c
/* This file performs the following test: each OMP thread measures flops for
   its provided tasks, and compares this to expected flop counts, each thread
   having been provided with a random amount of work, such that the time and
   order that they complete their measurements varies.  Specifically tested
   is the case where the value returned for some threads actually corresponds
   to that for another thread reading its counter values at the same time.

   - It is based on zero_omp.c but ignored much of its functionality.
   - It attempts to use the following two counters.  It may use less
     depending on hardware counter resource limitations.  These are counted
     in the default counting domain and default granularity, depending on
     the platform.  Usually this is the user domain (PAPI_DOM_USER) and
     thread context (PAPI_GRN_THR).

     + PAPI_FP_INS
     + PAPI_TOT_CYC

   Each thread inside the Thread routine:
   - Do prework (MAX_FLOPS - flops)
   - Get cyc.
   - Get us.
   - Start counters
   - Do flops
   - Stop and read counters
   - Get us.
   - Get cyc.
   - Return flops
*/

#include "papi_test.h"

#ifdef _OPENMP
#include <omp.h>
#else
#error "This compiler does not understand OPENMP"
#endif

const int MAX_FLOPS = NUM_FLOPS;

extern int TESTS_QUIET;         /* Declared in test_utils.c */
const PAPI_hw_info_t *hw_info = NULL;

/* Perform n flops on the calling thread with counters running and return
   the hardware-counted flop value. */
long_long Thread(int n)
{
   int retval, num_tests = 1;
   int EventSet1 = PAPI_NULL;
   int PAPI_event, mask1;
   int num_events1;
   long_long flops;
   long_long **values;
   long_long elapsed_us, elapsed_cyc;
   char event_name[PAPI_MAX_STR_LEN];

   /* printf("Thread(n=%d) 0x%x started\n", n, omp_get_thread_num()); */
   num_events1 = 2;

   /* add PAPI_TOT_CYC and one of the events in PAPI_FP_INS, PAPI_FP_OPS or
      PAPI_TOT_INS, depending on the availability of the event on the
      platform */
   EventSet1 = add_two_events(&num_events1, &PAPI_event, hw_info, &mask1);

   retval = PAPI_event_code_to_name(PAPI_event, event_name);
   if (retval != PAPI_OK)
      test_fail(__FILE__, __LINE__, "PAPI_event_code_to_name", retval);

   values = allocate_test_space(num_tests, num_events1);

   do_flops(MAX_FLOPS - n);     /* prework for balance */

   elapsed_us = PAPI_get_real_usec();
   elapsed_cyc = PAPI_get_real_cyc();

   retval = PAPI_start(EventSet1);
   if (retval != PAPI_OK)
      test_fail(__FILE__, __LINE__, "PAPI_start", retval);

   do_flops(n);

   retval = PAPI_stop(EventSet1, values[0]);
   if (retval != PAPI_OK)
      test_fail(__FILE__, __LINE__, "PAPI_stop", retval);

   flops = (values[0])[0];
   elapsed_us = PAPI_get_real_usec() - elapsed_us;
   elapsed_cyc = PAPI_get_real_cyc() - elapsed_cyc;

   remove_test_events(&EventSet1, mask1);

   if (!TESTS_QUIET) {
      /*printf("Thread 0x%x %-12s : \t%lld\t%d\n", omp_get_thread_num(),
         event_name, (values[0])[0], n);*/
#if 0
      printf("Thread 0x%x PAPI_TOT_CYC: \t%lld\n", omp_get_thread_num(),
             (values[0])[1]);
      printf("Thread 0x%x Real usec : \t%lld\n", omp_get_thread_num(),
             elapsed_us);
      printf("Thread 0x%x Real cycles : \t%lld\n", omp_get_thread_num(),
             elapsed_cyc);
#endif
   }

   /* It is illegal for the threads to exit in OpenMP */
   /* test_pass(__FILE__,0,0); */
   free_test_space(values, num_tests);

   PAPI_unregister_thread();
   /* printf("Thread 0x%x finished\n", omp_get_thread_num()); */
   return flops;
}

int main(int argc, char **argv)
{
   int tid, retval;
   int maxthr = omp_get_max_threads();
   int flopper = 0;
   long_long *flops = calloc(maxthr, sizeof(long_long));
   long_long *flopi = calloc(maxthr, sizeof(long_long));

   tests_quiet(argc, argv);     /* Set TESTS_QUIET variable */

   if (maxthr < 2)
      test_skip(__FILE__, __LINE__, "omp_get_num_threads < 2", PAPI_EINVAL);

   if ((flops == NULL) || (flopi == NULL))
      test_fail(__FILE__, __LINE__, "calloc", PAPI_ENOMEM);

   retval = PAPI_library_init(PAPI_VER_CURRENT);
   if (retval != PAPI_VER_CURRENT)
      test_fail(__FILE__, __LINE__, "PAPI_library_init", retval);

   if (!TESTS_QUIET) {
      retval = PAPI_set_debug(PAPI_VERB_ECONT);
      if (retval != PAPI_OK)
         test_fail(__FILE__, __LINE__, "PAPI_set_debug", retval);
   }

   hw_info = PAPI_get_hardware_info();
   if (hw_info == NULL)
      test_fail(__FILE__, __LINE__, "PAPI_get_hardware_info", 2);

   retval = PAPI_thread_init((unsigned long (*)(void)) (omp_get_thread_num));
   if (retval != PAPI_OK) {
      if (retval == PAPI_ESBSTR)
         test_skip(__FILE__, __LINE__, "PAPI_thread_init", retval);
      else
         test_fail(__FILE__, __LINE__, "PAPI_thread_init", retval);
   }

   /* calibrate: counted flops per unit of requested work */
   flopper = Thread(65536) / 65536;
   printf("flopper=%d\n", flopper);
   /* robustness: if the counter reads zero (e.g. unsupported event),
      avoid dividing by zero below */
   if (flopper == 0)
      flopper = 1;

   for (int i = 0; i < 100000; i++)
#pragma omp parallel private(tid)
   {
      tid = omp_get_thread_num();
      /* BUG FIX: rand()*3 multiplied in int and could overflow
         (RAND_MAX may be INT_MAX); widen before multiplying. */
      flopi[tid] = (long_long) rand() * 3;
      flops[tid] = Thread((flopi[tid] / flopper) % MAX_FLOPS);
#pragma omp barrier
#pragma omp master
      if (flops[tid] < flopi[tid]) {
         printf("test iteration=%d\n", i);
         for (int j = 0; j < omp_get_num_threads(); j++) {
            printf("Thread 0x%x Value %6lld %c %6lld", j, flops[j],
                   (flops[j] < flopi[j]) ? '<' : '=', flopi[j]);
            for (int k = 0; k < omp_get_num_threads(); k++)
               if ((k != j) && (flops[k] == flops[j]))
                  printf(" == Thread 0x%x!", k);
            printf("\n");
         }
         test_fail(__FILE__, __LINE__, "value returned for thread", PAPI_EBUG);
      }
   }

   test_pass(__FILE__, NULL, 0);
   exit(0);
}
GB_binop__div_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__div_uint64) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__div_uint64) // A.*B function (eWiseMult): GB (_AemultB_03__div_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__div_uint64) // A*D function (colscale): GB (_AxD__div_uint64) // D*A function (rowscale): GB (_DxB__div_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__div_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__div_uint64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_uint64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_uint64) // C=scalar+B GB (_bind1st__div_uint64) // C=scalar+B' GB (_bind1st_tran__div_uint64) // C=A+scalar GB (_bind2nd__div_uint64) // C=A'+scalar GB (_bind2nd_tran__div_uint64) // C type: uint64_t // A type: uint64_t // B,b type: uint64_t // BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 64) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B 
are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_IDIV_UNSIGNED (x, y, 64) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_DIV || GxB_NO_UINT64 || GxB_NO_DIV_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__div_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__div_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__div_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__div_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__div_uint64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool 
D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__div_uint64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__div_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__div_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, 
const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__div_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__div_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__div_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__div_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = Bx [p] ; Cx [p] = GB_IDIV_UNSIGNED (x, bij, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__div_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = Ax [p] ; Cx [p] = GB_IDIV_UNSIGNED (aij, y, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = Ax [pA] ; \ Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 64) ; \ } GrB_Info GB (_bind1st_tran__div_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = Ax [pA] ; \ Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 64) ; \ } GrB_Info GB (_bind2nd_tran__div_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif