source
stringlengths
3
92
c
stringlengths
26
2.25M
GB_binop__rminus_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__rminus_int32 // A.*B function (eWiseMult): GB_AemultB__rminus_int32 // A*D function (colscale): GB_AxD__rminus_int32 // D*A function (rowscale): GB_DxB__rminus_int32 // C+=B function (dense accum): GB_Cdense_accumB__rminus_int32 // C+=b function (dense accum): GB_Cdense_accumb__rminus_int32 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rminus_int32 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rminus_int32 // C=scalar+B GB_bind1st__rminus_int32 // C=scalar+B' GB_bind1st_tran__rminus_int32 // C=A+scalar GB_bind2nd__rminus_int32 // C=A'+scalar GB_bind2nd_tran__rminus_int32 // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (bij - aij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define 
GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (y - x) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RMINUS || GxB_NO_INT32 || GxB_NO_RMINUS_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__rminus_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__rminus_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__rminus_int32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__rminus_int32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__rminus_int32 ( 
GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__rminus_int32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__rminus_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice 
= NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__rminus_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__rminus_int32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = Bx [p] ; Cx [p] = (bij - x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator 
to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__rminus_int32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = Ax [p] ; Cx [p] = (y - aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (aij - x) ; \ } GrB_Info GB_bind1st_tran__rminus_int32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (y - aij) ; \ } GrB_Info GB_bind2nd_tran__rminus_int32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
assign_scalar_variable_to_entities_process.h
// // | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Josep Maria Carbonell // Vicente Mataix Ferrandiz // #if !defined(KRATOS_ASSIGN_SCALAR_VARIABLE_TO_ENTITIES_PROCESS_H_INCLUDED ) #define KRATOS_ASSIGN_SCALAR_VARIABLE_TO_ENTITIES_PROCESS_H_INCLUDED // System includes // External includes // Project includes #include "includes/model_part.h" #include "includes/kratos_parameters.h" #include "processes/process.h" namespace Kratos { ///@name Kratos Classes ///@{ /** * @class AssignScalarVariableToEntitiesProcess * @ingroup KratosCore * @brief This function assigns a value to a variable belonging to all of the entities in a given mesh * @details Can be used to any entities * @tparam TEntity The entity type * @author Josep Maria Carbonell * @author Vicente Mataix Ferrandiz */ template<class TEntity> class KRATOS_API(KRATOS_CORE) AssignScalarVariableToEntitiesProcess : public Process { public: ///@name Type Definitions ///@{ /// Node type definition typedef Node<3> NodeType; /// Definition of array component typedef VariableComponent<VectorComponentAdaptor<array_1d<double, 3> > > array_1d_component_type; /// The container of the entities typedef PointerVectorSet<TEntity, IndexedObject> EntityContainerType; /// Pointer definition of AssignScalarVariableToEntitiesProcess KRATOS_CLASS_POINTER_DEFINITION(AssignScalarVariableToEntitiesProcess); ///@} ///@name Life Cycle ///@{ /** * @brief Default constructor * @param rModelPart The model part to be set * @param rParameters The configuration parameters */ AssignScalarVariableToEntitiesProcess( ModelPart& rModelPart, Parameters rParameters ); /// Destructor. ~AssignScalarVariableToEntitiesProcess() override {} ///@} ///@name Operators ///@{ /// This operator is provided to call the process as a function and simply calls the Execute method. 
void operator()() { Execute(); } ///@} ///@name Operations ///@{ /** * @brief Execute method is used to execute the AssignScalarVariableToEntitiesProcess algorithms. */ void Execute() override; /** * @brief This function will be executed at every time step BEFORE performing the solve phase */ void ExecuteInitializeSolutionStep() override { Execute(); } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "AssignScalarVariableToEntitiesProcess"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << "AssignScalarVariableToEntitiesProcess"; } /// Print object's data. void PrintData(std::ostream& rOStream) const override { } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ /// Copy constructor. 
AssignScalarVariableToEntitiesProcess(AssignScalarVariableToEntitiesProcess const& rOther); ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ModelPart& mrModelPart; /// The model part where to assign the values std::string mVariableName; /// The name of the variable double mDoubleValue; /// The double value to assign int mIntValue; /// The integer value to assign bool mBoolValue; /// The boolean value to assign std::size_t mMeshId; /// The mesh id ///@} ///@name Private Operators ///@{ /** * @brief This method assigns the value (with OMP) * @param rVar The variable to be assigned * @param Value The value to assign */ template< class TVarType, class TDataType > void InternalAssignValue(TVarType& rVar, const TDataType Value) { auto& r_entities_array = GetEntitiesContainer(); const int number_of_entities = static_cast<int>(r_entities_array.size()); if(number_of_entities != 0) { const auto it_begin = r_entities_array.begin(); #pragma omp parallel for for(int i = 0; i<number_of_entities; i++) { auto it_entity = it_begin + i; it_entity->SetValue(rVar, Value); } } } /** * @brief This method assigns the value (without OMP) * @param rVar The variable to be assigned * @param Value The value to assign */ template< class TVarType, class TDataType > void InternalAssignValueSerial(TVarType& rVar, const TDataType Value) { auto& r_entities_array = GetEntitiesContainer(); const int number_of_entities = static_cast<int>(r_entities_array.size()); if(number_of_entities != 0) { const auto it_begin = r_entities_array.begin(); for(int i = 0; i<number_of_entities; i++) { auto it_entity = it_begin + i; it_entity->SetValue(rVar, Value); } } } /** * @brief This method returns the current entity container * @return The current entity container */ EntityContainerType& GetEntitiesContainer(); 
///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ /// Assignment operator. AssignScalarVariableToEntitiesProcess& operator=(AssignScalarVariableToEntitiesProcess const& rOther); ///@} ///@name Serialization ///@{ ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; // Class AssignScalarVariableToEntitiesProcess ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ /// input stream function template<class TEntity> inline std::istream& operator >> (std::istream& rIStream, AssignScalarVariableToEntitiesProcess<TEntity>& rThis); /// output stream function template<class TEntity> inline std::ostream& operator << (std::ostream& rOStream, const AssignScalarVariableToEntitiesProcess<TEntity>& rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; rThis.PrintData(rOStream); return rOStream; } ///@} } // namespace Kratos. #endif // KRATOS_ASSIGN_SCALAR_VARIABLE_TO_ENTITIES_PROCESS_H_INCLUDED defined
learner.c
/* ============================================================================= * * learn.c * -- Learns structure of Bayesian net from data * * ============================================================================= * * Copyright (C) Stanford University, 2006. All Rights Reserved. * Author: Chi Cao Minh * * ============================================================================= * * The penalized log-likelihood score (Friedman & Yahkani, 1996) is used to * evaluated the "goodness" of a Bayesian net: * * M n_j * --- --- --- * -N_params * ln(R) / 2 + R > > > P((a_j = v), X_j) ln P(a_j = v | X_j) * --- --- --- * j=1 X_j v=1 * * Where: * * N_params total number of parents across all variables * R number of records * M number of variables * X_j parents of the jth variable * n_j number of attributes of the jth variable * a_j attribute * * The second summation of X_j varies across all possible assignments to the * values of the parents X_j. * * In the code: * * "local log likelihood" is P((a_j = v), X_j) ln P(a_j = v | X_j) * "log likelihood" is everything to the right of the '+', i.e., "R ... X_j)" * "base penalty" is -ln(R) / 2 * "penalty" is N_params * -ln(R) / 2 * "score" is the entire expression * * For more notes, refer to: * * A. Moore and M.-S. Lee. Cached sufficient statistics for efficient machine * learning with large datasets. Journal of Artificial Intelligence Research 8 * (1998), pp 67-91. * * ============================================================================= * * The search strategy uses a combination of local and global structure search. * Similar to the technique described in: * * D. M. Chickering, D. Heckerman, and C. Meek. A Bayesian approach to learning * Bayesian networks with local structure. In Proceedings of Thirteenth * Conference on Uncertainty in Artificial Intelligence (1997), pp. 80-89. 
* * ============================================================================= * * For the license of bayes/sort.h and bayes/sort.c, please see the header * of the files. * * ------------------------------------------------------------------------ * * For the license of kmeans, please see kmeans/LICENSE.kmeans * * ------------------------------------------------------------------------ * * For the license of ssca2, please see ssca2/COPYRIGHT * * ------------------------------------------------------------------------ * * For the license of lib/mt19937ar.c and lib/mt19937ar.h, please see the * header of the files. * * ------------------------------------------------------------------------ * * For the license of lib/rbtree.h and lib/rbtree.c, please see * lib/LEGALNOTICE.rbtree and lib/LICENSE.rbtree * * ------------------------------------------------------------------------ * * Unless otherwise noted, the following license applies to STAMP files: * * Copyright (c) 2007, Stanford University * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of Stanford University nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * ============================================================================= */ #include <assert.h> #include <math.h> #include <stdlib.h> #include "adtree.h" #include "data.h" #include "learner.h" #include "list.h" #include "net.h" #include "operation.h" #include "query.h" #include "random.h" #include "thread.h" #include "timer.h" #include "utility.h" #include "vector.h" struct learner_task { operation_t op; long fromId; long toId; float score; }; typedef struct findBestTaskArg { long toId; learner_t* learnerPtr; query_t* queries; vector_t* queryVectorPtr; vector_t* parentQueryVectorPtr; long numTotalParent; float basePenalty; float baseLogLikelihood; bitmap_t* bitmapPtr; queue_t* workQueuePtr; vector_t* aQueryVectorPtr; vector_t* bQueryVectorPtr; } findBestTaskArg_t; extern long global_insertPenalty; extern long global_maxNumEdgeLearned; extern float global_operationQualityFactor; /* ============================================================================= * DECLARATION OF TM_CALLABLE FUNCTIONS * ============================================================================= */ TM_CALLABLE static learner_task_t TMfindBestReverseTask (TM_ARGDECL findBestTaskArg_t* argPtr); TM_CALLABLE static learner_task_t TMfindBestInsertTask (TM_ARGDECL findBestTaskArg_t* argPtr); TM_CALLABLE static learner_task_t TMfindBestRemoveTask (TM_ARGDECL findBestTaskArg_t* argPtr); /* ============================================================================= * compareTask * 
-- Want greatest score first * -- For list * ============================================================================= */ static long compareTask (const void* aPtr, const void* bPtr) { learner_task_t* aTaskPtr = (learner_task_t*)aPtr; learner_task_t* bTaskPtr = (learner_task_t*)bPtr; float aScore = aTaskPtr->score; float bScore = bTaskPtr->score; if (aScore < bScore) { return 1; } else if (aScore > bScore) { return -1; } else { return (aTaskPtr->toId - bTaskPtr->toId); } } /* ============================================================================= * compareQuery * -- Want smallest ID first * -- For vector_sort * ============================================================================= */ static int compareQuery (const void* aPtr, const void* bPtr) { query_t* aQueryPtr = (query_t*)(*(void**)aPtr); query_t* bQueryPtr = (query_t*)(*(void**)bPtr); return (aQueryPtr->index - bQueryPtr->index); } /* ============================================================================= * learner_alloc * ============================================================================= */ learner_t* learner_alloc (data_t* dataPtr, adtree_t* adtreePtr, long numThread) { learner_t* learnerPtr; learnerPtr = (learner_t*)malloc(sizeof(learner_t)); if (learnerPtr) { learnerPtr->adtreePtr = adtreePtr; learnerPtr->netPtr = net_alloc(dataPtr->numVar); assert(learnerPtr->netPtr); learnerPtr->localBaseLogLikelihoods = (float*)malloc(dataPtr->numVar * sizeof(float)); assert(learnerPtr->localBaseLogLikelihoods); learnerPtr->baseLogLikelihood = 0.0F; learnerPtr->tasks = (learner_task_t*)malloc(dataPtr->numVar * sizeof(learner_task_t)); assert(learnerPtr->tasks); learnerPtr->taskListPtr = list_alloc(&compareTask); assert(learnerPtr->taskListPtr); learnerPtr->numTotalParent = 0; } return learnerPtr; } /* ============================================================================= * learner_free * ============================================================================= */ void 
learner_free (learner_t* learnerPtr) { list_free(learnerPtr->taskListPtr); free(learnerPtr->tasks); free(learnerPtr->localBaseLogLikelihoods); net_free(learnerPtr->netPtr); free(learnerPtr); } /* ============================================================================= * computeSpecificLocalLogLikelihood * -- Query vectors should not contain wildcards * ============================================================================= */ static float computeSpecificLocalLogLikelihood (adtree_t* adtreePtr, vector_t* queryVectorPtr, vector_t* parentQueryVectorPtr) { long count = adtree_getCount(adtreePtr, queryVectorPtr); if (count == 0) { return 0.0; } double probability = (double)count / (double)adtreePtr->numRecord; long parentCount = adtree_getCount(adtreePtr, parentQueryVectorPtr); assert(parentCount >= count); assert(parentCount > 0); return (float)(probability * (double)log((double)count/ (double)parentCount)); } /* ============================================================================= * createPartition * ============================================================================= */ static void createPartition (long min, long max, long id, long n, long* startPtr, long* stopPtr) { long range = max - min; long chunk = MAX(1, ((range + n/2) / n)); /* rounded */ long start = min + chunk * id; long stop; if (id == (n-1)) { stop = max; } else { stop = MIN(max, (start + chunk)); } *startPtr = start; *stopPtr = stop; } /* ============================================================================= * createTaskList * -- baseLogLikelihoods and taskListPtr are updated * ============================================================================= */ static void createTaskList (void* argPtr) { TM_THREAD_ENTER(); long myId = thread_getId(); long numThread = thread_getNumThread(); learner_t* learnerPtr = (learner_t*)argPtr; list_t* taskListPtr = learnerPtr->taskListPtr; bool_t status; adtree_t* adtreePtr = learnerPtr->adtreePtr; float* localBaseLogLikelihoods = 
learnerPtr->localBaseLogLikelihoods; learner_task_t* tasks = learnerPtr->tasks; query_t queries[2]; vector_t* queryVectorPtr = PVECTOR_ALLOC(2); assert(queryVectorPtr); status = vector_pushBack(queryVectorPtr, (void*)&queries[0]); assert(status); query_t parentQuery; vector_t* parentQueryVectorPtr = PVECTOR_ALLOC(1); assert(parentQueryVectorPtr); long numVar = adtreePtr->numVar; long numRecord = adtreePtr->numRecord; float baseLogLikelihood = 0.0; float penalty = (float)(-0.5 * log((double)numRecord)); /* only add 1 edge */ long v; long v_start; long v_stop; createPartition(0, numVar, myId, numThread, &v_start, &v_stop); /* * Compute base log likelihood for each variable and total base loglikelihood */ for (v = v_start; v < v_stop; v++) { float localBaseLogLikelihood = 0.0; queries[0].index = v; queries[0].value = 0; localBaseLogLikelihood += computeSpecificLocalLogLikelihood(adtreePtr, queryVectorPtr, parentQueryVectorPtr); queries[0].value = 1; localBaseLogLikelihood += computeSpecificLocalLogLikelihood(adtreePtr, queryVectorPtr, parentQueryVectorPtr); localBaseLogLikelihoods[v] = localBaseLogLikelihood; baseLogLikelihood += localBaseLogLikelihood; } /* foreach variable */ TM_BEGIN(); float globalBaseLogLikelihood = TM_SHARED_READ_F(learnerPtr->baseLogLikelihood); TM_SHARED_WRITE_F(learnerPtr->baseLogLikelihood, (baseLogLikelihood + globalBaseLogLikelihood)); TM_END(); /* * For each variable, find if the addition of any edge _to_ it is better */ status = PVECTOR_PUSHBACK(parentQueryVectorPtr, (void*)&parentQuery); assert(status); for (v = v_start; v < v_stop; v++) { /* * Compute base log likelihood for this variable */ queries[0].index = v; long bestLocalIndex = v; float bestLocalLogLikelihood = localBaseLogLikelihoods[v]; status = PVECTOR_PUSHBACK(queryVectorPtr, (void*)&queries[1]); assert(status); long vv; for (vv = 0; vv < numVar; vv++) { if (vv == v) { continue; } parentQuery.index = vv; if (v < vv) { queries[0].index = v; queries[1].index = vv; } else { 
queries[0].index = vv; queries[1].index = v; } float newLocalLogLikelihood = 0.0; queries[0].value = 0; queries[1].value = 0; parentQuery.value = 0; newLocalLogLikelihood += computeSpecificLocalLogLikelihood(adtreePtr, queryVectorPtr, parentQueryVectorPtr); queries[0].value = 0; queries[1].value = 1; parentQuery.value = ((vv < v) ? 0 : 1); newLocalLogLikelihood += computeSpecificLocalLogLikelihood(adtreePtr, queryVectorPtr, parentQueryVectorPtr); queries[0].value = 1; queries[1].value = 0; parentQuery.value = ((vv < v) ? 1 : 0); newLocalLogLikelihood += computeSpecificLocalLogLikelihood(adtreePtr, queryVectorPtr, parentQueryVectorPtr); queries[0].value = 1; queries[1].value = 1; parentQuery.value = 1; newLocalLogLikelihood += computeSpecificLocalLogLikelihood(adtreePtr, queryVectorPtr, parentQueryVectorPtr); if (newLocalLogLikelihood > bestLocalLogLikelihood) { bestLocalIndex = vv; bestLocalLogLikelihood = newLocalLogLikelihood; } } /* foreach other variable */ PVECTOR_POPBACK(queryVectorPtr); if (bestLocalIndex != v) { float logLikelihood = numRecord * (baseLogLikelihood + + bestLocalLogLikelihood - localBaseLogLikelihoods[v]); float score = penalty + logLikelihood; learner_task_t* taskPtr = &tasks[v]; taskPtr->op = OPERATION_INSERT; taskPtr->fromId = bestLocalIndex; taskPtr->toId = v; taskPtr->score = score; TM_BEGIN(); status = TMLIST_INSERT(taskListPtr, (void*)taskPtr); TM_END(); assert(status); } } /* for each variable */ PVECTOR_FREE(queryVectorPtr); PVECTOR_FREE(parentQueryVectorPtr); #ifdef TEST_LEARNER list_iter_t it; list_iter_reset(&it, taskListPtr); while (list_iter_hasNext(&it, taskListPtr)) { learner_task_t* taskPtr = (learner_task_t*)list_iter_next(&it, taskListPtr); printf("[task] op=%i from=%li to=%li score=%lf\n", taskPtr->op, taskPtr->fromId, taskPtr->toId, taskPtr->score); } #endif /* TEST_LEARNER */ TM_THREAD_EXIT(); } /* ============================================================================= * TMpopTask * -- Returns NULL is list is empty 
* ============================================================================= */ learner_task_t* TMpopTask (TM_ARGDECL list_t* taskListPtr) { learner_task_t* taskPtr = NULL; list_iter_t it; TMLIST_ITER_RESET(&it, taskListPtr); if (TMLIST_ITER_HASNEXT(&it, taskListPtr)) { taskPtr = (learner_task_t*)TMLIST_ITER_NEXT(&it, taskListPtr); bool_t status = TMLIST_REMOVE(taskListPtr, (void*)taskPtr); assert(status); } return taskPtr; } /* ============================================================================= * populateParentQuery * -- Modifies contents of parentQueryVectorPtr * ============================================================================= */ static void populateParentQueryVector (net_t* netPtr, long id, query_t* queries, vector_t* parentQueryVectorPtr) { vector_clear(parentQueryVectorPtr); list_t* parentIdListPtr = net_getParentIdListPtr(netPtr, id); list_iter_t it; list_iter_reset(&it, parentIdListPtr); while (list_iter_hasNext(&it, parentIdListPtr)) { long parentId = (long)list_iter_next(&it, parentIdListPtr); bool_t status = vector_pushBack(parentQueryVectorPtr, (void*)&queries[parentId]); assert(status); } } /* ============================================================================= * TMpopulateParentQuery * -- Modifies contents of parentQueryVectorPtr * ============================================================================= */ static void TMpopulateParentQueryVector (TM_ARGDECL net_t* netPtr, long id, query_t* queries, vector_t* parentQueryVectorPtr) { vector_clear(parentQueryVectorPtr); list_t* parentIdListPtr = net_getParentIdListPtr(netPtr, id); list_iter_t it; TMLIST_ITER_RESET(&it, parentIdListPtr); while (TMLIST_ITER_HASNEXT(&it, parentIdListPtr)) { long parentId = (long)TMLIST_ITER_NEXT(&it, parentIdListPtr); bool_t status = PVECTOR_PUSHBACK(parentQueryVectorPtr, (void*)&queries[parentId]); assert(status); } } /* ============================================================================= * populateQueryVectors * -- Modifies 
contents of queryVectorPtr and parentQueryVectorPtr * ============================================================================= */ static void populateQueryVectors (net_t* netPtr, long id, query_t* queries, vector_t* queryVectorPtr, vector_t* parentQueryVectorPtr) { populateParentQueryVector(netPtr, id, queries, parentQueryVectorPtr); bool_t status; status = vector_copy(queryVectorPtr, parentQueryVectorPtr); assert(status); status = vector_pushBack(queryVectorPtr, (void*)&queries[id]); assert(status); vector_sort(queryVectorPtr, &compareQuery); } /* ============================================================================= * TMpopulateQueryVectors * -- Modifies contents of queryVectorPtr and parentQueryVectorPtr * ============================================================================= */ static void TMpopulateQueryVectors (TM_ARGDECL net_t* netPtr, long id, query_t* queries, vector_t* queryVectorPtr, vector_t* parentQueryVectorPtr) { TMpopulateParentQueryVector(TM_ARG netPtr, id, queries, parentQueryVectorPtr); bool_t status; status = PVECTOR_COPY(queryVectorPtr, parentQueryVectorPtr); assert(status); status = PVECTOR_PUSHBACK(queryVectorPtr, (void*)&queries[id]); assert(status); PVECTOR_SORT(queryVectorPtr, &compareQuery); } /* ============================================================================= * computeLocalLogLikelihoodHelper * -- Recursive helper routine * ============================================================================= */ static float computeLocalLogLikelihoodHelper (long i, long numParent, adtree_t* adtreePtr, query_t* queries, vector_t* queryVectorPtr, vector_t* parentQueryVectorPtr) { if (i >= numParent) { return computeSpecificLocalLogLikelihood(adtreePtr, queryVectorPtr, parentQueryVectorPtr); } float localLogLikelihood = 0.0; query_t* parentQueryPtr = vector_at(parentQueryVectorPtr, i); long parentIndex = parentQueryPtr->index; queries[parentIndex].value = 0; localLogLikelihood += computeLocalLogLikelihoodHelper((i + 
1), numParent, adtreePtr, queries, queryVectorPtr, parentQueryVectorPtr); queries[parentIndex].value = 1; localLogLikelihood += computeLocalLogLikelihoodHelper((i + 1), numParent, adtreePtr, queries, queryVectorPtr, parentQueryVectorPtr); queries[parentIndex].value = QUERY_VALUE_WILDCARD; return localLogLikelihood; } /* ============================================================================= * computeLocalLogLikelihood * -- Populate the query vectors before passing as args * ============================================================================= */ static float computeLocalLogLikelihood (long id, adtree_t* adtreePtr, net_t* netPtr, query_t* queries, vector_t* queryVectorPtr, vector_t* parentQueryVectorPtr) { long numParent = vector_getSize(parentQueryVectorPtr); float localLogLikelihood = 0.0; queries[id].value = 0; localLogLikelihood += computeLocalLogLikelihoodHelper(0, numParent, adtreePtr, queries, queryVectorPtr, parentQueryVectorPtr); queries[id].value = 1; localLogLikelihood += computeLocalLogLikelihoodHelper(0, numParent, adtreePtr, queries, queryVectorPtr, parentQueryVectorPtr); queries[id].value = QUERY_VALUE_WILDCARD; return localLogLikelihood; } /* ============================================================================= * TMfindBestInsertTask * ============================================================================= */ static learner_task_t TMfindBestInsertTask (TM_ARGDECL findBestTaskArg_t* argPtr) { long toId = argPtr->toId; learner_t* learnerPtr = argPtr->learnerPtr; query_t* queries = argPtr->queries; vector_t* queryVectorPtr = argPtr->queryVectorPtr; vector_t* parentQueryVectorPtr = argPtr->parentQueryVectorPtr; long numTotalParent = argPtr->numTotalParent; float basePenalty = argPtr->basePenalty; float baseLogLikelihood = argPtr->baseLogLikelihood; bitmap_t* invalidBitmapPtr = argPtr->bitmapPtr; queue_t* workQueuePtr = argPtr->workQueuePtr; vector_t* baseParentQueryVectorPtr = argPtr->aQueryVectorPtr; vector_t* 
baseQueryVectorPtr = argPtr->bQueryVectorPtr; bool_t status; adtree_t* adtreePtr = learnerPtr->adtreePtr; net_t* netPtr = learnerPtr->netPtr; float* localBaseLogLikelihoods = learnerPtr->localBaseLogLikelihoods; TMpopulateParentQueryVector(TM_ARG netPtr, toId, queries, parentQueryVectorPtr); /* * Create base query and parentQuery */ status = PVECTOR_COPY(baseParentQueryVectorPtr, parentQueryVectorPtr); assert(status); status = PVECTOR_COPY(baseQueryVectorPtr, baseParentQueryVectorPtr); assert(status); status = PVECTOR_PUSHBACK(baseQueryVectorPtr, (void*)&queries[toId]); assert(status); PVECTOR_SORT(queryVectorPtr, &compareQuery); /* * Search all possible valid operations for better local log likelihood */ float bestFromId = toId; /* flag for not found */ float oldLocalLogLikelihood = (float)TM_SHARED_READ_F(localBaseLogLikelihoods[toId]); float bestLocalLogLikelihood = oldLocalLogLikelihood; status = TMNET_FINDDESCENDANTS(netPtr, toId, invalidBitmapPtr, workQueuePtr); assert(status); long fromId = -1; list_t* parentIdListPtr = net_getParentIdListPtr(netPtr, toId); long maxNumEdgeLearned = global_maxNumEdgeLearned; if ((maxNumEdgeLearned < 0) || (TMLIST_GETSIZE(parentIdListPtr) <= maxNumEdgeLearned)) { list_iter_t it; TMLIST_ITER_RESET(&it, parentIdListPtr); while (TMLIST_ITER_HASNEXT(&it, parentIdListPtr)) { long parentId = (long)TMLIST_ITER_NEXT(&it, parentIdListPtr); bitmap_set(invalidBitmapPtr, parentId); /* invalid since already have edge */ } while ((fromId = bitmap_findClear(invalidBitmapPtr, (fromId + 1))) >= 0) { if (fromId == toId) { continue; } status = PVECTOR_COPY(queryVectorPtr, baseQueryVectorPtr); assert(status); status = PVECTOR_PUSHBACK(queryVectorPtr, (void*)&queries[fromId]); assert(status); PVECTOR_SORT(queryVectorPtr, &compareQuery); status = PVECTOR_COPY(parentQueryVectorPtr, baseParentQueryVectorPtr); assert(status); status = PVECTOR_PUSHBACK(parentQueryVectorPtr, (void*)&queries[fromId]); assert(status); PVECTOR_SORT(parentQueryVectorPtr, 
&compareQuery); float newLocalLogLikelihood = computeLocalLogLikelihood(toId, adtreePtr, netPtr, queries, queryVectorPtr, parentQueryVectorPtr); if (newLocalLogLikelihood > bestLocalLogLikelihood) { bestLocalLogLikelihood = newLocalLogLikelihood; bestFromId = fromId; } } /* foreach valid parent */ } /* if have not exceeded max number of edges to learn */ /* * Return best task; Note: if none is better, fromId will equal toId */ learner_task_t bestTask; bestTask.op = OPERATION_INSERT; bestTask.fromId = bestFromId; bestTask.toId = toId; bestTask.score = 0.0; if (bestFromId != toId) { long numRecord = adtreePtr->numRecord; long numParent = TMLIST_GETSIZE(parentIdListPtr) + 1; float penalty = (numTotalParent + numParent * global_insertPenalty) * basePenalty; float logLikelihood = numRecord * (baseLogLikelihood + + bestLocalLogLikelihood - oldLocalLogLikelihood); float bestScore = penalty + logLikelihood; bestTask.score = bestScore; } return bestTask; } #ifdef LEARNER_TRY_REMOVE /* ============================================================================= * TMfindBestRemoveTask * ============================================================================= */ static learner_task_t TMfindBestRemoveTask (TM_ARGDECL findBestTaskArg_t* argPtr) { long toId = argPtr->toId; learner_t* learnerPtr = argPtr->learnerPtr; query_t* queries = argPtr->queries; vector_t* queryVectorPtr = argPtr->queryVectorPtr; vector_t* parentQueryVectorPtr = argPtr->parentQueryVectorPtr; long numTotalParent = argPtr->numTotalParent; float basePenalty = argPtr->basePenalty; float baseLogLikelihood = argPtr->baseLogLikelihood; vector_t* origParentQueryVectorPtr = argPtr->aQueryVectorPtr; bool_t status; adtree_t* adtreePtr = learnerPtr->adtreePtr; net_t* netPtr = learnerPtr->netPtr; float* localBaseLogLikelihoods = learnerPtr->localBaseLogLikelihoods; TMpopulateParentQueryVector(TM_ARG netPtr, toId, queries, origParentQueryVectorPtr); long numParent = PVECTOR_GETSIZE(origParentQueryVectorPtr); /* * 
Search all possible valid operations for better local log likelihood */ float bestFromId = toId; /* flag for not found */ float oldLocalLogLikelihood = (float)TM_SHARED_READ_F(localBaseLogLikelihoods[toId]); float bestLocalLogLikelihood = oldLocalLogLikelihood; long i; for (i = 0; i < numParent; i++) { query_t* queryPtr = (query_t*)PVECTOR_AT(origParentQueryVectorPtr, i); long fromId = queryPtr->index; /* * Create parent query (subset of parents since remove an edge) */ PVECTOR_CLEAR(parentQueryVectorPtr); long p; for (p = 0; p < numParent; p++) { if (p != fromId) { query_t* queryPtr = PVECTOR_AT(origParentQueryVectorPtr, p); status = PVECTOR_PUSHBACK(parentQueryVectorPtr, (void*)&queries[queryPtr->index]); assert(status); } } /* create new parent query */ /* * Create query */ status = PVECTOR_COPY(queryVectorPtr, parentQueryVectorPtr); assert(status); status = PVECTOR_PUSHBACK(queryVectorPtr, (void*)&queries[toId]); assert(status); PVECTOR_SORT(queryVectorPtr, &compareQuery); /* * See if removing parent is better */ float newLocalLogLikelihood = computeLocalLogLikelihood(toId, adtreePtr, netPtr, queries, queryVectorPtr, parentQueryVectorPtr); if (newLocalLogLikelihood > bestLocalLogLikelihood) { bestLocalLogLikelihood = newLocalLogLikelihood; bestFromId = fromId; } } /* for each parent */ /* * Return best task; Note: if none is better, fromId will equal toId */ learner_task_t bestTask; bestTask.op = OPERATION_REMOVE; bestTask.fromId = bestFromId; bestTask.toId = toId; bestTask.score = 0.0; if (bestFromId != toId) { long numRecord = adtreePtr->numRecord; float penalty = (numTotalParent - 1) * basePenalty; float logLikelihood = numRecord * (baseLogLikelihood + + bestLocalLogLikelihood - oldLocalLogLikelihood); float bestScore = penalty + logLikelihood; bestTask.score = bestScore; } return bestTask; } #endif /* LEARNER_TRY_REMOVE */ #ifdef LEARNER_TRY_REVERSE /* ============================================================================= * TMfindBestReverseTask * 
============================================================================= */ static learner_task_t TMfindBestReverseTask (TM_ARGDECL findBestTaskArg_t* argPtr) { long toId = argPtr->toId; learner_t* learnerPtr = argPtr->learnerPtr; query_t* queries = argPtr->queries; vector_t* queryVectorPtr = argPtr->queryVectorPtr; vector_t* parentQueryVectorPtr = argPtr->parentQueryVectorPtr; long numTotalParent = argPtr->numTotalParent; float basePenalty = argPtr->basePenalty; float baseLogLikelihood = argPtr->baseLogLikelihood; bitmap_t* visitedBitmapPtr = argPtr->bitmapPtr; queue_t* workQueuePtr = argPtr->workQueuePtr; vector_t* toOrigParentQueryVectorPtr = argPtr->aQueryVectorPtr; vector_t* fromOrigParentQueryVectorPtr = argPtr->bQueryVectorPtr; bool_t status; adtree_t* adtreePtr = learnerPtr->adtreePtr; net_t* netPtr = learnerPtr->netPtr; float* localBaseLogLikelihoods = learnerPtr->localBaseLogLikelihoods; TMpopulateParentQueryVector(TM_ARG netPtr, toId, queries, toOrigParentQueryVectorPtr); long numParent = PVECTOR_GETSIZE(toOrigParentQueryVectorPtr); /* * Search all possible valid operations for better local log likelihood */ long bestFromId = toId; /* flag for not found */ float oldLocalLogLikelihood = (float)TM_SHARED_READ_F(localBaseLogLikelihoods[toId]); float bestLocalLogLikelihood = oldLocalLogLikelihood; long fromId = 0; long i; for (i = 0; i < numParent; i++) { query_t* queryPtr = (query_t*)PVECTOR_AT(toOrigParentQueryVectorPtr, i); fromId = queryPtr->index; bestLocalLogLikelihood = localBaseLogLikelihoods[toId] + localBaseLogLikelihoods[fromId]; TMpopulateParentQueryVector(TM_ARG netPtr, fromId, queries, fromOrigParentQueryVectorPtr); /* * Create parent query (subset of parents since remove an edge) */ PVECTOR_CLEAR(parentQueryVectorPtr); long p; for (p = 0; p < numParent; p++) { if (p != fromId) { query_t* queryPtr = PVECTOR_AT(toOrigParentQueryVectorPtr, p); status = PVECTOR_PUSHBACK(parentQueryVectorPtr, (void*)&queries[queryPtr->index]); assert(status); 
} } /* create new parent query */ /* * Create query */ status = PVECTOR_COPY(queryVectorPtr, parentQueryVectorPtr); assert(status); status = PVECTOR_PUSHBACK(queryVectorPtr, (void*)&queries[toId]); assert(status); PVECTOR_SORT(queryVectorPtr, &compareQuery); /* * Get log likelihood for removing parent from toId */ float newLocalLogLikelihood = computeLocalLogLikelihood(toId, adtreePtr, netPtr, queries, queryVectorPtr, parentQueryVectorPtr); /* * Get log likelihood for adding parent to fromId */ status = PVECTOR_COPY(parentQueryVectorPtr, fromOrigParentQueryVectorPtr); assert(status); status = PVECTOR_PUSHBACK(parentQueryVectorPtr, (void*)&queries[toId]); assert(status); PVECTOR_SORT(parentQueryVectorPtr, &compareQuery); status = PVECTOR_COPY(queryVectorPtr, parentQueryVectorPtr); assert(status); status = PVECTOR_PUSHBACK(queryVectorPtr, (void*)&queries[fromId]); assert(status); PVECTOR_SORT(queryVectorPtr, &compareQuery); newLocalLogLikelihood += computeLocalLogLikelihood(fromId, adtreePtr, netPtr, queries, queryVectorPtr, parentQueryVectorPtr); /* * Record best */ if (newLocalLogLikelihood > bestLocalLogLikelihood) { bestLocalLogLikelihood = newLocalLogLikelihood; bestFromId = fromId; } } /* for each parent */ /* * Check validity of best */ if (bestFromId != toId) { bool_t isTaskValid = TRUE; TMNET_APPLYOPERATION(netPtr, OPERATION_REMOVE, bestFromId, toId); if (TMNET_ISPATH(netPtr, bestFromId, toId, visitedBitmapPtr, workQueuePtr)) { isTaskValid = FALSE; } TMNET_APPLYOPERATION(netPtr, OPERATION_INSERT, bestFromId, toId); if (!isTaskValid) { bestFromId = toId; } } /* * Return best task; Note: if none is better, fromId will equal toId */ learner_task_t bestTask; bestTask.op = OPERATION_REVERSE; bestTask.fromId = bestFromId; bestTask.toId = toId; bestTask.score = 0.0; if (bestFromId != toId) { float fromLocalLogLikelihood = (float)TM_SHARED_READ_F(localBaseLogLikelihoods[bestFromId]); long numRecord = adtreePtr->numRecord; float penalty = numTotalParent * 
basePenalty; float logLikelihood = numRecord * (baseLogLikelihood + + bestLocalLogLikelihood - oldLocalLogLikelihood - fromLocalLogLikelihood); float bestScore = penalty + logLikelihood; bestTask.score = bestScore; } return bestTask; } #endif /* LEARNER_TRY_REVERSE */ /* ============================================================================= * learnStructure * * Note it is okay if the score is not exact, as we are relaxing the greedy * search. This means we do not need to communicate baseLogLikelihood across * threads. * ============================================================================= */ static void learnStructure (void* argPtr) { TM_THREAD_ENTER(); learner_t* learnerPtr = (learner_t*)argPtr; net_t* netPtr = learnerPtr->netPtr; adtree_t* adtreePtr = learnerPtr->adtreePtr; long numRecord = adtreePtr->numRecord; float* localBaseLogLikelihoods = learnerPtr->localBaseLogLikelihoods; list_t* taskListPtr = learnerPtr->taskListPtr; float operationQualityFactor = global_operationQualityFactor; bitmap_t* visitedBitmapPtr = PBITMAP_ALLOC(learnerPtr->adtreePtr->numVar); assert(visitedBitmapPtr); queue_t* workQueuePtr = PQUEUE_ALLOC(-1); assert(workQueuePtr); long numVar = adtreePtr->numVar; query_t* queries = (query_t*)P_MALLOC(numVar * sizeof(query_t)); assert(queries); long v; for (v = 0; v < numVar; v++) { queries[v].index = v; queries[v].value = QUERY_VALUE_WILDCARD; } float basePenalty = (float)(-0.5 * log((double)numRecord)); vector_t* queryVectorPtr = PVECTOR_ALLOC(1); assert(queryVectorPtr); vector_t* parentQueryVectorPtr = PVECTOR_ALLOC(1); assert(parentQueryVectorPtr); vector_t* aQueryVectorPtr = PVECTOR_ALLOC(1); assert(aQueryVectorPtr); vector_t* bQueryVectorPtr = PVECTOR_ALLOC(1); assert(bQueryVectorPtr); findBestTaskArg_t arg; arg.learnerPtr = learnerPtr; arg.queries = queries; arg.queryVectorPtr = queryVectorPtr; arg.parentQueryVectorPtr = parentQueryVectorPtr; arg.bitmapPtr = visitedBitmapPtr; arg.workQueuePtr = workQueuePtr; 
arg.aQueryVectorPtr = aQueryVectorPtr; arg.bQueryVectorPtr = bQueryVectorPtr; while (1) { learner_task_t* taskPtr; TM_BEGIN(); taskPtr = TMpopTask(TM_ARG taskListPtr); TM_END(); if (taskPtr == NULL) { break; } operation_t op = taskPtr->op; long fromId = taskPtr->fromId; long toId = taskPtr->toId; bool_t isTaskValid; TM_BEGIN(); /* * Check if task is still valid */ isTaskValid = TRUE; switch (op) { case OPERATION_INSERT: { if (TMNET_HASEDGE(netPtr, fromId, toId) || TMNET_ISPATH(netPtr, toId, fromId, visitedBitmapPtr, workQueuePtr)) { isTaskValid = FALSE; } break; } case OPERATION_REMOVE: { /* Can never create cycle, so always valid */ break; } case OPERATION_REVERSE: { /* Temporarily remove edge for check */ TMNET_APPLYOPERATION(netPtr, OPERATION_REMOVE, fromId, toId); if (TMNET_ISPATH(netPtr, fromId, toId, visitedBitmapPtr, workQueuePtr)) { isTaskValid = FALSE; } TMNET_APPLYOPERATION(netPtr, OPERATION_INSERT, fromId, toId); break; } default: assert(0); } #ifdef TEST_LEARNER printf("[task] op=%i from=%li to=%li score=%lf valid=%s\n", taskPtr->op, taskPtr->fromId, taskPtr->toId, taskPtr->score, (isTaskValid ? 
"yes" : "no")); fflush(stdout); #endif /* * Perform task: update graph and probabilities */ if (isTaskValid) { TMNET_APPLYOPERATION(netPtr, op, fromId, toId); } TM_END(); float deltaLogLikelihood = 0.0; if (isTaskValid) { switch (op) { float newBaseLogLikelihood; case OPERATION_INSERT: { TM_BEGIN(); TMpopulateQueryVectors(TM_ARG netPtr, toId, queries, queryVectorPtr, parentQueryVectorPtr); newBaseLogLikelihood = computeLocalLogLikelihood(toId, adtreePtr, netPtr, queries, queryVectorPtr, parentQueryVectorPtr); float toLocalBaseLogLikelihood = (float)TM_SHARED_READ_F(localBaseLogLikelihoods[toId]); deltaLogLikelihood += toLocalBaseLogLikelihood - newBaseLogLikelihood; TM_SHARED_WRITE_F(localBaseLogLikelihoods[toId], newBaseLogLikelihood); TM_END(); TM_BEGIN(); long numTotalParent = (long)TM_SHARED_READ(learnerPtr->numTotalParent); TM_SHARED_WRITE(learnerPtr->numTotalParent, (numTotalParent + 1)); TM_END(); break; } #ifdef LEARNER_TRY_REMOVE case OPERATION_REMOVE: { TM_BEGIN(); TMpopulateQueryVectors(TM_ARG netPtr, fromId, queries, queryVectorPtr, parentQueryVectorPtr); newBaseLogLikelihood = computeLocalLogLikelihood(fromId, adtreePtr, netPtr, queries, queryVectorPtr, parentQueryVectorPtr); float fromLocalBaseLogLikelihood = (float)TM_SHARED_READ_F(localBaseLogLikelihoods[fromId]); deltaLogLikelihood += fromLocalBaseLogLikelihood - newBaseLogLikelihood; TM_SHARED_WRITE_F(localBaseLogLikelihoods[fromId], newBaseLogLikelihood); TM_END(); TM_BEGIN(); long numTotalParent = (long)TM_SHARED_READ(learnerPtr->numTotalParent); TM_SHARED_WRITE(learnerPtr->numTotalParent, (numTotalParent - 1)); TM_END(); break; } #endif /* LEARNER_TRY_REMOVE */ #ifdef LEARNER_TRY_REVERSE case OPERATION_REVERSE: { TM_BEGIN(); TMpopulateQueryVectors(TM_ARG netPtr, fromId, queries, queryVectorPtr, parentQueryVectorPtr); newBaseLogLikelihood = computeLocalLogLikelihood(fromId, adtreePtr, netPtr, queries, queryVectorPtr, parentQueryVectorPtr); float fromLocalBaseLogLikelihood = 
(float)TM_SHARED_READ_F(localBaseLogLikelihoods[fromId]); deltaLogLikelihood += fromLocalBaseLogLikelihood - newBaseLogLikelihood; TM_SHARED_WRITE_F(localBaseLogLikelihoods[fromId], newBaseLogLikelihood); TM_END(); TM_BEGIN(); TMpopulateQueryVectors(TM_ARG netPtr, toId, queries, queryVectorPtr, parentQueryVectorPtr); newBaseLogLikelihood = computeLocalLogLikelihood(toId, adtreePtr, netPtr, queries, queryVectorPtr, parentQueryVectorPtr); float toLocalBaseLogLikelihood = (float)TM_SHARED_READ_F(localBaseLogLikelihoods[toId]); deltaLogLikelihood += toLocalBaseLogLikelihood - newBaseLogLikelihood; TM_SHARED_WRITE_F(localBaseLogLikelihoods[toId], newBaseLogLikelihood); TM_END(); break; } #endif /* LEARNER_TRY_REVERSE */ default: assert(0); } /* switch op */ } /* if isTaskValid */ /* * Update/read globals */ float baseLogLikelihood; long numTotalParent; TM_BEGIN(); float oldBaseLogLikelihood = (float)TM_SHARED_READ_F(learnerPtr->baseLogLikelihood); float newBaseLogLikelihood = oldBaseLogLikelihood + deltaLogLikelihood; TM_SHARED_WRITE_F(learnerPtr->baseLogLikelihood, newBaseLogLikelihood); baseLogLikelihood = newBaseLogLikelihood; numTotalParent = (long)TM_SHARED_READ(learnerPtr->numTotalParent); TM_END(); /* * Find next task */ float baseScore = ((float)numTotalParent * basePenalty) + (numRecord * baseLogLikelihood); learner_task_t bestTask; bestTask.op = NUM_OPERATION; bestTask.toId = -1; bestTask.fromId = -1; bestTask.score = baseScore; learner_task_t newTask; arg.toId = toId; arg.numTotalParent = numTotalParent; arg.basePenalty = basePenalty; arg.baseLogLikelihood = baseLogLikelihood; TM_BEGIN(); newTask = TMfindBestInsertTask(TM_ARG &arg); TM_END(); if ((newTask.fromId != newTask.toId) && (newTask.score > (bestTask.score / operationQualityFactor))) { bestTask = newTask; } #ifdef LEARNER_TRY_REMOVE TM_BEGIN(); newTask = TMfindBestRemoveTask(TM_ARG &arg); TM_END(); if ((newTask.fromId != newTask.toId) && (newTask.score > (bestTask.score / operationQualityFactor))) { 
bestTask = newTask; } #endif /* LEARNER_TRY_REMOVE */ #ifdef LEARNER_TRY_REVERSE TM_BEGIN(); newTask = TMfindBestReverseTask(TM_ARG &arg); TM_END(); if ((newTask.fromId != newTask.toId) && (newTask.score > (bestTask.score / operationQualityFactor))) { bestTask = newTask; } #endif /* LEARNER_TRY_REVERSE */ if (bestTask.toId != -1) { learner_task_t* tasks = learnerPtr->tasks; tasks[toId] = bestTask; TM_BEGIN(); TMLIST_INSERT(taskListPtr, (void*)&tasks[toId]); TM_END(); #ifdef TEST_LEARNER printf("[new] op=%i from=%li to=%li score=%lf\n", bestTask.op, bestTask.fromId, bestTask.toId, bestTask.score); fflush(stdout); #endif } } /* while (tasks) */ PBITMAP_FREE(visitedBitmapPtr); PQUEUE_FREE(workQueuePtr); PVECTOR_FREE(bQueryVectorPtr); PVECTOR_FREE(aQueryVectorPtr); PVECTOR_FREE(queryVectorPtr); PVECTOR_FREE(parentQueryVectorPtr); P_FREE(queries); TM_THREAD_EXIT(); } /* ============================================================================= * learner_run * -- Call adtree_make before this * ============================================================================= */ void learner_run (learner_t* learnerPtr) { #ifdef OTM #pragma omp parallel { createTaskList((void*)learnerPtr); } #pragma omp parallel { learnStructure((void*)learnerPtr); } #else thread_start(&createTaskList, (void*)learnerPtr); thread_start(&learnStructure, (void*)learnerPtr); #endif } /* ============================================================================= * learner_score * -- Score entire network * ============================================================================= */ float learner_score (learner_t* learnerPtr) { adtree_t* adtreePtr = learnerPtr->adtreePtr; net_t* netPtr = learnerPtr->netPtr; vector_t* queryVectorPtr = vector_alloc(1); assert(queryVectorPtr); vector_t* parentQueryVectorPtr = vector_alloc(1); assert(parentQueryVectorPtr); long numVar = adtreePtr->numVar; query_t* queries = (query_t*)malloc(numVar * sizeof(query_t)); assert(queries); long v; for (v = 0; v < 
numVar; v++) { queries[v].index = v; queries[v].value = QUERY_VALUE_WILDCARD; } long numTotalParent = 0; float logLikelihood = 0.0; for (v = 0; v < numVar; v++) { list_t* parentIdListPtr = net_getParentIdListPtr(netPtr, v); numTotalParent += list_getSize(parentIdListPtr); populateQueryVectors(netPtr, v, queries, queryVectorPtr, parentQueryVectorPtr); float localLogLikelihood = computeLocalLogLikelihood(v, adtreePtr, netPtr, queries, queryVectorPtr, parentQueryVectorPtr); logLikelihood += localLogLikelihood; } vector_free(queryVectorPtr); vector_free(parentQueryVectorPtr); free(queries); long numRecord = adtreePtr->numRecord; float penalty = (float)(-0.5 * (double)numTotalParent * log((double)numRecord)); float score = penalty + numRecord * logLikelihood; return score; } /* ############################################################################# * TEST_LEARNER * ############################################################################# */ #ifdef TEST_LEARNER #include <stdio.h> static void testPartition (long min, long max, long n) { long start; long stop; printf("min=%li max=%li, n=%li\n", min, max, n); long i; for (i = 0; i < n; i++) { createPartition(min, max, i, n, &start, &stop); printf("%li: %li -> %li\n", i, start, stop); } puts(""); } int main (int argc, char* argv[]) { thread_startup(1); puts("Starting..."); testPartition(0, 4, 8); testPartition(0, 15, 8); testPartition(3, 103, 7); long numVar = 56; long numRecord = 256; random_t* randomPtr = random_alloc(); data_t* dataPtr = data_alloc(numVar, numRecord, randomPtr); assert(dataPtr); data_generate(dataPtr, 0, 10, 10); adtree_t* adtreePtr = adtree_alloc(); assert(adtreePtr); adtree_make(adtreePtr, dataPtr); learner_t* learnerPtr = learner_alloc(dataPtr, adtreePtr, 1); assert(learnerPtr); data_free(dataPtr); learner_run(learnerPtr); assert(!net_isCycle(learnerPtr->netPtr)); float score = learner_score(learnerPtr); printf("score = %lf\n", score); learner_free(learnerPtr); puts("Done."); 
adtree_free(adtreePtr); random_free(randomPtr); thread_shutdown(); return 0; } #endif /* TEST_LEARNER */ /* ============================================================================= * * End of learner.h * * ============================================================================= */
rhspmain.c
/*     CalculiX - A 3-dimensional finite element program                 */
/*              Copyright (C) 1998-2015 Guido Dhondt                          */

/*     This program is free software; you can redistribute it and/or     */
/*     modify it under the terms of the GNU General Public License as    */
/*     published by the Free Software Foundation(version 2);    */
/*                    */

/*     This program is distributed in the hope that it will be useful,   */
/*     but WITHOUT ANY WARRANTY; without even the implied warranty of    */
/*     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the      */
/*     GNU General Public License for more details.                      */

/*     You should have received a copy of the GNU General Public License */
/*     along with this program; if not, write to the Free Software       */
/*     Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.         */

#include <unistd.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <pthread.h>
#include "CalculiX.h"

/* File-scope copies of the rhspmain() arguments so the pthread worker
 * rhspmt() (which only receives a thread index) can reach them. */
static char *lakonf1;

static ITG num_cpus,*nef1,*ipnei1,*neifa1,*neiel1,*jq1,*irow1,*ielfa1,
    *ifabou1,*neq1,*nzs1,*neij1;

/* au1/ad1/b1 hold one private copy of the matrix/rhs PER THREAD
 * (allocated num_cpus * size); they are reduced after the join below. */
static double *vfa1,*area1,*advfa1,*xlet1,*cosa1,*volume1,*au1=NULL,*ad1=NULL,
    *ap1,*xle1,*b1=NULL,*xxn1,*hfa1,*gradpel1,*bp1,*xxi1,*xlen1,*cosb1;

/* Multithreaded driver: assembles the lhs (ad/au) and rhs (b) of the
 * pressure equations for the CFD solver by partitioning the elements
 * over num_cpus pthreads (each calling the Fortran routine rhsp) and
 * then accumulating the per-thread partial results. */
void rhspmain(ITG *nef,char *lakonf,ITG *ipnei,
              ITG *neifa,ITG *neiel,double *vfa,double *area,double *advfa,
              double *xlet,double *cosa,double *volume,double *au,double *ad,
              ITG *jq,ITG *irow,double *ap,ITG *ielfa,ITG *ifabou,
              double *xle,double *b,double *xxn,ITG *neq,
              ITG *nzs,double *hfa,double *gradpel,
              double *bp,double *xxi,ITG *neij,double *xlen,
              ITG *iatleastonepressurebc){

    ITG i,j;

    /* variables for multithreading procedure */

    ITG sys_cpus,*ithread=NULL;
    char *env,*envloc,*envsys;

    num_cpus = 0;
    sys_cpus=0;

    /* explicit user declaration prevails */

    envsys=getenv("NUMBER_OF_CPUS");
    if(envsys){
	sys_cpus=atoi(envsys);
	if(sys_cpus<0) sys_cpus=0;
    }

    /* automatic detection of available number of processors */

    if(sys_cpus==0){
	sys_cpus = getSystemCPUs();
	if(sys_cpus<1) sys_cpus=1;
    }

    /* local declaration prevails, if strictly positive */

    envloc = getenv("CCX_NPROC_CFD");
    if(envloc){
	num_cpus=atoi(envloc);
	if(num_cpus<0){
	    num_cpus=0;
	}else if(num_cpus>sys_cpus){
	    num_cpus=sys_cpus;
	}
    }

    /* else global declaration, if any, applies */

    env = getenv("OMP_NUM_THREADS");
    if(num_cpus==0){
	if (env)
	    num_cpus = atoi(env);
	if (num_cpus < 1) {
	    num_cpus=1;
	}else if(num_cpus>sys_cpus){
	    num_cpus=sys_cpus;
	}
    }

    // next line is to be inserted in a similar way for all other parallel parts
    // never use more threads than elements
    if(*nef<num_cpus) num_cpus=*nef;

    pthread_t tid[num_cpus];

    /* allocating fields for lhs and rhs matrix
       (one private copy per thread; reduced after the join) */

    NNEW(ad1,double,num_cpus**neq);
    NNEW(au1,double,(long long)num_cpus**nzs);
    NNEW(b1,double,num_cpus**neq);

    /* calculating the stiffness and/or mass matrix
       (symmetric part) */

    /* publish the arguments through the file-scope statics for rhspmt;
       NOTE: the comma after xlet1=xlet is the comma operator — it behaves
       like the surrounding semicolons */
    nef1=nef;lakonf1=lakonf;ipnei1=ipnei;neifa1=neifa;neiel1=neiel;
    vfa1=vfa;area1=area;advfa1=advfa;xlet1=xlet,cosa1=cosa;volume1=volume;
    jq1=jq;irow1=irow;ap1=ap;ielfa1=ielfa;ifabou1=ifabou;xle1=xle;
    xxn1=xxn;neq1=neq;nzs1=nzs;hfa1=hfa;gradpel1=gradpel;bp1=bp;xxi1=xxi;
    neij1=neij;xlen1=xlen;

    /* create threads and wait */

    NNEW(ithread,ITG,num_cpus);
    for(i=0; i<num_cpus; i++)  {
	ithread[i]=i;
	pthread_create(&tid[i], NULL, (void *)rhspmt, (void *)&ithread[i]);
    }
    for(i=0; i<num_cpus; i++)  pthread_join(tid[i], NULL);
    SFREE(ithread);

    /* copying and accumulating the stiffness and/or mass matrix
       (reduction of the per-thread partial sums into ad/au/b) */

#pragma omp parallel \
    default(none) \
    shared(neq,ad,ad1,num_cpus,nzs,au,au1,b,b1) \
    private(i,j)
    {
#pragma omp for
	for(i=0;i<*neq;i++){
	    ad[i]=ad1[i];
	    for(j=1;j<num_cpus;j++){
		ad[i]+=ad1[i+j**neq];
	    }
	}

#pragma omp for
	for(i=0;i<*nzs;i++){
	    au[i]=au1[i];
	    for(j=1;j<num_cpus;j++){
		au[i]+=au1[i+(long long)j**nzs];
	    }
	}

#pragma omp for
	for(i=0;i<*neq;i++){
	    b[i]=b1[i];
	    for(j=1;j<num_cpus;j++){
		b[i]+=b1[i+j**neq];
	    }
	}
    }

    SFREE(ad1);
    SFREE(au1);
    SFREE(b1);

    /* at least one pressure bc is needed. If none is applied,
       the last dof is set to 0

       a pressure bc is only recognized if not all velocity
       degrees of freedom are prescribed on the same face */

    if(*iatleastonepressurebc==0) b[*nef-1]=0.;

    return;

}

/* subroutine for multithreading of rhsp: computes this thread's
   contiguous element slice [nefa,nefb] into its private matrix/rhs copy */

void *rhspmt(ITG *i){

    ITG indexad,indexb,nefa,nefb,nefdelta;
    long long indexau;

    /* offsets of this thread's private copy within ad1/au1/b1 */
    indexad=*i**neq1;
    indexau=(long long)*i**nzs1;
    indexb=*i**neq1;

    // ceil -> floor
    nefdelta=(ITG)floor(*nef1/(double)num_cpus);
    nefa=*i*nefdelta+1;
    nefb=(*i+1)*nefdelta;
    // next line! -> all parallel sections
    // the last thread picks up the remainder elements
    if((*i==num_cpus-1)&&(nefb<*nef1)) nefb=*nef1;

    FORTRAN(rhsp,(nef1,lakonf1,ipnei1,neifa1,neiel1,vfa1,area1,
		  advfa1,xlet1,cosa1,volume1,&au1[indexau],&ad1[indexad],
		  jq1,irow1,ap1,ielfa1,ifabou1,xle1,&b1[indexb],xxn1,neq1,nzs1,
		  hfa1,gradpel1,bp1,xxi1,neij1,xlen1,&nefa,&nefb));

    return NULL;
}
10-omp.c
/******************************************************************************
* FILE: omp_hello.c
* DESCRIPTION:
*   OpenMP Example - Hello World - C/C++ Version
*   A minimal fork/join demo: the master thread opens a parallel region,
*   every thread in the team prints its own id, and the master thread
*   additionally reports the team size via the OpenMP runtime routines
*   omp_get_thread_num() and omp_get_num_threads().
* AUTHOR: Blaise Barney  5/99
* LAST REVISED: 04/06/05
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

int main (int argc, char *argv[])
{
    int thread_count;
    int thread_id;

    /* Fork a team of threads; each thread gets private copies of the ids */
#pragma omp parallel private(thread_count, thread_id)
    {
        /* Every thread reports its own number */
        thread_id = omp_get_thread_num();
        printf("Hello World from thread = %d\n", thread_id);

        /* The master thread (id 0) also reports the team size */
        if (thread_id == 0) {
            thread_count = omp_get_num_threads();
            printf("Number of threads = %d\n", thread_count);
        }
    }
    /* Implicit barrier: all threads join the master and the team disbands */

    return 0;
}

// RUN: clang -fopenmp -c -g -emit-llvm %s -o %t.1.bc
// RUN: opt -instnamer %t.1.bc -o %t.bc
// RUN: llvm-epp %t.bc -o %t.profile
// RUN: clang -fopenmp -v %t.epp.bc -o %t-exec -lepp-rt -lpthread 2> %t.compile
// RUN: OMP_NUM_THREADS=10 %t-exec > %t.log
// RUN: llvm-epp -p=%t.profile %t.bc 2> %t.decode
// RUN: diff -aub %t.profile %s.txt
zungqr.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_ungqr
 *
 *  Generates an m-by-n matrix Q with orthonormal columns, which
 *  is defined as the first n columns of a product of the elementary reflectors
 *  returned by plasma_zgeqrf.
 *
 *******************************************************************************
 *
 * @param[in] m
 *          The number of rows of the matrix Q. m >= 0.
 *
 * @param[in] n
 *          The number of columns of the matrix Q. m >= n >= 0.
 *
 * @param[in] k
 *          The number of columns of elementary tile reflectors whose product
 *          defines the matrix Q.
 *          n >= k >= 0.
 *
 * @param[in] pA
 *          Details of the QR factorization of the original matrix A as returned
 *          by plasma_zgeqrf, where the k first columns are the reflectors.
 *
 * @param[in] lda
 *          The leading dimension of the array A. lda >= max(1,m).
 *
 * @param[in] T
 *          Auxiliary factorization data, computed by plasma_zgeqrf.
 *
 * @param[out] pQ
 *          On exit, pointer to the m-by-n matrix Q.
 *
 * @param[in] ldq
 *          The leading dimension of the array Q. ldq >= max(1,m).
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 *
 *******************************************************************************
 *
 * @sa plasma_omp_zungqr
 * @sa plasma_cungqr
 * @sa plasma_dorgqr
 * @sa plasma_sorgqr
 * @sa plasma_zgeqrf
 *
 ******************************************************************************/
int plasma_zungqr(int m, int n, int k,
                  plasma_complex64_t *pA, int lda,
                  plasma_desc_t T,
                  plasma_complex64_t *pQ, int ldq)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if (m < 0) {
        plasma_error("illegal value of m");
        return -1;
    }
    if (n < 0 || n > m) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (k < 0 || k > n) {
        plasma_error("illegal value of k");
        return -3;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -5;
    }
    if (ldq < imax(1, m)) {
        plasma_error("illegal value of ldq");
        return -8;
    }

    // quick return
    if (n <= 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_geqrf(plasma, PlasmaComplexDouble, m, n);

    // Set tiling parameters.
    int ib = plasma->ib;
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t Q;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        m, n, 0, 0, m, k, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        m, n, 0, 0, m, k, &Q);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Allocate workspace.
    plasma_workspace_t work;
    size_t lwork = ib*nb;  // unmqr: work
    retval = plasma_workspace_create(&work, lwork, PlasmaComplexDouble);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_workspace_create() failed");
        // BUGFIX: release the already-created tile descriptors on this
        // error path (they were leaked before).
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&Q);
        return retval;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_zge2desc(pQ, ldq, Q, &sequence, &request);

        // Call the tile async function.
        plasma_omp_zungqr(A, T, Q, work, &sequence, &request);

        // Translate Q back to LAPACK layout.
        plasma_omp_zdesc2ge(Q, pQ, ldq, &sequence, &request);
    }
    // implicit synchronization

    plasma_workspace_destroy(&work);

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&Q);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_ungqr
 *
 *  Non-blocking tile version of plasma_zungqr().
 *  May return before the computation is finished.
 *  Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] A
 *          Descriptor of matrix A.
 *          A is stored in the tile layout.
 *
 * @param[in] T
 *          Descriptor of matrix T.
 *          Auxiliary factorization data, computed by plasma_zgeqrf.
 *
 * @param[out] Q
 *          Descriptor of matrix Q. On exit, matrix Q stored in the tile layout.
 *
 * @param[in] work
 *          Workspace for the auxiliary arrays needed by some coreblas kernels.
 *          For multiplication by Q contains preallocated space for work
 *          arrays. Allocated by the plasma_workspace_create function.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes).
* * @param[out] request * Identifies this function call (for exception handling purposes). * * @retval void * Errors are returned by setting sequence->status and * request->status to error values. The sequence->status and * request->status should never be set to PlasmaSuccess (the * initial values) since another async call may be setting a * failure value at the same time. * ******************************************************************************* * * @sa plasma_zungqr * @sa plasma_omp_cungqr * @sa plasma_omp_dorgqr * @sa plasma_omp_sorgqr * @sa plasma_omp_zgeqrf * ******************************************************************************/ void plasma_omp_zungqr(plasma_desc_t A, plasma_desc_t T, plasma_desc_t Q, plasma_workspace_t work, plasma_sequence_t *sequence, plasma_request_t *request) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // Check input arguments. if (plasma_desc_check(A) != PlasmaSuccess) { plasma_error("invalid A"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(T) != PlasmaSuccess) { plasma_error("invalid T"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(Q) != PlasmaSuccess) { plasma_error("invalid Q"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (sequence == NULL) { plasma_error("NULL sequence"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (request == NULL) { plasma_error("NULL request"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // quick return if (Q.n <= 0) return; // Set Q to identity. plasma_pzlaset(PlasmaGeneral, 0.0, 1.0, Q, sequence, request); // Construct Q. 
if (plasma->householder_mode == PlasmaTreeHouseholder) { plasma_pzungqr_tree(A, T, Q, work, sequence, request); } else { plasma_pzungqr(A, T, Q, work, sequence, request); } }
mixed_tentusscher_myo_epi_2004_S1_8.c
// Scenario 1 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium) // (AP + max:dvdt) #include <stdio.h> #include "mixed_tentusscher_myo_epi_2004_S1_8.h" GET_CELL_MODEL_DATA(init_cell_model_data) { if(get_initial_v) cell_model->initial_v = INITIAL_V; if(get_neq) cell_model->number_of_ode_equations = NEQ; } SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) { static bool first_call = true; if(first_call) { print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n"); first_call = false; } // Get the mapping array uint32_t *mapping = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } // Initial conditions for TenTusscher myocardium if (mapping[sv_id] == 0) { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } // Initial conditions for TenTusscher epicardium else { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G 
sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.8515588450196,0.00121430213604940,0.786571707358243,0.786296716592743,0.000167844563456543,0.488038682227013,0.00288158860722668,0.999998439423763,1.84438183277425e-08,1.82097276686639e-05,0.999777090775545,1.00745417788989,0.999998681267142,3.66658927008727e-05,0.546505726022791,10.1869983095667,139.379179153826}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } } SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) { // Get the mapping array uint32_t *mapping = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } uint32_t sv_id; int i; #pragma omp parallel for private(sv_id) for (i = 0; i < num_cells_to_solve; i++) { if(cells_to_solve) sv_id = cells_to_solve[i]; else sv_id = (uint32_t )i; for (int j = 0; j < num_steps; ++j) { if (mapping[i] == 0) solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]); else solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]); } } } void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current) { real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu_myo(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real 
Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Myocardium cell real Gks=0.062; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] Myocardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // 
real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; Irel=A*sd*sg; Ileak=0.00008f*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); 
bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; // [!] 
Myocardium cell R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; } void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current) { real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu_epi(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real 
sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Epicardium cell real Gks=0.245; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] Epicardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real parameters []={14.0452238260814,0.000108706022167263,0.000159124775268073,0.000265119966583003,0.284358323096010,0.209481064695957,0.107047904137420,2.86430120215509,0.0202992477937955,1.50873877598206,1081.34868000984,0.000400030959633365,0.453106468021992,0.0170762047823025,0.00184218763805341,2.49511058194542e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real 
IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); 
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) 
{ AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = 
FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
nodes.c
#include <stddef.h> #ifdef __cplusplus extern "C" { #endif extern void CXX_Walk_Double(char *l, const char *h, const size_t sz, double *b); #ifdef __cplusplus } #endif #include <stdio.h> #include <stdint.h> #include <omp.h> #include "allocator.h" #include "geometry.h" //#include "fio.h" #include "mesh.h" size_t nmalloc(char *fbuf, struct ntbl *n) { size_t sz = n->sz * 4; double *buf = (double *) fun3d_malloc(sz, sizeof(double)); size_t bytes = sz * sizeof(double); //struct wtbl w; //{ // w.l = fbuf; // w.h = fbuf + bytes; // w.t = DOUBLE; // w.sz = sz; //} //walkfbuf(&w, buf); CXX_Walk_Double(fbuf, fbuf + bytes, sz, buf); // Partitioned the data and arrange them uint32_t i; #pragma omp parallel for for(i = 0; i < n->sz; i++) { n->xyz->x0[i] = buf[i]; n->xyz->x1[i] = buf[i + n->sz]; n->xyz->x2[i] = buf[i + n->sz + n->sz]; /* Ignore the area, deprecated in the newer version */ /* n->area[i] = buf[i + n->sz + n->sz + n->sz]; */ } fun3d_free(buf); n->cdt = (double *) fun3d_malloc(n->sz, sizeof(double)); return bytes; }
fm_loss.h
/**
 * Copyright (c) 2015 by Contributors
 */
#ifndef DIFACTO_LOSS_FM_LOSS_H_
#define DIFACTO_LOSS_FM_LOSS_H_
#include <vector>
#include <cmath>
#include "difacto/base.h"
#include "dmlc/data.h"
#include "dmlc/io.h"
#include "difacto/loss.h"
#include "common/spmv.h"
#include "common/spmm.h"
#include "./logit_loss.h"
namespace difacto {
/**
 * \brief parameters for FM loss
 */
struct FMLossParam : public dmlc::Parameter<FMLossParam> {
  /**
   * \brief the embedding dimension (number of latent factors per feature);
   *        0 disables the second-order term and reduces FM to a linear model
   */
  int V_dim;
  DMLC_DECLARE_PARAMETER(FMLossParam) {
    DMLC_DECLARE_FIELD(V_dim).set_range(0, 10000);
  }
};
/**
 * \brief the factonization machine loss
 * :math:`f(x) = \langle w, x \rangle + \frac{1}{2} \|V x\|_2^2 - \sum_{i=1}^d x_i^2 \|V_i\|^2_2`
 *
 * NOTE(review): Predict() caches XV_ and XX_ as member state that CalcGrad()
 * reuses (enforced by the CHECK_EQs there), so CalcGrad must be called after
 * Predict on the same data block — the instance is stateful, and presumably
 * not safe for concurrent use from multiple threads; verify against callers.
 */
class FMLoss : public Loss {
 public:
  FMLoss() {}
  virtual ~FMLoss() {}

  // Parses kwargs into param_; unknown keys are returned to the caller.
  KWArgs Init(const KWArgs& kwargs) override {
    return param_.InitAllowUnknown(kwargs);
  }
  /**
   * \brief perform prediction
   *
   *  pred = X * w + .5 * sum((X*V).^2 - (X.*X)*(V.*V), 2);
   *
   * where
   * - sum(A, 2) : sum the rows of A
   * - .* : elemenetal-wise times
   *
   * @param data the data
   * @param param input parameters
   * - param[0], real_t vector, the weights
   * - param[1], int vector, the w positions
   * - param[2], int vector, the V positions
   * @param pred predict output, should be pre-allocated
   */
  void Predict(const dmlc::RowBlock<unsigned>& data,
               const std::vector<SArray<char>>& param,
               SArray<real_t>* pred) override {
    CHECK_EQ(param.size(), 3);
    // Unpack the type-erased byte arrays and delegate to the typed overload.
    Predict(data, SArray<real_t>(param[0]), SArray<int>(param[1]),
            SArray<int>(param[2]), pred);
  }

  void Predict(const dmlc::RowBlock<unsigned>& data,
               const SArray<real_t>& weights,
               const SArray<int>& w_pos,
               const SArray<int>& V_pos,
               SArray<real_t>* pred) {
    // pred = X * w  (first-order/linear term)
    SArray<real_t> w = weights;
    SpMV::Times(data, w, pred, nthreads_, w_pos, {});

    int V_dim = param_.V_dim;
    if (V_dim == 0) return;  // no embeddings: plain linear model
    SArray<real_t> V = weights;  // w and V share one buffer; *_pos select slices

    // XV_ = X*V  (cached as a member for reuse in CalcGrad)
    XV_.clear();
    XV_.resize(data.size * V_dim, 0);
    SpMM::Times(data, V, V_dim, &XV_, nthreads_, V_pos);

    // XX = X.*X  (squared feature values, cached in XX_ for CalcGrad;
    // when data.value is null the features are binary and X.*X == X)
    auto XX = data;
    if (XX.value) {
      XX_.clear();
      XX_.CopyFrom(XX.value+XX.offset[0], XX.offset[XX.size] - XX.offset[0]);
      for (auto& v : XX_) v *= v;
      XX.value = XX_.data();
    }

    // VV = V*V  (element-wise square of the embeddings)
    SArray<real_t> VV(V.size());
#pragma omp parallel for num_threads(nthreads_)
    for (size_t i = 0; i < V_pos.size(); ++i) {
      int p = V_pos[i];
      if (p < 0) continue;  // negative position = feature has no embedding
      for (int j = 0; j < V_dim; ++j) VV[p+j] = V[p+j] * V[p+j];
    }

    // XXVV = XX*VV
    SArray<real_t> XXVV(XV_.size());
    SpMM::Times(XX, VV, V_dim, &XXVV, nthreads_, V_pos);

    // py += .5 * sum((V.XV).^2 - xxvv)   (second-order interaction term)
    #pragma omp parallel for num_threads(nthreads_)
    for (size_t i = 0; i < pred->size(); ++i) {
      real_t* t = XV_.data() + i * V_dim;
      real_t* tt = XXVV.data() + i * V_dim;
      real_t s = 0;
      for (int j = 0; j < V_dim; ++j) s += t[j] * t[j] - tt[j];
      (*pred)[i] += .5 * s;
    }

    // projection: clamp scores to [-20, 20] to keep the logistic terms
    // downstream numerically stable
    for (auto& p : *pred) p = p > 20 ? 20 : (p < -20 ? -20 : p);
  }

  /*!
   * \brief compute the gradients
   *
   *   p = - y ./ (1 + exp (y .* pred));
   *   grad_w = X' * p;
   *   grad_u = X' * diag(p) * X * V  - diag((X.*X)'*p) * V
   *
   * @param data the data
   * @param param input parameters
   * - param[0], real_t vector, the weights
   * - param[1], int vector, the w positions
   * - param[2], int vector, the V positions
   * - param[3], real_t vector, the predict output
   * @param grad the results
   */
  void CalcGrad(const dmlc::RowBlock<unsigned>& data,
                const std::vector<SArray<char>>& param,
                SArray<real_t>* grad) override {
    CHECK_EQ(param.size(), 4);
    CalcGrad(data, SArray<real_t>(param[0]), SArray<int>(param[1]),
             SArray<int>(param[2]), SArray<real_t>(param[3]), grad);
  }

  void CalcGrad(const dmlc::RowBlock<unsigned>& data,
                const SArray<real_t>& weights,
                const SArray<int>& w_pos,
                const SArray<int>& V_pos,
                const SArray<real_t>& pred,
                SArray<real_t>* grad) {
    // p = -y ./ (1 + exp(y .* pred))   (logistic-loss gradient wrt score,
    // scaled by the per-example weight when present)
    SArray<real_t> p; p.CopyFrom(pred);
    CHECK_EQ(p.size(), data.size);
#pragma omp parallel for num_threads(nthreads_)
    for (size_t i = 0; i < p.size(); ++i) {
      real_t y = data.label[i] > 0 ? 1 : -1;  // labels mapped to {-1, +1}
      if (data.weight) {
        p[i] = - y / (1 + std::exp(y * p[i])) * data.weight[i];
      } else {
        p[i] = - y / (1 + std::exp(y * p[i]));
      }
    }

    // grad_w = X' * p
    SpMV::TransTimes(data, p, grad, nthreads_, {}, w_pos);

    // grad_u = ...
    int V_dim = param_.V_dim;
    if (V_dim == 0) return;
    SArray<real_t> V = weights;

    // XXp = (X.*X)'*p  — reuses the squared values cached in XX_ by Predict
    auto XX = data;
    if (XX.value) {
      CHECK_EQ(XX_.size(), XX.offset[XX.size] - XX.offset[0]);
      XX.value = XX_.data();
    }
    SArray<real_t> XXp(V_pos.size());
    SpMV::TransTimes(XX, p, &XXp, nthreads_);

    // grad_u -= diag(XXp) * V,
    // (note: the inner `int p` deliberately shadows the outer SArray p)
#pragma omp parallel for num_threads(nthreads_)
    for (size_t i = 0; i < V_pos.size(); ++i) {
      int p = V_pos[i];
      if (p < 0) continue;
      for (int j = 0; j < V_dim; ++j) {
        (*grad)[p+j] -= V[p+j] * XXp[i];
      }
    }

    // XV_ = diag(p) * X * V   (scales the cached X*V rows in place)
    CHECK_EQ(XV_.size(), data.size * V_dim);
#pragma omp parallel for num_threads(nthreads_)
    for (size_t i = 0; i < p.size(); ++i) {
      for (int j = 0; j < V_dim; ++j) XV_[i*V_dim+j] *= p[i];
    }

    // grad_u += X' * diag(p) * X * V
    SpMM::TransTimes(data, XV_, V_dim, grad, nthreads_, {}, V_pos);
  }

 private:
  // X*V from the last Predict call (overwritten in place by CalcGrad)
  SArray<real_t> XV_;
  // element-wise squared feature values from the last Predict call
  SArray<dmlc::real_t> XX_;
  FMLossParam param_;
};

}  // namespace difacto
#endif  // DIFACTO_LOSS_FM_LOSS_H_
cache.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC AAA CCCC H H EEEEE % % C A A C H H E % % C AAAAA C HHHHH EEE % % C A A C H H E % % CCCC A A CCCC H H EEEEE % % % % % % MagickCore Pixel Cache Methods % % % % Software Design % % Cristy % % July 1999 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/distribute-cache-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/quantum.h" #include "MagickCore/random_.h" #include "MagickCore/registry.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/timer-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #if defined(MAGICKCORE_ZLIB_DELEGATE) #include "zlib.h" #endif /* Define declarations. */ #define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent) #define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \ GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse) /* Typedef declarations. */ typedef struct _MagickModulo { ssize_t quotient, remainder; } MagickModulo; /* Forward declarations. 
*/ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static Cache GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *) magick_hot_spot; static const Quantum *GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t, const ssize_t,const size_t,const size_t,ExceptionInfo *), *GetVirtualPixelsCache(const Image *); static const void *GetVirtualMetacontentFromCache(const Image *); static MagickBooleanType GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *, ExceptionInfo *), GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod, const ssize_t,const ssize_t,Quantum *,ExceptionInfo *), OpenPixelCache(Image *,const MapMode,ExceptionInfo *), OpenPixelCacheOnDisk(CacheInfo *,const MapMode), ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), ReadPixelCacheMetacontent(CacheInfo *magick_restrict, NexusInfo *magick_restrict,ExceptionInfo *), SyncAuthenticPixelsCache(Image *,ExceptionInfo *), WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict, ExceptionInfo *); static Quantum *GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *SetPixelCacheNexusPixels(const CacheInfo *magick_restrict,const MapMode, const ssize_t,const ssize_t,const size_t,const size_t, const MagickBooleanType,NexusInfo *magick_restrict,ExceptionInfo *) magick_hot_spot; #if defined(MAGICKCORE_OPENCL_SUPPORT) static void CopyOpenCLBuffer(CacheInfo *magick_restrict); #endif #if defined(__cplusplus) || defined(c_plusplus) } #endif /* Global declarations. 
*/

/*
  Global declarations: module-wide semaphore plus cached policy decisions
  (anonymous-memory preference, policy timestamp) shared by all caches.
*/
static SemaphoreInfo
  *cache_semaphore = (SemaphoreInfo *) NULL;

static ssize_t
  cache_anonymous_memory = (-1);

static time_t
  cache_epoch = 0;

/*
  AcquirePixelCache() acquires a pixel cache.

  The format of the AcquirePixelCache() method is:

      Cache AcquirePixelCache(const size_t number_threads)

  A description of each parameter follows:

    o number_threads: the number of nexus threads.
*/
MagickPrivate Cache AcquirePixelCache(const size_t number_threads)
{
  CacheInfo
    *magick_restrict cache_info;

  char
    *value;

  cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info));
  if (cache_info == (CacheInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(cache_info,0,sizeof(*cache_info));
  cache_info->type=UndefinedCache;
  cache_info->mode=IOMode;
  cache_info->disk_mode=IOMode;
  cache_info->colorspace=sRGBColorspace;
  cache_info->file=(-1);
  cache_info->id=GetMagickThreadId();
  /*
    Use the largest of: the requested count, the OpenMP maximum, and the
    thread resource limit; never zero.
  */
  cache_info->number_threads=number_threads;
  if (GetOpenMPMaximumThreads() > cache_info->number_threads)
    cache_info->number_threads=GetOpenMPMaximumThreads();
  if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
    cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  if (cache_info->number_threads == 0)
    cache_info->number_threads=1;
  cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    Synchronized cache I/O may be requested by the environment or by the
    security policy; the policy value is applied last and so takes
    precedence when both are set.
  */
  value=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  value=GetPolicyValue("cache:synchronize");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  cache_info->width_limit=GetMagickResourceLimit(WidthResource);
  cache_info->height_limit=GetMagickResourceLimit(HeightResource);
  cache_info->semaphore=AcquireSemaphoreInfo();
  cache_info->reference_count=1;
  cache_info->file_semaphore=AcquireSemaphoreInfo();
  cache_info->debug=IsEventLogging();
  cache_info->signature=MagickCoreSignature;
  return((Cache ) cache_info);
}

/*
  AcquirePixelCacheNexus() allocates the NexusInfo structure.

  The format of the AcquirePixelCacheNexus method is:

      NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)

  A description of each parameter follows:

    o number_threads: the number of nexus threads.
*/
MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
  NexusInfo
    **magick_restrict nexus_info;

  register ssize_t
    i;

  /*
    2*number_threads slots: the second half serves as the virtual nexus for
    each of the first half (wired up in the loop below).
  */
  nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(2*
    number_threads,sizeof(*nexus_info)));
  if (nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  *nexus_info=(NexusInfo *) AcquireQuantumMemory(2*number_threads,
    sizeof(**nexus_info));
  if (*nexus_info == (NexusInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(*nexus_info,0,2*number_threads*sizeof(**nexus_info));
  for (i=0; i < (ssize_t) (2*number_threads); i++)
  {
    nexus_info[i]=(*nexus_info+i);
    if (i < (ssize_t) number_threads)
      nexus_info[i]->virtual_nexus=(*nexus_info+number_threads+i);
    nexus_info[i]->signature=MagickCoreSignature;
  }
  return(nexus_info);
}

/*
  AcquirePixelCachePixels() returns a pointer to the image pixel cache
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCachePixels() returns the pixels associated with the specified % image. % % The format of the AcquirePixelCachePixels() method is: % % void *AcquirePixelCachePixels(const Image *image,size_t *length, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o length: the pixel cache length. % % o exception: return any errors or warnings in this structure. % */ MagickExport void *AcquirePixelCachePixels(const Image *image,size_t *length, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *length=0; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((void *) NULL); *length=(size_t) cache_info->length; return(cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C a c h e C o m p o n e n t G e n e s i s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CacheComponentGenesis() instantiates the cache component. 
  The format of the CacheComponentGenesis method is:

      MagickBooleanType CacheComponentGenesis(void)
*/
MagickPrivate MagickBooleanType CacheComponentGenesis(void)
{
  /* Create the module-wide cache semaphore exactly once at startup. */
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    cache_semaphore=AcquireSemaphoreInfo();
  return(MagickTrue);
}

/*
  CacheComponentTerminus() destroys the cache component.

  The format of the CacheComponentTerminus() method is:

      CacheComponentTerminus(void)
*/
MagickPrivate void CacheComponentTerminus(void)
{
  /*
    If genesis never ran, activate the semaphore first so the relinquish
    below operates on a valid object.
  */
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    ActivateSemaphoreInfo(&cache_semaphore);
  /* no op-- nothing to destroy */
  RelinquishSemaphoreInfo(&cache_semaphore);
}

/*
  ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
  mask.  The method returns MagickTrue if the pixel region is clipped,
  otherwise MagickFalse.

  The format of the ClipPixelCacheNexus() method is:

      MagickBooleanType ClipPixelCacheNexus(Image *image,
        NexusInfo *nexus_info,ExceptionInfo *exception)

  A description of each parameter follows:

    o image: the image.

    o nexus_info: the cache nexus to clip.

    o exception: return any errors or warnings in this structure.
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    number_pixels;

  register Quantum
    *magick_restrict p,
    *magick_restrict q;

  register ssize_t
    n;

  /*
    Apply clip mask.
*/
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->channels & WriteMaskChannel) == 0)
    return(MagickTrue);  /* no write mask: nothing to clip */
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);  /* empty region */
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /*
    p walks the authentic (pre-update) pixels of the region; q walks the
    nexus pixels being written back.
  */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  for (n=0; n < (ssize_t) number_pixels; n++)
  {
    double
      mask_alpha;

    register ssize_t
      i;

    if (p == (Quantum *) NULL)
      break;
    mask_alpha=QuantumScale*GetPixelWriteMask(image,p);
    if (fabs(mask_alpha) >= MagickEpsilon)
      {
        /*
          Blend each updatable channel of the new pixel over the original
          using the write-mask-scaled alpha.
        */
        for (i=0; i < (ssize_t) image->number_channels; i++)
        {
          PixelChannel channel = GetPixelChannelChannel(image,i);
          PixelTrait traits = GetPixelChannelTraits(image,channel);
          if ((traits & UpdatePixelTrait) == 0)
            continue;
          q[i]=ClampToQuantum(MagickOver_((double) p[i],mask_alpha*
            GetPixelAlpha(image,p),(double) q[i],(double)
            GetPixelAlpha(image,q)));
        }
        SetPixelAlpha(image,GetPixelAlpha(image,p),q);
      }
    p+=GetPixelChannels(image);
    q+=GetPixelChannels(image);
  }
  /* MagickFalse if the loop terminated early (pixel fetch failed). */
  return(n < (ssize_t) number_pixels ? MagickFalse : MagickTrue);
}

/*
  ClonePixelCache() clones a pixel cache.

    o cache: the pixel cache.
*/
MagickPrivate Cache ClonePixelCache(const Cache cache)
{
  CacheInfo
    *magick_restrict clone_info;

  const CacheInfo
    *magick_restrict cache_info;

  assert(cache != NULL);
  cache_info=(const CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /*
    Only the thread count and virtual pixel method are carried over here;
    pixel data itself is not copied by this method.
  */
  clone_info=(CacheInfo *) AcquirePixelCache(cache_info->number_threads);
  clone_info->virtual_pixel_method=cache_info->virtual_pixel_method;
  return((Cache ) clone_info);
}

/*
  ClonePixelCacheMethods() clones the pixel cache methods from one cache to
  another.

  A description of each parameter follows:

    o clone: Specifies a pointer to a Cache structure.

    o cache: the pixel cache.
*/
MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict source_info;

  assert(clone != (Cache) NULL);
  source_info=(CacheInfo *) clone;
  assert(source_info->signature == MagickCoreSignature);
  if (source_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      source_info->filename);
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Copy the whole method-vector from the source cache to the clone. */
  source_info->methods=cache_info->methods;
}

/*
  ClonePixelCacheRepository() clones the source pixel cache to the
  destination cache.

      MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
        CacheInfo *source_info,ExceptionInfo *exception)

    o cache_info: the pixel cache.

    o source_info: the source pixel cache.

    o exception: return any errors or warnings in this structure.
*/
/*
  Helper: byte-for-byte copy of one on-disk cache file to another; valid
  only when both caches have identical morphology.  Returns MagickFalse on
  any open/seek/short-write failure or if fewer bytes than the cache length
  were transferred.
*/
static MagickBooleanType ClonePixelCacheOnDisk(
  CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info)
{
  MagickSizeType
    extent;

  size_t
    quantum;

  ssize_t
    count;

  struct stat
    file_stats;

  unsigned char
    *buffer;

  /*
    Clone pixel cache on disk with identical morphology.
*/
  if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
      (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
    return(MagickFalse);
  if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
      (lseek(clone_info->file,0,SEEK_SET) < 0))
    return(MagickFalse);
  /* Copy in chunks no larger than the file itself or the buffer maximum. */
  quantum=(size_t) MagickMaxBufferExtent;
  if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
    quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
  buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  extent=0;
  while ((count=read(cache_info->file,buffer,quantum)) > 0)
  {
    ssize_t
      number_bytes;

    number_bytes=write(clone_info->file,buffer,(size_t) count);
    if (number_bytes != count)
      break;  /* short write: abort and report failure below */
    extent+=number_bytes;
  }
  buffer=(unsigned char *) RelinquishMagickMemory(buffer);
  if (extent != cache_info->length)
    return(MagickFalse);
  return(MagickTrue);
}

static MagickBooleanType ClonePixelCacheRepository(
  CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
  ExceptionInfo *exception)
{
#define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource))
/*
  Thread-count clause for the OpenMP loops below: single-threaded when
  multithreading is disabled, at most 2 threads when either cache is
  disk-backed (I/O bound), otherwise scaled by chunk size.
*/
#define cache_number_threads(source,destination,chunk,multithreaded) \
  num_threads((multithreaded) == 0 ? 1 : \
    (((source)->type != MemoryCache) && ((source)->type != MapCache)) || \
    (((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? \
    MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \
    MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1))

  MagickBooleanType
    optimize,
    status;

  NexusInfo
    **magick_restrict cache_nexus,
    **magick_restrict clone_nexus;

  size_t
    length;

  ssize_t
    y;

  assert(cache_info != (CacheInfo *) NULL);
  assert(clone_info != (CacheInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  if (cache_info->type == PingCache)
    return(MagickTrue);  /* ping caches carry no pixel data */
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  if ((cache_info->storage_class == clone_info->storage_class) &&
      (cache_info->colorspace == clone_info->colorspace) &&
      (cache_info->alpha_trait == clone_info->alpha_trait) &&
      (cache_info->channels == clone_info->channels) &&
      (cache_info->columns == clone_info->columns) &&
      (cache_info->rows == clone_info->rows) &&
      (cache_info->number_channels == clone_info->number_channels) &&
      (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) &&
      (cache_info->metacontent_extent == clone_info->metacontent_extent))
    {
      /*
        Identical pixel cache morphology: fast path, raw block copy.
      */
      if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
          ((clone_info->type == MemoryCache) || (clone_info->type == MapCache)))
        {
          (void) memcpy(clone_info->pixels,cache_info->pixels,
            cache_info->number_channels*cache_info->columns*cache_info->rows*
            sizeof(*cache_info->pixels));
          if ((cache_info->metacontent_extent != 0) &&
              (clone_info->metacontent_extent != 0))
            (void) memcpy(clone_info->metacontent,cache_info->metacontent,
              cache_info->columns*cache_info->rows*
              clone_info->metacontent_extent*sizeof(unsigned char));
          return(MagickTrue);
        }
      if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
        return(ClonePixelCacheOnDisk(cache_info,clone_info));
    }
  /*
    Mismatched pixel cache morphology.
*/
  cache_nexus=AcquirePixelCacheNexus(cache_info->number_threads);
  clone_nexus=AcquirePixelCacheNexus(clone_info->number_threads);
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  /* optimize: channel maps agree, so rows can be copied with memcpy(). */
  optimize=(cache_info->number_channels == clone_info->number_channels) &&
    (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ?
    MagickTrue : MagickFalse;
  length=(size_t) MagickMin(cache_info->number_channels*cache_info->columns,
    clone_info->number_channels*clone_info->columns);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
  for (y=0; y < (ssize_t) cache_info->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *pixels;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* an earlier row failed; skip remaining iterations */
    if (y >= (ssize_t) clone_info->rows)
      continue;
    /*
      Read one source row into this thread's nexus, then write it out
      through the clone's nexus.
    */
    pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
      cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
    if (status == MagickFalse)
      continue;
    pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
      clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    (void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length);
    if (optimize != MagickFalse)
      (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length*
        sizeof(Quantum));
    else
      {
        register const Quantum
          *magick_restrict p;

        register Quantum
          *magick_restrict q;

        /*
          Mismatched pixel channel map.
*/
        p=cache_nexus[id]->pixels;
        q=clone_nexus[id]->pixels;
        for (x=0; x < (ssize_t) cache_info->columns; x++)
        {
          register ssize_t
            i;

          if (x == (ssize_t) clone_info->columns)
            break;
          /*
            Copy each clone channel from the source channel of the same
            PixelChannel identity, when the source defines it.
          */
          for (i=0; i < (ssize_t) clone_info->number_channels; i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            channel=clone_info->channel_map[i].channel;
            traits=cache_info->channel_map[channel].traits;
            if (traits != UndefinedPixelTrait)
              *q=*(p+cache_info->channel_map[channel].offset);
            q++;
          }
          p+=cache_info->number_channels;
        }
      }
    status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
  }
  if ((cache_info->metacontent_extent != 0) &&
      (clone_info->metacontent_extent != 0))
    {
      /*
        Clone metacontent.
      */
      length=(size_t) MagickMin(cache_info->metacontent_extent,
        clone_info->metacontent_extent);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
      for (y=0; y < (ssize_t) cache_info->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        Quantum
          *pixels;

        if (status == MagickFalse)
          continue;
        if (y >= (ssize_t) clone_info->rows)
          continue;
        pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
          cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception);
        if (status == MagickFalse)
          continue;
        pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
          clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        if ((clone_nexus[id]->metacontent != (void *) NULL) &&
            (cache_nexus[id]->metacontent != (void *) NULL))
          (void) memcpy(clone_nexus[id]->metacontent,
            cache_nexus[id]->metacontent,length*sizeof(unsigned char));
        status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],
          exception);
      }
    }
  clone_nexus=DestroyPixelCacheNexus(clone_nexus,clone_info->number_threads);
  cache_nexus=DestroyPixelCacheNexus(cache_nexus,cache_info->number_threads);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"%s => %s",
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        clone_info->type));
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status);
}

/*
  DestroyImagePixelCache() deallocates memory associated with the pixel
  cache.

  The format of the DestroyImagePixelCache() method is:

      void DestroyImagePixelCache(Image *image)

  A description of each parameter follows:

    o image: the image.
*/
static void DestroyImagePixelCache(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->cache != (void *) NULL)
    image->cache=DestroyPixelCache(image->cache);
}

/*
  DestroyImagePixels() deallocates memory associated with the pixel cache.

    o image: the image.
*/
MagickExport void DestroyImagePixels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Delegate to a registered destroy handler when the cache has one. */
  if (cache_info->methods.destroy_pixel_handler != (DestroyPixelHandler) NULL)
    {
      cache_info->methods.destroy_pixel_handler(image);
      return;
    }
  image->cache=DestroyPixelCache(image->cache);
}

/*
  DestroyPixelCache() deallocates memory associated with the pixel cache.

  The format of the DestroyPixelCache() method is:

      Cache DestroyPixelCache(Cache cache)

  A description of each parameter follows:

    o cache: the pixel cache.
*/
/*
  Helper: close the backing disk file (if open) and release its file
  resource.  Returns MagickFalse if close() failed or no file was open.
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    status;

  status=(-1);
  if (cache_info->file != -1)
    {
      status=close(cache_info->file);
      cache_info->file=(-1);
      RelinquishMagickResource(FileResource,1);
    }
  return(status == -1 ?
MagickFalse : MagickTrue);
}

/*
  Helper: release the pixel storage of a cache according to its type and
  return the associated resource accounting, leaving the cache in the
  UndefinedCache state.
*/
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  switch (cache_info->type)
  {
    case MemoryCache:
    {
#if defined(MAGICKCORE_OPENCL_SUPPORT)
      if (cache_info->opencl != (MagickCLCacheInfo) NULL)
        {
          /* OpenCL owns the buffer; let it relinquish the host pixels. */
          cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl,
            MagickTrue);
          cache_info->pixels=(Quantum *) NULL;
          break;
        }
#endif
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(Quantum *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(Quantum *) NULL;
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
    }
    /*
      NOTE(review): no break above -- MapCache appears to deliberately fall
      through to DiskCache so the backing file descriptor is also closed;
      confirm before adding a break here.
    */
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->metacontent=(void *) NULL;
}

MagickPrivate Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /* Drop one reference; only the last reference tears the cache down. */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if
 (cache_info->reference_count != 0)
    {
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);  /* other references remain; nothing destroyed */
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* Release pixel storage first, then the auxiliary structures. */
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->semaphore);
  cache_info->signature=(~MagickCoreSignature);  /* poison the signature */
  cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}

/*
  DestroyPixelCacheNexus() destroys a pixel cache nexus.

  The format of the DestroyPixelCacheNexus() method is:

      NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
        const size_t number_threads)

  A description of each parameter follows:

    o nexus_info: the nexus to destroy.

    o number_threads: the number of nexus threads.
*/
/*
  Helper: free one nexus's staging pixels (heap or memory-mapped) and reset
  its bookkeeping fields.
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  if (nexus_info->mapped == MagickFalse)
    (void) RelinquishAlignedMemory(nexus_info->cache);
  else
    (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
  nexus_info->cache=(Quantum *) NULL;
  nexus_info->pixels=(Quantum *) NULL;
  nexus_info->metacontent=(void *) NULL;
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
}

MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  register ssize_t
    i;

  assert(nexus_info != (NexusInfo **) NULL);
  /* 2*number_threads entries: see AcquirePixelCacheNexus(). */
  for (i=0; i < (ssize_t) (2*number_threads); i++)
  {
    if (nexus_info[i]->cache != (Quantum *) NULL)
      RelinquishCacheNexusPixels(nexus_info[i]);
    nexus_info[i]->signature=(~MagickCoreSignature);
  }
  *nexus_info=(NexusInfo *) RelinquishMagickMemory(*nexus_info);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}

/*
  GetAuthenticMetacontent() returns the authentic metacontent corresponding
  with the last call to QueueAuthenticPixels() or GetVirtualPixels().  NULL
  is returned if the associated pixels are not available.

    o image: the image.
*/
MagickExport void *GetAuthenticMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Delegate to a registered handler when the cache provides one. */
  if (cache_info->methods.get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    {
      void
        *metacontent;

      metacontent=cache_info->methods.
        get_authentic_metacontent_from_handler(image);
      return(metacontent);
    }
  /* Otherwise return this thread's nexus metacontent. */
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->metacontent);
}

/*
  GetAuthenticMetacontentFromCache() returns the meta-content corresponding
  with the last call to QueueAuthenticPixelsCache() or
  GetAuthenticPixelsCache().

    o image: the image.
% */ static void *GetAuthenticMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->metacontent); } #if defined(MAGICKCORE_OPENCL_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c O p e n C L B u f f e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL % operations. % % The format of the GetAuthenticOpenCLBuffer() method is: % % cl_mem GetAuthenticOpenCLBuffer(const Image *image, % MagickCLDevice device,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o device: the device to use. % % o exception: return any errors or warnings in this structure. 
% */ MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image, MagickCLDevice device,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(device != (const MagickCLDevice) NULL); cache_info=(CacheInfo *) image->cache; if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1)) { SyncImagePixelCache((Image *) image,exception); cache_info=(CacheInfo *) image->cache; } if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse)) return((cl_mem) NULL); LockSemaphoreInfo(cache_info->semaphore); if ((cache_info->opencl != (MagickCLCacheInfo) NULL) && (cache_info->opencl->device->context != device->context)) cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl); if (cache_info->opencl == (MagickCLCacheInfo) NULL) { assert(cache_info->pixels != (Quantum *) NULL); cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels, cache_info->length); } if (cache_info->opencl != (MagickCLCacheInfo) NULL) RetainOpenCLMemObject(cache_info->opencl->buffer); UnlockSemaphoreInfo(cache_info->semaphore); if (cache_info->opencl == (MagickCLCacheInfo) NULL) return((cl_mem) NULL); assert(cache_info->opencl->pixels == cache_info->pixels); return(cache_info->opencl->buffer); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or % disk pixel cache as defined by the geometry parameters. A pointer to the % pixels is returned if the pixels are transferred, otherwise a NULL is % returned. 
% % The format of the GetAuthenticPixelCacheNexus() method is: % % Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o nexus_info: the cache nexus to return. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; Quantum *magick_restrict pixels; /* Transfer pixels from the cache. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue, nexus_info,exception); if (pixels == (Quantum *) NULL) return((Quantum *) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (nexus_info->authentic_pixel_cache != MagickFalse) return(pixels); if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse) return((Quantum *) NULL); if (cache_info->metacontent_extent != 0) if (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse) return((Quantum *) NULL); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l s F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelsFromCache() returns the pixels associated with the last % call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods. 
%
%  The format of the GetAuthenticPixelsFromCache() method is:
%
%      Quantum *GetAuthenticPixelsFromCache(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static Quantum *GetAuthenticPixelsFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Each OpenMP thread owns one cache nexus; return its pixels.
  */
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A u t h e n t i c P i x e l Q u e u e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelQueue() returns the authentic pixels associated
%  corresponding with the last call to QueueAuthenticPixels() or
%  GetAuthenticPixels().
%
%  The format of the GetAuthenticPixelQueue() method is:
%
%      Quantum *GetAuthenticPixelQueue(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ MagickExport Quantum *GetAuthenticPixelQueue(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_pixels_from_handler != (GetAuthenticPixelsFromHandler) NULL) return(cache_info->methods.get_authentic_pixels_from_handler(image)); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixels() obtains a pixel region for read/write access. If the % region is successfully accessed, a pointer to a Quantum array % representing the region is returned, otherwise NULL is returned. % % The returned pointer may point to a temporary working copy of the pixels % or it may point to the original pixels in memory. Performance is maximized % if the selected region is part of one row, or one or more full rows, since % then there is opportunity to access the pixels in-place (without a copy) % if the image is in memory, or in a memory-mapped file. The returned pointer % must *never* be deallocated by the user. % % Pixels accessed via the returned pointer represent a simple array of type % Quantum. If the image has corresponding metacontent,call % GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the % meta-content corresponding to the region. Once the Quantum array has % been updated, the changes must be saved back to the underlying image using % SyncAuthenticPixels() or they may be lost. 
% % The format of the GetAuthenticPixels() method is: % % Quantum *GetAuthenticPixels(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *pixels; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_pixels_handler != (GetAuthenticPixelsHandler) NULL) { pixels=cache_info->methods.get_authentic_pixels_handler(image,x,y,columns, rows,exception); return(pixels); } assert(id < (int) cache_info->number_threads); pixels=GetAuthenticPixelCacheNexus(image,x,y,columns,rows, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l s C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache % as defined by the geometry parameters. A pointer to the pixels is returned % if the pixels are transferred, otherwise a NULL is returned. % % The format of the GetAuthenticPixelsCache() method is: % % Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
% % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *magick_restrict pixels; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return((Quantum *) NULL); assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); pixels=GetAuthenticPixelCacheNexus(image,x,y,columns,rows, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageExtent() returns the extent of the pixels associated corresponding % with the last call to QueueAuthenticPixels() or GetAuthenticPixels(). % % The format of the GetImageExtent() method is: % % MagickSizeType GetImageExtent(const Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ MagickExport MagickSizeType GetImageExtent(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImagePixelCache() ensures that there is only a single reference to the % pixel cache to be modified, updating the provided cache pointer to point to % a clone of the original pixel cache if necessary. % % The format of the GetImagePixelCache method is: % % Cache GetImagePixelCache(Image *image,const MagickBooleanType clone, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o clone: any value other than MagickFalse clones the cache pixels. % % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType ValidatePixelCacheMorphology( const Image *magick_restrict image) { const CacheInfo *magick_restrict cache_info; const PixelChannelMap *magick_restrict p, *magick_restrict q; /* Does the image match the pixel cache morphology? 
*/ cache_info=(CacheInfo *) image->cache; p=image->channel_map; q=cache_info->channel_map; if ((image->storage_class != cache_info->storage_class) || (image->colorspace != cache_info->colorspace) || (image->alpha_trait != cache_info->alpha_trait) || (image->channels != cache_info->channels) || (image->columns != cache_info->columns) || (image->rows != cache_info->rows) || (image->number_channels != cache_info->number_channels) || (memcmp(p,q,image->number_channels*sizeof(*p)) != 0) || (image->metacontent_extent != cache_info->metacontent_extent) || (cache_info->nexus_info == (NexusInfo **) NULL)) return(MagickFalse); return(MagickTrue); } static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickBooleanType destroy, status; static MagickSizeType cache_timelimit = MagickResourceInfinity, cpu_throttle = MagickResourceInfinity, cycles = 0; status=MagickTrue; if (cpu_throttle == MagickResourceInfinity) cpu_throttle=GetMagickResourceLimit(ThrottleResource); if ((cpu_throttle != 0) && ((cycles++ % 32) == 0)) MagickDelay(cpu_throttle); if (cache_epoch == 0) { /* Set the expire time in seconds. 
*/ cache_timelimit=GetMagickResourceLimit(TimeResource); cache_epoch=GetMagickTime(); } if ((cache_timelimit != MagickResourceInfinity) && ((MagickSizeType) (GetMagickTime()-cache_epoch) >= cache_timelimit)) { #if defined(ECANCELED) errno=ECANCELED; #endif cache_info=(CacheInfo *) image->cache; if (cache_info->file != -1) (void) ClosePixelCacheOnDisk(cache_info); ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded"); } LockSemaphoreInfo(image->semaphore); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; #if defined(MAGICKCORE_OPENCL_SUPPORT) CopyOpenCLBuffer(cache_info); #endif destroy=MagickFalse; if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode)) { LockSemaphoreInfo(cache_info->semaphore); if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode)) { CacheInfo *clone_info; Image clone_image; /* Clone pixel cache. */ clone_image=(*image); clone_image.semaphore=AcquireSemaphoreInfo(); clone_image.reference_count=1; clone_image.cache=ClonePixelCache(cache_info); clone_info=(CacheInfo *) clone_image.cache; status=OpenPixelCache(&clone_image,IOMode,exception); if (status == MagickFalse) clone_info=(CacheInfo *) DestroyPixelCache(clone_info); else { if (clone != MagickFalse) status=ClonePixelCacheRepository(clone_info,cache_info, exception); if (status == MagickFalse) clone_info=(CacheInfo *) DestroyPixelCache(clone_info); else { destroy=MagickTrue; image->cache=clone_info; } } RelinquishSemaphoreInfo(&clone_image.semaphore); } UnlockSemaphoreInfo(cache_info->semaphore); } if (destroy != MagickFalse) cache_info=(CacheInfo *) DestroyPixelCache(cache_info); if (status != MagickFalse) { /* Ensure the image matches the pixel cache morphology. 
*/ if (image->type != UndefinedType) image->type=UndefinedType; if (ValidatePixelCacheMorphology(image) == MagickFalse) { status=OpenPixelCache(image,IOMode,exception); cache_info=(CacheInfo *) image->cache; if (cache_info->file != -1) (void) ClosePixelCacheOnDisk(cache_info); } } UnlockSemaphoreInfo(image->semaphore); if (status == MagickFalse) return((Cache) NULL); return(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e P i x e l C a c h e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImagePixelCacheType() returns the pixel cache type: UndefinedCache, % DiskCache, MemoryCache, MapCache, or PingCache. % % The format of the GetImagePixelCacheType() method is: % % CacheType GetImagePixelCacheType(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport CacheType GetImagePixelCacheType(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->type); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e A u t h e n t i c P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneAuthenticPixel() returns a single pixel at the specified (x,y) % location. The image background color is returned if an error occurs. % % The format of the GetOneAuthenticPixel() method is: % % MagickBooleanType GetOneAuthenticPixel(const Image image,const ssize_t x, % const ssize_t y,Quantum *pixel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. 
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline MagickBooleanType CopyPixel(const Image *image,
  const Quantum *source,Quantum *destination)
{
  register ssize_t
    i;

  if (source == (const Quantum *) NULL)
    {
      /*
        No source pixel is available: return the image background color
        and signal failure to the caller.
      */
      destination[RedPixelChannel]=ClampToQuantum(image->background_color.red);
      destination[GreenPixelChannel]=ClampToQuantum(
        image->background_color.green);
      destination[BluePixelChannel]=ClampToQuantum(
        image->background_color.blue);
      destination[BlackPixelChannel]=ClampToQuantum(
        image->background_color.black);
      destination[AlphaPixelChannel]=ClampToQuantum(
        image->background_color.alpha);
      return(MagickFalse);
    }
  /*
    Scatter source channels into destination slots keyed by the image's
    channel map.
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    destination[channel]=source[i];
  }
  return(MagickTrue);
}

MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  register Quantum
    *magick_restrict q;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Clear all channels so callers never see uninitialized values.
  */
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  if (cache_info->methods.get_one_authentic_pixel_from_handler !=
       (GetOneAuthenticPixelFromHandler) NULL)
    return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y,
      pixel,exception));
  q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
  return(CopyPixel(image,q,pixel));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t O n e A u t h e n t i c P i x e l F r o m C a c h e                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneAuthenticPixelFromCache() returns a single pixel at the specified
%  (x,y) location.  The image background color is returned if an error occurs.
%
%  The format of the GetOneAuthenticPixelFromCache() method is:
%
%      MagickBooleanType GetOneAuthenticPixelFromCache(const Image *image,
%        const ssize_t x,const ssize_t y,Quantum *pixel,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y: These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  register Quantum
    *magick_restrict q;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /*
    Clear all channels, then fetch through this thread's cache nexus.
  */
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  q=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,cache_info->nexus_info[id],
    exception);
  return(CopyPixel(image,q,pixel));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t O n e V i r t u a l P i x e l                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixel() returns a single virtual pixel at the specified
%  (x,y) location.  The image background color is returned if an error occurs.
%  If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
%  The format of the GetOneVirtualPixel() method is:
%
%      MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x,
%        const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
% % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); if (cache_info->methods.get_one_virtual_pixel_from_handler != (GetOneVirtualPixelFromHandler) NULL) return(cache_info->methods.get_one_virtual_pixel_from_handler(image, GetPixelCacheVirtualMethod(image),x,y,pixel,exception)); assert(id < (int) cache_info->number_threads); p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y, 1UL,1UL,cache_info->nexus_info[id],exception); return(CopyPixel(image,p,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t O n e V i r t u a l P i x e l F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualPixelFromCache() returns a single virtual pixel at the % specified (x,y) location. The image background color is returned if an % error occurs. % % The format of the GetOneVirtualPixelFromCache() method is: % % MagickBooleanType GetOneVirtualPixelFromCache(const Image image, % const VirtualPixelMethod method,const ssize_t x,const ssize_t y, % Quantum *pixel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y: These values define the location of the pixel to return. 
% % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL, cache_info->nexus_info[id],exception); return(CopyPixel(image,p,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e V i r t u a l P i x e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y) % location. The image background color is returned if an error occurs. If % you plan to modify the pixel, use GetOneAuthenticPixel() instead. % % The format of the GetOneVirtualPixelInfo() method is: % % MagickBooleanType GetOneVirtualPixelInfo(const Image image, % const VirtualPixelMethod virtual_pixel_method,const ssize_t x, % const ssize_t y,PixelInfo *pixel,ExceptionInfo exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y: these values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, PixelInfo *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); GetPixelInfo(image,pixel); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL, cache_info->nexus_info[id],exception); if (p == (const Quantum *) NULL) return(MagickFalse); GetPixelInfoPixel(image,p,pixel); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e C o l o r s p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheColorspace() returns the colorspace of the pixel cache. % % The format of the GetPixelCacheColorspace() method is: % % Colorspace GetPixelCacheColorspace(const Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. 
% */ MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->colorspace); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e F i l e n a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheFilename() returns the filename associated with the pixel % cache. % % The format of the GetPixelCacheFilename() method is: % % const char *GetPixelCacheFilename(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport const char *GetPixelCacheFilename(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->cache_filename); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheMethods() initializes the CacheMethods structure. % % The format of the GetPixelCacheMethods() method is: % % void GetPixelCacheMethods(CacheMethods *cache_methods) % % A description of each parameter follows: % % o cache_methods: Specifies a pointer to a CacheMethods structure. 
%
*/
MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  /*
    Populate cache_methods with the default pixel-cache handler table.
  */
  assert(cache_methods != (CacheMethods *) NULL);
  (void) memset(cache_methods,0,sizeof(*cache_methods));
  /*
    Virtual (read-only) pixel accessors.
  */
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_virtual_metacontent_from_handler=
    GetVirtualMetacontentFromCache;
  cache_methods->get_one_virtual_pixel_from_handler=
    GetOneVirtualPixelFromCache;
  /*
    Authentic (read/write) pixel accessors.
  */
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_authentic_metacontent_from_handler=
    GetAuthenticMetacontentFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  /*
    Queue, synchronize, and teardown handlers.
  */
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e N e x u s E x t e n t                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheNexusExtent() returns the extent of the pixels associated
%  corresponding with the last call to SetPixelCacheNexusPixels() or
%  GetPixelCacheNexusPixels().
%
%  The format of the GetPixelCacheNexusExtent() method is:
%
%      MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
%        NexusInfo *nexus_info)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
%    o nexus_info: the nexus info.
%
*/
MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    extent;

  assert(cache != NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Extent is the pixel count of the nexus region; an empty region means
    the nexus spans the entire cache, so fall back to columns*rows.
  */
  extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (extent == 0)
    return((MagickSizeType) cache_info->columns*cache_info->rows);
  return(extent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e P i x e l s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCachePixels() returns the pixels associated with the specified
%  image.
%
%  The format of the GetPixelCachePixels() method is:
%
%      void *GetPixelCachePixels(Image *image,MagickSizeType *length,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o length: the pixel cache length.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); assert(length != (MagickSizeType *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *length=cache_info->length; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((void *) NULL); return((void *) cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e S t o r a g e C l a s s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheStorageClass() returns the class type of the pixel cache. % % The format of the GetPixelCacheStorageClass() method is: % % ClassType GetPixelCacheStorageClass(Cache cache) % % A description of each parameter follows: % % o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass. % % o cache: the pixel cache. % */ MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->storage_class); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e T i l e S i z e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheTileSize() returns the pixel cache tile size. 
%
%  The format of the GetPixelCacheTileSize() method is:
%
%      void GetPixelCacheTileSize(const Image *image,size_t *width,
%        size_t *height)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width: the optimized cache tile width in pixels.
%
%    o height: the optimized cache tile height in pixels.
%
*/
MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  CacheInfo
    *magick_restrict cache_info;

  size_t
    extent;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Bytes per pixel packet; disk caches use a larger (8K) tile than
    memory caches (2K) to amortize I/O.  Tiles are square.
  */
  extent=MagickMax(cache_info->number_channels,1)*sizeof(Quantum);
  *width=2048UL/extent;
  if (GetImagePixelCacheType(image) == DiskCache)
    *width=8192UL/extent;
  *height=(*width);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t P i x e l C a c h e V i r t u a l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
%  pixel cache.  A virtual pixel is any pixel access that is outside the
%  boundaries of the image cache.
%
%  The format of the GetPixelCacheVirtualMethod() method is:
%
%      VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->virtual_pixel_method); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontentFromCache() returns the meta-content corresponding with % the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache(). % % The format of the GetVirtualMetacontentFromCache() method is: % % void *GetVirtualMetacontentFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static const void *GetVirtualMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const void *magick_restrict metacontent; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); metacontent=GetVirtualMetacontentFromNexus(cache_info, cache_info->nexus_info[id]); return(metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontentFromNexus() returns the meta-content for the specified % cache nexus. 
%
%  The format of the GetVirtualMetacontentFromNexus() method is:
%
%      const void *GetVirtualMetacontentFromNexus(const Cache cache,
%        NexusInfo *nexus_info)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
%    o nexus_info: the cache nexus to return the meta-content.
%
*/
MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    An uninitialized cache has no metacontent to offer.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->storage_class != UndefinedClass)
    return(nexus_info->metacontent);
  return((void *) NULL);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t V i r t u a l M e t a c o n t e n t                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualMetacontent() returns the virtual metacontent corresponding with
%  the last call to QueueAuthenticPixels() or GetVirtualPixels().  NULL is
%  returned if the meta-content are not available.
%
%  The format of the GetVirtualMetacontent() method is:
%
%      const void *GetVirtualMetacontent(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport const void *GetVirtualMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const void
    *magick_restrict metacontent;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Prefer the registered handler; guard against a NULL handler for
    consistency with GetVirtualPixelQueue() and GetVirtualPixels(), which
    both test their handler before dispatching through it.
  */
  if (cache_info->methods.get_virtual_metacontent_from_handler !=
      (GetVirtualMetacontentFromHandler) NULL)
    {
      metacontent=cache_info->methods.get_virtual_metacontent_from_handler(
        image);
      if (metacontent != (void *) NULL)
        return(metacontent);
    }
  /*
    Fall back to the metacontent of this thread's cache nexus.
  */
  assert(id < (int) cache_info->number_threads);
  metacontent=GetVirtualMetacontentFromNexus(cache_info,
    cache_info->nexus_info[id]);
  return(metacontent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t V i r t u a l P i x e l C a c h e N e x u s                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk
%  pixel cache as defined by the geometry parameters.  A pointer to the pixels
%  is returned if the pixels are transferred, otherwise a NULL is returned.
%
%  The format of the GetVirtualPixelCacheNexus() method is:
%
%      Quantum *GetVirtualPixelCacheNexus(const Image *image,
%        const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
%        const size_t columns,const size_t rows,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to acquire.
%
%    o exception: return any errors or warnings in this structure.
% */ static ssize_t DitherMatrix[64] = { 0, 48, 12, 60, 3, 51, 15, 63, 32, 16, 44, 28, 35, 19, 47, 31, 8, 56, 4, 52, 11, 59, 7, 55, 40, 24, 36, 20, 43, 27, 39, 23, 2, 50, 14, 62, 1, 49, 13, 61, 34, 18, 46, 30, 33, 17, 45, 29, 10, 58, 6, 54, 9, 57, 5, 53, 42, 26, 38, 22, 41, 25, 37, 21 }; static inline ssize_t DitherX(const ssize_t x,const size_t columns) { ssize_t index; index=x+DitherMatrix[x & 0x07]-32L; if (index < 0L) return(0L); if (index >= (ssize_t) columns) return((ssize_t) columns-1L); return(index); } static inline ssize_t DitherY(const ssize_t y,const size_t rows) { ssize_t index; index=y+DitherMatrix[y & 0x07]-32L; if (index < 0L) return(0L); if (index >= (ssize_t) rows) return((ssize_t) rows-1L); return(index); } static inline ssize_t EdgeX(const ssize_t x,const size_t columns) { if (x < 0L) return(0L); if (x >= (ssize_t) columns) return((ssize_t) (columns-1)); return(x); } static inline ssize_t EdgeY(const ssize_t y,const size_t rows) { if (y < 0L) return(0L); if (y >= (ssize_t) rows) return((ssize_t) (rows-1)); return(y); } static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns) { return((ssize_t) (columns*GetPseudoRandomValue(random_info))); } static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows) { return((ssize_t) (rows*GetPseudoRandomValue(random_info))); } static inline MagickModulo VirtualPixelModulo(const ssize_t offset, const size_t extent) { MagickModulo modulo; modulo.quotient=offset/((ssize_t) extent); modulo.remainder=offset % ((ssize_t) extent); if ((modulo.remainder != 0) && ((offset ^ ((ssize_t) extent)) < 0)) { modulo.quotient-=1; modulo.remainder+=((ssize_t) extent); } return(modulo); } MagickPrivate const Quantum *GetVirtualPixelCacheNexus(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, const size_t columns,const size_t rows,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickOffsetType offset; 
MagickSizeType length, number_pixels; NexusInfo *magick_restrict virtual_nexus; Quantum *magick_restrict pixels, virtual_pixel[MaxPixelChannels]; register const Quantum *magick_restrict p; register const void *magick_restrict r; register Quantum *magick_restrict q; register ssize_t i, u; register unsigned char *magick_restrict s; ssize_t v; void *magick_restrict virtual_metacontent; /* Acquire pixels. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->type == UndefinedCache) return((const Quantum *) NULL); #if defined(MAGICKCORE_OPENCL_SUPPORT) CopyOpenCLBuffer(cache_info); #endif pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,x,y,columns,rows, ((image->channels & WriteMaskChannel) != 0) || ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse, nexus_info,exception); if (pixels == (Quantum *) NULL) return((const Quantum *) NULL); q=pixels; offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+ nexus_info->region.width-1L; number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels)) if ((x >= 0) && ((ssize_t) (x+columns-1) < (ssize_t) cache_info->columns) && (y >= 0) && ((ssize_t) (y+rows-1) < (ssize_t) cache_info->rows)) { MagickBooleanType status; /* Pixel request is inside cache extents. 
*/ if (nexus_info->authentic_pixel_cache != MagickFalse) return(q); status=ReadPixelCachePixels(cache_info,nexus_info,exception); if (status == MagickFalse) return((const Quantum *) NULL); if (cache_info->metacontent_extent != 0) { status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception); if (status == MagickFalse) return((const Quantum *) NULL); } return(q); } /* Pixel request is outside cache extents. */ virtual_nexus=nexus_info->virtual_nexus; s=(unsigned char *) nexus_info->metacontent; (void) memset(virtual_pixel,0,cache_info->number_channels* sizeof(*virtual_pixel)); virtual_metacontent=(void *) NULL; switch (virtual_pixel_method) { case BackgroundVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case TransparentVirtualPixelMethod: case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: case EdgeVirtualPixelMethod: case CheckerTileVirtualPixelMethod: case HorizontalTileVirtualPixelMethod: case VerticalTileVirtualPixelMethod: { if (cache_info->metacontent_extent != 0) { /* Acquire a metacontent buffer. 
*/ virtual_metacontent=(void *) AcquireQuantumMemory(1, cache_info->metacontent_extent); if (virtual_metacontent == (void *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), CacheError,"UnableToGetCacheNexus","`%s'",image->filename); return((const Quantum *) NULL); } (void) memset(virtual_metacontent,0,cache_info->metacontent_extent); } switch (virtual_pixel_method) { case BlackVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } case GrayVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,QuantumRange/2, virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } case TransparentVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel); SetPixelAlpha(image,TransparentAlpha,virtual_pixel); break; } case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } default: { SetPixelRed(image,ClampToQuantum(image->background_color.red), virtual_pixel); SetPixelGreen(image,ClampToQuantum(image->background_color.green), virtual_pixel); SetPixelBlue(image,ClampToQuantum(image->background_color.blue), virtual_pixel); SetPixelBlack(image,ClampToQuantum(image->background_color.black), virtual_pixel); SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha), virtual_pixel); break; } } break; } default: break; } for (v=0; v < (ssize_t) rows; v++) { ssize_t y_offset; y_offset=y+v; if ((virtual_pixel_method == EdgeVirtualPixelMethod) || (virtual_pixel_method == UndefinedVirtualPixelMethod)) y_offset=EdgeY(y_offset,cache_info->rows); for (u=0; u < (ssize_t) 
columns; u+=length) { ssize_t x_offset; x_offset=x+u; length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u); if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) || ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) || (length == 0)) { MagickModulo x_modulo, y_modulo; /* Transfer a single pixel. */ length=(MagickSizeType) 1; switch (virtual_pixel_method) { case EdgeVirtualPixelMethod: default: { p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns), EdgeY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info, nexus_info->virtual_nexus); break; } case RandomVirtualPixelMethod: { if (cache_info->random_info == (RandomInfo *) NULL) cache_info->random_info=AcquireRandomInfo(); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, RandomX(cache_info->random_info,cache_info->columns), RandomY(cache_info->random_info,cache_info->rows),1UL,1UL, virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case DitherVirtualPixelMethod: { p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, DitherX(x_offset,cache_info->columns), DitherY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case TileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case MirrorVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); if ((x_modulo.quotient & 0x01) == 1L) x_modulo.remainder=(ssize_t) cache_info->columns- x_modulo.remainder-1L; y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if ((y_modulo.quotient & 0x01) == 1L) 
y_modulo.remainder=(ssize_t) cache_info->rows- y_modulo.remainder-1L; p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case HorizontalTileEdgeVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL, virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case VerticalTileEdgeVirtualPixelMethod: { y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL, virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case BackgroundVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case TransparentVirtualPixelMethod: case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { p=virtual_pixel; r=virtual_metacontent; break; } case CheckerTileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L) { p=virtual_pixel; r=virtual_metacontent; break; } p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case HorizontalTileVirtualPixelMethod: { if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) { p=virtual_pixel; r=virtual_metacontent; break; } x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); 
r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case VerticalTileVirtualPixelMethod: { if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) { p=virtual_pixel; r=virtual_metacontent; break; } x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } } if (p == (const Quantum *) NULL) break; (void) memcpy(q,p,(size_t) (cache_info->number_channels*length* sizeof(*p))); q+=cache_info->number_channels; if ((s != (void *) NULL) && (r != (const void *) NULL)) { (void) memcpy(s,r,(size_t) cache_info->metacontent_extent); s+=cache_info->metacontent_extent; } continue; } /* Transfer a run of pixels. */ p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset, (size_t) length,1UL,virtual_nexus,exception); if (p == (const Quantum *) NULL) break; r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); (void) memcpy(q,p,(size_t) (cache_info->number_channels*length* sizeof(*p))); q+=cache_info->number_channels*length; if ((r != (void *) NULL) && (s != (const void *) NULL)) { (void) memcpy(s,r,(size_t) length); s+=length*cache_info->metacontent_extent; } } if (u < (ssize_t) columns) break; } /* Free resources. */ if (virtual_metacontent != (void *) NULL) virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent); if (v < (ssize_t) rows) return((const Quantum *) NULL); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel % cache as defined by the geometry parameters. 
A pointer to the pixels
%  is returned if the pixels are transferred, otherwise a NULL is returned.
%
%  The format of the GetVirtualPixelCache() method is:
%
%      const Quantum *GetVirtualPixelCache(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static const Quantum *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Delegate to the per-thread cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t V i r t u a l P i x e l Q u e u e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelQueue() returns the virtual pixels associated corresponding
%  with the last call to QueueAuthenticPixels() or GetVirtualPixels().
%
%  The format of the GetVirtualPixelQueue() method is:
%
%      const Quantum *GetVirtualPixelQueue(const Image image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport const Quantum *GetVirtualPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    A registered handler takes precedence; otherwise hand back this
    thread's nexus pixels.
  */
  if (cache_info->methods.get_virtual_pixels_handler ==
      (GetVirtualPixelsHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
    }
  return(cache_info->methods.get_virtual_pixels_handler(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t V i r t u a l P i x e l s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixels() returns an immutable pixel region.  If the
%  region is successfully accessed, a pointer to it is returned, otherwise
%  NULL is returned.  The returned pointer may point to a temporary working
%  copy of the pixels or it may point to the original pixels in memory.
%  Performance is maximized if the selected region is part of one row, or one
%  or more full rows, since there is opportunity to access the pixels in-place
%  (without a copy) if the image is in memory, or in a memory-mapped file.  The
%  returned pointer must *never* be deallocated by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum.  If the image type is CMYK or the storage class is PseudoClass,
%  call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
%  access the meta-content (of type void) corresponding to the the
%  region.
%
%  If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
%  Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
In a threaded environment, use GetCacheViewVirtualPixels() or % GetCacheViewAuthenticPixels() instead. % % The format of the GetVirtualPixels() method is: % % const Quantum *GetVirtualPixels(const Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport const Quantum *GetVirtualPixels(const Image *image, const ssize_t x,const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL) return(cache_info->methods.get_virtual_pixel_handler(image, GetPixelCacheVirtualMethod(image),x,y,columns,rows,exception)); assert(id < (int) cache_info->number_threads); p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y, columns,rows,cache_info->nexus_info[id],exception); return(p); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l s F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelsCache() returns the pixels associated corresponding with the % last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache(). % % The format of the GetVirtualPixelsCache() method is: % % Quantum *GetVirtualPixelsCache(const Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ static const Quantum *GetVirtualPixelsCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l s N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelsNexus() returns the pixels associated with the specified % cache nexus. % % The format of the GetVirtualPixelsNexus() method is: % % const Quantum *GetVirtualPixelsNexus(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o cache: the pixel cache. % % o nexus_info: the cache nexus to return the colormap pixels. % */ MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache, NexusInfo *magick_restrict nexus_info) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->storage_class == UndefinedClass) return((Quantum *) NULL); return((const Quantum *) nexus_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a s k P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MaskPixelCacheNexus() masks the cache nexus as defined by the image mask. % The method returns MagickTrue if the pixel region is masked, otherwise % MagickFalse. 
%
%  The format of the MaskPixelCacheNexus() method is:
%
%      MagickBooleanType MaskPixelCacheNexus(Image *image,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o nexus_info: the cache nexus to clip.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Blend pixel channel p over q weighted by the composite-mask alpha values;
  a fully opaque mask returns p unchanged.
*/
static inline Quantum ApplyPixelCompositeMask(const Quantum p,
  const MagickRealType alpha,const Quantum q,const MagickRealType beta)
{
  double
    mask_alpha;

  Quantum
    pixel;

  /* opaque mask: source pixel passes through untouched */
  if (fabs(alpha-OpaqueAlpha) < MagickEpsilon)
    return(p);
  mask_alpha=1.0-QuantumScale*QuantumScale*alpha*beta;
  /* PerceptibleReciprocal() guards against division by (near) zero */
  mask_alpha=PerceptibleReciprocal(mask_alpha);
  pixel=ClampToQuantum(mask_alpha*MagickOver_((double) p,alpha,(double) q,
    beta));
  return(pixel);
}

static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    number_pixels;

  register Quantum
    *magick_restrict p,
    *magick_restrict q;

  register ssize_t
    n;

  /*
    Apply clip mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* nothing to do when the image carries no composite mask channel */
  if ((image->channels & CompositeMaskChannel) == 0)
    return(MagickTrue);
  /* an empty nexus region is trivially masked */
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /* p: authentic (unmasked) pixels for the region; q: the nexus pixels */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  for (n=0; n < (ssize_t) number_pixels; n++)
  {
    double
      mask_alpha;

    register ssize_t
      i;

    /* a NULL p (authentic read failed) aborts the walk; reported below */
    if (p == (Quantum *) NULL)
      break;
    mask_alpha=(double) GetPixelCompositeMask(image,p);
    for (i=0; i < (ssize_t) image->number_channels; i++)
    {
      PixelChannel channel = GetPixelChannelChannel(image,i);
      PixelTrait traits = GetPixelChannelTraits(image,channel);
      /* only channels flagged for update participate in the blend */
      if ((traits & UpdatePixelTrait) == 0)
        continue;
      q[i]=ApplyPixelCompositeMask(p[i],mask_alpha,q[i],(MagickRealType)
        GetPixelAlpha(image,q));
    }
    p+=GetPixelChannels(image);
    q+=GetPixelChannels(image);
  }
  /* early break above means not every pixel was masked */
  if (n < (ssize_t) number_pixels)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   O p e n P i x e l C a c h e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OpenPixelCache() allocates the pixel cache.  This includes defining the
%  cache dimensions, allocating space for the image pixels and optionally the
%  metacontent, and memory mapping the cache if it is disk based.  The cache
%  nexus array is initialized as well.
%
%  The format of the OpenPixelCache() method is:
%
%      MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o mode: ReadMode, WriteMode, or IOMode.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Open (or reuse) the backing file for a disk-based pixel cache in the
  requested mode; records the descriptor and mode in cache_info.
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    file;

  /*
    Open pixel cache on disk.
  */
  if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
    return(MagickTrue);  /* cache already open and in the proper mode */
  if (*cache_info->cache_filename == '\0')
    file=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    switch (mode)
    {
      case ReadMode:
      {
        file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
        break;
      }
      case WriteMode:
      {
        /* try exclusive create first; fall back to opening an existing file */
        file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE);
        break;
      }
      case IOMode:
      default:
      {
        /* try exclusive create first; fall back to opening an existing file */
        file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
          O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
        break;
      }
    }
  if (file == -1)
    return(MagickFalse);
  (void) AcquireMagickResource(FileResource,1);
  /* close any previously opened descriptor before adopting the new one */
  if (cache_info->file != -1)
    (void) ClosePixelCacheOnDisk(cache_info);
  cache_info->file=file;
  cache_info->disk_mode=mode;
  return(MagickTrue);
}

/*
  Write `length' bytes from buffer to the cache file at `offset'; retries
  short writes and EINTR.  Returns the number of bytes written, which is
  less than `length' on error.
*/
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  /* without pwrite(), seek explicitly (not thread-safe on a shared fd) */
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        count=0;
        /* retry on EINTR only; any other error ends the transfer */
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}

static MagickBooleanType
SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    count,
    extent,
    offset;

  /*
    Extend the disk-backed pixel cache file to at least `length' bytes by
    writing a single byte at length-1, then rewind to the start.
  */
  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        message[MagickPathExtent];

      (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format);
      (void) FormatLocaleString(message,MagickPathExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* fail if length does not fit in a signed file offset */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    count=(MagickOffsetType) 1;  /* file is already large enough */
  else
    {
      extent=(MagickOffsetType) length-1;
      count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
        "");
      if (count != 1)
        return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
      /* optionally pre-allocate the extension to avoid sparse regions */
      if (cache_info->synchronize != MagickFalse)
        if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0)
          return(MagickFalse);
#endif
    }
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
  if (offset < 0)
    return(MagickFalse);
  return(MagickTrue);
}

static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    source_info;

  char
    format[MagickPathExtent],
    message[MagickPathExtent];

  const char
    *hosts,
    *type;

  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  size_t
    columns,
    packet_size;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* cache_anonymous_memory < 0 means the policy has not been queried yet */
  if (cache_anonymous_memory < 0)
    {
      char
        *value;

      /*
        Does the security policy require anonymous mapping for pixel cache?
      */
      cache_anonymous_memory=0;
      value=GetPolicyValue("pixel-cache-memory");
      if (value == (char *) NULL)
        value=GetPolicyValue("cache:memory-map");
      if (LocaleCompare(value,"anonymous") == 0)
        {
#if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS)
          cache_anonymous_memory=1;
#else
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"DelegateLibrarySupportNotBuiltIn",
            "'%s' (policy requires anonymous memory mapping)",image->filename);
#endif
        }
      value=DestroyString(value);
    }
  if ((image->columns == 0) || (image->rows == 0))
    ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (((MagickSizeType) image->columns > cache_info->width_limit) ||
      ((MagickSizeType) image->rows > cache_info->height_limit))
    ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
      image->filename);
  length=GetImageListLength(image);
  if (AcquireMagickResource(ListLengthResource,length) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit",
      image->filename);
  /*
    Snapshot the current cache so its pixels can be cloned into (and then
    released from) whatever new cache representation we end up with.
  */
  source_info=(*cache_info);
  source_info.file=(-1);
  (void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]",
    image->filename,(double) image->scene);
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  cache_info->alpha_trait=image->alpha_trait;
  cache_info->channels=image->channels;
  cache_info->rows=image->rows;
  cache_info->columns=image->columns;
  InitializePixelChannelMap(image);
  cache_info->number_channels=GetPixelChannels(image);
  (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels*
    sizeof(*image->channel_map));
  cache_info->metacontent_extent=image->metacontent_extent;
  cache_info->mode=mode;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  packet_size=cache_info->number_channels*sizeof(Quantum);
  if (image->metacontent_extent != 0)
    packet_size+=cache_info->metacontent_extent;
  length=number_pixels*packet_size;
  /* overflow check: recomputing columns must reproduce the original value */
  columns=(size_t) (length/cache_info->rows/packet_size);
  if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) ||
      ((ssize_t) cache_info->rows < 0))
    ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
      image->filename);
  cache_info->length=length;
  if (image->ping != MagickFalse)
    {
      /* ping mode: record geometry only, never allocate pixels */
      cache_info->storage_class=image->storage_class;
      cache_info->colorspace=image->colorspace;
      cache_info->type=PingCache;
      return(MagickTrue);
    }
  status=AcquireMagickResource(AreaResource,(MagickSizeType)
    cache_info->columns*cache_info->rows);
  if (cache_info->mode == PersistMode)
    status=MagickFalse;  /* persistent caches must live on disk */
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if ((status != MagickFalse) &&
      (length == (MagickSizeType) ((size_t) length)) &&
      ((cache_info->type == UndefinedCache) ||
       (cache_info->type == MemoryCache)))
    {
      status=AcquireMagickResource(MemoryResource,cache_info->length);
      if (status != MagickFalse)
        {
          status=MagickTrue;
          if (cache_anonymous_memory <= 0)
            {
              cache_info->mapped=MagickFalse;
              cache_info->pixels=(Quantum *) MagickAssumeAligned(
                AcquireAlignedMemory(1,(size_t) cache_info->length));
            }
          else
            {
              /* policy requires anonymous memory mapping */
              cache_info->mapped=MagickTrue;
              cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t)
                cache_info->length);
            }
          if (cache_info->pixels == (Quantum *) NULL)
            {
              /* allocation failed: restore prior pixels and fall through */
              cache_info->mapped=source_info.mapped;
              cache_info->pixels=source_info.pixels;
            }
          else
            {
              /*
                Create memory pixel cache.
              */
              cache_info->type=MemoryCache;
              cache_info->metacontent=(void *) NULL;
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  cache_info->number_channels*number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->mapped != MagickFalse ?
                    "Anonymous" : "Heap",type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              cache_info->storage_class=image->storage_class;
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
    }
  status=AcquireMagickResource(DiskResource,cache_info->length);
  hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts",
    exception);
  if ((status == MagickFalse) && (hosts != (const char *) NULL))
    {
      DistributeCacheInfo
        *server_info;

      /*
        Distribute the pixel cache to a remote server.
      */
      server_info=AcquireDistributeCacheInfo(exception);
      if (server_info != (DistributeCacheInfo *) NULL)
        {
          status=OpenDistributePixelCache(server_info,image);
          if (status == MagickFalse)
            {
              ThrowFileException(exception,CacheError,
                "UnableToOpenPixelCache",
                GetDistributeCacheHostname(server_info));
              server_info=DestroyDistributeCacheInfo(server_info);
            }
          else
            {
              /*
                Create a distributed pixel cache.
              */
              status=MagickTrue;
              cache_info->type=DistributedCache;
              cache_info->server_info=server_info;
              (void) FormatLocaleString(cache_info->cache_filename,
                MagickPathExtent,"%s:%d",GetDistributeCacheHostname(
                (DistributeCacheInfo *) cache_info->server_info),
                GetDistributeCachePort((DistributeCacheInfo *)
                cache_info->server_info));
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    GetDistributeCacheFile((DistributeCacheInfo *)
                    cache_info->server_info),type,(double)
                    cache_info->columns,(double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create pixel cache on disk.
  */
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) &&
      (cache_info->mode != PersistMode))
    {
      /* discard the stale cache file so a fresh one is created */
      (void) ClosePixelCacheOnDisk(cache_info);
      *cache_info->cache_filename='\0';
    }
  if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
        image->filename);
      return(MagickFalse);
    }
  status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
    cache_info->length);
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToExtendCache",
        image->filename);
      return(MagickFalse);
    }
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if (length != (MagickSizeType) ((size_t) length))
    cache_info->type=DiskCache;  /* too large to map into the address space */
  else
    {
      status=AcquireMagickResource(MapResource,cache_info->length);
      if (status == MagickFalse)
        cache_info->type=DiskCache;
      else
        if ((cache_info->type != MapCache) &&
            (cache_info->type != MemoryCache))
          {
            cache_info->type=DiskCache;
            RelinquishMagickResource(MapResource,cache_info->length);
          }
        else
          {
            cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode,
              cache_info->offset,(size_t) cache_info->length);
            if (cache_info->pixels == (Quantum *) NULL)
              {
                /* mmap failed: fall back to plain disk I/O */
                cache_info->type=DiskCache;
                cache_info->mapped=source_info.mapped;
                cache_info->pixels=source_info.pixels;
                RelinquishMagickResource(MapResource,cache_info->length);
              }
            else
              {
                /*
                  Create file-backed memory-mapped pixel cache.
                */
                (void) ClosePixelCacheOnDisk(cache_info);
                cache_info->type=MapCache;
                cache_info->mapped=MagickTrue;
                cache_info->metacontent=(void *) NULL;
                if (cache_info->metacontent_extent != 0)
                  cache_info->metacontent=(void *) (cache_info->pixels+
                    cache_info->number_channels*number_pixels);
                if ((source_info.storage_class != UndefinedClass) &&
                    (mode != ReadMode))
                  {
                    status=ClonePixelCacheRepository(cache_info,&source_info,
                      exception);
                    RelinquishPixelCachePixels(&source_info);
                  }
                if (image->debug != MagickFalse)
                  {
                    (void) FormatMagickSize(cache_info->length,MagickTrue,
                      "B",MagickPathExtent,format);
                    type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                      cache_info->type);
                    (void) FormatLocaleString(message,MagickPathExtent,
                      "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                      cache_info->filename,cache_info->cache_filename,
                      cache_info->file,type,(double) cache_info->columns,
                      (double) cache_info->rows,(double)
                      cache_info->number_channels,format);
                    (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                      message);
                  }
                if (status == 0)
                  {
                    cache_info->type=UndefinedCache;
                    return(MagickFalse);
                  }
                return(MagickTrue);
              }
          }
    }
  status=MagickTrue;
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
        MagickPathExtent,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MagickPathExtent,
        "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,(double)
        cache_info->number_channels,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  if (status == 0)
    {
      cache_info->type=UndefinedCache;
      return(MagickFalse);
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P e r s i s t P i x e l C a c h e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PersistPixelCache() attaches to or initializes a persistent pixel cache.
%  A persistent pixel cache is one that resides on disk and is not destroyed
%  when the program exits.
%
%  The format of the PersistPixelCache() method is:
%
%      MagickBooleanType PersistPixelCache(Image *image,const char *filename,
%        const MagickBooleanType attach,MagickOffsetType *offset,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o filename: the persistent pixel cache filename.
%
%    o attach: a value other than zero attaches to an existing persistent
%      pixel cache; zero initializes (clones the current cache into) a new
%      persistent pixel cache.
%
%    o offset: the offset in the persistent cache to store pixels.  On
%      return it is advanced past this image's pixels, rounded up to the
%      next page boundary.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;

  MagickBooleanType
    status;

  ssize_t
    page_size;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MagickPathExtent);
      cache_info->type=MapCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(MagickTrue);
    }
  /*
    Clone persistent pixel cache.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if (status == MagickFalse)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  clone_info=(CacheInfo *) ClonePixelCache(cache_info);
  clone_info->type=DiskCache;
  (void) CopyMagickString(clone_info->cache_filename,filename,
    MagickPathExtent);
  clone_info->file=(-1);
  clone_info->storage_class=cache_info->storage_class;
  clone_info->colorspace=cache_info->colorspace;
  clone_info->alpha_trait=cache_info->alpha_trait;
  clone_info->channels=cache_info->channels;
  clone_info->columns=cache_info->columns;
  clone_info->rows=cache_info->rows;
  clone_info->number_channels=cache_info->number_channels;
  clone_info->metacontent_extent=cache_info->metacontent_extent;
  clone_info->mode=PersistMode;
  clone_info->length=cache_info->length;
  (void) memcpy(clone_info->channel_map,cache_info->channel_map,
    MaxPixelChannels*sizeof(*cache_info->channel_map));
  clone_info->offset=(*offset);
  status=ClonePixelCacheRepository(clone_info,cache_info,exception);
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelCacheNexus() allocates a
region to store image pixels as
%  defined by the region rectangle and returns a pointer to the region.  This
%  region is subsequently transferred from the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  pixels are transferred, otherwise a NULL is returned.
%
%  The format of the QueueAuthenticPixelCacheNexus() method is:
%
%      Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        const MagickBooleanType clone,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to set.
%
%    o clone: clone the pixel cache.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  Quantum
    *magick_restrict pixels;

  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  /* the region's origin must lie inside the cache */
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((Quantum *) NULL);
    }
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((Quantum *) NULL);
  /* the region's last pixel must also lie inside the cache */
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((Quantum *) NULL);
  /*
    Return pixel cache.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u e u e A u t h e n t i c P i x e l s C a c h e                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelsCache() allocates a region to store image pixels as
%  defined by the region rectangle and returns a pointer to the region.  This
%  region is subsequently transferred from the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  pixels are transferred, otherwise a NULL is returned.
%
%  The format of the QueueAuthenticPixelsCache() method is:
%
%      Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();  /* each thread owns a private cache nexus */

  Quantum
    *magick_restrict pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   Q u e u e A u t h e n t i c P i x e l s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixels() queues a mutable pixel region.  If the region is
%  successfully initialized a pointer to a Quantum array representing the
%  region is returned, otherwise NULL is returned.  The returned pointer may
%  point to a temporary working buffer for the pixels or it may point to the
%  final location of the pixels in memory.
%
%  Write-only access means that any existing pixel values corresponding to
%  the region are ignored.  This is useful if the initial image is being
%  created from scratch, or if the existing pixel values are to be
%  completely replaced without need to refer to their pre-existing values.
%  The application is free to read and write the pixel buffer returned by
%  QueueAuthenticPixels() any way it pleases.  QueueAuthenticPixels() does not
%  initialize the pixel array values.  Initializing pixel array values is the
%  application's responsibility.
%
%  Performance is maximized if the selected region is part of one row, or
%  one or more full rows, since then there is opportunity to access the
%  pixels in-place (without a copy) if the image is in memory, or in a
%  memory-mapped file.  The returned pointer must *never* be deallocated
%  by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum.  If the image type is CMYK or the storage class is PseudoClass,
%  call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
%  obtain the meta-content (of type void) corresponding to the region.
%  Once the pixel (and/or metacontent) array has been updated, the
%  changes must be saved back to the underlying image using
%  SyncAuthenticPixels() or they may be lost.
%
%  The format of the QueueAuthenticPixels() method is:
%
%      Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();  /* each thread owns a private cache nexus */

  Quantum
    *magick_restrict pixels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Honor a custom queue handler if one was registered (see
    SetPixelCacheMethods()); otherwise queue from this thread's cache nexus.
  */
  if (cache_info->methods.queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    {
      pixels=cache_info->methods.queue_authentic_pixels_handler(image,x,y,
        columns,rows,exception);
      return(pixels);
    }
  assert(id < (int) cache_info->number_threads);
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e a d P i x e l C a c h e M e t a c o n t e n t                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadPixelCacheMetacontent() reads metacontent from the specified region of
%  the pixel cache.
%
%  The format of the ReadPixelCacheMetacontent() method is:
%
%      MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to read the metacontent.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  ReadPixelCacheRegion() reads `length' bytes from the cache file at byte
  `offset' into `buffer'.  Short reads are retried (and EINTR is retried);
  returns the number of bytes read, which is less than `length' on error,
  or -1 if the initial seek fails.
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    /* pread() avoids an lseek() and is safe with concurrent readers. */
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* hard error: report bytes read so far */
      }
  }
  return(i);
}

static MagickBooleanType ReadPixelCacheMetacontent(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register ssize_t
    y;

  register unsigned char
    *magick_restrict q;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus points directly at the cache: no copy */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  q=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict p;

      /*
        Read meta-content from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* full-width region: copy all rows in one memcpy */
          length=extent;
          rows=1UL;
        }
      p=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->metacontent_extent*cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read meta content from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /* metacontent is stored after all pixel data in the cache file */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read metacontent from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* transfer row by row */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* y stops short of rows only when a transfer above failed */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e a d P i x e l C a c h e P i x e l s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadPixelCachePixels() reads pixels from the specified region of the pixel
%  cache.
%
%  The format of the ReadPixelCachePixels() method is:
%
%      MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to read the pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register Quantum
    *magick_restrict q;

  register ssize_t
    y;

  size_t
    number_channels,
    rows;

  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus points directly at the cache: no copy */
  /*
    Compute the pixel offset and per-row byte length, rejecting any
    arithmetic overflow along the way.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  number_channels=cache_info->number_channels;
  length=(MagickSizeType) number_channels*nexus_info->region.width*
    sizeof(Quantum);
  if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  y=0;
  q=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict p;

      /*
        Read pixels from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* full-width region: copy all rows in one memcpy */
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*q),length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read pixels from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* transfer row by row */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* y stops short of rows only when a transfer above failed */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e f e r e n c e P i x e l C a c h e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReferencePixelCache() increments the reference count associated with the
%  pixel cache returning a pointer to the cache.
%
%  The format of the ReferencePixelCache method is:
%
%      Cache ReferencePixelCache(Cache cache_info)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
*/
MagickPrivate Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* the count is guarded by the cache semaphore, not an atomic */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e s e t P i x e l C a c h e C h a n n e l s                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetPixelCacheChannels() resets the pixel cache channels.
%
%  The format of the ResetPixelCacheChannels method is:
%
%      void ResetPixelCacheChannels(Image *)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ MagickPrivate void ResetPixelCacheChannels(Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); cache_info->number_channels=GetPixelChannels(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e s e t C a c h e A n o n y m o u s M e m o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetCacheAnonymousMemory() resets the anonymous_memory value. % % The format of the ResetCacheAnonymousMemory method is: % % void ResetCacheAnonymousMemory(void) % */ MagickPrivate void ResetCacheAnonymousMemory(void) { cache_anonymous_memory=0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e s e t P i x e l C a c h e E p o c h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetPixelCacheEpoch() resets the pixel cache epoch. % % The format of the ResetPixelCacheEpoch method is: % % void ResetPixelCacheEpoch(void) % */ MagickPrivate void ResetPixelCacheEpoch(void) { cache_epoch=0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheMethods() sets the image pixel methods to the specified ones. % % The format of the SetPixelCacheMethods() method is: % % SetPixelCacheMethods(Cache *,CacheMethods *cache_methods) % % A description of each parameter follows: % % o cache: the pixel cache. % % o cache_methods: Specifies a pointer to a CacheMethods structure. 
%
*/
/*
  Install caller-supplied pixel access handlers into the cache's method
  table.  Only handlers the caller actually provides (non-NULL entries in
  cache_methods) replace the current ones; NULL entries leave the existing
  handler in place.
*/
MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;

  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Set cache pixel methods.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_metacontent_from_handler !=
      (GetVirtualMetacontentFromHandler) NULL)
    cache_info->methods.get_virtual_metacontent_from_handler=
      cache_methods->get_virtual_metacontent_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    cache_info->methods.get_authentic_metacontent_from_handler=
      cache_methods->get_authentic_metacontent_from_handler;
  /*
    FIX: test the handler supplied by the caller, not the one already
    installed.  The previous code read cache_info->methods here, which
    (a) made it impossible to install a handler on a cache that had none
    and (b) clobbered a valid handler with NULL when the caller supplied
    none.  This now mirrors the authentic-pixel branch below.
  */
  get_one_virtual_pixel_from_handler=
    cache_methods->get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S e t P i x e l C a c h e N e x u s P i x e l s                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheNexusPixels() defines the region of the cache for the
%  specified cache nexus.
%
%  The format of the SetPixelCacheNexusPixels() method is:
%
%      Quantum SetPixelCacheNexusPixels(
%        const CacheInfo *magick_restrict cache_info,const MapMode mode,
%        const ssize_t x,const ssize_t y,const size_t width,const size_t height,
%        const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o mode: ReadMode, WriteMode, or IOMode.
%
%    o x,y,width,height: define the region of this particular cache nexus.
%
%    o buffered: if true, nexus pixels are buffered.
%
%    o nexus_info: the cache nexus to set.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Allocate (or memory-map) the staging buffer for a cache nexus.  On
  failure an exception is recorded and MagickFalse returned; on success
  nexus_info->cache/length/mapped are updated.
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MagickSizeType length,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /* Reject lengths that do not fit in size_t on this platform. */
  if (length != (MagickSizeType) ((size_t) length))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
  if (cache_anonymous_memory <= 0)
    {
      /* Heap allocation, zero-filled. */
      nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1,
        (size_t) length));
      if (nexus_info->cache != (Quantum *) NULL)
        (void) memset(nexus_info->cache,0,(size_t) length);
    }
  else
    {
      /* Anonymous memory map (fd -1) instead of the heap. */
      nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t) length);
      if (nexus_info->cache != (Quantum *) NULL)
        nexus_info->mapped=MagickTrue;
    }
  if (nexus_info->cache == (Quantum *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=length;
  return(MagickTrue);
}

/*
  Hint the first cache line of the nexus pixels into the CPU cache:
  read-intent for ReadMode, write-intent otherwise.
*/
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  if (nexus_info->length < CACHE_LINE_SIZE)
    return;
  if (mode == ReadMode)
    {
      MagickCachePrefetch((unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE,
        0,1);
      return;
    }
  MagickCachePrefetch((unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE,1,1);
}

static Quantum *SetPixelCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MapMode mode,
  const ssize_t x,const ssize_t y,const size_t width,const size_t height,
  const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((Quantum *) NULL);
  assert(nexus_info->signature == MagickCoreSignature);
  (void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
  if ((width == 0) || (height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "NoPixelsDefinedInCache","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      (buffered == MagickFalse))
    {
      /*
        Fast path: the requested region is either a full-width band or a
        single in-bounds row, so the nexus can alias the cache's own
        storage directly instead of going through a staging buffer.
      */
      if (((x >= 0) && (y >= 0) &&
          (((ssize_t) height+y-1) < (ssize_t) cache_info->rows)) &&
          (((x == 0) && (width == cache_info->columns)) || ((height == 1) &&
           (((ssize_t) width+x-1) < (ssize_t) cache_info->columns))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) y*cache_info->columns+x;
          nexus_info->pixels=cache_info->pixels+cache_info->number_channels*
            offset;
          nexus_info->metacontent=(void *) NULL;
          if (cache_info->metacontent_extent != 0)
            nexus_info->metacontent=(unsigned char *) cache_info->metacontent+
              offset*cache_info->metacontent_extent;
          nexus_info->region.width=width;
          nexus_info->region.height=height;
          nexus_info->region.x=x;
          nexus_info->region.y=y;
          nexus_info->authentic_pixel_cache=MagickTrue;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  if (((MagickSizeType) width > cache_info->width_limit) ||
      ((MagickSizeType) height > cache_info->height_limit))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "WidthOrHeightExceedsLimit","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  number_pixels=(MagickSizeType) width*height;
  /* NOTE(review): buffer is sized to at least max(columns,rows) pixels,
     presumably so one nexus can be reused for any row/column pass —
     confirm against callers. */
  length=MagickMax(number_pixels,MagickMax(cache_info->columns,
    cache_info->rows))*cache_info->number_channels*sizeof(*nexus_info->pixels);
  if (cache_info->metacontent_extent != 0)
    length+=number_pixels*cache_info->metacontent_extent;
  status=MagickTrue;
  if (nexus_info->cache == (Quantum *) NULL)
    status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
  else
    if (nexus_info->length < length)
      {
        /* Existing staging buffer too small: grow by realloc-style
           release + acquire. */
        RelinquishCacheNexusPixels(nexus_info);
        status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
      }
  if (status == MagickFalse)
    return((Quantum *) NULL);
  nexus_info->pixels=nexus_info->cache;
  nexus_info->metacontent=(void *) NULL;
  /* Metacontent is stored immediately after the pixel data. */
  if (cache_info->metacontent_extent != 0)
    nexus_info->metacontent=(void *) (nexus_info->pixels+
      cache_info->number_channels*number_pixels);
  nexus_info->region.width=width;
  nexus_info->region.height=height;
  nexus_info->region.x=x;
  nexus_info->region.y=y;
  nexus_info->authentic_pixel_cache=cache_info->type == PingCache ?
    MagickTrue : MagickFalse;
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  return(nexus_info->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t P i x e l C a c h e V i r t u a l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
%  pixel cache and returns the previous setting.  A virtual pixel is any pixel
%  access that is outside the boundaries of the image cache.
%
%  The format of the SetPixelCacheVirtualMethod() method is:
%
%      VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
%        const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: choose the type of virtual pixel.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Force an opaque alpha channel onto every pixel of the image; helper for
  SetPixelCacheVirtualMethod() below.
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  CacheView
    *magick_restrict image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);  /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* A failed row poisons status; remaining iterations bail early. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    status=SyncCacheViewAuthenticPixels(image_view,exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  VirtualPixelMethod
    method;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Remember the previous method: it is the return value. */
  method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns != 0) && (image->rows != 0))
    switch (virtual_pixel_method)
    {
      case BackgroundVirtualPixelMethod:
      {
        /* Background color may require an alpha channel and/or a color
           (non-gray) colorspace to be representable. */
        if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
            (image->alpha_trait == UndefinedPixelTrait))
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        if ((IsPixelInfoGray(&image->background_color) == MagickFalse) &&
            (IsGrayColorspace(image->colorspace) != MagickFalse))
          (void) SetImageColorspace(image,sRGBColorspace,exception);
        break;
      }
      case TransparentVirtualPixelMethod:
      {
        if (image->alpha_trait == UndefinedPixelTrait)
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        break;
      }
      default:
        break;
    }
  return(method);
}

#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c A u t h e n t i c O p e n C L B u f f e r                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have
%  been completed and updates the host memory.
%
%  The format of the SyncAuthenticOpenCLBuffer() method is:
%
%      void SyncAuthenticOpenCLBuffer(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  assert(cache_info != (CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  /* Only memory caches backed by an OpenCL buffer need syncing. */
  if ((cache_info->type != MemoryCache) ||
      (cache_info->opencl == (MagickCLCacheInfo) NULL))
    return;
  /*
    Ensure single threaded access to OpenCL environment.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  UnlockSemaphoreInfo(cache_info->semaphore);
}

MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  cache_info=(CacheInfo *) image->cache;
  CopyOpenCLBuffer(cache_info);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c A u t h e n t i c P i x e l C a c h e N e x u s                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
%  in-memory or disk cache.  The method returns MagickTrue if the pixel region
%  is synced, otherwise MagickFalse.
%
%  The format of the SyncAuthenticPixelCacheNexus() method is:
%
%      MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o nexus_info: the cache nexus to sync.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  /* Apply write/composite masks before the pixels land in the cache. */
  if (image->mask_trait != UpdatePixelTrait)
    {
      if (((image->channels & WriteMaskChannel) != 0) &&
          (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
      if (((image->channels & CompositeMaskChannel) != 0) &&
          (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
    }
  /* Direct-aliased nexus: pixels are already in the cache, just mark the
     image tainted. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      if (image->taint == MagickFalse)
        image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  if ((cache_info->metacontent_extent != 0) &&
      (WritePixelCacheMetacontent(cache_info,nexus_info,exception) ==
       MagickFalse))
    return(MagickFalse);
  if ((status != MagickFalse) && (image->taint == MagickFalse))
    image->taint=MagickTrue;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c A u t h e n t i c P i x e l C a c h e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
%  or disk cache.  The method returns MagickTrue if the pixel region is synced,
%  otherwise MagickFalse.
%
%  The format of the SyncAuthenticPixelsCache() method is:
%
%      MagickBooleanType SyncAuthenticPixelsCache(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/*
  Sync the per-thread cache nexus for the calling OpenMP thread back to
  the pixel cache.
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /* Each thread owns its own nexus, indexed by OpenMP thread id. */
  status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c A u t h e n t i c P i x e l s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
%  The method returns MagickTrue if the pixel region is flushed, otherwise
%  MagickFalse.
%
%  The format of the SyncAuthenticPixels() method is:
%
%      MagickBooleanType SyncAuthenticPixels(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Delegate to an installed handler (see SetPixelCacheMethods) when one
     is present; otherwise sync this thread's nexus directly. */
  if (cache_info->methods.sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    {
      status=cache_info->methods.sync_authentic_pixels_handler(image,
        exception);
      return(status);
    }
  assert(id < (int) cache_info->number_threads);
  status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c I m a g e P i x e l C a c h e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
%  The method returns MagickTrue if the pixel region is flushed, otherwise
%  MagickFalse.
%
%  The format of the SyncImagePixelCache() method is:
%
%      MagickBooleanType SyncImagePixelCache(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  /* GetImagePixelCache() validates/flushes the cache as a side effect. */
  cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception);
  return(cache_info == (CacheInfo *) NULL ?
MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   W r i t e P i x e l C a c h e M e t a c o n t e n t                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePixelCacheMetacontent() writes the meta-content to the specified region
%  of the pixel cache.
%
%  The format of the WritePixelCacheMetacontent() method is:
%
%      MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to write the meta-content.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const unsigned char
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  /* Nexus aliases the cache directly: nothing to copy back. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  /* offset: first metacontent cell of the region, in cells; length: one
     row of the region in bytes; extent: the whole region in bytes. */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=(MagickSizeType) length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict q;

      /*
        Write associated pixels to memory.
      */
      /* Full-width region that fits size_t: collapse to one memcpy. */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width*cache_info->metacontent_extent;
        q+=cache_info->columns*cache_info->metacontent_extent;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write associated pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /* On disk, metacontent lives after all pixel data; extent is reused
         here as the pixel-area size of the whole cache. */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write metacontent to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* A short write leaves y < rows: report failure. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   W r i t e C a c h e P i x e l s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePixelCachePixels() writes image pixels to the specified region of the
%  pixel cache.
%
%  The format of the WritePixelCachePixels() method is:
%
%      MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to write the pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const Quantum
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  /* Nexus aliases the cache directly: nothing to copy back. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  /* offset: first pixel of the region, in pixels; length: one region row
     in bytes; extent: the whole region in bytes. */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width*
    sizeof(Quantum);
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict q;

      /*
        Write pixels to memory.
      */
      /* Full-width region that fits size_t: collapse to one memcpy. */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*nexus_info->region.width;
        q+=cache_info->number_channels*cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*p),length,(const unsigned char *)
          p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      /* Row-at-a-time unless the whole region fits one transfer. */
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* A short write leaves y < rows: report failure. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
paddle_tensor_impl.h
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include <cmath> #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/hostdevice.h" #include "unsupported/Eigen/CXX11/Tensor" #include "./type_utils.h" namespace common { using u128 = unsigned __int128; template <typename T> void PaddleTensor<T>::reshape(const std::vector<size_t> &shape) { std::vector<int64_t> shape_(shape.cbegin(), shape.cend()); paddle::framework::DDim dim(shape_.data(), shape_.size()); // 0 for default size _tensor.mutable_data<T>(dim, place(), 0); } template <typename T> void PaddleTensor<T>::add(const TensorAdapter<T> *rhs, TensorAdapter<T> *ret) const { auto rhs_ = dynamic_cast<const PaddleTensor<T> *>(rhs); auto ret_ = dynamic_cast<PaddleTensor<T> *>(ret); PADDLE_ENFORCE_EQ(_tensor.dims(), rhs_->_tensor.dims(), "Input dims should be equal."); auto eigen_x = paddle::framework::EigenVector<T>::Flatten(_tensor); auto eigen_y = paddle::framework::EigenVector<T>::Flatten(rhs_->_tensor); auto eigen_z = paddle::framework::EigenVector<T>::Flatten(ret_->_tensor); auto &place = *eigen_device(); eigen_z.device(place) = eigen_x + eigen_y; } template <typename T> void PaddleTensor<T>::sub(const TensorAdapter<T> *rhs, TensorAdapter<T> *ret) const { auto rhs_ = dynamic_cast<const PaddleTensor<T> *>(rhs); auto ret_ = 
dynamic_cast<PaddleTensor<T> *>(ret); PADDLE_ENFORCE_EQ(_tensor.dims(), rhs_->_tensor.dims(), "Input dims should be equal."); auto eigen_x = paddle::framework::EigenVector<T>::Flatten(_tensor); auto eigen_y = paddle::framework::EigenVector<T>::Flatten(rhs_->_tensor); auto eigen_z = paddle::framework::EigenVector<T>::Flatten(ret_->_tensor); auto &place = *eigen_device(); eigen_z.device(place) = eigen_x - eigen_y; } template <typename T> void PaddleTensor<T>::mul(const TensorAdapter<T> *rhs, TensorAdapter<T> *ret) const { auto rhs_ = dynamic_cast<const PaddleTensor<T> *>(rhs); auto ret_ = dynamic_cast<PaddleTensor<T> *>(ret); PADDLE_ENFORCE_EQ(_tensor.dims(), rhs_->_tensor.dims(), "Input dims should be equal."); auto eigen_x = paddle::framework::EigenVector<T>::Flatten(_tensor); auto eigen_y = paddle::framework::EigenVector<T>::Flatten(rhs_->_tensor); auto eigen_z = paddle::framework::EigenVector<T>::Flatten(ret_->_tensor); auto &place = *eigen_device(); eigen_z.device(place) = eigen_x * eigen_y; } template <typename T> void PaddleTensor<T>::div(const TensorAdapter<T> *rhs, TensorAdapter<T> *ret) const { auto rhs_ = dynamic_cast<const PaddleTensor<T> *>(rhs); PADDLE_ENFORCE_EQ(_tensor.dims(), rhs_->_tensor.dims(), "Input dims should be equal."); auto div_ = [](T a, T b) -> T { return a / b; }; std::transform(data(), data() + numel(), rhs->data(), ret->data(), div_); } template <typename T> void PaddleTensor<T>::mat_mul(const TensorAdapter<T> *rhs, TensorAdapter<T> *ret, bool transpose_lhs, bool transpose_rhs) const { auto rhs_ = dynamic_cast<const PaddleTensor<T> *>(rhs); auto ret_ = dynamic_cast<PaddleTensor<T> *>(ret); auto &mat_a = _tensor; auto &mat_b = rhs_->_tensor; auto &mat_out = ret_->_tensor; // tensor with dims like [ h, w ] or [ batch_size , h, w ] is matrix auto is_matrix = [](const paddle::framework::Tensor &t) -> bool { return t.dims().size() == 2 || t.dims().size() == 3; }; PADDLE_ENFORCE(mat_a.place() == mat_b.place() && mat_a.place() == 
mat_out.place(), "The places of matrices must be same"); PADDLE_ENFORCE(is_matrix(mat_a) && is_matrix(mat_b) && is_matrix(mat_out), "The input and output of matmul must be matrix " "or batched matrix."); PADDLE_ENFORCE(mat_a.dims().size() >= mat_b.dims().size(), "Only following dims are supported: " "Mat A is [BatchSize, H, W] and Mat B is [BatchSize, H, W]." "Mat A is [BatchSize, H, W] and Mat B is [H, W]." "Mat A is [H, W] and Mat B is [H, W]."); using EigenTensor = paddle::framework::EigenTensor<T, 3>; using EigenTensor4 = paddle::framework::EigenTensor<T, 4>; using EigenTensor2 = paddle::framework::EigenTensor<T, 2>; auto to_const_eigen_tensor = [](const paddle::framework::Tensor &t) { auto dims = t.dims(); if (dims.size() == 2) { dims = paddle::framework::make_ddim({1, dims[0], dims[1]}); } return EigenTensor::From(t, dims); }; auto to_eigen_tensor = [](paddle::framework::Tensor &t) { auto dims = t.dims(); if (dims.size() == 2) { dims = paddle::framework::make_ddim({1, dims[0], 1, dims[1]}); } else { // dims.size() == 3 dims = paddle::framework::make_ddim({dims[0], dims[1], 1, dims[2]}); } return EigenTensor4::From(t, dims); }; auto &place = *eigen_device(); auto t_a = to_const_eigen_tensor(mat_a); auto t_b = to_const_eigen_tensor(mat_b); auto t_c = to_eigen_tensor(mat_out); PADDLE_ENFORCE(t_a.dimension(2 - transpose_lhs) == t_b.dimension(1 + transpose_rhs), "W_A != H_B."); auto batch_size = t_a.dimension(0); auto batch_size_b = t_b.dimension(0); PADDLE_ENFORCE(batch_size_b == batch_size || batch_size_b == 1, "Mat B BatchSize mismatched."); PADDLE_ENFORCE(t_c.dimension(0) == batch_size, "Result Mat BatchSize mismatched."); auto hc = t_c.dimension(1); auto wc = t_c.dimension(3); // matrix product of tensor contractions // please refer to // github.com/eigenteam/eigen-git-mirror/blob/master/unsupported/Eigen/CXX11/src/Tensor/README.md Eigen::array<Eigen::IndexPair<int>, 1> axis = { Eigen::IndexPair<int>(1 - transpose_lhs, 0 + transpose_rhs)}; #pragma omp for for 
(int i = 0; i < batch_size; ++i) { Eigen::TensorMap<Eigen::Tensor<T, 2, Eigen::RowMajor, Eigen::DenseIndex>> t_c_chip(t_c.data() + i * hc * wc, hc, wc); int idx_t_b = batch_size_b == 1 ? 0 : i; t_c_chip.device(place) = t_a.chip(i, 0).contract(t_b.chip(idx_t_b, 0), axis); } } template <typename T> void PaddleTensor<T>::negative(TensorAdapter<T> *ret) const { auto neg_ = [](T a) -> T { return -a; }; std::transform(data(), data() + numel(), ret->data(), neg_); } template <typename T> void PaddleTensor<T>::bitwise_and(const TensorAdapter<T> *rhs, TensorAdapter<T> *ret) const { auto rhs_ = dynamic_cast<const PaddleTensor<T> *>(rhs); PADDLE_ENFORCE_EQ(_tensor.dims(), rhs_->_tensor.dims(), "Input dims should be equal."); auto and_ = [](T a, T b) -> T { return a & b; }; std::transform(data(), data() + numel(), rhs->data(), ret->data(), and_); } template <typename T> void PaddleTensor<T>::bitwise_or(const TensorAdapter<T> *rhs, TensorAdapter<T> *ret) const { auto rhs_ = dynamic_cast<const PaddleTensor<T> *>(rhs); PADDLE_ENFORCE_EQ(_tensor.dims(), rhs_->_tensor.dims(), "Input dims should be equal."); auto or_ = [](T a, T b) -> T { return a | b; }; std::transform(data(), data() + numel(), rhs->data(), ret->data(), or_); } template <typename T> void PaddleTensor<T>::bitwise_not(TensorAdapter<T> *ret) const { auto not_ = [](T a) -> T { return ~a; }; std::transform(data(), data() + numel(), ret->data(), not_); } template <typename T> void PaddleTensor<T>::bitwise_xor(const TensorAdapter<T> *rhs, TensorAdapter<T> *ret) const { auto rhs_ = dynamic_cast<const PaddleTensor<T> *>(rhs); PADDLE_ENFORCE_EQ(_tensor.dims(), rhs_->_tensor.dims(), "Input dims should be equal."); auto xor_ = [](T a, T b) -> T { return a ^ b; }; std::transform(data(), data() + numel(), rhs->data(), ret->data(), xor_); } template <typename T> void PaddleTensor<T>::lshift(size_t rhs, TensorAdapter<T> *ret) const { auto lshift_functor = [rhs](T a) -> T { return a << rhs; }; std::transform(data(), data() + 
numel(), ret->data(), lshift_functor); } template <typename T> void PaddleTensor<T>::rshift(size_t rhs, TensorAdapter<T> *ret) const { auto rshift_functor = [rhs](T a) -> T { return a >> rhs; }; std::transform(data(), data() + numel(), ret->data(), rshift_functor); } template <typename T> void PaddleTensor<T>::logical_rshift(size_t rhs, TensorAdapter<T> *ret) const { auto logical_rshift_functor = [rhs](T a) -> T { const size_t word_len = sizeof(T) * 8; T mask = (T)1 << word_len - rhs - 1; mask |= mask - 1; mask = rhs >= word_len ? 0 : mask; return a >> rhs & mask; }; std::transform(data(), data() + numel(), ret->data(), logical_rshift_functor); } template <typename T> void PaddleTensor<T>::add128(const TensorAdapter<T> *rhs, TensorAdapter<T> *ret, bool lhs_128, bool rhs_128) const { PADDLE_ENFORCE_EQ(numel() / (1 + lhs_128), rhs->numel() / (1 + rhs_128), "Input numel should be equal."); using ConstType = Eigen::Tensor<const __int128, 1>; using Type = Eigen::Tensor<u128, 1>; size_t numel_ = ret->numel() / (sizeof(u128) / sizeof(T)); Type x(numel_); for (size_t i = 0; i < numel_; ++i) { x(i) = lhs_128 ? *(reinterpret_cast<const u128*>(data()) + i) : *(data() + i); } Type y(numel_); for (size_t i = 0; i < numel_; ++i) { y(i) = rhs_128 ? *(reinterpret_cast<const u128*>(rhs->data()) + i) : *(rhs->data() + i); } Eigen::TensorMap<Type> z(reinterpret_cast<u128*>(ret->data()), numel_); auto &place = *eigen_device(); z.device(place) = x + y; } template <typename T> void PaddleTensor<T>::sub128(const TensorAdapter<T> *rhs, TensorAdapter<T> *ret, bool lhs_128, bool rhs_128) const { PADDLE_ENFORCE_EQ(numel() / (1 + lhs_128), rhs->numel() / (1 + rhs_128), "Input numel should be equal."); using ConstType = Eigen::Tensor<const u128, 1>; using Type = Eigen::Tensor<u128, 1>; size_t numel_ = ret->numel() / (sizeof(u128) / sizeof(T)); Type x(numel_); for (size_t i = 0; i < numel_; ++i) { x(i) = lhs_128 ? 
*(reinterpret_cast<const u128*>(data()) + i) : *(data() + i); } Type y(numel_); for (size_t i = 0; i < numel_; ++i) { y(i) = rhs_128 ? *(reinterpret_cast<const u128*>(rhs->data()) + i) : static_cast<typename unsigned_type<T>::value_type>(*(rhs->data() + i)); } Eigen::TensorMap<Type> z(reinterpret_cast<u128*>(ret->data()), numel_); auto &place = *eigen_device(); z.device(place) = x - y; } template <typename T> void PaddleTensor<T>::mul128_with_truncate(const TensorAdapter<T> *rhs, TensorAdapter<T> *ret, bool lhs_128, bool rhs_128) const { PADDLE_ENFORCE_EQ(numel() / (1 + lhs_128), rhs->numel() / (1 + rhs_128), "Input numel should be equal."); using ConstType = Eigen::Tensor<const __int128, 1>; using Type = Eigen::Tensor<__int128, 1>; size_t numel_ = ret->numel(); Type x(numel_); for (size_t i = 0; i < numel_; ++i) { x(i) = lhs_128 ? *(reinterpret_cast<const u128*>(data()) + i) : *(data() + i); } Type y(numel_); for (size_t i = 0; i < numel_; ++i) { y(i) = rhs_128 ? *(reinterpret_cast<const u128*>(rhs->data()) + i) : static_cast<typename unsigned_type<T>::value_type>(*(rhs->data() + i)); } Eigen::TensorMap<Eigen::Tensor<T, 1>> z(ret->data(), numel_); Type xy = x * y; Eigen::Tensor<T, 1> xy_trunc(numel_); // truncate for (size_t i = 0; i < numel_; ++i) { __int128 tmp = xy(i); xy_trunc(i) = (T)(tmp >> _scaling_factor); } auto &place = *eigen_device(); z.device(place) = xy_trunc; } template <typename T> template <typename U> PaddleTensor<T> & PaddleTensor<T>::from_float_point_type(const paddle::framework::Tensor &tensor, size_t scaling_factor) { double scale = std::pow(2, scaling_factor); auto cast = [scale](U a) -> T { return a * scale; }; _tensor.mutable_data<T>(tensor.dims(), place(), 0); std::transform(tensor.template data<U>(), tensor.template data<U>() + tensor.numel(), _tensor.template data<T>(), cast); this->scaling_factor() = scaling_factor; return *this; } template <typename T> template <typename U> PaddleTensor<T> &PaddleTensor<T>::from_float_point_scalar( 
const U &scalar, const std::vector<size_t> &shape, size_t scaling_factor) { double scale = std::pow(2, scaling_factor); auto trans = [scale, scalar](T) -> T { return scalar * scale; }; reshape(shape); std::transform(_tensor.template data<T>(), _tensor.template data<T>() + _tensor.numel(), _tensor.template data<T>(), trans); this->scaling_factor() = scaling_factor; return *this; } template <typename T> void PaddleTensor<T>::slice(size_t begin_idx, size_t end_idx, TensorAdapter<T> *ret) const { auto ret_ = dynamic_cast<PaddleTensor<T> *>(ret); ret_->_tensor = _tensor.Slice(begin_idx, end_idx); ret->scaling_factor() = scaling_factor(); } template<typename T> std::shared_ptr<TensorAdapter<T>> PaddleTensor<T>::operator[](size_t index) { PADDLE_ENFORCE_GT(this->shape().size(), 1, "lhs's shape must great than 1."); auto slice_shape = this->shape(); slice_shape.erase(slice_shape.begin()); std::shared_ptr<PaddleTensor<T>> ret = std::make_shared<PaddleTensor<T>>(_device_ctx); ret->reshape(slice_shape); this->slice(index, index + 1, ret.get()); ret->reshape(slice_shape); return ret; } template<typename T> const std::shared_ptr<TensorAdapter<T>> PaddleTensor<T>::operator[](size_t index) const { return const_cast<PaddleTensor*>(this)->operator[](index); } } // namespace common
resize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % RRRR EEEEE SSSSS IIIII ZZZZZ EEEEE % % R R E SS I ZZ E % % RRRR EEE SSS I ZZZ EEE % % R R E SS I ZZ E % % R R EEEEE SSSSS IIIII ZZZZZ EEEEE % % % % % % MagickCore Image Resize Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/draw.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/magick.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resize-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "MagickCore/version.h" #if defined(MAGICKCORE_LQR_DELEGATE) #include <lqr.h> #endif /* Typedef declarations. 
*/ struct _ResizeFilter { double (*filter)(const double,const ResizeFilter *), (*window)(const double,const ResizeFilter *), support, /* filter region of support - the filter support limit */ window_support, /* window support, usally equal to support (expert only) */ scale, /* dimension scaling to fit window support (usally 1.0) */ blur, /* x-scale (blur-sharpen) */ coefficient[7]; /* cubic coefficents for BC-cubic filters */ ResizeWeightingFunctionType filterWeightingType, windowWeightingType; size_t signature; }; /* Forward declaractions. */ static double I0(double x), BesselOrderOne(double), Sinc(const double, const ResizeFilter *), SincFast(const double, const ResizeFilter *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + F i l t e r F u n c t i o n s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % These are the various filter and windowing functions that are provided. % % They are internal to this module only. See AcquireResizeFilterInfo() for % details of the access to these functions, via the GetResizeFilterSupport() % and GetResizeFilterWeight() API interface. % % The individual filter functions have this format... % % static MagickRealtype *FilterName(const double x,const double support) % % A description of each parameter follows: % % o x: the distance from the sampling point generally in the range of 0 to % support. The GetResizeFilterWeight() ensures this a positive value. % % o resize_filter: current filter information. This allows function to % access support, and possibly other pre-calculated information defining % the functions. % */ static double Blackman(const double x, const ResizeFilter *magick_unused(resize_filter)) { /* Blackman: 2nd order cosine windowing function: 0.42 + 0.5 cos(pi x) + 0.08 cos(2pi x) Refactored by Chantal Racette and Nicolas Robidoux to one trig call and five flops. 
*/ const double cosine=cos((double) (MagickPI*x)); magick_unreferenced(resize_filter); return(0.34+cosine*(0.5+cosine*0.16)); } static double Bohman(const double x, const ResizeFilter *magick_unused(resize_filter)) { /* Bohman: 2rd Order cosine windowing function: (1-x) cos(pi x) + sin(pi x) / pi. Refactored by Nicolas Robidoux to one trig call, one sqrt call, and 7 flops, taking advantage of the fact that the support of Bohman is 1.0 (so that we know that sin(pi x) >= 0). */ const double cosine=cos((double) (MagickPI*x)); const double sine=sqrt(1.0-cosine*cosine); magick_unreferenced(resize_filter); return((1.0-x)*cosine+(1.0/MagickPI)*sine); } static double Box(const double magick_unused(x), const ResizeFilter *magick_unused(resize_filter)) { magick_unreferenced(x); magick_unreferenced(resize_filter); /* A Box filter is a equal weighting function (all weights equal). DO NOT LIMIT results by support or resize point sampling will work as it requests points beyond its normal 0.0 support size. */ return(1.0); } static double Cosine(const double x, const ResizeFilter *magick_unused(resize_filter)) { magick_unreferenced(resize_filter); /* Cosine window function: cos((pi/2)*x). */ return((double)cos((double) (MagickPI2*x))); } static double CubicBC(const double x,const ResizeFilter *resize_filter) { /* Cubic Filters using B,C determined values: Mitchell-Netravali B = 1/3 C = 1/3 "Balanced" cubic spline filter Catmull-Rom B = 0 C = 1/2 Interpolatory and exact on linears Spline B = 1 C = 0 B-Spline Gaussian approximation Hermite B = 0 C = 0 B-Spline interpolator See paper by Mitchell and Netravali, Reconstruction Filters in Computer Graphics Computer Graphics, Volume 22, Number 4, August 1988 http://www.cs.utexas.edu/users/fussell/courses/cs384g/lectures/mitchell/ Mitchell.pdf. 
Coefficents are determined from B,C values: P0 = ( 6 - 2*B )/6 = coeff[0] P1 = 0 P2 = (-18 +12*B + 6*C )/6 = coeff[1] P3 = ( 12 - 9*B - 6*C )/6 = coeff[2] Q0 = ( 8*B +24*C )/6 = coeff[3] Q1 = ( -12*B -48*C )/6 = coeff[4] Q2 = ( 6*B +30*C )/6 = coeff[5] Q3 = ( - 1*B - 6*C )/6 = coeff[6] which are used to define the filter: P0 + P1*x + P2*x^2 + P3*x^3 0 <= x < 1 Q0 + Q1*x + Q2*x^2 + Q3*x^3 1 <= x < 2 which ensures function is continuous in value and derivative (slope). */ if (x < 1.0) return(resize_filter->coefficient[0]+x*(x* (resize_filter->coefficient[1]+x*resize_filter->coefficient[2]))); if (x < 2.0) return(resize_filter->coefficient[3]+x*(resize_filter->coefficient[4]+x* (resize_filter->coefficient[5]+x*resize_filter->coefficient[6]))); return(0.0); } static double CubicSpline(const double x,const ResizeFilter *resize_filter) { if (resize_filter->support <= 2.0) { /* 2-lobe Spline filter. */ if (x < 1.0) return(((x-9.0/5.0)*x-1.0/5.0)*x+1.0); if (x < 2.0) return(((-1.0/3.0*(x-1.0)+4.0/5.0)*(x-1.0)-7.0/15.0)*(x-1.0)); return(0.0); } if (resize_filter->support <= 3.0) { /* 3-lobe Spline filter. */ if (x < 1.0) return(((13.0/11.0*x-453.0/209.0)*x-3.0/209.0)*x+1.0); if (x < 2.0) return(((-6.0/11.0*(x-1.0)+270.0/209.0)*(x-1.0)-156.0/209.0)*(x-1.0)); if (x < 3.0) return(((1.0/11.0*(x-2.0)-45.0/209.0)*(x-2.0)+26.0/209.0)*(x-2.0)); return(0.0); } /* 4-lobe Spline filter. */ if (x < 1.0) return(((49.0/41.0*x-6387.0/2911.0)*x-3.0/2911.0)*x+1.0); if (x < 2.0) return(((-24.0/41.0*(x-1.0)+4032.0/2911.0)*(x-1.0)-2328.0/2911.0)*(x-1.0)); if (x < 3.0) return(((6.0/41.0*(x-2.0)-1008.0/2911.0)*(x-2.0)+582.0/2911.0)*(x-2.0)); if (x < 4.0) return(((-1.0/41.0*(x-3.0)+168.0/2911.0)*(x-3.0)-97.0/2911.0)*(x-3.0)); return(0.0); } static double Gaussian(const double x,const ResizeFilter *resize_filter) { /* Gaussian with a sigma = 1/2 (or as user specified) Gaussian Formula (1D) ... exp( -(x^2)/((2.0*sigma^2) ) / (sqrt(2*PI)*sigma^2)) Gaussian Formula (2D) ... 
exp( -(x^2+y^2)/(2.0*sigma^2) ) / (PI*sigma^2) ) or for radius exp( -(r^2)/(2.0*sigma^2) ) / (PI*sigma^2) ) Note that it is only a change from 1-d to radial form is in the normalization multiplier which is not needed or used when Gaussian is used as a filter. The constants are pre-calculated... coeff[0]=sigma; coeff[1]=1.0/(2.0*sigma^2); coeff[2]=1.0/(sqrt(2*PI)*sigma^2); exp( -coeff[1]*(x^2)) ) * coeff[2]; However the multiplier coeff[1] is need, the others are informative only. This separates the gaussian 'sigma' value from the 'blur/support' settings allowing for its use in special 'small sigma' gaussians, without the filter 'missing' pixels because the support becomes too small. */ return(exp((double)(-resize_filter->coefficient[1]*x*x))); } static double Hann(const double x, const ResizeFilter *magick_unused(resize_filter)) { /* Cosine window function: 0.5+0.5*cos(pi*x). */ const double cosine=cos((double) (MagickPI*x)); magick_unreferenced(resize_filter); return(0.5+0.5*cosine); } static double Hamming(const double x, const ResizeFilter *magick_unused(resize_filter)) { /* Offset cosine window function: .54 + .46 cos(pi x). */ const double cosine=cos((double) (MagickPI*x)); magick_unreferenced(resize_filter); return(0.54+0.46*cosine); } static double Jinc(const double x, const ResizeFilter *magick_unused(resize_filter)) { magick_unreferenced(resize_filter); /* See Pratt "Digital Image Processing" p.97 for Jinc/Bessel functions. http://mathworld.wolfram.com/JincFunction.html and page 11 of http://www.ph.ed.ac.uk/%7ewjh/teaching/mo/slides/lens/lens.pdf The original "zoom" program by Paul Heckbert called this "Bessel". But really it is more accurately named "Jinc". */ if (x == 0.0) return(0.5*MagickPI); return(BesselOrderOne(MagickPI*x)/x); } static double Kaiser(const double x,const ResizeFilter *resize_filter) { /* Kaiser Windowing Function (bessel windowing) I0( beta * sqrt( 1-x^2) ) / IO(0) Beta (coeff[0]) is a free value from 5 to 8 (defaults to 6.5). 
However it is typically defined in terms of Alpha*PI The normalization factor (coeff[1]) is not actually needed, but without it the filters has a large value at x=0 making it difficult to compare the function with other windowing functions. */ return(resize_filter->coefficient[1]*I0(resize_filter->coefficient[0]* sqrt((double) (1.0-x*x)))); } static double Lagrange(const double x,const ResizeFilter *resize_filter) { double value; register ssize_t i; ssize_t n, order; /* Lagrange piecewise polynomial fit of sinc: N is the 'order' of the lagrange function and depends on the overall support window size of the filter. That is: for a support of 2, it gives a lagrange-4 (piecewise cubic function). "n" identifies the piece of the piecewise polynomial. See Survey: Interpolation Methods, IEEE Transactions on Medical Imaging, Vol 18, No 11, November 1999, p1049-1075, -- Equation 27 on p1064. */ if (x > resize_filter->support) return(0.0); order=(ssize_t) (2.0*resize_filter->window_support); /* number of pieces */ n=(ssize_t) (resize_filter->window_support+x); value=1.0f; for (i=0; i < order; i++) if (i != n) value*=(n-i-x)/(n-i); return(value); } static double Quadratic(const double x, const ResizeFilter *magick_unused(resize_filter)) { magick_unreferenced(resize_filter); /* 2rd order (quadratic) B-Spline approximation of Gaussian. */ if (x < 0.5) return(0.75-x*x); if (x < 1.5) return(0.5*(x-1.5)*(x-1.5)); return(0.0); } static double Sinc(const double x, const ResizeFilter *magick_unused(resize_filter)) { magick_unreferenced(resize_filter); /* Scaled sinc(x) function using a trig call: sinc(x) == sin(pi x)/(pi x). 
*/ if (x != 0.0) { const double alpha=(double) (MagickPI*x); return(sin((double) alpha)/alpha); } return((double) 1.0); } static double SincFast(const double x, const ResizeFilter *magick_unused(resize_filter)) { magick_unreferenced(resize_filter); /* Approximations of the sinc function sin(pi x)/(pi x) over the interval [-4,4] constructed by Nicolas Robidoux and Chantal Racette with funding from the Natural Sciences and Engineering Research Council of Canada. Although the approximations are polynomials (for low order of approximation) and quotients of polynomials (for higher order of approximation) and consequently are similar in form to Taylor polynomials / Pade approximants, the approximations are computed with a completely different technique. Summary: These approximations are "the best" in terms of bang (accuracy) for the buck (flops). More specifically: Among the polynomial quotients that can be computed using a fixed number of flops (with a given "+ - * / budget"), the chosen polynomial quotient is the one closest to the approximated function with respect to maximum absolute relative error over the given interval. The Remez algorithm, as implemented in the boost library's minimax package, is the key to the construction: http://www.boost.org/doc/libs/1_36_0/libs/ math/doc/sf_and_dist/html/math_toolkit/backgrounders/remez.html If outside of the interval of approximation, use the standard trig formula. */ if (x > 4.0) { const double alpha=(double) (MagickPI*x); return(sin((double) alpha)/alpha); } { /* The approximations only depend on x^2 (sinc is an even function). */ const double xx = x*x; #if MAGICKCORE_QUANTUM_DEPTH <= 8 /* Maximum absolute relative error 6.3e-6 < 1/2^17. 
*/ const double c0 = 0.173610016489197553621906385078711564924e-2L; const double c1 = -0.384186115075660162081071290162149315834e-3L; const double c2 = 0.393684603287860108352720146121813443561e-4L; const double c3 = -0.248947210682259168029030370205389323899e-5L; const double c4 = 0.107791837839662283066379987646635416692e-6L; const double c5 = -0.324874073895735800961260474028013982211e-8L; const double c6 = 0.628155216606695311524920882748052490116e-10L; const double c7 = -0.586110644039348333520104379959307242711e-12L; const double p = c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7)))))); return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p); #elif MAGICKCORE_QUANTUM_DEPTH <= 16 /* Max. abs. rel. error 2.2e-8 < 1/2^25. */ const double c0 = 0.173611107357320220183368594093166520811e-2L; const double c1 = -0.384240921114946632192116762889211361285e-3L; const double c2 = 0.394201182359318128221229891724947048771e-4L; const double c3 = -0.250963301609117217660068889165550534856e-5L; const double c4 = 0.111902032818095784414237782071368805120e-6L; const double c5 = -0.372895101408779549368465614321137048875e-8L; const double c6 = 0.957694196677572570319816780188718518330e-10L; const double c7 = -0.187208577776590710853865174371617338991e-11L; const double c8 = 0.253524321426864752676094495396308636823e-13L; const double c9 = -0.177084805010701112639035485248501049364e-15L; const double p = c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*(c7+xx*(c8+xx*c9)))))))); return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p); #else /* Max. abs. rel. error 1.2e-12 < 1/2^39. 
*/ const double c0 = 0.173611111110910715186413700076827593074e-2L; const double c1 = -0.289105544717893415815859968653611245425e-3L; const double c2 = 0.206952161241815727624413291940849294025e-4L; const double c3 = -0.834446180169727178193268528095341741698e-6L; const double c4 = 0.207010104171026718629622453275917944941e-7L; const double c5 = -0.319724784938507108101517564300855542655e-9L; const double c6 = 0.288101675249103266147006509214934493930e-11L; const double c7 = -0.118218971804934245819960233886876537953e-13L; const double p = c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7)))))); const double d0 = 1.0L; const double d1 = 0.547981619622284827495856984100563583948e-1L; const double d2 = 0.134226268835357312626304688047086921806e-2L; const double d3 = 0.178994697503371051002463656833597608689e-4L; const double d4 = 0.114633394140438168641246022557689759090e-6L; const double q = d0+xx*(d1+xx*(d2+xx*(d3+xx*d4))); return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)/q*p); #endif } } static double Triangle(const double x, const ResizeFilter *magick_unused(resize_filter)) { magick_unreferenced(resize_filter); /* 1st order (linear) B-Spline, bilinear interpolation, Tent 1D filter, or a Bartlett 2D Cone filter. Also used as a Bartlett Windowing function for Sinc(). */ if (x < 1.0) return(1.0-x); return(0.0); } static double Welch(const double x, const ResizeFilter *magick_unused(resize_filter)) { magick_unreferenced(resize_filter); /* Welch parabolic windowing filter. */ if (x < 1.0) return(1.0-x*x); return(0.0); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e R e s i z e F i l t e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireResizeFilter() allocates the ResizeFilter structure. 
Choose from % these filters: % % FIR (Finite impulse Response) Filters % Box Triangle Quadratic % Spline Hermite Catrom % Mitchell % % IIR (Infinite impulse Response) Filters % Gaussian Sinc Jinc (Bessel) % % Windowed Sinc/Jinc Filters % Blackman Bohman Lanczos % Hann Hamming Cosine % Kaiser Welch Parzen % Bartlett % % Special Purpose Filters % Cubic SincFast LanczosSharp Lanczos2 Lanczos2Sharp % Robidoux RobidouxSharp % % The users "-filter" selection is used to lookup the default 'expert' % settings for that filter from a internal table. However any provided % 'expert' settings (see below) may override this selection. % % FIR filters are used as is, and are limited to that filters support window % (unless over-ridden). 'Gaussian' while classed as an IIR filter, is also % simply clipped by its support size (currently 1.5 or approximately 3*sigma % as recommended by many references) % % The special a 'cylindrical' filter flag will promote the default 4-lobed % Windowed Sinc filter to a 3-lobed Windowed Jinc equivalent, which is better % suited to this style of image resampling. This typically happens when using % such a filter for images distortions. % % SPECIFIC FILTERS: % % Directly requesting 'Sinc', 'Jinc' function as a filter will force the use % of function without any windowing, or promotion for cylindrical usage. This % is not recommended, except by image processing experts, especially as part % of expert option filter function selection. % % Two forms of the 'Sinc' function are available: Sinc and SincFast. Sinc is % computed using the traditional sin(pi*x)/(pi*x); it is selected if the user % specifically specifies the use of a Sinc filter. SincFast uses highly % accurate (and fast) polynomial (low Q) and rational (high Q) approximations, % and will be used by default in most cases. % % The Lanczos filter is a special 3-lobed Sinc-windowed Sinc filter (promoted % to Jinc-windowed Jinc for cylindrical (Elliptical Weighted Average) use). 
% The Sinc version is the most popular windowed filter. % % LanczosSharp is a slightly sharpened (blur=0.9812505644269356 < 1) form of % the Lanczos filter, specifically designed for EWA distortion (as a % Jinc-Jinc); it can also be used as a slightly sharper orthogonal Lanczos % (Sinc-Sinc) filter. The chosen blur value comes as close as possible to % satisfying the following condition without changing the character of the % corresponding EWA filter: % % 'No-Op' Vertical and Horizontal Line Preservation Condition: Images with % only vertical or horizontal features are preserved when performing 'no-op" % with EWA distortion. % % The Lanczos2 and Lanczos2Sharp filters are 2-lobe versions of the Lanczos % filters. The 'sharp' version uses a blur factor of 0.9549963639785485, % again chosen because the resulting EWA filter comes as close as possible to % satisfying the above condition. % % Robidoux is another filter tuned for EWA. It is the Keys cubic filter % defined by B=(228 - 108 sqrt(2))/199. Robidoux satisfies the "'No-Op' % Vertical and Horizontal Line Preservation Condition" exactly, and it % moderately blurs high frequency 'pixel-hash' patterns under no-op. It turns % out to be close to both Mitchell and Lanczos2Sharp. For example, its first % crossing is at (36 sqrt(2) + 123)/(72 sqrt(2) + 47), almost the same as the % first crossing of Mitchell and Lanczos2Sharp. % % RodidouxSharp is a slightly sharper version of Rodidoux, some believe it % is too sharp. It is designed to minimize the maximum possible change in % a pixel value which is at one of the extremes (e.g., 0 or 255) under no-op % conditions. Amazingly Mitchell falls roughly between Rodidoux and % RodidouxSharp, though this seems to have been pure coincidence. % % 'EXPERT' OPTIONS: % % These artifact "defines" are not recommended for production use without % expert knowledge of resampling, filtering, and the effects they have on the % resulting resampled (resized or distorted) image. 
%
%  They can be used to override any and all filter defaults, and it is
%  recommended you make good use of "filter:verbose" to make sure that the
%  overall effect of your selection (before and after) is as expected.
%
%  "filter:verbose" controls whether to output the exact results of the
%  filter selections made, as well as plotting data for graphing the
%  resulting filter over the filter's support range.
%
%  "filter:filter" selects the main function associated with this filter
%  name, as the weighting function of the filter.  This can be used to
%  set a windowing function as a weighting function, for special
%  purposes, such as graphing.
%
%  If a "filter:window" operation has not been provided, a 'Box'
%  windowing function will be set to denote that no windowing function is
%  being used.
%
%  "filter:window" Select this windowing function for the filter.  While any
%  filter could be used as a windowing function, using the 'first lobe' of
%  that filter over the whole support window, using a non-windowing
%  function is not advisable.  If no weighting filter function is specified
%  a 'SincFast' filter is used.
%
%  "filter:lobes" Number of lobes to use for the Sinc/Jinc filter.  This is a
%  simpler method of setting filter support size that will correctly
%  handle the Sinc/Jinc switch for an operator's filtering requirements.
%  Only integers should be given.
%
%  "filter:support" Set the support size for filtering to the size given.
%  This is not recommended for Sinc/Jinc windowed filters (lobes should be
%  used instead).  This will override any 'filter:lobes' option.
%
%  "filter:win-support" Scale windowing function to this size instead.  This
%  causes the windowing (or self-windowing Lagrange filter) to act as if
%  the support window is much larger than what is actually supplied
%  to the calling operator.  The filter however is still clipped to the
%  real support size given, by the support range supplied to the caller.
%  If unset this will equal the normal filter support size.
% % "filter:blur" Scale the filter and support window by this amount. A value % of > 1 will generally result in a more blurred image with more ringing % effects, while a value <1 will sharpen the resulting image with more % aliasing effects. % % "filter:sigma" The sigma value to use for the Gaussian filter only. % Defaults to '1/2'. Using a different sigma effectively provides a % method of using the filter as a 'blur' convolution. Particularly when % using it for Distort. % % "filter:b" % "filter:c" Override the preset B,C values for a Cubic filter. % If only one of these are given it is assumes to be a 'Keys' type of % filter such that B+2C=1, where Keys 'alpha' value = C. % % Examples: % % Set a true un-windowed Sinc filter with 10 lobes (very slow): % -define filter:filter=Sinc % -define filter:lobes=8 % % Set an 8 lobe Lanczos (Sinc or Jinc) filter: % -filter Lanczos % -define filter:lobes=8 % % The format of the AcquireResizeFilter method is: % % ResizeFilter *AcquireResizeFilter(const Image *image, % const FilterType filter_type,const MagickBooleanType cylindrical, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o filter: the filter type, defining a preset filter, window and support. % The artifact settings listed above will override those selections. % % o blur: blur the filter by this amount, use 1.0 if unknown. Image % artifact "filter:blur" will override this API call usage, including any % internal change (such as for cylindrical usage). % % o radial: use a 1D orthogonal filter (Sinc) or 2D cylindrical (radial) % filter (Jinc). % % o exception: return any errors or warnings in this structure. 
%
*/
MagickPrivate ResizeFilter *AcquireResizeFilter(const Image *image,
  const FilterType filter,const MagickBooleanType cylindrical,
  ExceptionInfo *exception)
{
  const char
    *artifact;

  FilterType
    filter_type,
    window_type;

  double
    B,
    C,
    value;

  register ResizeFilter
    *resize_filter;

  /*
    Table Mapping given Filter, into Weighting and Windowing functions.
    A 'Box' windowing function means it is a simple non-windowed filter.
    An 'SincFast' filter function could be upgraded to a 'Jinc' filter if a
    "cylindrical" is requested, unless a 'Sinc' or 'SincFast' filter was
    specifically requested by the user.

    WARNING: The order of this table must match the order of the FilterType
    enumeration specified in "resample.h", or the filter names will not
    match the filter being setup.

    You can check filter setups with the "filter:verbose" expert setting.
  */
  static struct
  {
    FilterType
      filter,
      window;
  } const mapping[SentinelFilter] =
  {
    { UndefinedFilter,     BoxFilter      },  /* Undefined (default to Box)   */
    { PointFilter,         BoxFilter      },  /* SPECIAL: Nearest neighbour   */
    { BoxFilter,           BoxFilter      },  /* Box averaging filter         */
    { TriangleFilter,      BoxFilter      },  /* Linear interpolation filter  */
    { HermiteFilter,       BoxFilter      },  /* Hermite interpolation filter */
    { SincFastFilter,      HannFilter     },  /* Hann -- cosine-sinc          */
    { SincFastFilter,      HammingFilter  },  /* Hamming --      '' variation */
    { SincFastFilter,      BlackmanFilter },  /* Blackman -- 2*cosine-sinc    */
    { GaussianFilter,      BoxFilter      },  /* Gaussian blur filter         */
    { QuadraticFilter,     BoxFilter      },  /* Quadratic Gaussian approx    */
    { CubicFilter,         BoxFilter      },  /* General Cubic Filter, Spline */
    { CatromFilter,        BoxFilter      },  /* Cubic-Keys interpolator      */
    { MitchellFilter,      BoxFilter      },  /* 'Ideal' Cubic-Keys filter    */
    { JincFilter,          BoxFilter      },  /* Raw 3-lobed Jinc function    */
    { SincFilter,          BoxFilter      },  /* Raw 4-lobed Sinc function    */
    { SincFastFilter,      BoxFilter      },  /* Raw fast sinc ("Pade"-type)  */
    { SincFastFilter,      KaiserFilter   },  /* Kaiser -- square root-sinc   */
    { LanczosFilter,       WelchFilter    },  /* Welch -- parabolic (3 lobe)  */
    { SincFastFilter,      CubicFilter    },  /* Parzen -- cubic-sinc         */
    { SincFastFilter,      BohmanFilter   },  /* Bohman -- 2*cosine-sinc      */
    { SincFastFilter,      TriangleFilter },  /* Bartlett -- triangle-sinc    */
    { LagrangeFilter,      BoxFilter      },  /* Lagrange self-windowing      */
    { LanczosFilter,       LanczosFilter  },  /* Lanczos Sinc-Sinc filters    */
    { LanczosSharpFilter,  LanczosSharpFilter }, /* | these require           */
    { Lanczos2Filter,      Lanczos2Filter },     /* | special handling        */
    { Lanczos2SharpFilter, Lanczos2SharpFilter },
    { RobidouxFilter,      BoxFilter      },  /* Cubic Keys tuned for EWA     */
    { RobidouxSharpFilter, BoxFilter      },  /* Sharper Cubic Keys for EWA   */
    { LanczosFilter,       CosineFilter   },  /* Cosine window (3 lobes)      */
    { SplineFilter,        BoxFilter      },  /* Spline Cubic Filter          */
    { LanczosRadiusFilter, LanczosFilter  },  /* Lanczos with integer radius  */
    { CubicSplineFilter,   BoxFilter      },  /* CubicSpline (2/3/4 lobes)    */
  };
  /*
    Table mapping the filter/window from the above table to an actual
    function.  The default support size for that filter as a weighting
    function, the range to scale with to use that function as a sinc
    windowing function, (typ 1.0).

    Note that the filter_type -> function is 1 to 1 except for Sinc(),
    SincFast(), and CubicBC() functions, which may have multiple filter to
    function associations.

    See "filter:verbose" handling below for the function -> filter mapping.
  */
  static struct
  {
    double
      (*function)(const double,const ResizeFilter*),
      support, /* Default lobes/support size of the weighting filter. */
      scale,   /* Support when function used as a windowing function
                  Typically equal to the location of the first zero crossing. */
      B,C;     /* BC-spline coefficients, ignored if not a CubicBC filter. */
    ResizeWeightingFunctionType weightingFunctionType;
  } const filters[SentinelFilter] =
  {
    /*            .--- support window (if used as a Weighting Function)
                  |   .--- first crossing (if used as a Windowing Function)
                  |   |   .--- B value for Cubic Function
                  |   |   |   .---- C value for Cubic Function
                  |   |   |   |                                    */
    { Box,       0.5, 0.5, 0.0, 0.0, BoxWeightingFunction },      /* Undefined (default to Box)  */
    { Box,       0.0, 0.5, 0.0, 0.0, BoxWeightingFunction },      /* Point (special handling)    */
    { Box,       0.5, 0.5, 0.0, 0.0, BoxWeightingFunction },      /* Box                         */
    { Triangle,  1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Triangle                    */
    { CubicBC,   1.0, 1.0, 0.0, 0.0, CubicBCWeightingFunction },  /* Hermite (cubic  B=C=0)      */
    { Hann,      1.0, 1.0, 0.0, 0.0, HannWeightingFunction },     /* Hann, cosine window         */
    { Hamming,   1.0, 1.0, 0.0, 0.0, HammingWeightingFunction },  /* Hamming, '' variation       */
    { Blackman,  1.0, 1.0, 0.0, 0.0, BlackmanWeightingFunction }, /* Blackman, 2*cosine window   */
    { Gaussian,  2.0, 1.5, 0.0, 0.0, GaussianWeightingFunction }, /* Gaussian                    */
    { Quadratic, 1.5, 1.5, 0.0, 0.0, QuadraticWeightingFunction },/* Quadratic gaussian          */
    { CubicBC,   2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction },  /* General Cubic Filter        */
    { CubicBC,   2.0, 1.0, 0.0, 0.5, CubicBCWeightingFunction },  /* Catmull-Rom    (B=0,C=1/2)  */
    { CubicBC,   2.0, 8.0/7.0, 1./3., 1./3., CubicBCWeightingFunction }, /* Mitchell   (B=C=1/3) */
    { Jinc,      3.0, 1.2196698912665045, 0.0, 0.0, JincWeightingFunction }, /* Raw 3-lobed Jinc */
    { Sinc,      4.0, 1.0, 0.0, 0.0, SincWeightingFunction },     /* Raw 4-lobed Sinc            */
    { SincFast,  4.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Raw fast sinc ("Pade"-type) */
    { Kaiser,    1.0, 1.0, 0.0, 0.0, KaiserWeightingFunction },   /* Kaiser (square root window) */
    { Welch,     1.0, 1.0, 0.0, 0.0, WelchWeightingFunction },    /* Welch (parabolic window)    */
    { CubicBC,   2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction },  /* Parzen (B-Spline window)    */
    { Bohman,    1.0, 1.0, 0.0, 0.0, BohmanWeightingFunction },   /* Bohman, 2*Cosine window     */
    { Triangle,  1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Bartlett (triangle window)  */
    { Lagrange,  2.0, 1.0, 0.0, 0.0, LagrangeWeightingFunction }, /* Lagrange sinc approximation */
    { SincFast,  3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 3-lobed Sinc-Sinc  */
    { SincFast,  3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Sharpened          */
    { SincFast,  2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 2-lobed            */
    { SincFast,  2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos2, sharpened         */
    /* Robidoux: Keys cubic close to Lanczos2D sharpened */
    { CubicBC,   2.0, 1.1685777620836932, 0.37821575509399867, 0.31089212245300067,
                                     CubicBCWeightingFunction },
    /* RobidouxSharp: Sharper version of Robidoux */
    { CubicBC,   2.0, 1.105822933719019, 0.2620145123990142, 0.3689927438004929,
                                     CubicBCWeightingFunction },
    { Cosine,    1.0, 1.0, 0.0, 0.0, CosineWeightingFunction },   /* Low level cosine window     */
    { CubicBC,   2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction },  /* Cubic B-Spline (B=1,C=0)    */
    { SincFast,  3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Integer Radius     */
    { CubicSpline,2.0, 0.5, 0.0, 0.0, BoxWeightingFunction },     /* Spline Lobes 2-lobed        */
  };
  /*
    The known zero crossings of the Jinc() or more accurately the Jinc(x*PI)
    function being used as a filter.  It is used by the "filter:lobes" expert
    setting and for 'lobes' for Jinc functions in the previous table.  This
    way users do not have to deal with the highly irrational lobe sizes of the
    Jinc filter.

    Values taken from
    http://cose.math.bas.bg/webMathematica/webComputing/BesselZeros.jsp
    using Jv-function with v=1, then dividing by PI.
  */
  static double
    jinc_zeros[16] =
    {
      1.2196698912665045,
      2.2331305943815286,
      3.2383154841662362,
      4.2410628637960699,
      5.2427643768701817,
      6.2439216898644877,
      7.2447598687199570,
      8.2453949139520427,
      9.2458926849494673,
      10.246293348754916,
      11.246622794877883,
      12.246898461138105,
      13.247132522181061,
      14.247333735806849,
      15.247508563037300,
      16.247661874700962
    };

  /*
    Allocate resize filter.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(UndefinedFilter < filter && filter < SentinelFilter);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  resize_filter=(ResizeFilter *) AcquireCriticalMemory(sizeof(*resize_filter));
  (void) ResetMagickMemory(resize_filter,0,sizeof(*resize_filter));
  /*
    Defaults for the requested filter.
  */
  filter_type=mapping[filter].filter;
  window_type=mapping[filter].window;
  resize_filter->blur=1.0;
  /* Promote 1D Windowed Sinc Filters to a 2D Windowed Jinc filters */
  if ( cylindrical != MagickFalse && (filter_type == SincFastFilter) &&
      (filter != SincFastFilter))
    filter_type=JincFilter;  /* 1D Windowed Sinc => 2D Windowed Jinc filters */

  /* Expert filter setting override */
  /* NOTE(review): IsStringTrue() tests for "true"-like strings; a filter
     name such as "Sinc" would evaluate false here -- confirm this is the
     intended gate for the "filter:filter" artifact. */
  artifact=GetImageArtifact(image,"filter:filter");
  if (IsStringTrue(artifact) != MagickFalse)
    {
      ssize_t
        option;

      option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
      if ((UndefinedFilter < option) && (option < SentinelFilter))
        { /* Raw filter request - no window function. */
          filter_type=(FilterType) option;
          window_type=BoxFilter;
        }
      /*
        Filter override with a specific window function.
      */
      artifact=GetImageArtifact(image,"filter:window");
      if (artifact != (const char *) NULL)
        {
          option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
          if ((UndefinedFilter < option) && (option < SentinelFilter))
            window_type=(FilterType) option;
        }
    }
  else
    {
      /*
        Window specified, but no filter function?  Assume Sinc/Jinc.
      */
      artifact=GetImageArtifact(image,"filter:window");
      if (artifact != (const char *) NULL)
        {
          ssize_t
            option;

          option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
          if ((UndefinedFilter < option) && (option < SentinelFilter))
            {
              filter_type= cylindrical != MagickFalse ? JincFilter
                                                      : SincFastFilter;
              window_type=(FilterType) option;
            }
        }
    }
  /*
    Assign the real functions to use for the filters selected.
  */
  resize_filter->filter=filters[filter_type].function;
  resize_filter->support=filters[filter_type].support;
  resize_filter->filterWeightingType=filters[filter_type].weightingFunctionType;
  resize_filter->window=filters[window_type].function;
  resize_filter->windowWeightingType=filters[window_type].weightingFunctionType;
  resize_filter->scale=filters[window_type].scale;
  resize_filter->signature=MagickCoreSignature;

  /* Filter Modifications for orthogonal/cylindrical usage */
  if (cylindrical != MagickFalse)
    switch (filter_type)
    {
      case BoxFilter:
        /* Support for Cylindrical Box should be sqrt(2)/2 */
        resize_filter->support=(double) MagickSQ1_2;
        break;
      case LanczosFilter:
      case LanczosSharpFilter:
      case Lanczos2Filter:
      case Lanczos2SharpFilter:
      case LanczosRadiusFilter:
        resize_filter->filter=filters[JincFilter].function;
        resize_filter->window=filters[JincFilter].function;
        resize_filter->scale=filters[JincFilter].scale;
        /* number of lobes (support window size) remain unchanged */
        break;
      default:
        break;
    }
  /* Global Sharpening (regardless of orthogonal/cylindrical) */
  switch (filter_type)
  {
    case LanczosSharpFilter:
      resize_filter->blur *= 0.9812505644269356;
      break;
    case Lanczos2SharpFilter:
      resize_filter->blur *= 0.9549963639785485;
      break;
    /* case LanczosRadius: blur adjust is done after lobes */
    default:
      break;
  }

  /*
    Expert Option Modifications.
  */

  /* User Gaussian Sigma Override - no support change */
  if ((resize_filter->filter == Gaussian) ||
      (resize_filter->window == Gaussian) ) {
    value=0.5;    /* gaussian sigma default, half pixel */
    artifact=GetImageArtifact(image,"filter:sigma");
    if (artifact != (const char *) NULL)
      value=StringToDouble(artifact,(char **) NULL);
    /* Define coefficients for Gaussian */
    resize_filter->coefficient[0]=value;                 /* note sigma too */
    resize_filter->coefficient[1]=PerceptibleReciprocal(2.0*value*value); /* sigma scaling */
    resize_filter->coefficient[2]=PerceptibleReciprocal(Magick2PI*value*value);
       /* normalization - not actually needed or used! */
    if ( value > 0.5 )
      resize_filter->support *= 2*value;  /* increase support linearly */
  }

  /* User Kaiser Alpha Override - no support change */
  if ((resize_filter->filter == Kaiser) ||
      (resize_filter->window == Kaiser) ) {
    value=6.5; /* default beta value for Kaiser bessel windowing function */
    artifact=GetImageArtifact(image,"filter:alpha");  /* FUTURE: deprecate */
    if (artifact != (const char *) NULL)
      value=StringToDouble(artifact,(char **) NULL);
    artifact=GetImageArtifact(image,"filter:kaiser-beta");
    if (artifact != (const char *) NULL)
      value=StringToDouble(artifact,(char **) NULL);
    artifact=GetImageArtifact(image,"filter:kaiser-alpha");
    if (artifact != (const char *) NULL)
      value=StringToDouble(artifact,(char **) NULL)*MagickPI;
    /* Define coefficients for Kaiser Windowing Function */
    resize_filter->coefficient[0]=value;         /* alpha */
    resize_filter->coefficient[1]=PerceptibleReciprocal(I0(value));
      /* normalization */
  }

  /* Support Overrides */
  artifact=GetImageArtifact(image,"filter:lobes");
  if (artifact != (const char *) NULL)
    {
      ssize_t
        lobes;

      lobes=(ssize_t) StringToLong(artifact);
      if (lobes < 1)
        lobes=1;
      resize_filter->support=(double) lobes;
    }
  if (resize_filter->filter == Jinc)
    {
      /*
        Convert a Jinc function lobes value to a real support value.
      */
      if (resize_filter->support > 16)
        resize_filter->support=jinc_zeros[15];  /* largest entry in table */
      else
        resize_filter->support=jinc_zeros[((long) resize_filter->support)-1];
      /*
        Blur this filter so support is an integer value (lobes dependent).
      */
      if (filter_type == LanczosRadiusFilter)
        resize_filter->blur*=floor(resize_filter->support)/
          resize_filter->support;
    }
  /*
    Expert blur override.
  */
  artifact=GetImageArtifact(image,"filter:blur");
  if (artifact != (const char *) NULL)
    resize_filter->blur*=StringToDouble(artifact,(char **) NULL);
  if (resize_filter->blur < MagickEpsilon)
    resize_filter->blur=(double) MagickEpsilon;

  /*
    Expert override of the support setting.
  */
  artifact=GetImageArtifact(image,"filter:support");
  if (artifact != (const char *) NULL)
    resize_filter->support=fabs(StringToDouble(artifact,(char **) NULL));
  /*
    Scale windowing function separately to the support 'clipping' window
    that calling operator is planning to actually use. (Expert override)
  */
  resize_filter->window_support=resize_filter->support; /* default */
  artifact=GetImageArtifact(image,"filter:win-support");
  if (artifact != (const char *) NULL)
    resize_filter->window_support=fabs(StringToDouble(artifact,(char **) NULL));
  /*
    Adjust window function scaling to match windowing support for weighting
    function.  This avoids a division on every filter call.
  */
  resize_filter->scale/=resize_filter->window_support;
  /*
   * Set Cubic Spline B,C values, calculate Cubic coefficients.
   */
  B=0.0;
  C=0.0;
  if ((resize_filter->filter == CubicBC) ||
      (resize_filter->window == CubicBC) )
    {
      B=filters[filter_type].B;
      C=filters[filter_type].C;
      if (filters[window_type].function == CubicBC)
        {
          B=filters[window_type].B;
          C=filters[window_type].C;
        }
      artifact=GetImageArtifact(image,"filter:b");
      if (artifact != (const char *) NULL)
        {
          B=StringToDouble(artifact,(char **) NULL);
          C=(1.0-B)/2.0; /* Calculate C to get a Keys cubic filter. */
          artifact=GetImageArtifact(image,"filter:c"); /* user C override */
          if (artifact != (const char *) NULL)
            C=StringToDouble(artifact,(char **) NULL);
        }
      else
        {
          artifact=GetImageArtifact(image,"filter:c");
          if (artifact != (const char *) NULL)
            {
              C=StringToDouble(artifact,(char **) NULL);
              B=1.0-2.0*C; /* Calculate B to get a Keys cubic filter. */
            }
        }
      {
        const double
          twoB = B+B;

        /*
          Convert B,C values into Cubic Coefficients. See CubicBC().
        */
        resize_filter->coefficient[0]=1.0-(1.0/3.0)*B;
        resize_filter->coefficient[1]=-3.0+twoB+C;
        resize_filter->coefficient[2]=2.0-1.5*B-C;
        resize_filter->coefficient[3]=(4.0/3.0)*B+4.0*C;
        resize_filter->coefficient[4]=-8.0*C-twoB;
        resize_filter->coefficient[5]=B+5.0*C;
        resize_filter->coefficient[6]=(-1.0/6.0)*B-C;
      }
    }

  /*
    Expert Option Request for verbose details of the resulting filter.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp master
  {
#endif
    if (IsStringTrue(GetImageArtifact(image,"filter:verbose")) != MagickFalse)
      {
        double
          support,
          x;

        /*
          Set the weighting function properly when the weighting function
          may not exactly match the filter of the same name.  EG: a Point
          filter is really uses a Box weighting function with a different
          support than is typically used.
        */
        if (resize_filter->filter == Box)       filter_type=BoxFilter;
        if (resize_filter->filter == Sinc)      filter_type=SincFilter;
        if (resize_filter->filter == SincFast)  filter_type=SincFastFilter;
        if (resize_filter->filter == Jinc)      filter_type=JincFilter;
        if (resize_filter->filter == CubicBC)   filter_type=CubicFilter;
        if (resize_filter->window == Box)       window_type=BoxFilter;
        if (resize_filter->window == Sinc)      window_type=SincFilter;
        if (resize_filter->window == SincFast)  window_type=SincFastFilter;
        if (resize_filter->window == Jinc)      window_type=JincFilter;
        if (resize_filter->window == CubicBC)   window_type=CubicFilter;
        /*
          Report Filter Details.
        */
        support=GetResizeFilterSupport(resize_filter);  /* practical_support */
        (void) FormatLocaleFile(stdout,
          "# Resampling Filter (for graphing)\n#\n");
        (void) FormatLocaleFile(stdout,"# filter = %s\n",
          CommandOptionToMnemonic(MagickFilterOptions,filter_type));
        (void) FormatLocaleFile(stdout,"# window = %s\n",
          CommandOptionToMnemonic(MagickFilterOptions,window_type));
        (void) FormatLocaleFile(stdout,"# support = %.*g\n",
          GetMagickPrecision(),(double) resize_filter->support);
        (void) FormatLocaleFile(stdout,"# window-support = %.*g\n",
          GetMagickPrecision(),(double) resize_filter->window_support);
        (void) FormatLocaleFile(stdout,"# scale-blur = %.*g\n",
          GetMagickPrecision(),(double)resize_filter->blur);
        if ((filter_type == GaussianFilter) || (window_type == GaussianFilter))
          (void) FormatLocaleFile(stdout,"# gaussian-sigma = %.*g\n",
            GetMagickPrecision(),(double)resize_filter->coefficient[0]);
        if ( filter_type == KaiserFilter || window_type == KaiserFilter )
          (void) FormatLocaleFile(stdout,"# kaiser-beta = %.*g\n",
            GetMagickPrecision(),(double)resize_filter->coefficient[0]);
        (void) FormatLocaleFile(stdout,"# practical-support = %.*g\n",
          GetMagickPrecision(), (double)support);
        if ( filter_type == CubicFilter || window_type == CubicFilter )
          (void) FormatLocaleFile(stdout,"# B,C = %.*g,%.*g\n",
            GetMagickPrecision(),(double)B,
            GetMagickPrecision(),(double)C);
        (void) FormatLocaleFile(stdout,"\n");
        /*
          Output values of resulting filter graph -- for graphing filter
          result.
        */
        for (x=0.0; x <= support; x+=0.01f)
          (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",x,
            GetMagickPrecision(),(double)
            GetResizeFilterWeight(resize_filter,x));
        /*
          A final value so gnuplot can graph the 'stop' properly.
        */
        (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",support,
          GetMagickPrecision(),0.0);
      }
    /* Output the above once only for each image - remove setting */
    (void) DeleteImageArtifact((Image *) image,"filter:verbose");
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  }
#endif
  return(resize_filter);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A d a p t i v e R e s i z e I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AdaptiveResizeImage() adaptively resize image with pixel resampling.
%
%  This is shortcut function for a fast interpolative resize using mesh
%  interpolation.  It works well for small resizes of less than +/- 50%
%  of the original image size.  For larger resizing on images a full
%  filtered and slower resize function should be used instead.
%
%  The format of the AdaptiveResizeImage method is:
%
%      Image *AdaptiveResizeImage(const Image *image,const size_t columns,
%        const size_t rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the resized image.
%
%    o rows: the number of rows in the resized image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  Image
    *resize_image;

  /* Thin wrapper: mesh-interpolated resize; see InterpolativeResizeImage(). */
  resize_image=InterpolativeResizeImage(image,columns,rows,MeshInterpolatePixel,
    exception);
  return(resize_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   B e s s e l O r d e r O n e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BesselOrderOne() computes the Bessel function of x of the first kind of
%  order 1.  This is used to create the Jinc() filter function below.
%
%  Reduce x to |x| since j1(x)= -j1(-x), and for x in (0,8]
%
%     j1(x) = x*J1(x)   (J1() below is a rational approximation in x*x)
%
%  For x in (8,inf)
%
%     j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
%
%  where x1 = x-3*pi/4. Compute sin(x1) and cos(x1) as follows:
%
%     cos(x1) =  cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
%             =  1/sqrt(2) * (sin(x) - cos(x))
%     sin(x1) =  sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
%             = -1/sqrt(2) * (sin(x) + cos(x))
%
%  The format of the BesselOrderOne method is:
%
%      double BesselOrderOne(double x)
%
%  A description of each parameter follows:
%
%    o x: double value.
%
*/

#undef I0
static double I0(double x)
{
  double
    sum,
    t,
    y;

  register ssize_t
    i;

  /*
    Zeroth order modified Bessel function of the first kind: power-series
    sum, terminated when the next term drops below MagickEpsilon.
    Used to normalize the Kaiser window in AcquireResizeFilter().
  */
  sum=1.0;
  y=x*x/4.0;
  t=y;
  for (i=2; t > MagickEpsilon; i++)
  {
    sum+=t;
    t*=y/((double) i*i);
  }
  return(sum);
}

#undef J1
static double J1(double x)
{
  double
    p,
    q;

  register ssize_t
    i;

  /*
    Rational (polynomial quotient) approximation of J1(x)/x for small x,
    evaluated by Horner's rule in x*x.
  */
  static const double
    Pone[] =
    {
       0.581199354001606143928050809e+21,
      -0.6672106568924916298020941484e+20,
       0.2316433580634002297931815435e+19,
      -0.3588817569910106050743641413e+17,
       0.2908795263834775409737601689e+15,
      -0.1322983480332126453125473247e+13,
       0.3413234182301700539091292655e+10,
      -0.4695753530642995859767162166e+7,
       0.270112271089232341485679099e+4
    },
    Qone[] =
    {
      0.11623987080032122878585294e+22,
      0.1185770712190320999837113348e+20,
      0.6092061398917521746105196863e+17,
      0.2081661221307607351240184229e+15,
      0.5243710262167649715406728642e+12,
      0.1013863514358673989967045588e+10,
      0.1501793594998585505921097578e+7,
      0.1606931573481487801970916749e+4,
      0.1e+1
    };

  p=Pone[8];
  q=Qone[8];
  for (i=7; i >= 0; i--)
  {
    p=p*x*x+Pone[i];
    q=q*x*x+Qone[i];
  }
  return(p/q);
}

#undef P1
static double P1(double x)
{
  double
    p,
    q;

  register ssize_t
    i;

  /*
    Rational approximation of the modulus term p1(x) for the large-argument
    (x > 8) asymptotic expansion; evaluated by Horner's rule in (8/x)^2.
  */
  static const double
    Pone[] =
    {
      0.352246649133679798341724373e+5,
      0.62758845247161281269005675e+5,
      0.313539631109159574238669888e+5,
      0.49854832060594338434500455e+4,
      0.2111529182853962382105718e+3,
      0.12571716929145341558495e+1
    },
    Qone[] =
    {
      0.352246649133679798068390431e+5,
      0.626943469593560511888833731e+5,
      0.312404063819041039923015703e+5,
      0.4930396490181088979386097e+4,
      0.2030775189134759322293574e+3,
      0.1e+1
    };

  p=Pone[5];
  q=Qone[5];
  for (i=4; i >= 0; i--)
  {
    p=p*(8.0/x)*(8.0/x)+Pone[i];
    q=q*(8.0/x)*(8.0/x)+Qone[i];
  }
  return(p/q);
}

#undef Q1
static double Q1(double x)
{
  double
    p,
    q;

  register ssize_t
    i;

  /*
    Rational approximation of the phase term q1(x) for the large-argument
    (x > 8) asymptotic expansion; evaluated by Horner's rule in (8/x)^2.
  */
  static const double
    Pone[] =
    {
      0.3511751914303552822533318e+3,
      0.7210391804904475039280863e+3,
      0.4259873011654442389886993e+3,
      0.831898957673850827325226e+2,
      0.45681716295512267064405e+1,
      0.3532840052740123642735e-1
    },
    Qone[] =
    {
      0.74917374171809127714519505e+4,
      0.154141773392650970499848051e+5,
      0.91522317015169922705904727e+4,
      0.18111867005523513506724158e+4,
      0.1038187585462133728776636e+3,
      0.1e+1
    };

  p=Pone[5];
  q=Qone[5];
  for (i=4; i >= 0; i--)
  {
    p=p*(8.0/x)*(8.0/x)+Pone[i];
    q=q*(8.0/x)*(8.0/x)+Qone[i];
  }
  return(p/q);
}

static double BesselOrderOne(double x)
{
  double
    p,
    q;

  /*
    First-kind Bessel function of order one:  odd symmetry is handled by
    saving the signed argument in p; the small-argument (|x| < 8) rational
    approximation J1() or the large-argument asymptotic form (P1/Q1) is
    then applied to |x|.
  */
  if (x == 0.0)
    return(0.0);
  p=x;
  if (x < 0.0)
    x=(-x);
  if (x < 8.0)
    return(p*J1(x));
  q=sqrt((double) (2.0/(MagickPI*x)))*(P1(x)*(1.0/sqrt(2.0)*(sin((double) x)-
    cos((double) x)))-8.0/x*Q1(x)*(-1.0/sqrt(2.0)*(sin((double) x)+
    cos((double) x))));
  if (p < 0.0)
    q=(-q);
  return(q);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y R e s i z e F i l t e r                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyResizeFilter() destroys the resize filter.
%
%  The format of the DestroyResizeFilter method is:
%
%      ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
%
%  A description of each parameter follows:
%
%    o resize_filter: the resize filter.
%
*/
MagickPrivate ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  /* Invalidate the signature before freeing to catch use-after-free. */
  resize_filter->signature=(~MagickCoreSignature);
  resize_filter=(ResizeFilter *) RelinquishMagickMemory(resize_filter);
  return(resize_filter);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t R e s i z e F i l t e r S u p p o r t                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetResizeFilterSupport() returns the current support window size for this
%  filter.  Note that this may have been enlarged by filter:blur factor.
%
%  The format of the GetResizeFilterSupport method is:
%
%      double GetResizeFilterSupport(const ResizeFilter *resize_filter)
%
%  A description of each parameter follows:
%
%    o filter: Image filter to use.
%
*/

/* Returns the filter's coefficient array (sigma/alpha or cubic terms). */
MagickPrivate double *GetResizeFilterCoefficient(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return((double *) resize_filter->coefficient);
}

/* Returns the blur (scaling) factor applied to the filter. */
MagickPrivate double GetResizeFilterBlur(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->blur);
}

/* Returns the precomputed windowing-function scale factor. */
MagickPrivate double GetResizeFilterScale(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->scale);
}

/* Returns the support size used by the windowing function. */
MagickPrivate double GetResizeFilterWindowSupport(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->window_support);
}

/* Returns the weighting-function type of the main filter function. */
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->filterWeightingType);
}

/* Returns the weighting-function type of the windowing function. */
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWindowWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->windowWeightingType);
}

/* Returns the practical support: declared support scaled by the blur. */
MagickPrivate double GetResizeFilterSupport(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->support*resize_filter->blur);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t R e s i z e F i l t e r W e i g h t                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetResizeFilterWeight evaluates the specified resize filter at the point x
%  which usually lies between zero and the filter's current 'support' and
%  returns the weight of the filter function at that point.
%
%  The format of the GetResizeFilterWeight method is:
%
%      double GetResizeFilterWeight(const ResizeFilter *resize_filter,
%        const double x)
%
%  A description of each parameter follows:
%
%    o filter: the filter type.
%
%    o x: the point.
%
*/
MagickPrivate double GetResizeFilterWeight(const ResizeFilter *resize_filter,
  const double x)
{
  double
    scale,
    weight,
    x_blur;

  /*
    Windowing function - scale the weighting filter by this amount.
*/ assert(resize_filter != (ResizeFilter *) NULL); assert(resize_filter->signature == MagickCoreSignature); x_blur=fabs((double) x)/resize_filter->blur; /* X offset with blur scaling */ if ((resize_filter->window_support < MagickEpsilon) || (resize_filter->window == Box)) scale=1.0; /* Point or Box Filter -- avoid division by zero */ else { scale=resize_filter->scale; scale=resize_filter->window(x_blur*scale,resize_filter); } weight=scale*resize_filter->filter(x_blur,resize_filter); return(weight); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n t e r p o l a t i v e R e s i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InterpolativeResizeImage() resizes an image using the specified % interpolation method. % % The format of the InterpolativeResizeImage method is: % % Image *InterpolativeResizeImage(const Image *image,const size_t columns, % const size_t rows,const PixelInterpolateMethod method, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the resized image. % % o rows: the number of rows in the resized image. % % o method: the pixel interpolation method. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *InterpolativeResizeImage(const Image *image, const size_t columns,const size_t rows,const PixelInterpolateMethod method, ExceptionInfo *exception) { #define InterpolativeResizeImageTag "Resize/Image" CacheView *image_view, *resize_view; Image *resize_image; MagickBooleanType status; MagickOffsetType progress; PointInfo scale; ssize_t y; /* Interpolatively resize image. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); resize_image=CloneImage(image,columns,rows,MagickTrue,exception); if (resize_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(resize_image,DirectClass,exception) == MagickFalse) { resize_image=DestroyImage(resize_image); return((Image *) NULL); } status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); resize_view=AcquireAuthenticCacheView(resize_image,exception); scale.x=(double) image->columns/resize_image->columns; scale.y=(double) image->rows/resize_image->rows; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,resize_image,resize_image->rows,1) #endif for (y=0; y < (ssize_t) resize_image->rows; y++) { PointInfo offset; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1, exception); if (q == (Quantum *) NULL) continue; offset.y=((double) y+0.5)*scale.y-0.5; for (x=0; x < (ssize_t) resize_image->columns; x++) { register ssize_t i; if (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(resize_image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel; PixelTrait resize_traits, traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); resize_traits=GetPixelChannelTraits(resize_image,channel); if ((traits == 
UndefinedPixelTrait) || (resize_traits == UndefinedPixelTrait)) continue; offset.x=((double) x+0.5)*scale.x-0.5; status=InterpolatePixelChannels(image,image_view,resize_image,method, offset.x,offset.y,q,exception); } q+=GetPixelChannels(resize_image); } if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_InterpolativeResizeImage) #endif proceed=SetImageProgress(image,InterpolativeResizeImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } resize_view=DestroyCacheView(resize_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) resize_image=DestroyImage(resize_image); return(resize_image); } #if defined(MAGICKCORE_LQR_DELEGATE) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i q u i d R e s c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LiquidRescaleImage() rescales image with seam carving. % % The format of the LiquidRescaleImage method is: % % Image *LiquidRescaleImage(const Image *image,const size_t columns, % const size_t rows,const double delta_x,const double rigidity, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the rescaled image. % % o rows: the number of rows in the rescaled image. % % o delta_x: maximum seam transversal step (0 means straight seams). % % o rigidity: introduce a bias for non-straight seams (typically 0). % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *LiquidRescaleImage(const Image *image,const size_t columns,
  const size_t rows,const double delta_x,const double rigidity,
  ExceptionInfo *exception)
{
#define LiquidRescaleImageTag  "Rescale/Image"

  CacheView
    *image_view,
    *rescale_view;

  gfloat
    *packet,
    *pixels;

  Image
    *rescale_image;

  int
    x_offset,
    y_offset;

  LqrCarver
    *carver;

  LqrRetVal
    lqr_status;

  MagickBooleanType
    status;

  MemoryInfo
    *pixel_info;

  register gfloat
    *q;

  ssize_t
    y;

  /*
    Liquid rescale image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  /*
    Seam carving needs more than two rows/columns; fall back to a normal
    resize for degenerate targets.
  */
  if ((columns <= 2) || (rows <= 2))
    return(ResizeImage(image,columns,rows,image->filter,exception));
  /*
    Flatten the pixel cache into a 32-bit float buffer for liblqr.
  */
  pixel_info=AcquireVirtualMemory(image->columns,image->rows*MaxPixelChannels*
    sizeof(*pixels));
  if (pixel_info == (MemoryInfo *) NULL)
    return((Image *) NULL);
  pixels=(gfloat *) GetVirtualMemoryBlob(pixel_info);
  status=MagickTrue;
  q=pixels;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /* Quantum values are normalized to [0,1] floats for the carver. */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
        *q++=QuantumScale*p[i];
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  carver=lqr_carver_new_ext(pixels,(int) image->columns,(int) image->rows,
    (int) GetPixelChannels(image),LQR_COLDEPTH_32F);
  if (carver == (LqrCarver *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Carve seams; the carver result dimensions (not the requested ones) size
    the output image below.
  */
  lqr_carver_set_preserve_input_image(carver);
  lqr_status=lqr_carver_init(carver,(int) delta_x,rigidity);
  lqr_status=lqr_carver_resize(carver,(int) columns,(int) rows);
  (void) lqr_status;
  rescale_image=CloneImage(image,lqr_carver_get_width(carver),
    lqr_carver_get_height(carver),MagickTrue,exception);
  if (rescale_image == (Image *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(rescale_image,DirectClass,exception) == MagickFalse)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      rescale_image=DestroyImage(rescale_image);
      return((Image *) NULL);
    }
  /*
    Scan carved pixels back into the rescaled image, one pixel at a time.
  */
  rescale_view=AcquireAuthenticCacheView(rescale_image,exception);
  (void) lqr_carver_scan_reset(carver);
  while (lqr_carver_scan_ext(carver,&x_offset,&y_offset,(void **) &packet) != 0)
  {
    register Quantum
      *magick_restrict p;

    register ssize_t
      i;

    p=QueueCacheViewAuthenticPixels(rescale_view,x_offset,y_offset,1,1,
      exception);
    if (p == (Quantum *) NULL)
      break;
    for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
    {
      PixelChannel
        channel;

      PixelTrait
        rescale_traits,
        traits;

      channel=GetPixelChannelChannel(image,i);
      traits=GetPixelChannelTraits(image,channel);
      rescale_traits=GetPixelChannelTraits(rescale_image,channel);
      if ((traits == UndefinedPixelTrait) ||
          (rescale_traits == UndefinedPixelTrait))
        continue;
      /* Scale the carver's [0,1] float back to the quantum range. */
      SetPixelChannel(rescale_image,channel,ClampToQuantum(QuantumRange*
        packet[i]),p);
    }
    if (SyncCacheViewAuthenticPixels(rescale_view,exception) == MagickFalse)
      break;
  }
  rescale_view=DestroyCacheView(rescale_view);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  lqr_carver_destroy(carver);
  return(rescale_image);
}
#else
/*
  Stub used when ImageMagick is built without the liblqr delegate: raises
  MissingDelegateError and returns NULL.
*/
MagickExport Image *LiquidRescaleImage(const Image *image,
  const size_t magick_unused(columns),const size_t magick_unused(rows),
  const double magick_unused(delta_x),const double magick_unused(rigidity),
  ExceptionInfo *exception)
{
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError,
    "DelegateLibrarySupportNotBuiltIn","'%s' (LQR)",image->filename);
  return((Image *) NULL);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g n i f y I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagnifyImage() doubles the size of the image with a pixel art scaling
%  algorithm.
%
%  The format of the MagnifyImage method is:
%
%      Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
{
#define MagnifyImageTag  "Magnify/Image"

  CacheView
    *image_view,
    *magnify_view;

  Image
    *magnify_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize magnified image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  magnify_image=CloneImage(image,2*image->columns,2*image->rows,MagickTrue,
    exception);
  if (magnify_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Magnify image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  magnify_view=AcquireAuthenticCacheView(magnify_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,magnify_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Each source row expands into two destination rows.
    */
    q=QueueCacheViewAuthenticPixels(magnify_view,0,2*y,magnify_image->columns,2,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Magnify this row of pixels.
    */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        intensity[9];

      register const Quantum
        *magick_restrict p;

      register Quantum
        *magick_restrict r;

      register ssize_t
        i;

      size_t
        channels;

      /*
        Read the 3x3 neighborhood around the source pixel; p[4*channels] is
        the center pixel.
      */
      p=GetCacheViewVirtualPixels(image_view,x-1,y-1,3,3,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      channels=GetPixelChannels(image);
      for (i=0; i < 9; i++)
        intensity[i]=GetPixelIntensity(image,p+i*channels);
      r=q;
      if ((fabs(intensity[1]-intensity[7]) < MagickEpsilon) ||
          (fabs(intensity[3]-intensity[5]) < MagickEpsilon))
        {
          /*
            Clone center pixel into the whole 2x2 destination cell.
          */
          for (i=0; i < (ssize_t) channels; i++)
            r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image);
          for (i=0; i < (ssize_t) channels; i++)
            r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image)*(magnify_image->columns-1);
          for (i=0; i < (ssize_t) channels; i++)
            r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image);
          for (i=0; i < (ssize_t) channels; i++)
            r[i]=p[4*channels+i];
        }
      else
        {
          /*
            Selectively clone pixel: each corner of the 2x2 cell copies a
            matching neighbor when the adjacent intensities agree.
          */
          if (fabs(intensity[1]-intensity[3]) < MagickEpsilon)
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[3*channels+i];
          else
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image);
          if (fabs(intensity[1]-intensity[5]) < MagickEpsilon)
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[5*channels+i];
          else
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image)*(magnify_image->columns-1);
          if (fabs(intensity[3]-intensity[7]) < MagickEpsilon)
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[3*channels+i];
          else
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[4*channels+i];
          r+=GetPixelChannels(magnify_image);
          if (fabs(intensity[5]-intensity[7]) < MagickEpsilon)
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[5*channels+i];
          else
            for (i=0; i < (ssize_t) channels; i++)
              r[i]=p[4*channels+i];
        }
      q+=2*GetPixelChannels(magnify_image);
    }
    if (SyncCacheViewAuthenticPixels(magnify_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MagnifyImage)
#endif
        proceed=SetImageProgress(image,MagnifyImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  magnify_view=DestroyCacheView(magnify_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    magnify_image=DestroyImage(magnify_image);
  return(magnify_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M i n i f y I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MinifyImage() is a convenience method that scales an image proportionally
%  to half its size.
%
%  The format of the MinifyImage method is:
%
%      Image *MinifyImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
% % o exception: return any errors or warnings in this structure. % */ MagickExport Image *MinifyImage(const Image *image,ExceptionInfo *exception) { Image *minify_image; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); minify_image=ResizeImage(image,image->columns/2,image->rows/2,SplineFilter, exception); return(minify_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s a m p l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResampleImage() resize image in terms of its pixel size, so that when % displayed at the given resolution it will be the same size in terms of % real world units as the original image at the original resolution. % % The format of the ResampleImage method is: % % Image *ResampleImage(Image *image,const double x_resolution, % const double y_resolution,const FilterType filter, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image to be resized to fit the given resolution. % % o x_resolution: the new image x resolution. % % o y_resolution: the new image y resolution. % % o filter: Image filter to use. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ResampleImage(const Image *image,const double x_resolution, const double y_resolution,const FilterType filter,ExceptionInfo *exception) { #define ResampleImageTag "Resample/Image" Image *resample_image; size_t height, width; /* Initialize sampled image attributes. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=(size_t) (x_resolution*image->columns/(image->resolution.x == 0.0 ? 72.0 : image->resolution.x)+0.5); height=(size_t) (y_resolution*image->rows/(image->resolution.y == 0.0 ? 72.0 : image->resolution.y)+0.5); resample_image=ResizeImage(image,width,height,filter,exception); if (resample_image != (Image *) NULL) { resample_image->resolution.x=x_resolution; resample_image->resolution.y=y_resolution; } return(resample_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResizeImage() scales an image to the desired dimensions, using the given % filter (see AcquireFilterInfo()). % % If an undefined filter is given the filter defaults to Mitchell for a % colormapped image, a image with a matte channel, or if the image is % enlarged. Otherwise the filter defaults to a Lanczos. % % ResizeImage() was inspired by Paul Heckbert's "zoom" program. % % The format of the ResizeImage method is: % % Image *ResizeImage(Image *image,const size_t columns,const size_t rows, % const FilterType filter,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the scaled image. % % o rows: the number of rows in the scaled image. % % o filter: Image filter to use. % % o exception: return any errors or warnings in this structure. 
%
*/

/*
  A ContributionInfo record holds one source pixel's index and its filter
  weight when computing a single destination row or column.
*/
typedef struct _ContributionInfo
{
  double
    weight;

  ssize_t
    pixel;
} ContributionInfo;

/*
  Release the per-thread ContributionInfo buffers allocated by
  AcquireContributionThreadSet().  Safe to call on a partially initialized
  set: NULL slots are skipped.
*/
static ContributionInfo **DestroyContributionThreadSet(
  ContributionInfo **contribution)
{
  register ssize_t
    i;

  assert(contribution != (ContributionInfo **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (contribution[i] != (ContributionInfo *) NULL)
      contribution[i]=(ContributionInfo *) RelinquishAlignedMemory(
        contribution[i]);
  contribution=(ContributionInfo **) RelinquishMagickMemory(contribution);
  return(contribution);
}

/*
  Allocate one aligned ContributionInfo buffer of 'count' entries for each
  worker thread.  Returns NULL on failure (partial allocations released).
*/
static ContributionInfo **AcquireContributionThreadSet(const size_t count)
{
  register ssize_t
    i;

  ContributionInfo
    **contribution;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  contribution=(ContributionInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*contribution));
  if (contribution == (ContributionInfo **) NULL)
    return((ContributionInfo **) NULL);
  (void) ResetMagickMemory(contribution,0,number_threads*
    sizeof(*contribution));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    contribution[i]=(ContributionInfo *) MagickAssumeAligned(
      AcquireAlignedMemory(count,sizeof(**contribution)));
    if (contribution[i] == (ContributionInfo *) NULL)
      return(DestroyContributionThreadSet(contribution));
  }
  return(contribution);
}

/*
  HorizontalFilter() resizes 'image' into 'resize_image' along the x axis
  only: each destination column is a weighted combination of nearby source
  columns under the supplied resize filter.  'span' and 'offset' drive the
  shared progress monitor.  Returns MagickTrue on success.
*/
static MagickBooleanType HorizontalFilter(const ResizeFilter *resize_filter,
  const Image *image,Image *resize_image,const double x_factor,
  const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception)
{
#define ResizeImageTag  "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **magick_restrict contributions;

  MagickBooleanType
    status;

  double
    scale,
    support;

  ssize_t
    x;

  /*
    Apply filter to resize horizontally from image to resize image.
  */
  scale=MagickMax(1.0/x_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse)
    return(MagickFalse);
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point
        sampling.
      */
      support=(double) 0.5;
      scale=1.0;
    }
  contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
  if (contributions == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=PerceptibleReciprocal(scale);
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,resize_image,resize_image->columns,1)
#endif
  for (x=0; x < (ssize_t) resize_image->columns; x++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      bisect,
      density;

    register const Quantum
      *magick_restrict p;

    register ContributionInfo
      *magick_restrict contribution;

    register Quantum
      *magick_restrict q;

    register ssize_t
      y;

    ssize_t
      n,
      start,
      stop;

    if (status == MagickFalse)
      continue;
    /*
      Locate the window of source columns that contribute to column x and
      evaluate the filter weight of each.
    */
    bisect=(double) (x+0.5)/x_factor+MagickEpsilon;
    start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
    stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->columns);
    density=0.0;
    contribution=contributions[id];
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((double) (start+n)-bisect+0.5));
      density+=contribution[n].weight;
    }
    if (n == 0)
      continue;
    if ((density != 0.0) && (density != 1.0))
      {
        register ssize_t
          i;

        /*
          Normalize.
        */
        density=PerceptibleReciprocal(density);
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    p=GetCacheViewVirtualPixels(image_view,contribution[0].pixel,0,(size_t)
      (contribution[n-1].pixel-contribution[0].pixel+1),image->rows,exception);
    q=QueueCacheViewAuthenticPixels(resize_view,x,0,1,resize_image->rows,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (y=0; y < (ssize_t) resize_image->rows; y++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          resize_traits,
          traits;

        register ssize_t
          j;

        ssize_t
          k;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        if (((resize_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2)))
          {
            /*
              Copy channels without filtering: take the source pixel nearest
              the bisection point.
            */
            j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double)
              stop-1.0)+0.5);
            k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
              (contribution[j-start].pixel-contribution[0].pixel);
            SetPixelChannel(resize_image,channel,
              p[k*GetPixelChannels(image)+i],q);
            continue;
          }
        pixel=0.0;
        if ((resize_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (j=0; j < n; j++)
            {
              k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
                (contribution[j].pixel-contribution[0].pixel);
              alpha=contribution[j].weight;
              pixel+=alpha*p[k*GetPixelChannels(image)+i];
            }
            SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each contribution by the source alpha, then
          renormalize by the accumulated alpha (gamma).
        */
        gamma=0.0;
        for (j=0; j < n; j++)
        {
          k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
            (contribution[j].pixel-contribution[0].pixel);
          alpha=contribution[j].weight*QuantumScale*
            GetPixelAlpha(image,p+k*GetPixelChannels(image));
          pixel+=alpha*p[k*GetPixelChannels(image)+i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_HorizontalFilter)
#endif
        proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contributions=DestroyContributionThreadSet(contributions);
  return(status);
}

/*
  VerticalFilter() is the y-axis counterpart of HorizontalFilter(): each
  destination row is a weighted combination of nearby source rows under the
  supplied resize filter.  Returns MagickTrue on success.
*/
static MagickBooleanType VerticalFilter(const ResizeFilter *resize_filter,
  const Image *image,Image *resize_image,const double y_factor,
  const MagickSizeType span,MagickOffsetType *offset,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **magick_restrict contributions;

  double
    scale,
    support;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Apply filter to resize vertically from image to resize image.
  */
  scale=MagickMax(1.0/y_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse)
    return(MagickFalse);
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point
        sampling.
      */
      support=(double) 0.5;
      scale=1.0;
    }
  contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
  if (contributions == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=PerceptibleReciprocal(scale);
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,resize_image,resize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      bisect,
      density;

    register const Quantum
      *magick_restrict p;

    register ContributionInfo
      *magick_restrict contribution;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      n,
      start,
      stop;

    if (status == MagickFalse)
      continue;
    /*
      Locate the window of source rows that contribute to row y and evaluate
      the filter weight of each.
    */
    bisect=(double) (y+0.5)/y_factor+MagickEpsilon;
    start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
    stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->rows);
    density=0.0;
    contribution=contributions[id];
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((double) (start+n)-bisect+0.5));
      density+=contribution[n].weight;
    }
    if (n == 0)
      continue;
    if ((density != 0.0) && (density != 1.0))
      {
        register ssize_t
          i;

        /*
          Normalize.
        */
        density=PerceptibleReciprocal(density);
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    p=GetCacheViewVirtualPixels(image_view,0,contribution[0].pixel,
      image->columns,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1),
      exception);
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          resize_traits,
          traits;

        register ssize_t
          j;

        ssize_t
          k;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        if (((resize_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2)))
          {
            /*
              Copy channels without filtering: take the source pixel nearest
              the bisection point.
            */
            j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double)
              stop-1.0)+0.5);
            k=(ssize_t) ((contribution[j-start].pixel-contribution[0].pixel)*
              image->columns+x);
            SetPixelChannel(resize_image,channel,
              p[k*GetPixelChannels(image)+i],q);
            continue;
          }
        pixel=0.0;
        if ((resize_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (j=0; j < n; j++)
            {
              k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)*
                image->columns+x);
              alpha=contribution[j].weight;
              pixel+=alpha*p[k*GetPixelChannels(image)+i];
            }
            SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each contribution by the source alpha, then
          renormalize by the accumulated alpha (gamma).
        */
        gamma=0.0;
        for (j=0; j < n; j++)
        {
          k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)*
            image->columns+x);
          alpha=contribution[j].weight*QuantumScale*GetPixelAlpha(image,p+k*
            GetPixelChannels(image));
          pixel+=alpha*p[k*GetPixelChannels(image)+i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_VerticalFilter)
#endif
        proceed=SetImageProgress(image,ResizeImageTag,(*offset)++,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contributions=DestroyContributionThreadSet(contributions);
  return(status);
}

MagickExport Image *ResizeImage(const Image *image,const size_t columns,
  const size_t rows,const FilterType filter,ExceptionInfo *exception)
{
  double
    x_factor,
    y_factor;

  FilterType
    filter_type;

  Image
    *filter_image,
    *resize_image;

  MagickOffsetType
    offset;

  MagickSizeType
    span;

  MagickStatusType
    status;

  ResizeFilter
    *resize_filter;

  /*
    Acquire resize image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows) &&
      (filter == UndefinedFilter))
    return(CloneImage(image,0,0,MagickTrue,exception));
  /*
    Acquire resize filter: default is Lanczos; Point for a no-op scale;
    Mitchell for colormapped images, images with alpha, or enlargements.
  */
  x_factor=(double) columns/(double) image->columns;
  y_factor=(double) rows/(double) image->rows;
  filter_type=LanczosFilter;
  if (filter != UndefinedFilter)
    filter_type=filter;
  else
    if ((x_factor == 1.0) && (y_factor == 1.0))
      filter_type=PointFilter;
    else
      if ((image->storage_class == PseudoClass) ||
          (image->alpha_trait != UndefinedPixelTrait) ||
          ((x_factor*y_factor) > 1.0))
        filter_type=MitchellFilter;
  resize_filter=AcquireResizeFilter(image,filter_type,MagickFalse,exception);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Prefer the OpenCL accelerated path when it is available and succeeds.
  */
  resize_image=AccelerateResizeImage(image,columns,rows,resize_filter,
    exception);
  if (resize_image != (Image *) NULL)
    {
      resize_filter=DestroyResizeFilter(resize_filter);
      return(resize_image);
    }
#endif
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    {
      resize_filter=DestroyResizeFilter(resize_filter);
      return(resize_image);
    }
  /*
    Two-pass resize: the axis with the larger scale factor is filtered first
    into an intermediate image, then the other axis.
  */
  if (x_factor > y_factor)
    filter_image=CloneImage(image,columns,image->rows,MagickTrue,exception);
  else
    filter_image=CloneImage(image,image->columns,rows,MagickTrue,exception);
  if (filter_image == (Image *) NULL)
    {
      resize_filter=DestroyResizeFilter(resize_filter);
      return(DestroyImage(resize_image));
    }
  /*
    Resize image.
  */
  offset=0;
  if (x_factor > y_factor)
    {
      span=(MagickSizeType) (filter_image->columns+rows);
      status=HorizontalFilter(resize_filter,image,filter_image,x_factor,span,
        &offset,exception);
      status&=VerticalFilter(resize_filter,filter_image,resize_image,y_factor,
        span,&offset,exception);
    }
  else
    {
      span=(MagickSizeType) (filter_image->rows+columns);
      status=VerticalFilter(resize_filter,image,filter_image,y_factor,span,
        &offset,exception);
      status&=HorizontalFilter(resize_filter,filter_image,resize_image,
        x_factor,span,&offset,exception);
    }
  /*
    Free resources.
  */
  filter_image=DestroyImage(filter_image);
  resize_filter=DestroyResizeFilter(resize_filter);
  if (status == MagickFalse)
    {
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  resize_image->type=image->type;
  return(resize_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S a m p l e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SampleImage() scales an image to the desired dimensions with pixel
%  sampling.  Unlike other scaling methods, this method does not introduce
%  any additional color into the scaled image.
%
%  The format of the SampleImage method is:
%
%      Image *SampleImage(const Image *image,const size_t columns,
%        const size_t rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the sampled image.
%
%    o rows: the number of rows in the sampled image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SampleImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleImageTag  "Sample/Image"

  CacheView
    *image_view,
    *sample_view;

  Image
    *sample_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    x1;

  ssize_t
    *x_offset,
    y;

  PointInfo
    sample_offset;

  /*
    Initialize sampled image attributes.
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); sample_image=CloneImage(image,columns,rows,MagickTrue,exception); if (sample_image == (Image *) NULL) return((Image *) NULL); /* Set the sampling offset, default is in the mid-point of sample regions. */ sample_offset.x=sample_offset.y=0.5-MagickEpsilon; { const char *value; value=GetImageArtifact(image,"sample:offset"); if (value != (char *) NULL) { GeometryInfo geometry_info; MagickStatusType flags; (void) ParseGeometry(value,&geometry_info); flags=ParseGeometry(value,&geometry_info); sample_offset.x=sample_offset.y=geometry_info.rho/100.0-MagickEpsilon; if ((flags & SigmaValue) != 0) sample_offset.y=geometry_info.sigma/100.0-MagickEpsilon; } } /* Allocate scan line buffer and column offset buffers. */ x_offset=(ssize_t *) AcquireQuantumMemory((size_t) sample_image->columns, sizeof(*x_offset)); if (x_offset == (ssize_t *) NULL) { sample_image=DestroyImage(sample_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (x1=0; x1 < (ssize_t) sample_image->columns; x1++) x_offset[x1]=(ssize_t) ((((double) x1+sample_offset.x)*image->columns)/ sample_image->columns); /* Sample each row. 
*/
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  sample_view=AcquireAuthenticCacheView(sample_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,sample_image,sample_image->rows,1)
#endif
  for (y=0; y < (ssize_t) sample_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      y_offset;

    if (status == MagickFalse)
      continue;
    /*
      Map this destination row to the nearest source row, using the same
      fractional offset that was applied to the columns above.
    */
    y_offset=(ssize_t) ((((double) y+sample_offset.y)*image->rows)/
      sample_image->rows);
    p=GetCacheViewVirtualPixels(image_view,0,y_offset,image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(sample_view,0,y,sample_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Sample each column.
    */
    for (x=0; x < (ssize_t) sample_image->columns; x++)
    {
      register ssize_t
        i;

      /* masked pixels are skipped, not sampled */
      if (GetPixelWriteMask(sample_image,q) <= (QuantumRange/2))
        {
          q+=GetPixelChannels(sample_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(sample_image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          image_traits,
          traits;

        channel=GetPixelChannelChannel(sample_image,i);
        traits=GetPixelChannelTraits(sample_image,channel);
        image_traits=GetPixelChannelTraits(image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (image_traits == UndefinedPixelTrait))
          continue;
        /*
          Copy the precomputed source column's channel value verbatim; no
          interpolation is performed, so no new colors are introduced.
        */
        SetPixelChannel(sample_image,channel,p[x_offset[x]*GetPixelChannels(
          image)+i],q);
      }
      q+=GetPixelChannels(sample_image);
    }
    if (SyncCacheViewAuthenticPixels(sample_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress++ is only serialized under OpenMP via this critical
           section */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SampleImage)
#endif
        proceed=SetImageProgress(image,SampleImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
sample_view=DestroyCacheView(sample_view); x_offset=(ssize_t *) RelinquishMagickMemory(x_offset); sample_image->type=image->type; if (status == MagickFalse) sample_image=DestroyImage(sample_image); return(sample_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleImage() changes the size of an image to the given dimensions. % % The format of the ScaleImage method is: % % Image *ScaleImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the scaled image. % % o rows: the number of rows in the scaled image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ScaleImage(const Image *image,const size_t columns, const size_t rows,ExceptionInfo *exception) { #define ScaleImageTag "Scale/Image" CacheView *image_view, *scale_view; double alpha, pixel[CompositePixelChannel], *scale_scanline, *scanline, *x_vector, *y_vector; Image *scale_image; MagickBooleanType next_column, next_row, proceed, status; PixelTrait scale_traits; PointInfo scale, span; register ssize_t i; ssize_t n, number_rows, y; /* Initialize scaled image attributes. 
*/
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  scale_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (scale_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(scale_image,DirectClass,exception) == MagickFalse)
    {
      scale_image=DestroyImage(scale_image);
      return((Image *) NULL);
    }
  /*
    Allocate memory.  Each buffer holds MaxPixelChannels doubles per column.
  */
  x_vector=(double *) AcquireQuantumMemory((size_t) image->columns,
    MaxPixelChannels*sizeof(*x_vector));
  /*
    When no vertical scaling is needed, scanline aliases x_vector; the
    cleanup below (and the epilogue on a later line) checks
    image->rows != scale_image->rows to avoid freeing it twice.
  */
  scanline=x_vector;
  if (image->rows != scale_image->rows)
    scanline=(double *) AcquireQuantumMemory((size_t) image->columns,
      MaxPixelChannels*sizeof(*scanline));
  scale_scanline=(double *) AcquireQuantumMemory((size_t) scale_image->columns,
    MaxPixelChannels*sizeof(*scale_scanline));
  y_vector=(double *) AcquireQuantumMemory((size_t) image->columns,
    MaxPixelChannels*sizeof(*y_vector));
  if ((scanline == (double *) NULL) || (scale_scanline == (double *) NULL) ||
      (x_vector == (double *) NULL) || (y_vector == (double *) NULL))
    {
      /* release whichever allocations succeeded before throwing */
      if ((image->rows != scale_image->rows) && (scanline != (double *) NULL))
        scanline=(double *) RelinquishMagickMemory(scanline);
      if (scale_scanline != (double *) NULL)
        scale_scanline=(double *) RelinquishMagickMemory(scale_scanline);
      if (x_vector != (double *) NULL)
        x_vector=(double *) RelinquishMagickMemory(x_vector);
      if (y_vector != (double *) NULL)
        y_vector=(double *) RelinquishMagickMemory(y_vector);
      scale_image=DestroyImage(scale_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Scale image.
*/ number_rows=0; next_row=MagickTrue; span.y=1.0; scale.y=(double) scale_image->rows/(double) image->rows; (void) ResetMagickMemory(y_vector,0,(size_t) MaxPixelChannels*image->columns* sizeof(*y_vector)); n=0; status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); scale_view=AcquireAuthenticCacheView(scale_image,exception); for (y=0; y < (ssize_t) scale_image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) break; q=QueueCacheViewAuthenticPixels(scale_view,0,y,scale_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; break; } alpha=1.0; if (scale_image->rows == image->rows) { /* Read a new scanline. */ p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1, exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(image); continue; } if (image->alpha_trait != UndefinedPixelTrait) alpha=QuantumScale*GetPixelAlpha(image,p); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & BlendPixelTrait) == 0) { x_vector[x*GetPixelChannels(image)+i]=(double) p[i]; continue; } x_vector[x*GetPixelChannels(image)+i]=alpha*p[i]; } p+=GetPixelChannels(image); } } else { /* Scale Y direction. */ while (scale.y < span.y) { if ((next_row != MagickFalse) && (number_rows < (ssize_t) image->rows)) { /* Read a new scanline. 
*/ p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1, exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(image); continue; } if (image->alpha_trait != UndefinedPixelTrait) alpha=QuantumScale*GetPixelAlpha(image,p); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & BlendPixelTrait) == 0) { x_vector[x*GetPixelChannels(image)+i]=(double) p[i]; continue; } x_vector[x*GetPixelChannels(image)+i]=alpha*p[i]; } p+=GetPixelChannels(image); } number_rows++; } for (x=0; x < (ssize_t) image->columns; x++) for (i=0; i < (ssize_t) GetPixelChannels(image); i++) y_vector[x*GetPixelChannels(image)+i]+=scale.y* x_vector[x*GetPixelChannels(image)+i]; span.y-=scale.y; scale.y=(double) scale_image->rows/(double) image->rows; next_row=MagickTrue; } if ((next_row != MagickFalse) && (number_rows < (ssize_t) image->rows)) { /* Read a new scanline. 
*/ p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1, exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(image); continue; } if (image->alpha_trait != UndefinedPixelTrait) alpha=QuantumScale*GetPixelAlpha(image,p); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & BlendPixelTrait) == 0) { x_vector[x*GetPixelChannels(image)+i]=(double) p[i]; continue; } x_vector[x*GetPixelChannels(image)+i]=alpha*p[i]; } p+=GetPixelChannels(image); } number_rows++; next_row=MagickFalse; } for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { pixel[i]=y_vector[x*GetPixelChannels(image)+i]+span.y* x_vector[x*GetPixelChannels(image)+i]; scanline[x*GetPixelChannels(image)+i]=pixel[i]; y_vector[x*GetPixelChannels(image)+i]=0.0; } } scale.y-=span.y; if (scale.y <= 0) { scale.y=(double) scale_image->rows/(double) image->rows; next_row=MagickTrue; } span.y=1.0; } if (scale_image->columns == image->columns) { /* Transfer scanline to scaled image. 
*/ for (x=0; x < (ssize_t) scale_image->columns; x++) { if (GetPixelWriteMask(scale_image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(scale_image); continue; } if (image->alpha_trait != UndefinedPixelTrait) { alpha=QuantumScale*scanline[x*GetPixelChannels(image)+ GetPixelChannelOffset(image,AlphaPixelChannel)]; alpha=PerceptibleReciprocal(alpha); } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); scale_traits=GetPixelChannelTraits(scale_image,channel); if ((traits == UndefinedPixelTrait) || (scale_traits == UndefinedPixelTrait)) continue; if ((traits & BlendPixelTrait) == 0) { SetPixelChannel(scale_image,channel,ClampToQuantum( scanline[x*GetPixelChannels(image)+i]),q); continue; } SetPixelChannel(scale_image,channel,ClampToQuantum(alpha*scanline[ x*GetPixelChannels(image)+i]),q); } q+=GetPixelChannels(scale_image); } } else { ssize_t t; /* Scale X direction. */ for (i=0; i < (ssize_t) GetPixelChannels(image); i++) pixel[i]=0.0; next_column=MagickFalse; span.x=1.0; t=0; for (x=0; x < (ssize_t) image->columns; x++) { scale.x=(double) scale_image->columns/(double) image->columns; while (scale.x >= span.x) { if (next_column != MagickFalse) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) pixel[i]=0.0; t++; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; pixel[i]+=span.x*scanline[x*GetPixelChannels(image)+i]; scale_scanline[t*GetPixelChannels(image)+i]=pixel[i]; } scale.x-=span.x; span.x=1.0; next_column=MagickTrue; } if (scale.x > 0) { if (next_column != MagickFalse) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) pixel[i]=0.0; next_column=MagickFalse; t++; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) 
pixel[i]+=scale.x*scanline[x*GetPixelChannels(image)+i]; span.x-=scale.x; } } if (span.x > 0) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) pixel[i]+=span.x*scanline[(x-1)*GetPixelChannels(image)+i]; } if ((next_column == MagickFalse) && (t < (ssize_t) scale_image->columns)) for (i=0; i < (ssize_t) GetPixelChannels(image); i++) scale_scanline[t*GetPixelChannels(image)+i]=pixel[i]; /* Transfer scanline to scaled image. */ for (x=0; x < (ssize_t) scale_image->columns; x++) { if (GetPixelWriteMask(scale_image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(scale_image); continue; } if (image->alpha_trait != UndefinedPixelTrait) { alpha=QuantumScale*scale_scanline[x*GetPixelChannels(image)+ GetPixelChannelOffset(image,AlphaPixelChannel)]; alpha=PerceptibleReciprocal(alpha); } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); scale_traits=GetPixelChannelTraits(scale_image,channel); if ((traits == UndefinedPixelTrait) || (scale_traits == UndefinedPixelTrait)) continue; if ((traits & BlendPixelTrait) == 0) { SetPixelChannel(scale_image,channel,ClampToQuantum( scale_scanline[x*GetPixelChannels(image)+i]),q); continue; } SetPixelChannel(scale_image,channel,ClampToQuantum(alpha* scale_scanline[x*GetPixelChannels(image)+i]),q); } q+=GetPixelChannels(scale_image); } } if (SyncCacheViewAuthenticPixels(scale_view,exception) == MagickFalse) { status=MagickFalse; break; } proceed=SetImageProgress(image,ScaleImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) { status=MagickFalse; break; } } scale_view=DestroyCacheView(scale_view); image_view=DestroyCacheView(image_view); /* Free allocated memory. 
*/ y_vector=(double *) RelinquishMagickMemory(y_vector); scale_scanline=(double *) RelinquishMagickMemory(scale_scanline); if (scale_image->rows != image->rows) scanline=(double *) RelinquishMagickMemory(scanline); x_vector=(double *) RelinquishMagickMemory(x_vector); scale_image->type=image->type; if (status == MagickFalse) scale_image=DestroyImage(scale_image); return(scale_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T h u m b n a i l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ThumbnailImage() changes the size of an image to the given dimensions and % removes any associated profiles. The goal is to produce small low cost % thumbnail images suited for display on the Web. % % The format of the ThumbnailImage method is: % % Image *ThumbnailImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the scaled image. % % o rows: the number of rows in the scaled image. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *ThumbnailImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleFactor 5

  char
    *url,
    value[MagickPathExtent];

  const char
    *name;

  Image
    *thumbnail_image;

  double
    x_factor,
    y_factor;

  struct stat
    attributes;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    For a large reduction, pre-shrink with a cheap pixel-sampling pass
    (SampleFactor times the target size) before the filtered resize;
    small reductions and tiny targets resize directly.
  */
  x_factor=(double) columns/(double) image->columns;
  y_factor=(double) rows/(double) image->rows;
  if ((x_factor*y_factor) > 0.1)
    thumbnail_image=ResizeImage(image,columns,rows,image->filter,exception);
  else
    if (((SampleFactor*columns) < 128) || ((SampleFactor*rows) < 128))
      thumbnail_image=ResizeImage(image,columns,rows,image->filter,exception);
    else
      {
        Image
          *sample_image;

        sample_image=SampleImage(image,SampleFactor*columns,SampleFactor*rows,
          exception);
        if (sample_image == (Image *) NULL)
          return((Image *) NULL);
        thumbnail_image=ResizeImage(sample_image,columns,rows,image->filter,
          exception);
        sample_image=DestroyImage(sample_image);
      }
  if (thumbnail_image == (Image *) NULL)
    return(thumbnail_image);
  (void) ParseAbsoluteGeometry("0x0+0+0",&thumbnail_image->page);
  if (thumbnail_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(thumbnail_image,OpaqueAlphaChannel,exception);
  thumbnail_image->depth=8;
  thumbnail_image->interlace=NoInterlace;
  /*
    Strip all profiles except color profiles.
  */
  ResetImageProfileIterator(thumbnail_image);
  for (name=GetNextImageProfile(thumbnail_image); name != (const char *) NULL; )
  {
    if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
      {
        (void) DeleteImageProfile(thumbnail_image,name);
        /* the profile list was mutated; restart the iteration */
        ResetImageProfileIterator(thumbnail_image);
      }
    name=GetNextImageProfile(thumbnail_image);
  }
  (void) DeleteImageProperty(thumbnail_image,"comment");
  /*
    Annotate the thumbnail with Thumb::* metadata properties.
  */
  (void) CopyMagickString(value,image->magick_filename,MagickPathExtent);
  if (strstr(image->magick_filename,"//") == (char *) NULL)
    (void) FormatLocaleString(value,MagickPathExtent,"file://%s",
      image->magick_filename);
  (void) SetImageProperty(thumbnail_image,"Thumb::URI",value,exception);
  (void) CopyMagickString(value,image->magick_filename,MagickPathExtent);
  if (GetPathAttributes(image->filename,&attributes) != MagickFalse)
    {
      /*
        Only report a modification time when the stat succeeded.  The
        original code also formatted attributes.st_mtime unconditionally
        after this block, reading uninitialized memory when
        GetPathAttributes() failed; that dead (value is overwritten just
        below) and unsafe statement has been removed.
      */
      (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
        attributes.st_mtime);
      (void) SetImageProperty(thumbnail_image,"Thumb::MTime",value,exception);
    }
  (void) FormatMagickSize(GetBlobSize(image),MagickFalse,"B",MagickPathExtent,
    value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Size",value,exception);
  (void) FormatLocaleString(value,MagickPathExtent,"image/%s",image->magick);
  LocaleLower(value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Mimetype",value,exception);
  url=GetMagickHomeURL();
  (void) SetImageProperty(thumbnail_image,"software",url,exception);
  url=DestroyString(url);
  (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
    image->magick_columns);
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Width",value,
    exception);
  (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
    image->magick_rows);
  (void) SetImageProperty(thumbnail_image,"Thumb::Image::Height",value,
    exception);
  (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
    GetImageListLength(image));
  (void) SetImageProperty(thumbnail_image,"Thumb::Document::Pages",value,
    exception);
  return(thumbnail_image);
}
/* ==== GB_binop__eq_uint8.c ==== */
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__eq_uint8) // A.*B function (eWiseMult): GB (_AemultB_08__eq_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__eq_uint8) // A.*B function (eWiseMult): GB (_AemultB_04__eq_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_uint8) // A*D function (colscale): GB (_AxD__eq_uint8) // D*A function (rowscale): GB (_DxB__eq_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__eq_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__eq_uint8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_uint8) // C=scalar+B GB (_bind1st__eq_uint8) // C=scalar+B' GB (_bind1st_tran__eq_uint8) // C=A+scalar GB (_bind2nd__eq_uint8) // C=A'+scalar GB (_bind2nd_tran__eq_uint8) // C type: bool // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 
// aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EQ || GxB_NO_UINT8 || GxB_NO_EQ_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__eq_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__eq_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__eq_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__eq_uint8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, 
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__eq_uint8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__eq_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__eq_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const 
GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__eq_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
// NOTE: this file is auto-generated (per the header: "do not edit it"); the
// comments below are review annotations only.  Tail of the preceding
// GB_AemultB_02 kernel (its head lies before this chunk):
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

// The actual loop body lives in GB_emult_04_template.c, specialized via the
// GB_* macros defined at the top of this generated file (z = (x == y) on
// uint8_t inputs, boolean output).
GrB_Info GB (_AemultB_04__eq_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__eq_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Computes Cx [p] = (x == Bx [p]) for all entries present in B (Bb is the
// bitmap of B; entries with Bb [p] == 0 are skipped and Cx [p] left untouched).
GrB_Info GB (_bind1st__eq_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint8_t   x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap/full matrix B
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Same as bind1st above, but the scalar is the 2nd operand: Cx [p] = (Ax [p] == y).
GrB_Info GB (_bind2nd__eq_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t   y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x == aij) ;                      \
}

GrB_Info GB (_bind1st_tran__eq_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij == y) ;                      \
}

GrB_Info GB (_bind2nd_tran__eq_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
shear.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS H H EEEEE AAA RRRR % % SS H H E A A R R % % SSS HHHHH EEE AAAAA RRRR % % SS H H E A A R R % % SSSSS H H EEEEE A A R R % % % % % % MagickCore Methods to Shear or Rotate an Image by an Arbitrary Angle % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % The XShearImage() and YShearImage() methods are based on the paper "A Fast % Algorithm for General Raster Rotation" by Alan W. Paeth, Graphics % Interface '86 (Vancouver). ShearRotateImage() is adapted from a similar % method based on the Paeth paper written by Michael Halle of the Spatial % Imaging Group, MIT Media Lab. % */ /* Include declarations. 
*/

#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/channel.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/list.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resource_.h"
#include "MagickCore/shear.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C r o p T o F i t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CropToFitImage() crops the sheared image as determined by the bounding box
%  as defined by width and height and shearing angles.
%
%  The format of the CropToFitImage method is:
%
%      MagickBooleanType CropToFitImage(Image **image,
%        const double x_shear,const double y_shear,
%        const double width,const double height,
%        const MagickBooleanType rotate,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o x_shear, y_shear, width, height: Defines a region of the image to crop.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CropToFitImage(Image **image,
  const double x_shear,const double y_shear,
  const double width,const double height,
  const MagickBooleanType rotate,ExceptionInfo *exception)
{
  Image
    *crop_image;

  PointInfo
    extent[4],
    min,
    max;

  RectangleInfo
    geometry,
    page;

  register ssize_t
    i;

  /*
    Calculate the rotated image size.  The four corners of the width x height
    region, centered on the origin, are pushed through the same shear sequence
    the image itself undergoes (x-shear, y-shear, then a second x-shear when a
    full rotation is being performed), and the axis-aligned bounding box of the
    sheared corners becomes the crop geometry.  Note the shears are applied
    sequentially, so the y-shear deliberately uses the already-sheared x.
  */
  extent[0].x=(double) (-width/2.0);
  extent[0].y=(double) (-height/2.0);
  extent[1].x=(double) width/2.0;
  extent[1].y=(double) (-height/2.0);
  extent[2].x=(double) (-width/2.0);
  extent[2].y=(double) height/2.0;
  extent[3].x=(double) width/2.0;
  extent[3].y=(double) height/2.0;
  for (i=0; i < 4; i++)
  {
    extent[i].x+=x_shear*extent[i].y;
    extent[i].y+=y_shear*extent[i].x;
    if (rotate != MagickFalse)
      extent[i].x+=x_shear*extent[i].y;
    /* translate from origin-centered coordinates back to image coordinates */
    extent[i].x+=(double) (*image)->columns/2.0;
    extent[i].y+=(double) (*image)->rows/2.0;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  geometry.x=CastDoubleToLong(ceil(min.x-0.5));
  geometry.y=CastDoubleToLong(ceil(min.y-0.5));
  geometry.width=(size_t) CastDoubleToLong(floor(max.x-min.x+0.5));
  geometry.height=(size_t) CastDoubleToLong(floor(max.y-min.y+0.5));
  /* temporarily zero the page so CropImage crops in absolute coordinates */
  page=(*image)->page;
  (void) ParseAbsoluteGeometry("0x0+0+0",&(*image)->page);
  crop_image=CropImage(*image,&geometry,exception);
  if (crop_image == (Image *) NULL)
    return(MagickFalse);
  crop_image->page=page;
  /* replace the caller's image with the cropped one */
  *image=DestroyImage(*image);
  *image=crop_image;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s k e w I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DeskewImage() removes skew from the image.  Skew is an artifact that
%  occurs in scanned images because of the camera being misaligned,
%  imperfections in the scanning or surface, or simply because the paper was
%  not placed completely flat when scanned.
%
%  The result will be auto-cropped if the artifact "deskew:auto-crop" is
%  defined, while the amount the image is to be deskewed, in degrees is also
%  saved as the artifact "deskew:angle".
%
%  The format of the DeskewImage method is:
%
%      Image *DeskewImage(const Image *image,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: separate background from foreground.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  RadonProjection(): accumulate a discrete Radon-like projection of the
  bit-count matrix.  Each doubling pass merges column pairs at progressively
  steeper shear offsets (ping-ponging between the source and destination
  matrices); the final loop writes the sum of squared differences between
  vertically adjacent elements into projection[], indexed by `sign` (+1 or -1)
  so the two shear directions fill opposite halves of the array.
*/
static void RadonProjection(const Image *image,MatrixInfo *source_matrixs,
  MatrixInfo *destination_matrixs,const ssize_t sign,size_t *projection)
{
  MatrixInfo
    *swap;

  register MatrixInfo
    *p,
    *q;

  register ssize_t
    x;

  size_t
    step;

  p=source_matrixs;
  q=destination_matrixs;
  for (step=1; step < GetMatrixColumns(p); step*=2)
  {
    for (x=0; x < (ssize_t) GetMatrixColumns(p); x+=2*(ssize_t) step)
    {
      register ssize_t
        i;

      ssize_t
        y;

      unsigned short
        element,
        neighbor;

      for (i=0; i < (ssize_t) step; i++)
      {
        /* rows where both the i and i+1 shifted neighbors exist */
        for (y=0; y < (ssize_t) (GetMatrixRows(p)-i-1); y++)
        {
          if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
            continue;
          if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse)
            continue;
          neighbor+=element;
          if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse)
            continue;
          if (GetMatrixElement(p,x+i+step,y+i+1,&neighbor) == MagickFalse)
            continue;
          neighbor+=element;
          if (SetMatrixElement(q,x+2*i+1,y,&neighbor) == MagickFalse)
            continue;
        }
        /* rows where only the i-shifted neighbor exists */
        for ( ; y < (ssize_t) (GetMatrixRows(p)-i); y++)
        {
          if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
            continue;
          if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse)
            continue;
          neighbor+=element;
          if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse)
            continue;
          if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse)
            continue;
        }
        /* bottom rows: no neighbor, copy the element through */
        for ( ; y < (ssize_t) GetMatrixRows(p); y++)
        {
          if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
            continue;
          if (SetMatrixElement(q,x+2*i,y,&element) == MagickFalse)
            continue;
          if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse)
            continue;
        }
      }
    }
    /* ping-pong the two matrices for the next doubling pass */
    swap=p;
    p=q;
    q=swap;
  }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,GetMatrixColumns(p),1)
#endif
  for (x=0; x < (ssize_t) GetMatrixColumns(p); x++)
  {
    register ssize_t
      y;

    size_t
      sum;

    sum=0;
    for (y=0; y < (ssize_t) (GetMatrixRows(p)-1); y++)
    {
      ssize_t
        delta;

      unsigned short
        element,
        neighbor;

      if (GetMatrixElement(p,x,y,&element) == MagickFalse)
        continue;
      if (GetMatrixElement(p,x,y+1,&neighbor) == MagickFalse)
        continue;
      delta=(ssize_t) element-(ssize_t) neighbor;
      sum+=delta*delta;
    }
    projection[GetMatrixColumns(p)+sign*x-1]=sum;
  }
}

/*
  RadonTransform(): threshold the image into a packed 1-bit-per-pixel form
  (stored as per-byte population counts), then run RadonProjection in both
  shear directions to fill projection[0 .. 2*width-2].

  NOTE(review): `status` is set to MagickFalse when a pixel row cannot be
  read, but the function still returns MagickTrue unconditionally at the end
  (matching the row loops, which merely skip remaining rows) — presumably a
  partial projection is considered acceptable; confirm before changing.
*/
static MagickBooleanType RadonTransform(const Image *image,
  const double threshold,size_t *projection,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MatrixInfo
    *destination_matrixs,
    *source_matrixs;

  MagickBooleanType
    status;

  size_t
    count,
    width;

  ssize_t
    j,
    y;

  unsigned char
    c;

  unsigned short
    bits[256];

  /* width = smallest power of two >= number of bytes per packed row */
  for (width=1; width < ((image->columns+7)/8); width<<=1) ;
  source_matrixs=AcquireMatrixInfo(width,image->rows,sizeof(unsigned short),
    exception);
  destination_matrixs=AcquireMatrixInfo(width,image->rows,
    sizeof(unsigned short),exception);
  if ((source_matrixs == (MatrixInfo *) NULL) ||
      (destination_matrixs == (MatrixInfo *) NULL))
    {
      if (destination_matrixs != (MatrixInfo *) NULL)
        destination_matrixs=DestroyMatrixInfo(destination_matrixs);
      if (source_matrixs != (MatrixInfo *) NULL)
        source_matrixs=DestroyMatrixInfo(source_matrixs);
      return(MagickFalse);
    }
  if (NullMatrix(source_matrixs) == MagickFalse)
    {
      destination_matrixs=DestroyMatrixInfo(destination_matrixs);
      source_matrixs=DestroyMatrixInfo(source_matrixs);
      return(MagickFalse);
    }
  /* precompute a byte -> popcount table */
  for (j=0; j < 256; j++)
  {
    c=(unsigned char) j;
    for (count=0; c != 0; c>>=1)
      count+=c & 0x01;
    bits[j]=(unsigned short) count;
  }
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  /*
    First pass: pack thresholded pixels into bytes right-to-left (i counts
    down), for the negative-shear projection.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      i,
      x;

    size_t
      bit,
      byte;

    unsigned short
      value;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    bit=0;
    byte=0;
    i=(ssize_t) (image->columns+7)/8;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      byte<<=1;
      /* a pixel is "foreground" if any channel falls below the threshold */
      if (((MagickRealType) GetPixelRed(image,p) < threshold) ||
          ((MagickRealType) GetPixelGreen(image,p) < threshold) ||
          ((MagickRealType) GetPixelBlue(image,p) < threshold))
        byte|=0x01;
      bit++;
      if (bit == 8)
        {
          value=bits[byte];
          (void) SetMatrixElement(source_matrixs,--i,y,&value);
          bit=0;
          byte=0;
        }
      p+=GetPixelChannels(image);
    }
    if (bit != 0)
      {
        byte<<=(8-bit);
        value=bits[byte];
        (void) SetMatrixElement(source_matrixs,--i,y,&value);
      }
  }
  RadonProjection(image,source_matrixs,destination_matrixs,-1,projection);
  (void) NullMatrix(source_matrixs);
  /*
    Second pass: pack left-to-right (i counts up), for the positive-shear
    projection.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      i,
      x;

    size_t
      bit,
      byte;

    unsigned short
      value;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    bit=0;
    byte=0;
    i=0;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      byte<<=1;
      if (((MagickRealType) GetPixelRed(image,p) < threshold) ||
          ((MagickRealType) GetPixelGreen(image,p) < threshold) ||
          ((MagickRealType) GetPixelBlue(image,p) < threshold))
        byte|=0x01;
      bit++;
      if (bit == 8)
        {
          value=bits[byte];
          (void) SetMatrixElement(source_matrixs,i++,y,&value);
          bit=0;
          byte=0;
        }
      p+=GetPixelChannels(image);
    }
    if (bit != 0)
      {
        byte<<=(8-bit);
        value=bits[byte];
        (void) SetMatrixElement(source_matrixs,i++,y,&value);
      }
  }
  RadonProjection(image,source_matrixs,destination_matrixs,1,projection);
  image_view=DestroyCacheView(image_view);
  destination_matrixs=DestroyMatrixInfo(destination_matrixs);
  source_matrixs=DestroyMatrixInfo(source_matrixs);
  return(MagickTrue);
}

/*
  GetImageBackgroundColor(): set image->background_color to the average color
  of the `offset`-pixel-wide border of the image (pixels outside the central
  region).  A no-op when offset <= 0.  NOTE(review): if every border-row read
  fails, `count` stays 0.0 and the final divisions are by zero — presumably
  unreachable in practice; confirm before relying on it.
*/
static void GetImageBackgroundColor(Image *image,const ssize_t offset,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  PixelInfo
    background;

  double
    count;

  ssize_t
    y;

  /*
    Compute average background color.
  */
  if (offset <= 0)
    return;
  GetPixelInfo(image,&background);
  count=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    /* skip rows entirely inside the central (non-border) region */
    if ((y >= offset) && (y < ((ssize_t) image->rows-offset)))
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      continue;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((x >= offset) && (x < ((ssize_t) image->columns-offset)))
        continue;
      background.red+=QuantumScale*GetPixelRed(image,p);
      background.green+=QuantumScale*GetPixelGreen(image,p);
      background.blue+=QuantumScale*GetPixelBlue(image,p);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        background.alpha+=QuantumScale*GetPixelAlpha(image,p);
      count++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  image->background_color.red=(double) ClampToQuantum(QuantumRange*
    background.red/count);
  image->background_color.green=(double) ClampToQuantum(QuantumRange*
    background.green/count);
  image->background_color.blue=(double) ClampToQuantum(QuantumRange*
    background.blue/count);
  if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
    image->background_color.alpha=(double) ClampToQuantum(QuantumRange*
      background.alpha/count);
}

MagickExport Image *DeskewImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
  AffineMatrix
    affine_matrix;

  const char
    *artifact;

  double
    degrees;

  Image
    *clone_image,
    *crop_image,
    *deskew_image,
    *median_image;

  MagickBooleanType
    status;

  RectangleInfo
    geometry;

  register ssize_t
    i;

  size_t
    max_projection,
    *projection,
    width;

  ssize_t
    skew;

  /*
    Compute deskew angle: the shear offset whose Radon projection has the
    largest squared-difference energy is taken as the text-line skew.
  */
  for (width=1; width < ((image->columns+7)/8); width<<=1) ;
  projection=(size_t *) AcquireQuantumMemory((size_t) (2*width-1),
    sizeof(*projection));
  if (projection == (size_t *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  status=RadonTransform(image,threshold,projection,exception);
  if (status == MagickFalse)
    {
      projection=(size_t *) RelinquishMagickMemory(projection);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  max_projection=0;
  skew=0;
  for (i=0; i < (ssize_t) (2*width-1); i++)
  {
    if (projection[i] > max_projection)
      {
        skew=i-(ssize_t) width+1;
        max_projection=projection[i];
      }
  }
  projection=(size_t *) RelinquishMagickMemory(projection);
  /* convert the discrete shear (in 1/8-pixel-per-row units) to degrees */
  degrees=RadiansToDegrees(-atan((double) skew/width/8));
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),
      "  Deskew angle: %g",degrees);
  /*
    Deskew image.
  */
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  {
    char
      angle[MagickPathExtent];

    (void) FormatLocaleString(angle,MagickPathExtent,"%.20g",degrees);
    (void) SetImageArtifact(clone_image,"deskew:angle",angle);
  }
  (void) SetImageVirtualPixelMethod(clone_image,BackgroundVirtualPixelMethod,
    exception);
  /* pure rotation matrix for the computed angle */
  affine_matrix.sx=cos(DegreesToRadians(fmod((double) degrees,360.0)));
  affine_matrix.rx=sin(DegreesToRadians(fmod((double) degrees,360.0)));
  affine_matrix.ry=(-sin(DegreesToRadians(fmod((double) degrees,360.0))));
  affine_matrix.sy=cos(DegreesToRadians(fmod((double) degrees,360.0)));
  affine_matrix.tx=0.0;
  affine_matrix.ty=0.0;
  artifact=GetImageArtifact(image,"deskew:auto-crop");
  if (IsStringTrue(artifact) == MagickFalse)
    {
      deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
      clone_image=DestroyImage(clone_image);
      return(deskew_image);
    }
  /*
    Auto-crop image: estimate the border color, rotate, median-filter to
    suppress noise, then crop to the bounding box of the content.
  */
  GetImageBackgroundColor(clone_image,(ssize_t) StringToLong(artifact),
    exception);
  deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
  clone_image=DestroyImage(clone_image);
  if (deskew_image == (Image *) NULL)
    return((Image *) NULL);
  median_image=StatisticImage(deskew_image,MedianStatistic,3,3,exception);
  if (median_image == (Image *) NULL)
    {
      deskew_image=DestroyImage(deskew_image);
      return((Image *) NULL);
    }
  geometry=GetImageBoundingBox(median_image,exception);
  median_image=DestroyImage(median_image);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),"  Deskew geometry: "
      "%.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double)
      geometry.height,(double) geometry.x,(double) geometry.y);
  crop_image=CropImage(deskew_image,&geometry,exception);
  deskew_image=DestroyImage(deskew_image);
  return(crop_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I n t e g r a l R o t a t e I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IntegralRotateImage() rotates the image an integral of 90 degrees.  It
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the rotated image.
%
%  The format of the IntegralRotateImage method is:
%
%      Image *IntegralRotateImage(const Image *image,size_t rotations,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o rotations: Specifies the number of 90 degree rotations.
%
*/
MagickExport Image *IntegralRotateImage(const Image *image,size_t rotations,
  ExceptionInfo *exception)
{
#define RotateImageTag  "Rotate/Image"

  CacheView
    *image_view,
    *rotate_view;

  Image
    *rotate_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  /*
    Initialize rotated image attributes.  90/270 degree rotations swap the
    image dimensions; 0 and 180 keep them.
  */
  assert(image != (Image *) NULL);
  page=image->page;
  rotations%=4;
  switch (rotations)
  {
    case 0:
    default:
    {
      rotate_image=CloneImage(image,0,0,MagickTrue,exception);
      break;
    }
    case 2:
    {
      rotate_image=CloneImage(image,image->columns,image->rows,MagickTrue,
        exception);
      break;
    }
    case 1:
    case 3:
    {
      rotate_image=CloneImage(image,image->rows,image->columns,MagickTrue,
        exception);
      break;
    }
  }
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  if (rotations == 0)
    return(rotate_image);
  /*
    Integral rotate the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  rotate_view=AcquireAuthenticCacheView(rotate_image,exception);
  switch (rotations)
  {
    case 1:
    {
      size_t
        tile_height,
        tile_width;

      ssize_t
        tile_y;

      /*
        Rotate 90 degrees: process the source in cache-friendly tiles; each
        source tile column becomes a destination row.
      */
      GetPixelCacheTileSize(image,&tile_width,&tile_height);
      tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,rotate_image,image->rows/tile_height,1)
#endif
      for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t)
        tile_height)
      {
        register ssize_t
          tile_x;

        if (status == MagickFalse)
          continue;
        tile_x=0;
        for ( ; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t)
          tile_width)
        {
          MagickBooleanType
            sync;

          register const Quantum
            *magick_restrict p;

          register Quantum
            *magick_restrict q;

          register ssize_t
            y;

          size_t
            height,
            width;

          /* clip the tile to the image boundary */
          width=tile_width;
          if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns)
            width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
          height=tile_height;
          if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows)
            height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
          p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
            exception);
          if (p == (const Quantum *) NULL)
            {
              status=MagickFalse;
              break;
            }
          for (y=0; y < (ssize_t) width; y++)
          {
            register const Quantum
              *magick_restrict tile_pixels;

            register ssize_t
              x;

            if (status == MagickFalse)
              continue;
            q=QueueCacheViewAuthenticPixels(rotate_view,(ssize_t)
              (rotate_image->columns-(tile_y+height)),y+tile_x,height,1,
              exception);
            if (q == (Quantum *) NULL)
              {
                status=MagickFalse;
                continue;
              }
            /* walk the tile column bottom-to-top to fill the output row */
            tile_pixels=p+((height-1)*width+y)*GetPixelChannels(image);
            for (x=0; x < (ssize_t) height; x++)
            {
              register ssize_t
                i;

              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              {
                PixelChannel channel = GetPixelChannelChannel(image,i);
                PixelTrait traits = GetPixelChannelTraits(image,channel);
                PixelTrait rotate_traits = GetPixelChannelTraits(rotate_image,
                  channel);
                if ((traits == UndefinedPixelTrait) ||
                    (rotate_traits == UndefinedPixelTrait))
                  continue;
                SetPixelChannel(rotate_image,channel,tile_pixels[i],q);
              }
              tile_pixels-=width*GetPixelChannels(image);
              q+=GetPixelChannels(rotate_image);
            }
            /*
              NOTE(review): unlike case 3 below, this sync is not wrapped in
              an omp critical section — confirm whether that asymmetry is
              intentional.
            */
            sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
            if (sync == MagickFalse)
              status=MagickFalse;
          }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
        image->rows-1,image->rows);
      Swap(page.width,page.height);
      Swap(page.x,page.y);
      if (page.width != 0)
        page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
      break;
    }
    case 2:
    {
      register ssize_t
        y;

      /*
        Rotate 180 degrees: each source row is written reversed into the
        mirror-image destination row.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,rotate_image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register const Quantum
          *magick_restrict p;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
        q=QueueCacheViewAuthenticPixels(rotate_view,0,(ssize_t) (image->rows-y-
          1),image->columns,1,exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        q+=GetPixelChannels(rotate_image)*image->columns;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            i;

          q-=GetPixelChannels(rotate_image);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait rotate_traits = GetPixelChannelTraits(rotate_image,
              channel);
            if ((traits == UndefinedPixelTrait) ||
                (rotate_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(rotate_image,channel,p[i],q);
          }
          p+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,RotateImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
        image->rows-1,image->rows);
      if (page.width != 0)
        page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
      if (page.height != 0)
        page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
      break;
    }
    case 3:
    {
      size_t
        tile_height,
        tile_width;

      ssize_t
        tile_y;

      /*
        Rotate 270 degrees: the tiled mirror of case 1.
      */
      GetPixelCacheTileSize(image,&tile_width,&tile_height);
      tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,rotate_image,image->rows/tile_height,1)
#endif
      for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t)
        tile_height)
      {
        register ssize_t
          tile_x;

        if (status == MagickFalse)
          continue;
        tile_x=0;
        for ( ; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t)
          tile_width)
        {
          MagickBooleanType
            sync;

          register const Quantum
            *magick_restrict p;

          register Quantum
            *magick_restrict q;

          register ssize_t
            y;

          size_t
            height,
            width;

          width=tile_width;
          if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns)
            width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
          height=tile_height;
          if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows)
            height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
          p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
            exception);
          if (p == (const Quantum *) NULL)
            {
              status=MagickFalse;
              break;
            }
          for (y=0; y < (ssize_t) width; y++)
          {
            register const Quantum
              *magick_restrict tile_pixels;

            register ssize_t
              x;

            if (status == MagickFalse)
              continue;
            q=QueueCacheViewAuthenticPixels(rotate_view,tile_y,(ssize_t) (y+
              rotate_image->rows-(tile_x+width)),height,1,exception);
            if (q == (Quantum *) NULL)
              {
                status=MagickFalse;
                continue;
              }
            /* walk the tile column top-to-bottom, mirrored horizontally */
            tile_pixels=p+((width-1)-y)*GetPixelChannels(image);
            for (x=0; x < (ssize_t) height; x++)
            {
              register ssize_t
                i;

              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              {
                PixelChannel channel = GetPixelChannelChannel(image,i);
                PixelTrait traits = GetPixelChannelTraits(image,channel);
                PixelTrait rotate_traits = GetPixelChannelTraits(rotate_image,
                  channel);
                if ((traits == UndefinedPixelTrait) ||
                    (rotate_traits == UndefinedPixelTrait))
                  continue;
                SetPixelChannel(rotate_image,channel,tile_pixels[i],q);
              }
              tile_pixels+=width*GetPixelChannels(image);
              q+=GetPixelChannels(rotate_image);
            }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_IntegralRotateImage)
#endif
            sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
            if (sync == MagickFalse)
              status=MagickFalse;
          }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
        image->rows-1,image->rows);
      Swap(page.width,page.height);
      Swap(page.x,page.y);
      if (page.height != 0)
        page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
      break;
    }
    default:
      break;
  }
  rotate_view=DestroyCacheView(rotate_view);
  image_view=DestroyCacheView(image_view);
  rotate_image->type=image->type;
  rotate_image->page=page;
  if (status == MagickFalse)
    rotate_image=DestroyImage(rotate_image);
  return(rotate_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   X S h e a r I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  XShearImage() shears the image in the X direction with a shear angle of
%  'degrees'.  Positive angles shear counter-clockwise (right-hand rule), and
%  negative angles shear clockwise.  Angles are measured relative to a vertical
%  Y-axis.  X shears will widen an image creating 'empty' triangles on the left
%  and right sides of the source image.
%
%  The format of the XShearImage method is:
%
%      MagickBooleanType XShearImage(Image *image,const double degrees,
%        const size_t width,const size_t height,
%        const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o degrees: A double representing the shearing angle along the X
%      axis.
%
%    o width, height, x_offset, y_offset: Defines a region of the image
%      to shear.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType XShearImage(Image *image,const double degrees,
  const size_t width,const size_t height,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define XShearImageTag  "XShear/Image"

  typedef enum
  {
    LEFT,
    RIGHT
  } ShearDirection;

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    background;

  ssize_t
    y;

  /*
    X shear image (Paeth-style): each row is shifted horizontally by a
    row-dependent displacement, with the fractional part handled by
    area-weighted blending between neighboring pixels.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  background=image->background_color;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,height,1)
#endif
  for (y=0; y < (ssize_t) height; y++)
  {
    PixelInfo
      pixel,
      source,
      destination;

    double
      area,
      displacement;

    register Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i;

    ShearDirection
      direction;

    ssize_t
      step;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewAuthenticPixels(image_view,0,y_offset+y,image->columns,1,
      exception);
    if (p == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    p+=x_offset*GetPixelChannels(image);
    /* displacement grows linearly with distance from the vertical center */
    displacement=degrees*(double) (y-height/2.0);
    if (displacement == 0.0)
      continue;
    if (displacement > 0.0)
      direction=RIGHT;
    else
      {
        displacement*=(-1.0);
        direction=LEFT;
      }
    /* step = whole-pixel shift (+1 below); area = fractional-pixel blend */
    step=CastDoubleToLong(floor((double) displacement));
    area=(double) (displacement-step);
    step++;
    pixel=background;
    GetPixelInfo(image,&source);
    GetPixelInfo(image,&destination);
    switch (direction)
    {
      case LEFT:
      {
        /*
          Transfer pixels left-to-right.
        */
        if (step > x_offset)
          break;
        q=p-step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) width; i++)
        {
          if ((x_offset+i) < step)
            {
              p+=GetPixelChannels(image);
              GetPixelInfoPixel(image,p,&pixel);
              q+=GetPixelChannels(image);
              continue;
            }
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,&destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(image);
        }
        /* blend the trailing edge into the background, then pad with it */
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        SetPixelViaPixelInfo(image,&destination,q);
        q+=GetPixelChannels(image);
        for (i=0; i < (step-1); i++)
        {
          SetPixelViaPixelInfo(image,&background,q);
          q+=GetPixelChannels(image);
        }
        break;
      }
      case RIGHT:
      {
        /*
          Transfer pixels right-to-left.
        */
        p+=width*GetPixelChannels(image);
        q=p+step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) width; i++)
        {
          p-=GetPixelChannels(image);
          q-=GetPixelChannels(image);
          if ((size_t) (x_offset+width+step-i) > image->columns)
            continue;
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,&destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
        }
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        q-=GetPixelChannels(image);
        SetPixelViaPixelInfo(image,&destination,q);
        for (i=0; i < (step-1); i++)
        {
          q-=GetPixelChannels(image);
          SetPixelViaPixelInfo(image,&background,q);
        }
        break;
      }
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,XShearImageTag,progress,height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Y S h e a r I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  YShearImage shears the image in the Y direction with a shear angle of
%  'degrees'.  Positive angles shear counter-clockwise (right-hand rule), and
%  negative angles shear clockwise.  Angles are measured relative to a
%  horizontal X-axis.  Y shears will increase the height of an image creating
%  'empty' triangles on the top and bottom of the source image.
% % The format of the YShearImage method is: % % MagickBooleanType YShearImage(Image *image,const double degrees, % const size_t width,const size_t height, % const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o degrees: A double representing the shearing angle along the Y % axis. % % o width, height, x_offset, y_offset: Defines a region of the image % to shear. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType YShearImage(Image *image,const double degrees, const size_t width,const size_t height,const ssize_t x_offset, const ssize_t y_offset,ExceptionInfo *exception) { #define YShearImageTag "YShear/Image" typedef enum { UP, DOWN } ShearDirection; CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo background; ssize_t x; /* Y Shear image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; progress=0; background=image->background_color; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,width,1) #endif for (x=0; x < (ssize_t) width; x++) { double area, displacement; PixelInfo pixel, source, destination; register Quantum *magick_restrict p, *magick_restrict q; register ssize_t i; ShearDirection direction; ssize_t step; if (status == MagickFalse) continue; p=GetCacheViewAuthenticPixels(image_view,x_offset+x,0,1,image->rows, exception); if (p == (Quantum *) NULL) { status=MagickFalse; continue; } p+=y_offset*GetPixelChannels(image); displacement=degrees*(double) (x-width/2.0); if (displacement == 0.0) continue; if (displacement > 0.0) direction=DOWN; else { displacement*=(-1.0); direction=UP; } 
step=CastDoubleToLong(floor((double) displacement)); area=(double) (displacement-step); step++; pixel=background; GetPixelInfo(image,&source); GetPixelInfo(image,&destination); switch (direction) { case UP: { /* Transfer pixels top-to-bottom. */ if (step > y_offset) break; q=p-step*GetPixelChannels(image); for (i=0; i < (ssize_t) height; i++) { if ((y_offset+i) < step) { p+=GetPixelChannels(image); GetPixelInfoPixel(image,p,&pixel); q+=GetPixelChannels(image); continue; } GetPixelInfoPixel(image,p,&source); CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha, &source,(double) GetPixelAlpha(image,p),area, &destination); SetPixelViaPixelInfo(image,&destination,q); GetPixelInfoPixel(image,p,&pixel); p+=GetPixelChannels(image); q+=GetPixelChannels(image); } CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha, &background,(double) background.alpha,area,&destination); SetPixelViaPixelInfo(image,&destination,q); q+=GetPixelChannels(image); for (i=0; i < (step-1); i++) { SetPixelViaPixelInfo(image,&background,q); q+=GetPixelChannels(image); } break; } case DOWN: { /* Transfer pixels bottom-to-top. 
*/ p+=height*GetPixelChannels(image); q=p+step*GetPixelChannels(image); for (i=0; i < (ssize_t) height; i++) { p-=GetPixelChannels(image); q-=GetPixelChannels(image); if ((size_t) (y_offset+height+step-i) > image->rows) continue; GetPixelInfoPixel(image,p,&source); CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha, &source,(double) GetPixelAlpha(image,p),area, &destination); SetPixelViaPixelInfo(image,&destination,q); GetPixelInfoPixel(image,p,&pixel); } CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha, &background,(double) background.alpha,area,&destination); q-=GetPixelChannels(image); SetPixelViaPixelInfo(image,&destination,q); for (i=0; i < (step-1); i++) { q-=GetPixelChannels(image); SetPixelViaPixelInfo(image,&background,q); } break; } } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,YShearImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h e a r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShearImage() creates a new image that is a shear_image copy of an existing % one. Shearing slides one edge of an image along the X or Y axis, creating % a parallelogram. An X direction shear slides an edge along the X axis, % while a Y direction shear slides an edge along the Y axis. The amount of % the shear is controlled by a shear angle. For X direction shears, x_shear % is measured relative to the Y axis, and similarly, for Y direction shears % y_shear is measured relative to the X axis. 
Empty triangles left over from % shearing the image are filled with the background color defined by member % 'background_color' of the image.. ShearImage() allocates the memory % necessary for the new Image structure and returns a pointer to the new image. % % ShearImage() is based on the paper "A Fast Algorithm for General Raster % Rotatation" by Alan W. Paeth. % % The format of the ShearImage method is: % % Image *ShearImage(const Image *image,const double x_shear, % const double y_shear,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o x_shear, y_shear: Specifies the number of degrees to shear the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ShearImage(const Image *image,const double x_shear, const double y_shear,ExceptionInfo *exception) { Image *integral_image, *shear_image; MagickBooleanType status; PointInfo shear; RectangleInfo border_info, bounds; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((x_shear != 0.0) && (fmod(x_shear,90.0) == 0.0)) ThrowImageException(ImageError,"AngleIsDiscontinuous"); if ((y_shear != 0.0) && (fmod(y_shear,90.0) == 0.0)) ThrowImageException(ImageError,"AngleIsDiscontinuous"); /* Initialize shear angle. 
*/ integral_image=CloneImage(image,0,0,MagickTrue,exception); if (integral_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); shear.x=(-tan(DegreesToRadians(fmod(x_shear,360.0)))); shear.y=tan(DegreesToRadians(fmod(y_shear,360.0))); if ((shear.x == 0.0) && (shear.y == 0.0)) return(integral_image); if (SetImageStorageClass(integral_image,DirectClass,exception) == MagickFalse) { integral_image=DestroyImage(integral_image); return(integral_image); } if (integral_image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel,exception); /* Compute image size. */ bounds.width=image->columns+CastDoubleToLong(floor(fabs(shear.x)* image->rows+0.5)); bounds.x=CastDoubleToLong(ceil((double) image->columns+((fabs(shear.x)* image->rows)-image->columns)/2.0-0.5)); bounds.y=CastDoubleToLong(ceil((double) image->rows+((fabs(shear.y)* bounds.width)-image->rows)/2.0-0.5)); /* Surround image with border. */ integral_image->border_color=integral_image->background_color; integral_image->compose=CopyCompositeOp; border_info.width=(size_t) bounds.x; border_info.height=(size_t) bounds.y; shear_image=BorderImage(integral_image,&border_info,image->compose,exception); integral_image=DestroyImage(integral_image); if (shear_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); /* Shear the image. 
*/ if (shear_image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(shear_image,OpaqueAlphaChannel,exception); status=XShearImage(shear_image,shear.x,image->columns,image->rows,bounds.x, (ssize_t) (shear_image->rows-image->rows)/2,exception); if (status == MagickFalse) { shear_image=DestroyImage(shear_image); return((Image *) NULL); } status=YShearImage(shear_image,shear.y,bounds.width,image->rows,(ssize_t) (shear_image->columns-bounds.width)/2,bounds.y,exception); if (status == MagickFalse) { shear_image=DestroyImage(shear_image); return((Image *) NULL); } status=CropToFitImage(&shear_image,shear.x,shear.y,(MagickRealType) image->columns,(MagickRealType) image->rows,MagickFalse,exception); shear_image->alpha_trait=image->alpha_trait; shear_image->compose=image->compose; shear_image->page.width=0; shear_image->page.height=0; if (status == MagickFalse) shear_image=DestroyImage(shear_image); return(shear_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h e a r R o t a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShearRotateImage() creates a new image that is a rotated copy of an existing % one. Positive angles rotate counter-clockwise (right-hand rule), while % negative angles rotate clockwise. Rotated images are usually larger than % the originals and have 'empty' triangular corners. X axis. Empty % triangles left over from shearing the image are filled with the background % color defined by member 'background_color' of the image. ShearRotateImage % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % ShearRotateImage() is based on the paper "A Fast Algorithm for General % Raster Rotatation" by Alan W. Paeth. ShearRotateImage is adapted from a % similar method based on the Paeth paper written by Michael Halle of the % Spatial Imaging Group, MIT Media Lab. 
% % The format of the ShearRotateImage method is: % % Image *ShearRotateImage(const Image *image,const double degrees, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o degrees: Specifies the number of degrees to rotate the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ShearRotateImage(const Image *image,const double degrees, ExceptionInfo *exception) { Image *integral_image, *rotate_image; MagickBooleanType status; MagickRealType angle; PointInfo shear; RectangleInfo border_info, bounds; size_t height, rotations, shear_width, width; /* Adjust rotation angle. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); angle=fmod(degrees,360.0); if (angle < -45.0) angle+=360.0; for (rotations=0; angle > 45.0; rotations++) angle-=90.0; rotations%=4; /* Calculate shear equations. */ integral_image=IntegralRotateImage(image,rotations,exception); if (integral_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); shear.x=(-tan((double) DegreesToRadians(angle)/2.0)); shear.y=sin((double) DegreesToRadians(angle)); if ((shear.x == 0.0) && (shear.y == 0.0)) return(integral_image); if (SetImageStorageClass(integral_image,DirectClass,exception) == MagickFalse) { integral_image=DestroyImage(integral_image); return(integral_image); } if (integral_image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel,exception); /* Compute maximum bounds for 3 shear operations. 
*/ width=integral_image->columns; height=integral_image->rows; bounds.width=(size_t) floor(fabs((double) height*shear.x)+width+0.5); bounds.height=(size_t) floor(fabs((double) bounds.width*shear.y)+height+0.5); shear_width=(size_t) floor(fabs((double) bounds.height*shear.x)+ bounds.width+0.5); bounds.x=CastDoubleToLong(floor((double) ((shear_width > bounds.width) ? width : bounds.width-shear_width+2)/2.0+0.5)); bounds.y=CastDoubleToLong(floor(((double) bounds.height-height+2)/2.0+0.5)); /* Surround image with a border. */ integral_image->border_color=integral_image->background_color; integral_image->compose=CopyCompositeOp; border_info.width=(size_t) bounds.x; border_info.height=(size_t) bounds.y; rotate_image=BorderImage(integral_image,&border_info,image->compose, exception); integral_image=DestroyImage(integral_image); if (rotate_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); /* Rotate the image. */ status=XShearImage(rotate_image,shear.x,width,height,bounds.x,(ssize_t) (rotate_image->rows-height)/2,exception); if (status == MagickFalse) { rotate_image=DestroyImage(rotate_image); return((Image *) NULL); } status=YShearImage(rotate_image,shear.y,bounds.width,height,(ssize_t) (rotate_image->columns-bounds.width)/2,bounds.y,exception); if (status == MagickFalse) { rotate_image=DestroyImage(rotate_image); return((Image *) NULL); } status=XShearImage(rotate_image,shear.x,bounds.width,bounds.height,(ssize_t) (rotate_image->columns-bounds.width)/2,(ssize_t) (rotate_image->rows- bounds.height)/2,exception); if (status == MagickFalse) { rotate_image=DestroyImage(rotate_image); return((Image *) NULL); } status=CropToFitImage(&rotate_image,shear.x,shear.y,(MagickRealType) width, (MagickRealType) height,MagickTrue,exception); rotate_image->alpha_trait=image->alpha_trait; rotate_image->compose=image->compose; rotate_image->page.width=0; rotate_image->page.height=0; if (status == MagickFalse) rotate_image=DestroyImage(rotate_image); 
return(rotate_image); }
feature.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF EEEEE AAA TTTTT U U RRRR EEEEE % % F E A A T U U R R E % % FFF EEE AAAAA T U U RRRR EEE % % F E A A T U U R R E % % F EEEEE A A T UUU R R EEEEE % % % % % % MagickCore Image Feature Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/animate.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/compress.h" #include "MagickCore/constitute.h" #include "MagickCore/display.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/feature.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/list.h" #include "MagickCore/image-private.h" #include "MagickCore/magic.h" #include "MagickCore/magick.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/morphology-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/semaphore.h" #include "MagickCore/signature-private.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/timer.h" #include "MagickCore/utility.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C a n n y E d g e I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CannyEdgeImage() uses a multi-stage algorithm to detect a wide range of % edges in images. % % The format of the CannyEdgeImage method is: % % Image *CannyEdgeImage(const Image *image,const double radius, % const double sigma,const double lower_percent, % const double upper_percent,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the gaussian smoothing filter. % % o sigma: the sigma of the gaussian smoothing filter. % % o lower_percent: percentage of edge pixels in the lower threshold. % % o upper_percent: percentage of edge pixels in the upper threshold. % % o exception: return any errors or warnings in this structure. % */ typedef struct _CannyInfo { double magnitude, intensity; int orientation; ssize_t x, y; } CannyInfo; static inline MagickBooleanType IsAuthenticPixel(const Image *image, const ssize_t x,const ssize_t y) { if ((x < 0) || (x >= (ssize_t) image->columns)) return(MagickFalse); if ((y < 0) || (y >= (ssize_t) image->rows)) return(MagickFalse); return(MagickTrue); } static MagickBooleanType TraceEdges(Image *edge_image,CacheView *edge_view, MatrixInfo *canny_cache,const ssize_t x,const ssize_t y, const double lower_threshold,ExceptionInfo *exception) { CannyInfo edge, pixel; MagickBooleanType status; register Quantum *q; register ssize_t i; q=GetCacheViewAuthenticPixels(edge_view,x,y,1,1,exception); if (q == (Quantum *) NULL) return(MagickFalse); *q=QuantumRange; status=SyncCacheViewAuthenticPixels(edge_view,exception); if (status == MagickFalse) return(MagickFalse); if (GetMatrixElement(canny_cache,0,0,&edge) == MagickFalse) return(MagickFalse); edge.x=x; edge.y=y; if (SetMatrixElement(canny_cache,0,0,&edge) == MagickFalse) return(MagickFalse); for (i=1; i != 0; ) { ssize_t v; i--; status=GetMatrixElement(canny_cache,i,0,&edge); if (status == MagickFalse) return(MagickFalse); for (v=(-1); v <= 
1; v++) { ssize_t u; for (u=(-1); u <= 1; u++) { if ((u == 0) && (v == 0)) continue; if (IsAuthenticPixel(edge_image,edge.x+u,edge.y+v) == MagickFalse) continue; /* Not an edge if gradient value is below the lower threshold. */ q=GetCacheViewAuthenticPixels(edge_view,edge.x+u,edge.y+v,1,1, exception); if (q == (Quantum *) NULL) return(MagickFalse); status=GetMatrixElement(canny_cache,edge.x+u,edge.y+v,&pixel); if (status == MagickFalse) return(MagickFalse); if ((GetPixelIntensity(edge_image,q) == 0.0) && (pixel.intensity >= lower_threshold)) { *q=QuantumRange; status=SyncCacheViewAuthenticPixels(edge_view,exception); if (status == MagickFalse) return(MagickFalse); edge.x+=u; edge.y+=v; status=SetMatrixElement(canny_cache,i,0,&edge); if (status == MagickFalse) return(MagickFalse); i++; } } } } return(MagickTrue); } MagickExport Image *CannyEdgeImage(const Image *image,const double radius, const double sigma,const double lower_percent,const double upper_percent, ExceptionInfo *exception) { #define CannyEdgeImageTag "CannyEdge/Image" CacheView *edge_view; CannyInfo element; char geometry[MagickPathExtent]; double lower_threshold, max, min, upper_threshold; Image *edge_image; KernelInfo *kernel_info; MagickBooleanType status; MagickOffsetType progress; MatrixInfo *canny_cache; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); /* Filter out noise. 
*/ (void) FormatLocaleString(geometry,MagickPathExtent, "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma); kernel_info=AcquireKernelInfo(geometry,exception); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); edge_image=MorphologyImage(image,ConvolveMorphology,1,kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); if (edge_image == (Image *) NULL) return((Image *) NULL); if (TransformImageColorspace(edge_image,GRAYColorspace,exception) == MagickFalse) { edge_image=DestroyImage(edge_image); return((Image *) NULL); } (void) SetImageAlphaChannel(edge_image,OffAlphaChannel,exception); /* Find the intensity gradient of the image. */ canny_cache=AcquireMatrixInfo(edge_image->columns,edge_image->rows, sizeof(CannyInfo),exception); if (canny_cache == (MatrixInfo *) NULL) { edge_image=DestroyImage(edge_image); return((Image *) NULL); } status=MagickTrue; edge_view=AcquireVirtualCacheView(edge_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(edge_image,edge_image,edge_image->rows,1) #endif for (y=0; y < (ssize_t) edge_image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns+1,2, exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) edge_image->columns; x++) { CannyInfo pixel; double dx, dy; register const Quantum *magick_restrict kernel_pixels; ssize_t v; static double Gx[2][2] = { { -1.0, +1.0 }, { -1.0, +1.0 } }, Gy[2][2] = { { +1.0, +1.0 }, { -1.0, -1.0 } }; (void) memset(&pixel,0,sizeof(pixel)); dx=0.0; dy=0.0; kernel_pixels=p; for (v=0; v < 2; v++) { ssize_t u; for (u=0; u < 2; u++) { double intensity; intensity=GetPixelIntensity(edge_image,kernel_pixels+u); dx+=0.5*Gx[v][u]*intensity; dy+=0.5*Gy[v][u]*intensity; } 
kernel_pixels+=edge_image->columns+1; } pixel.magnitude=hypot(dx,dy); pixel.orientation=0; if (fabs(dx) > MagickEpsilon) { double slope; slope=dy/dx; if (slope < 0.0) { if (slope < -2.41421356237) pixel.orientation=0; else if (slope < -0.414213562373) pixel.orientation=1; else pixel.orientation=2; } else { if (slope > 2.41421356237) pixel.orientation=0; else if (slope > 0.414213562373) pixel.orientation=3; else pixel.orientation=2; } } if (SetMatrixElement(canny_cache,x,y,&pixel) == MagickFalse) continue; p+=GetPixelChannels(edge_image); } } edge_view=DestroyCacheView(edge_view); /* Non-maxima suppression, remove pixels that are not considered to be part of an edge. */ progress=0; (void) GetMatrixElement(canny_cache,0,0,&element); max=element.intensity; min=element.intensity; edge_view=AcquireAuthenticCacheView(edge_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(edge_image,edge_image,edge_image->rows,1) #endif for (y=0; y < (ssize_t) edge_image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(edge_view,0,y,edge_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) edge_image->columns; x++) { CannyInfo alpha_pixel, beta_pixel, pixel; (void) GetMatrixElement(canny_cache,x,y,&pixel); switch (pixel.orientation) { case 0: default: { /* 0 degrees, north and south. */ (void) GetMatrixElement(canny_cache,x,y-1,&alpha_pixel); (void) GetMatrixElement(canny_cache,x,y+1,&beta_pixel); break; } case 1: { /* 45 degrees, northwest and southeast. */ (void) GetMatrixElement(canny_cache,x-1,y-1,&alpha_pixel); (void) GetMatrixElement(canny_cache,x+1,y+1,&beta_pixel); break; } case 2: { /* 90 degrees, east and west. 
*/ (void) GetMatrixElement(canny_cache,x-1,y,&alpha_pixel); (void) GetMatrixElement(canny_cache,x+1,y,&beta_pixel); break; } case 3: { /* 135 degrees, northeast and southwest. */ (void) GetMatrixElement(canny_cache,x+1,y-1,&beta_pixel); (void) GetMatrixElement(canny_cache,x-1,y+1,&alpha_pixel); break; } } pixel.intensity=pixel.magnitude; if ((pixel.magnitude < alpha_pixel.magnitude) || (pixel.magnitude < beta_pixel.magnitude)) pixel.intensity=0; (void) SetMatrixElement(canny_cache,x,y,&pixel); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CannyEdgeImage) #endif { if (pixel.intensity < min) min=pixel.intensity; if (pixel.intensity > max) max=pixel.intensity; } *q=0; q+=GetPixelChannels(edge_image); } if (SyncCacheViewAuthenticPixels(edge_view,exception) == MagickFalse) status=MagickFalse; } edge_view=DestroyCacheView(edge_view); /* Estimate hysteresis threshold. */ lower_threshold=lower_percent*(max-min)+min; upper_threshold=upper_percent*(max-min)+min; /* Hysteresis threshold. */ edge_view=AcquireAuthenticCacheView(edge_image,exception); for (y=0; y < (ssize_t) edge_image->rows; y++) { register ssize_t x; if (status == MagickFalse) continue; for (x=0; x < (ssize_t) edge_image->columns; x++) { CannyInfo pixel; register const Quantum *magick_restrict p; /* Edge if pixel gradient higher than upper threshold. 
*/ p=GetCacheViewVirtualPixels(edge_view,x,y,1,1,exception); if (p == (const Quantum *) NULL) continue; status=GetMatrixElement(canny_cache,x,y,&pixel); if (status == MagickFalse) continue; if ((GetPixelIntensity(edge_image,p) == 0.0) && (pixel.intensity >= upper_threshold)) status=TraceEdges(edge_image,edge_view,canny_cache,x,y,lower_threshold, exception); } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CannyEdgeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } edge_view=DestroyCacheView(edge_view); /* Free resources. */ canny_cache=DestroyMatrixInfo(canny_cache); return(edge_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e F e a t u r e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageFeatures() returns features for each channel in the image in % each of four directions (horizontal, vertical, left and right diagonals) % for the specified distance. The features include the angular second % moment, contrast, correlation, sum of squares: variance, inverse difference % moment, sum average, sum varience, sum entropy, entropy, difference variance, % difference entropy, information measures of correlation 1, information % measures of correlation 2, and maximum correlation coefficient. You can % access the red channel contrast, for example, like this: % % channel_features=GetImageFeatures(image,1,exception); % contrast=channel_features[RedPixelChannel].contrast[0]; % % Use MagickRelinquishMemory() to free the features buffer. % % The format of the GetImageFeatures method is: % % ChannelFeatures *GetImageFeatures(const Image *image, % const size_t distance,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
% % o distance: the distance. % % o exception: return any errors or warnings in this structure. % */ static inline double MagickLog10(const double x) { #define Log10Epsilon (1.0e-11) if (fabs(x) < Log10Epsilon) return(log10(Log10Epsilon)); return(log10(fabs(x))); } MagickExport ChannelFeatures *GetImageFeatures(const Image *image, const size_t distance,ExceptionInfo *exception) { typedef struct _ChannelStatistics { PixelInfo direction[4]; /* horizontal, vertical, left and right diagonals */ } ChannelStatistics; CacheView *image_view; ChannelFeatures *channel_features; ChannelStatistics **cooccurrence, correlation, *density_x, *density_xy, *density_y, entropy_x, entropy_xy, entropy_xy1, entropy_xy2, entropy_y, mean, **Q, *sum, sum_squares, variance; PixelPacket gray, *grays; MagickBooleanType status; register ssize_t i, r; size_t length; unsigned int number_grays; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->columns < (distance+1)) || (image->rows < (distance+1))) return((ChannelFeatures *) NULL); length=MaxPixelChannels+1UL; channel_features=(ChannelFeatures *) AcquireQuantumMemory(length, sizeof(*channel_features)); if (channel_features == (ChannelFeatures *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(channel_features,0,length* sizeof(*channel_features)); /* Form grays. 
*/ grays=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*grays)); if (grays == (PixelPacket *) NULL) { channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } for (i=0; i <= (ssize_t) MaxMap; i++) { grays[i].red=(~0U); grays[i].green=(~0U); grays[i].blue=(~0U); grays[i].alpha=(~0U); grays[i].black=(~0U); } status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (r=0; r < (ssize_t) image->rows; r++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,r,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { grays[ScaleQuantumToMap(GetPixelRed(image,p))].red= ScaleQuantumToMap(GetPixelRed(image,p)); grays[ScaleQuantumToMap(GetPixelGreen(image,p))].green= ScaleQuantumToMap(GetPixelGreen(image,p)); grays[ScaleQuantumToMap(GetPixelBlue(image,p))].blue= ScaleQuantumToMap(GetPixelBlue(image,p)); if (image->colorspace == CMYKColorspace) grays[ScaleQuantumToMap(GetPixelBlack(image,p))].black= ScaleQuantumToMap(GetPixelBlack(image,p)); if (image->alpha_trait != UndefinedPixelTrait) grays[ScaleQuantumToMap(GetPixelAlpha(image,p))].alpha= ScaleQuantumToMap(GetPixelAlpha(image,p)); p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); if (status == MagickFalse) { grays=(PixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); return(channel_features); } (void) memset(&gray,0,sizeof(gray)); for (i=0; i <= (ssize_t) MaxMap; i++) { if (grays[i].red != ~0U) 
grays[gray.red++].red=grays[i].red; if (grays[i].green != ~0U) grays[gray.green++].green=grays[i].green; if (grays[i].blue != ~0U) grays[gray.blue++].blue=grays[i].blue; if (image->colorspace == CMYKColorspace) if (grays[i].black != ~0U) grays[gray.black++].black=grays[i].black; if (image->alpha_trait != UndefinedPixelTrait) if (grays[i].alpha != ~0U) grays[gray.alpha++].alpha=grays[i].alpha; } /* Allocate spatial dependence matrix. */ number_grays=gray.red; if (gray.green > number_grays) number_grays=gray.green; if (gray.blue > number_grays) number_grays=gray.blue; if (image->colorspace == CMYKColorspace) if (gray.black > number_grays) number_grays=gray.black; if (image->alpha_trait != UndefinedPixelTrait) if (gray.alpha > number_grays) number_grays=gray.alpha; cooccurrence=(ChannelStatistics **) AcquireQuantumMemory(number_grays, sizeof(*cooccurrence)); density_x=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1), sizeof(*density_x)); density_xy=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1), sizeof(*density_xy)); density_y=(ChannelStatistics *) AcquireQuantumMemory(2*(number_grays+1), sizeof(*density_y)); Q=(ChannelStatistics **) AcquireQuantumMemory(number_grays,sizeof(*Q)); sum=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(*sum)); if ((cooccurrence == (ChannelStatistics **) NULL) || (density_x == (ChannelStatistics *) NULL) || (density_xy == (ChannelStatistics *) NULL) || (density_y == (ChannelStatistics *) NULL) || (Q == (ChannelStatistics **) NULL) || (sum == (ChannelStatistics *) NULL)) { if (Q != (ChannelStatistics **) NULL) { for (i=0; i < (ssize_t) number_grays; i++) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); Q=(ChannelStatistics **) RelinquishMagickMemory(Q); } if (sum != (ChannelStatistics *) NULL) sum=(ChannelStatistics *) RelinquishMagickMemory(sum); if (density_y != (ChannelStatistics *) NULL) density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); if (density_xy != 
(ChannelStatistics *) NULL) density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); if (density_x != (ChannelStatistics *) NULL) density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); if (cooccurrence != (ChannelStatistics **) NULL) { for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory( cooccurrence); } grays=(PixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } (void) memset(&correlation,0,sizeof(correlation)); (void) memset(density_x,0,2*(number_grays+1)*sizeof(*density_x)); (void) memset(density_xy,0,2*(number_grays+1)*sizeof(*density_xy)); (void) memset(density_y,0,2*(number_grays+1)*sizeof(*density_y)); (void) memset(&mean,0,sizeof(mean)); (void) memset(sum,0,number_grays*sizeof(*sum)); (void) memset(&sum_squares,0,sizeof(sum_squares)); (void) memset(density_xy,0,2*number_grays*sizeof(*density_xy)); (void) memset(&entropy_x,0,sizeof(entropy_x)); (void) memset(&entropy_xy,0,sizeof(entropy_xy)); (void) memset(&entropy_xy1,0,sizeof(entropy_xy1)); (void) memset(&entropy_xy2,0,sizeof(entropy_xy2)); (void) memset(&entropy_y,0,sizeof(entropy_y)); (void) memset(&variance,0,sizeof(variance)); for (i=0; i < (ssize_t) number_grays; i++) { cooccurrence[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays, sizeof(**cooccurrence)); Q[i]=(ChannelStatistics *) AcquireQuantumMemory(number_grays,sizeof(**Q)); if ((cooccurrence[i] == (ChannelStatistics *) NULL) || (Q[i] == (ChannelStatistics *) NULL)) break; (void) memset(cooccurrence[i],0,number_grays* sizeof(**cooccurrence)); (void) memset(Q[i],0,number_grays*sizeof(**Q)); } if (i < (ssize_t) number_grays) { for (i--; i >= 0; i--) { if (Q[i] != 
(ChannelStatistics *) NULL) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); if (cooccurrence[i] != (ChannelStatistics *) NULL) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); } Q=(ChannelStatistics **) RelinquishMagickMemory(Q); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); sum=(ChannelStatistics *) RelinquishMagickMemory(sum); density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); grays=(PixelPacket *) RelinquishMagickMemory(grays); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } /* Initialize spatial dependence matrix. */ status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); for (r=0; r < (ssize_t) image->rows; r++) { register const Quantum *magick_restrict p; register ssize_t x; ssize_t offset, u, v; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-(ssize_t) distance,r,image->columns+ 2*distance,distance+2,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } p+=distance*GetPixelChannels(image);; for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < 4; i++) { switch (i) { case 0: default: { /* Horizontal adjacency. */ offset=(ssize_t) distance; break; } case 1: { /* Vertical adjacency. */ offset=(ssize_t) (image->columns+2*distance); break; } case 2: { /* Right diagonal adjacency. */ offset=(ssize_t) ((image->columns+2*distance)-distance); break; } case 3: { /* Left diagonal adjacency. 
*/ offset=(ssize_t) ((image->columns+2*distance)+distance); break; } } u=0; v=0; while (grays[u].red != ScaleQuantumToMap(GetPixelRed(image,p))) u++; while (grays[v].red != ScaleQuantumToMap(GetPixelRed(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].red++; cooccurrence[v][u].direction[i].red++; u=0; v=0; while (grays[u].green != ScaleQuantumToMap(GetPixelGreen(image,p))) u++; while (grays[v].green != ScaleQuantumToMap(GetPixelGreen(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].green++; cooccurrence[v][u].direction[i].green++; u=0; v=0; while (grays[u].blue != ScaleQuantumToMap(GetPixelBlue(image,p))) u++; while (grays[v].blue != ScaleQuantumToMap(GetPixelBlue(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].blue++; cooccurrence[v][u].direction[i].blue++; if (image->colorspace == CMYKColorspace) { u=0; v=0; while (grays[u].black != ScaleQuantumToMap(GetPixelBlack(image,p))) u++; while (grays[v].black != ScaleQuantumToMap(GetPixelBlack(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].black++; cooccurrence[v][u].direction[i].black++; } if (image->alpha_trait != UndefinedPixelTrait) { u=0; v=0; while (grays[u].alpha != ScaleQuantumToMap(GetPixelAlpha(image,p))) u++; while (grays[v].alpha != ScaleQuantumToMap(GetPixelAlpha(image,p+offset*GetPixelChannels(image)))) v++; cooccurrence[u][v].direction[i].alpha++; cooccurrence[v][u].direction[i].alpha++; } } p+=GetPixelChannels(image); } } grays=(PixelPacket *) RelinquishMagickMemory(grays); image_view=DestroyCacheView(image_view); if (status == MagickFalse) { for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); channel_features=(ChannelFeatures *) RelinquishMagickMemory( channel_features); (void) ThrowMagickException(exception,GetMagickModule(), 
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(channel_features); } /* Normalize spatial dependence matrix. */ for (i=0; i < 4; i++) { double normalize; register ssize_t y; switch (i) { case 0: default: { /* Horizontal adjacency. */ normalize=2.0*image->rows*(image->columns-distance); break; } case 1: { /* Vertical adjacency. */ normalize=2.0*(image->rows-distance)*image->columns; break; } case 2: { /* Right diagonal adjacency. */ normalize=2.0*(image->rows-distance)*(image->columns-distance); break; } case 3: { /* Left diagonal adjacency. */ normalize=2.0*(image->rows-distance)*(image->columns-distance); break; } } normalize=PerceptibleReciprocal(normalize); for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { cooccurrence[x][y].direction[i].red*=normalize; cooccurrence[x][y].direction[i].green*=normalize; cooccurrence[x][y].direction[i].blue*=normalize; if (image->colorspace == CMYKColorspace) cooccurrence[x][y].direction[i].black*=normalize; if (image->alpha_trait != UndefinedPixelTrait) cooccurrence[x][y].direction[i].alpha*=normalize; } } } /* Compute texture features. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { register ssize_t y; for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Angular second moment: measure of homogeneity of the image. 
*/ channel_features[RedPixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].red* cooccurrence[x][y].direction[i].red; channel_features[GreenPixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].green* cooccurrence[x][y].direction[i].green; channel_features[BluePixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].blue* cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].black* cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].angular_second_moment[i]+= cooccurrence[x][y].direction[i].alpha* cooccurrence[x][y].direction[i].alpha; /* Correlation: measure of linear-dependencies in the image. */ sum[y].direction[i].red+=cooccurrence[x][y].direction[i].red; sum[y].direction[i].green+=cooccurrence[x][y].direction[i].green; sum[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) sum[y].direction[i].black+=cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) sum[y].direction[i].alpha+=cooccurrence[x][y].direction[i].alpha; correlation.direction[i].red+=x*y*cooccurrence[x][y].direction[i].red; correlation.direction[i].green+=x*y* cooccurrence[x][y].direction[i].green; correlation.direction[i].blue+=x*y* cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) correlation.direction[i].black+=x*y* cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) correlation.direction[i].alpha+=x*y* cooccurrence[x][y].direction[i].alpha; /* Inverse Difference Moment. 
*/ channel_features[RedPixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].red/((y-x)*(y-x)+1); channel_features[GreenPixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].green/((y-x)*(y-x)+1); channel_features[BluePixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].blue/((y-x)*(y-x)+1); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].black/((y-x)*(y-x)+1); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].inverse_difference_moment[i]+= cooccurrence[x][y].direction[i].alpha/((y-x)*(y-x)+1); /* Sum average. */ density_xy[y+x+2].direction[i].red+= cooccurrence[x][y].direction[i].red; density_xy[y+x+2].direction[i].green+= cooccurrence[x][y].direction[i].green; density_xy[y+x+2].direction[i].blue+= cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_xy[y+x+2].direction[i].black+= cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) density_xy[y+x+2].direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; /* Entropy. 
*/ channel_features[RedPixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].red* MagickLog10(cooccurrence[x][y].direction[i].red); channel_features[GreenPixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].green* MagickLog10(cooccurrence[x][y].direction[i].green); channel_features[BluePixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].blue* MagickLog10(cooccurrence[x][y].direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].black* MagickLog10(cooccurrence[x][y].direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].entropy[i]-= cooccurrence[x][y].direction[i].alpha* MagickLog10(cooccurrence[x][y].direction[i].alpha); /* Information Measures of Correlation. */ density_x[x].direction[i].red+=cooccurrence[x][y].direction[i].red; density_x[x].direction[i].green+=cooccurrence[x][y].direction[i].green; density_x[x].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->alpha_trait != UndefinedPixelTrait) density_x[x].direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; if (image->colorspace == CMYKColorspace) density_x[x].direction[i].black+= cooccurrence[x][y].direction[i].black; density_y[y].direction[i].red+=cooccurrence[x][y].direction[i].red; density_y[y].direction[i].green+=cooccurrence[x][y].direction[i].green; density_y[y].direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_y[y].direction[i].black+= cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) density_y[y].direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; } mean.direction[i].red+=y*sum[y].direction[i].red; sum_squares.direction[i].red+=y*y*sum[y].direction[i].red; mean.direction[i].green+=y*sum[y].direction[i].green; sum_squares.direction[i].green+=y*y*sum[y].direction[i].green; mean.direction[i].blue+=y*sum[y].direction[i].blue; 
sum_squares.direction[i].blue+=y*y*sum[y].direction[i].blue; if (image->colorspace == CMYKColorspace) { mean.direction[i].black+=y*sum[y].direction[i].black; sum_squares.direction[i].black+=y*y*sum[y].direction[i].black; } if (image->alpha_trait != UndefinedPixelTrait) { mean.direction[i].alpha+=y*sum[y].direction[i].alpha; sum_squares.direction[i].alpha+=y*y*sum[y].direction[i].alpha; } } /* Correlation: measure of linear-dependencies in the image. */ channel_features[RedPixelChannel].correlation[i]= (correlation.direction[i].red-mean.direction[i].red* mean.direction[i].red)/(sqrt(sum_squares.direction[i].red- (mean.direction[i].red*mean.direction[i].red))*sqrt( sum_squares.direction[i].red-(mean.direction[i].red* mean.direction[i].red))); channel_features[GreenPixelChannel].correlation[i]= (correlation.direction[i].green-mean.direction[i].green* mean.direction[i].green)/(sqrt(sum_squares.direction[i].green- (mean.direction[i].green*mean.direction[i].green))*sqrt( sum_squares.direction[i].green-(mean.direction[i].green* mean.direction[i].green))); channel_features[BluePixelChannel].correlation[i]= (correlation.direction[i].blue-mean.direction[i].blue* mean.direction[i].blue)/(sqrt(sum_squares.direction[i].blue- (mean.direction[i].blue*mean.direction[i].blue))*sqrt( sum_squares.direction[i].blue-(mean.direction[i].blue* mean.direction[i].blue))); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].correlation[i]= (correlation.direction[i].black-mean.direction[i].black* mean.direction[i].black)/(sqrt(sum_squares.direction[i].black- (mean.direction[i].black*mean.direction[i].black))*sqrt( sum_squares.direction[i].black-(mean.direction[i].black* mean.direction[i].black))); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].correlation[i]= (correlation.direction[i].alpha-mean.direction[i].alpha* mean.direction[i].alpha)/(sqrt(sum_squares.direction[i].alpha- (mean.direction[i].alpha*mean.direction[i].alpha))*sqrt( 
sum_squares.direction[i].alpha-(mean.direction[i].alpha* mean.direction[i].alpha))); } /* Compute more texture features. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { register ssize_t x; for (x=2; x < (ssize_t) (2*number_grays); x++) { /* Sum average. */ channel_features[RedPixelChannel].sum_average[i]+= x*density_xy[x].direction[i].red; channel_features[GreenPixelChannel].sum_average[i]+= x*density_xy[x].direction[i].green; channel_features[BluePixelChannel].sum_average[i]+= x*density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].sum_average[i]+= x*density_xy[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].sum_average[i]+= x*density_xy[x].direction[i].alpha; /* Sum entropy. */ channel_features[RedPixelChannel].sum_entropy[i]-= density_xy[x].direction[i].red* MagickLog10(density_xy[x].direction[i].red); channel_features[GreenPixelChannel].sum_entropy[i]-= density_xy[x].direction[i].green* MagickLog10(density_xy[x].direction[i].green); channel_features[BluePixelChannel].sum_entropy[i]-= density_xy[x].direction[i].blue* MagickLog10(density_xy[x].direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].sum_entropy[i]-= density_xy[x].direction[i].black* MagickLog10(density_xy[x].direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].sum_entropy[i]-= density_xy[x].direction[i].alpha* MagickLog10(density_xy[x].direction[i].alpha); /* Sum variance. 
*/ channel_features[RedPixelChannel].sum_variance[i]+= (x-channel_features[RedPixelChannel].sum_entropy[i])* (x-channel_features[RedPixelChannel].sum_entropy[i])* density_xy[x].direction[i].red; channel_features[GreenPixelChannel].sum_variance[i]+= (x-channel_features[GreenPixelChannel].sum_entropy[i])* (x-channel_features[GreenPixelChannel].sum_entropy[i])* density_xy[x].direction[i].green; channel_features[BluePixelChannel].sum_variance[i]+= (x-channel_features[BluePixelChannel].sum_entropy[i])* (x-channel_features[BluePixelChannel].sum_entropy[i])* density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].sum_variance[i]+= (x-channel_features[BlackPixelChannel].sum_entropy[i])* (x-channel_features[BlackPixelChannel].sum_entropy[i])* density_xy[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].sum_variance[i]+= (x-channel_features[AlphaPixelChannel].sum_entropy[i])* (x-channel_features[AlphaPixelChannel].sum_entropy[i])* density_xy[x].direction[i].alpha; } } /* Compute more texture features. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { register ssize_t y; for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Sum of Squares: Variance */ variance.direction[i].red+=(y-mean.direction[i].red+1)* (y-mean.direction[i].red+1)*cooccurrence[x][y].direction[i].red; variance.direction[i].green+=(y-mean.direction[i].green+1)* (y-mean.direction[i].green+1)*cooccurrence[x][y].direction[i].green; variance.direction[i].blue+=(y-mean.direction[i].blue+1)* (y-mean.direction[i].blue+1)*cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) variance.direction[i].black+=(y-mean.direction[i].black+1)* (y-mean.direction[i].black+1)*cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) variance.direction[i].alpha+=(y-mean.direction[i].alpha+1)* (y-mean.direction[i].alpha+1)* cooccurrence[x][y].direction[i].alpha; /* Sum average / Difference Variance. */ density_xy[MagickAbsoluteValue(y-x)].direction[i].red+= cooccurrence[x][y].direction[i].red; density_xy[MagickAbsoluteValue(y-x)].direction[i].green+= cooccurrence[x][y].direction[i].green; density_xy[MagickAbsoluteValue(y-x)].direction[i].blue+= cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) density_xy[MagickAbsoluteValue(y-x)].direction[i].black+= cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) density_xy[MagickAbsoluteValue(y-x)].direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; /* Information Measures of Correlation. 
*/ entropy_xy.direction[i].red-=cooccurrence[x][y].direction[i].red* MagickLog10(cooccurrence[x][y].direction[i].red); entropy_xy.direction[i].green-=cooccurrence[x][y].direction[i].green* MagickLog10(cooccurrence[x][y].direction[i].green); entropy_xy.direction[i].blue-=cooccurrence[x][y].direction[i].blue* MagickLog10(cooccurrence[x][y].direction[i].blue); if (image->colorspace == CMYKColorspace) entropy_xy.direction[i].black-=cooccurrence[x][y].direction[i].black* MagickLog10(cooccurrence[x][y].direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) entropy_xy.direction[i].alpha-= cooccurrence[x][y].direction[i].alpha*MagickLog10( cooccurrence[x][y].direction[i].alpha); entropy_xy1.direction[i].red-=(cooccurrence[x][y].direction[i].red* MagickLog10(density_x[x].direction[i].red*density_y[y].direction[i].red)); entropy_xy1.direction[i].green-=(cooccurrence[x][y].direction[i].green* MagickLog10(density_x[x].direction[i].green* density_y[y].direction[i].green)); entropy_xy1.direction[i].blue-=(cooccurrence[x][y].direction[i].blue* MagickLog10(density_x[x].direction[i].blue*density_y[y].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_xy1.direction[i].black-=( cooccurrence[x][y].direction[i].black*MagickLog10( density_x[x].direction[i].black*density_y[y].direction[i].black)); if (image->alpha_trait != UndefinedPixelTrait) entropy_xy1.direction[i].alpha-=( cooccurrence[x][y].direction[i].alpha*MagickLog10( density_x[x].direction[i].alpha*density_y[y].direction[i].alpha)); entropy_xy2.direction[i].red-=(density_x[x].direction[i].red* density_y[y].direction[i].red*MagickLog10(density_x[x].direction[i].red* density_y[y].direction[i].red)); entropy_xy2.direction[i].green-=(density_x[x].direction[i].green* density_y[y].direction[i].green*MagickLog10(density_x[x].direction[i].green* density_y[y].direction[i].green)); entropy_xy2.direction[i].blue-=(density_x[x].direction[i].blue* 
density_y[y].direction[i].blue*MagickLog10(density_x[x].direction[i].blue* density_y[y].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_xy2.direction[i].black-=(density_x[x].direction[i].black* density_y[y].direction[i].black*MagickLog10( density_x[x].direction[i].black*density_y[y].direction[i].black)); if (image->alpha_trait != UndefinedPixelTrait) entropy_xy2.direction[i].alpha-=(density_x[x].direction[i].alpha* density_y[y].direction[i].alpha*MagickLog10( density_x[x].direction[i].alpha*density_y[y].direction[i].alpha)); } } channel_features[RedPixelChannel].variance_sum_of_squares[i]= variance.direction[i].red; channel_features[GreenPixelChannel].variance_sum_of_squares[i]= variance.direction[i].green; channel_features[BluePixelChannel].variance_sum_of_squares[i]= variance.direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].variance_sum_of_squares[i]= variance.direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].variance_sum_of_squares[i]= variance.direction[i].alpha; } /* Compute more texture features. */ (void) memset(&variance,0,sizeof(variance)); (void) memset(&sum_squares,0,sizeof(sum_squares)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Difference variance. 
*/ variance.direction[i].red+=density_xy[x].direction[i].red; variance.direction[i].green+=density_xy[x].direction[i].green; variance.direction[i].blue+=density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) variance.direction[i].black+=density_xy[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) variance.direction[i].alpha+=density_xy[x].direction[i].alpha; sum_squares.direction[i].red+=density_xy[x].direction[i].red* density_xy[x].direction[i].red; sum_squares.direction[i].green+=density_xy[x].direction[i].green* density_xy[x].direction[i].green; sum_squares.direction[i].blue+=density_xy[x].direction[i].blue* density_xy[x].direction[i].blue; if (image->colorspace == CMYKColorspace) sum_squares.direction[i].black+=density_xy[x].direction[i].black* density_xy[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) sum_squares.direction[i].alpha+=density_xy[x].direction[i].alpha* density_xy[x].direction[i].alpha; /* Difference entropy. */ channel_features[RedPixelChannel].difference_entropy[i]-= density_xy[x].direction[i].red* MagickLog10(density_xy[x].direction[i].red); channel_features[GreenPixelChannel].difference_entropy[i]-= density_xy[x].direction[i].green* MagickLog10(density_xy[x].direction[i].green); channel_features[BluePixelChannel].difference_entropy[i]-= density_xy[x].direction[i].blue* MagickLog10(density_xy[x].direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].difference_entropy[i]-= density_xy[x].direction[i].black* MagickLog10(density_xy[x].direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].difference_entropy[i]-= density_xy[x].direction[i].alpha* MagickLog10(density_xy[x].direction[i].alpha); /* Information Measures of Correlation. 
*/ entropy_x.direction[i].red-=(density_x[x].direction[i].red* MagickLog10(density_x[x].direction[i].red)); entropy_x.direction[i].green-=(density_x[x].direction[i].green* MagickLog10(density_x[x].direction[i].green)); entropy_x.direction[i].blue-=(density_x[x].direction[i].blue* MagickLog10(density_x[x].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_x.direction[i].black-=(density_x[x].direction[i].black* MagickLog10(density_x[x].direction[i].black)); if (image->alpha_trait != UndefinedPixelTrait) entropy_x.direction[i].alpha-=(density_x[x].direction[i].alpha* MagickLog10(density_x[x].direction[i].alpha)); entropy_y.direction[i].red-=(density_y[x].direction[i].red* MagickLog10(density_y[x].direction[i].red)); entropy_y.direction[i].green-=(density_y[x].direction[i].green* MagickLog10(density_y[x].direction[i].green)); entropy_y.direction[i].blue-=(density_y[x].direction[i].blue* MagickLog10(density_y[x].direction[i].blue)); if (image->colorspace == CMYKColorspace) entropy_y.direction[i].black-=(density_y[x].direction[i].black* MagickLog10(density_y[x].direction[i].black)); if (image->alpha_trait != UndefinedPixelTrait) entropy_y.direction[i].alpha-=(density_y[x].direction[i].alpha* MagickLog10(density_y[x].direction[i].alpha)); } /* Difference variance. 
*/ channel_features[RedPixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].red)- (variance.direction[i].red*variance.direction[i].red))/ ((double) number_grays*number_grays*number_grays*number_grays); channel_features[GreenPixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].green)- (variance.direction[i].green*variance.direction[i].green))/ ((double) number_grays*number_grays*number_grays*number_grays); channel_features[BluePixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].blue)- (variance.direction[i].blue*variance.direction[i].blue))/ ((double) number_grays*number_grays*number_grays*number_grays); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].black)- (variance.direction[i].black*variance.direction[i].black))/ ((double) number_grays*number_grays*number_grays*number_grays); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].difference_variance[i]= (((double) number_grays*number_grays*sum_squares.direction[i].alpha)- (variance.direction[i].alpha*variance.direction[i].alpha))/ ((double) number_grays*number_grays*number_grays*number_grays); /* Information Measures of Correlation. */ channel_features[RedPixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].red-entropy_xy1.direction[i].red)/ (entropy_x.direction[i].red > entropy_y.direction[i].red ? entropy_x.direction[i].red : entropy_y.direction[i].red); channel_features[GreenPixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].green-entropy_xy1.direction[i].green)/ (entropy_x.direction[i].green > entropy_y.direction[i].green ? 
entropy_x.direction[i].green : entropy_y.direction[i].green); channel_features[BluePixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].blue-entropy_xy1.direction[i].blue)/ (entropy_x.direction[i].blue > entropy_y.direction[i].blue ? entropy_x.direction[i].blue : entropy_y.direction[i].blue); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].black-entropy_xy1.direction[i].black)/ (entropy_x.direction[i].black > entropy_y.direction[i].black ? entropy_x.direction[i].black : entropy_y.direction[i].black); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].measure_of_correlation_1[i]= (entropy_xy.direction[i].alpha-entropy_xy1.direction[i].alpha)/ (entropy_x.direction[i].alpha > entropy_y.direction[i].alpha ? entropy_x.direction[i].alpha : entropy_y.direction[i].alpha); channel_features[RedPixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].red- entropy_xy.direction[i].red))))); channel_features[GreenPixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].green- entropy_xy.direction[i].green))))); channel_features[BluePixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].blue- entropy_xy.direction[i].blue))))); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].black- entropy_xy.direction[i].black))))); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].measure_of_correlation_2[i]= (sqrt(fabs(1.0-exp(-2.0*(double) (entropy_xy2.direction[i].alpha- entropy_xy.direction[i].alpha))))); } /* Compute more texture features. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,number_grays,1) #endif for (i=0; i < 4; i++) { ssize_t z; for (z=0; z < (ssize_t) number_grays; z++) { register ssize_t y; ChannelStatistics pixel; (void) memset(&pixel,0,sizeof(pixel)); for (y=0; y < (ssize_t) number_grays; y++) { register ssize_t x; for (x=0; x < (ssize_t) number_grays; x++) { /* Contrast: amount of local variations present in an image. */ if (((y-x) == z) || ((x-y) == z)) { pixel.direction[i].red+=cooccurrence[x][y].direction[i].red; pixel.direction[i].green+=cooccurrence[x][y].direction[i].green; pixel.direction[i].blue+=cooccurrence[x][y].direction[i].blue; if (image->colorspace == CMYKColorspace) pixel.direction[i].black+=cooccurrence[x][y].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) pixel.direction[i].alpha+= cooccurrence[x][y].direction[i].alpha; } /* Maximum Correlation Coefficient. */ if ((fabs(density_x[z].direction[i].red) > MagickEpsilon) && (fabs(density_y[x].direction[i].red) > MagickEpsilon)) Q[z][y].direction[i].red+=cooccurrence[z][x].direction[i].red* cooccurrence[y][x].direction[i].red/density_x[z].direction[i].red/ density_y[x].direction[i].red; if ((fabs(density_x[z].direction[i].green) > MagickEpsilon) && (fabs(density_y[x].direction[i].red) > MagickEpsilon)) Q[z][y].direction[i].green+=cooccurrence[z][x].direction[i].green* cooccurrence[y][x].direction[i].green/ density_x[z].direction[i].green/density_y[x].direction[i].red; if ((fabs(density_x[z].direction[i].blue) > MagickEpsilon) && (fabs(density_y[x].direction[i].blue) > MagickEpsilon)) Q[z][y].direction[i].blue+=cooccurrence[z][x].direction[i].blue* cooccurrence[y][x].direction[i].blue/ density_x[z].direction[i].blue/density_y[x].direction[i].blue; if (image->colorspace == CMYKColorspace) if ((fabs(density_x[z].direction[i].black) > MagickEpsilon) && (fabs(density_y[x].direction[i].black) > MagickEpsilon)) 
Q[z][y].direction[i].black+=cooccurrence[z][x].direction[i].black* cooccurrence[y][x].direction[i].black/ density_x[z].direction[i].black/density_y[x].direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) if ((fabs(density_x[z].direction[i].alpha) > MagickEpsilon) && (fabs(density_y[x].direction[i].alpha) > MagickEpsilon)) Q[z][y].direction[i].alpha+= cooccurrence[z][x].direction[i].alpha* cooccurrence[y][x].direction[i].alpha/ density_x[z].direction[i].alpha/ density_y[x].direction[i].alpha; } } channel_features[RedPixelChannel].contrast[i]+=z*z* pixel.direction[i].red; channel_features[GreenPixelChannel].contrast[i]+=z*z* pixel.direction[i].green; channel_features[BluePixelChannel].contrast[i]+=z*z* pixel.direction[i].blue; if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].contrast[i]+=z*z* pixel.direction[i].black; if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].contrast[i]+=z*z* pixel.direction[i].alpha; } /* Maximum Correlation Coefficient. Future: return second largest eigenvalue of Q. */ channel_features[RedPixelChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); channel_features[GreenPixelChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); channel_features[BluePixelChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); if (image->colorspace == CMYKColorspace) channel_features[BlackPixelChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); if (image->alpha_trait != UndefinedPixelTrait) channel_features[AlphaPixelChannel].maximum_correlation_coefficient[i]= sqrt((double) -1.0); } /* Relinquish resources. 
*/ sum=(ChannelStatistics *) RelinquishMagickMemory(sum); for (i=0; i < (ssize_t) number_grays; i++) Q[i]=(ChannelStatistics *) RelinquishMagickMemory(Q[i]); Q=(ChannelStatistics **) RelinquishMagickMemory(Q); density_y=(ChannelStatistics *) RelinquishMagickMemory(density_y); density_xy=(ChannelStatistics *) RelinquishMagickMemory(density_xy); density_x=(ChannelStatistics *) RelinquishMagickMemory(density_x); for (i=0; i < (ssize_t) number_grays; i++) cooccurrence[i]=(ChannelStatistics *) RelinquishMagickMemory(cooccurrence[i]); cooccurrence=(ChannelStatistics **) RelinquishMagickMemory(cooccurrence); return(channel_features); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % H o u g h L i n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Use HoughLineImage() in conjunction with any binary edge extracted image (we % recommand Canny) to identify lines in the image. The algorithm accumulates % counts for every white pixel for every possible orientation (for angles from % 0 to 179 in 1 degree increments) and distance from the center of the image to % the corner (in 1 px increments) and stores the counts in an accumulator % matrix of angle vs distance. The size of the accumulator is 180x(diagonal/2). % Next it searches this space for peaks in counts and converts the locations % of the peaks to slope and intercept in the normal x,y input image space. Use % the slope/intercepts to find the endpoints clipped to the bounds of the % image. The lines are then drawn. The counts are a measure of the length of % the lines. % % The format of the HoughLineImage method is: % % Image *HoughLineImage(const Image *image,const size_t width, % const size_t height,const size_t threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width, height: find line pairs as local maxima in this neighborhood. 
% % o threshold: the line count threshold. % % o exception: return any errors or warnings in this structure. % */ static inline double MagickRound(double x) { /* Round the fraction to nearest integer. */ if ((x-floor(x)) < (ceil(x)-x)) return(floor(x)); return(ceil(x)); } static Image *RenderHoughLines(const ImageInfo *image_info,const size_t columns, const size_t rows,ExceptionInfo *exception) { #define BoundingBox "viewbox" DrawInfo *draw_info; Image *image; MagickBooleanType status; /* Open image. */ image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } image->columns=columns; image->rows=rows; draw_info=CloneDrawInfo(image_info,(DrawInfo *) NULL); draw_info->affine.sx=image->resolution.x == 0.0 ? 1.0 : image->resolution.x/ DefaultResolution; draw_info->affine.sy=image->resolution.y == 0.0 ? 1.0 : image->resolution.y/ DefaultResolution; image->columns=(size_t) (draw_info->affine.sx*image->columns); image->rows=(size_t) (draw_info->affine.sy*image->rows); status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); if (SetImageBackgroundColor(image,exception) == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Render drawing. 
*/ if (GetBlobStreamData(image) == (unsigned char *) NULL) draw_info->primitive=FileToString(image->filename,~0UL,exception); else { draw_info->primitive=(char *) AcquireMagickMemory((size_t) GetBlobSize(image)+1); if (draw_info->primitive != (char *) NULL) { (void) memcpy(draw_info->primitive,GetBlobStreamData(image), (size_t) GetBlobSize(image)); draw_info->primitive[GetBlobSize(image)]='\0'; } } (void) DrawImage(image,draw_info,exception); draw_info=DestroyDrawInfo(draw_info); (void) CloseBlob(image); return(GetFirstImageInList(image)); } MagickExport Image *HoughLineImage(const Image *image,const size_t width, const size_t height,const size_t threshold,ExceptionInfo *exception) { #define HoughLineImageTag "HoughLine/Image" CacheView *image_view; char message[MagickPathExtent], path[MagickPathExtent]; const char *artifact; double hough_height; Image *lines_image = NULL; ImageInfo *image_info; int file; MagickBooleanType status; MagickOffsetType progress; MatrixInfo *accumulator; PointInfo center; register ssize_t y; size_t accumulator_height, accumulator_width, line_count; /* Create the accumulator. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); accumulator_width=180; hough_height=((sqrt(2.0)*(double) (image->rows > image->columns ? image->rows : image->columns))/2.0); accumulator_height=(size_t) (2.0*hough_height); accumulator=AcquireMatrixInfo(accumulator_width,accumulator_height, sizeof(double),exception); if (accumulator == (MatrixInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); if (NullMatrix(accumulator) == MagickFalse) { accumulator=DestroyMatrixInfo(accumulator); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Populate the accumulator. 
*/ status=MagickTrue; progress=0; center.x=(double) image->columns/2.0; center.y=(double) image->rows/2.0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelIntensity(image,p) > (QuantumRange/2.0)) { register ssize_t i; for (i=0; i < 180; i++) { double count, radius; radius=(((double) x-center.x)*cos(DegreesToRadians((double) i)))+ (((double) y-center.y)*sin(DegreesToRadians((double) i))); (void) GetMatrixElement(accumulator,i,(ssize_t) MagickRound(radius+hough_height),&count); count++; (void) SetMatrixElement(accumulator,i,(ssize_t) MagickRound(radius+hough_height),&count); } } p+=GetPixelChannels(image); } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CannyEdgeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); if (status == MagickFalse) { accumulator=DestroyMatrixInfo(accumulator); return((Image *) NULL); } /* Generate line segments from accumulator. 
*/ file=AcquireUniqueFileResource(path); if (file == -1) { accumulator=DestroyMatrixInfo(accumulator); return((Image *) NULL); } (void) FormatLocaleString(message,MagickPathExtent, "# Hough line transform: %.20gx%.20g%+.20g\n",(double) width, (double) height,(double) threshold); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; (void) FormatLocaleString(message,MagickPathExtent, "viewbox 0 0 %.20g %.20g\n",(double) image->columns,(double) image->rows); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; (void) FormatLocaleString(message,MagickPathExtent, "# x1,y1 x2,y2 # count angle distance\n"); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; line_count=image->columns > image->rows ? image->columns/4 : image->rows/4; if (threshold != 0) line_count=threshold; for (y=0; y < (ssize_t) accumulator_height; y++) { register ssize_t x; for (x=0; x < (ssize_t) accumulator_width; x++) { double count; (void) GetMatrixElement(accumulator,x,y,&count); if (count >= (double) line_count) { double maxima; SegmentInfo line; ssize_t v; /* Is point a local maxima? 
*/ maxima=count; for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++) { ssize_t u; for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++) { if ((u != 0) || (v !=0)) { (void) GetMatrixElement(accumulator,x+u,y+v,&count); if (count > maxima) { maxima=count; break; } } } if (u < (ssize_t) (width/2)) break; } (void) GetMatrixElement(accumulator,x,y,&count); if (maxima > count) continue; if ((x >= 45) && (x <= 135)) { /* y = (r-x cos(t))/sin(t) */ line.x1=0.0; line.y1=((double) (y-(accumulator_height/2.0))-((line.x1- (image->columns/2.0))*cos(DegreesToRadians((double) x))))/ sin(DegreesToRadians((double) x))+(image->rows/2.0); line.x2=(double) image->columns; line.y2=((double) (y-(accumulator_height/2.0))-((line.x2- (image->columns/2.0))*cos(DegreesToRadians((double) x))))/ sin(DegreesToRadians((double) x))+(image->rows/2.0); } else { /* x = (r-y cos(t))/sin(t) */ line.y1=0.0; line.x1=((double) (y-(accumulator_height/2.0))-((line.y1- (image->rows/2.0))*sin(DegreesToRadians((double) x))))/ cos(DegreesToRadians((double) x))+(image->columns/2.0); line.y2=(double) image->rows; line.x2=((double) (y-(accumulator_height/2.0))-((line.y2- (image->rows/2.0))*sin(DegreesToRadians((double) x))))/ cos(DegreesToRadians((double) x))+(image->columns/2.0); } (void) FormatLocaleString(message,MagickPathExtent, "line %g,%g %g,%g # %g %g %g\n",line.x1,line.y1,line.x2,line.y2, maxima,(double) x,(double) y); if (write(file,message,strlen(message)) != (ssize_t) strlen(message)) status=MagickFalse; } } } (void) close(file); /* Render lines to image canvas. 
*/ image_info=AcquireImageInfo(); image_info->background_color=image->background_color; (void) FormatLocaleString(image_info->filename,MagickPathExtent,"%s",path); artifact=GetImageArtifact(image,"background"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"background",artifact); artifact=GetImageArtifact(image,"fill"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"fill",artifact); artifact=GetImageArtifact(image,"stroke"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"stroke",artifact); artifact=GetImageArtifact(image,"strokewidth"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"strokewidth",artifact); lines_image=RenderHoughLines(image_info,image->columns,image->rows,exception); artifact=GetImageArtifact(image,"hough-lines:accumulator"); if ((lines_image != (Image *) NULL) && (IsStringTrue(artifact) != MagickFalse)) { Image *accumulator_image; accumulator_image=MatrixToImage(accumulator,exception); if (accumulator_image != (Image *) NULL) AppendImageToList(&lines_image,accumulator_image); } /* Free resources. */ accumulator=DestroyMatrixInfo(accumulator); image_info=DestroyImageInfo(image_info); (void) RelinquishUniqueFileResource(path); return(GetFirstImageInList(lines_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M e a n S h i f t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MeanShiftImage() delineate arbitrarily shaped clusters in the image. For % each pixel, it visits all the pixels in the neighborhood specified by % the window centered at the pixel and excludes those that are outside the % radius=(window-1)/2 surrounding the pixel. From those pixels, it finds those % that are within the specified color distance from the current mean, and % computes a new x,y centroid from those coordinates and a new mean. 
This new
%  x,y centroid is used as the center for a new window.  This process iterates
%  until it converges and the final mean replaces the (original window
%  center) pixel value.  It repeats this process for the next pixel, etc.,
%  until it processes all pixels in the image.  Results are typically better
%  with colorspaces other than sRGB.  We recommend YIQ, YUV or YCbCr.
%
%  The format of the MeanShiftImage method is:
%
%      Image *MeanShiftImage(const Image *image,const size_t width,
%        const size_t height,const double color_distance,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width, height: find pixels in this neighborhood.
%
%    o color_distance: the color distance.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MeanShiftImage(const Image *image,const size_t width,
  const size_t height,const double color_distance,ExceptionInfo *exception)
{
#define MaxMeanShiftIterations  100
#define MeanShiftImageTag  "MeanShift/Image"

  CacheView
    *image_view,
    *mean_view,
    *pixel_view;

  Image
    *mean_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* the result is a full (deep) clone of the input, converted to DirectClass
     so each pixel can be rewritten independently */
  mean_image=CloneImage(image,0,0,MagickTrue,exception);
  if (mean_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(mean_image,DirectClass,exception) == MagickFalse)
    {
      mean_image=DestroyImage(mean_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  /* three views: read the source row, read arbitrary window pixels, and
     write the destination row */
  image_view=AcquireVirtualCacheView(image,exception);
  pixel_view=AcquireVirtualCacheView(image,exception);
  mean_view=AcquireAuthenticCacheView(mean_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status,progress) \
    magick_number_threads(mean_image,mean_image,mean_image->rows,1)
#endif
  for (y=0; y < (ssize_t) mean_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(mean_view,0,y,mean_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) mean_image->columns; x++)
    {
      PixelInfo
        mean_pixel,
        previous_pixel;

      PointInfo
        mean_location,
        previous_location;

      register ssize_t
        i;

      /* start the iteration at the pixel itself */
      GetPixelInfo(image,&mean_pixel);
      GetPixelInfoPixel(image,p,&mean_pixel);
      mean_location.x=(double) x;
      mean_location.y=(double) y;
      for (i=0; i < MaxMeanShiftIterations; i++)
      {
        double
          distance,
          gamma;

        PixelInfo
          sum_pixel;

        PointInfo
          sum_location;

        ssize_t
          count,
          v;

        sum_location.x=0.0;
        sum_location.y=0.0;
        GetPixelInfo(image,&sum_pixel);
        previous_location=mean_location;
        previous_pixel=mean_pixel;
        count=0;
        /* accumulate position and color of window pixels that lie within
           the elliptical neighborhood and within color_distance of the
           current mean */
        for (v=(-((ssize_t) height/2)); v <= (((ssize_t) height/2)); v++)
        {
          ssize_t
            u;

          for (u=(-((ssize_t) width/2)); u <= (((ssize_t) width/2)); u++)
          {
            if ((v*v+u*u) <= (ssize_t) ((width/2)*(height/2)))
              {
                PixelInfo
                  pixel;

                /* NOTE(review): this overwrites the shared loop status with
                   the pixel-fetch result inside the parallel region -- TODO
                   confirm that is intended rather than (void)-ignoring it */
                status=GetOneCacheViewVirtualPixelInfo(pixel_view,(ssize_t)
                  MagickRound(mean_location.x+u),(ssize_t) MagickRound(
                  mean_location.y+v),&pixel,exception);
                distance=(mean_pixel.red-pixel.red)*(mean_pixel.red-pixel.red)+
                  (mean_pixel.green-pixel.green)*(mean_pixel.green-pixel.green)+
                  (mean_pixel.blue-pixel.blue)*(mean_pixel.blue-pixel.blue);
                if (distance <= (color_distance*color_distance))
                  {
                    sum_location.x+=mean_location.x+u;
                    sum_location.y+=mean_location.y+v;
                    sum_pixel.red+=pixel.red;
                    sum_pixel.green+=pixel.green;
                    sum_pixel.blue+=pixel.blue;
                    sum_pixel.alpha+=pixel.alpha;
                    count++;
                  }
              }
          }
        }
        /* new mean = centroid of the accepted pixels (gamma = 1/count,
           safe for count == 0) */
        gamma=PerceptibleReciprocal(count);
        mean_location.x=gamma*sum_location.x;
        mean_location.y=gamma*sum_location.y;
        mean_pixel.red=gamma*sum_pixel.red;
        mean_pixel.green=gamma*sum_pixel.green;
        mean_pixel.blue=gamma*sum_pixel.blue;
        mean_pixel.alpha=gamma*sum_pixel.alpha;
        /* converged when the squared shift in position plus color (scaled
           to 0..255 per channel) is small */
        distance=(mean_location.x-previous_location.x)*
          (mean_location.x-previous_location.x)+
          (mean_location.y-previous_location.y)*
          (mean_location.y-previous_location.y)+
          255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)*
          255.0*QuantumScale*(mean_pixel.red-previous_pixel.red)+
          255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)*
          255.0*QuantumScale*(mean_pixel.green-previous_pixel.green)+
          255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue)*
          255.0*QuantumScale*(mean_pixel.blue-previous_pixel.blue);
        if (distance <= 3.0)
          break;
      }
      /* write the converged mean color over the original pixel */
      SetPixelRed(mean_image,ClampToQuantum(mean_pixel.red),q);
      SetPixelGreen(mean_image,ClampToQuantum(mean_pixel.green),q);
      SetPixelBlue(mean_image,ClampToQuantum(mean_pixel.blue),q);
      SetPixelAlpha(mean_image,ClampToQuantum(mean_pixel.alpha),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(mean_image);
    }
    if (SyncCacheViewAuthenticPixels(mean_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MeanShiftImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  mean_view=DestroyCacheView(mean_view);
  pixel_view=DestroyCacheView(pixel_view);
  image_view=DestroyCacheView(image_view);
  return(mean_image);
}
modifier_view.h
// ========================================================================== // SeqAn - The Library for Sequence Analysis // ========================================================================== // Copyright (c) 2006-2010, Knut Reinert, FU Berlin // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of Knut Reinert or the FU Berlin nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY // OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
//
// ==========================================================================
//  Author: David Weese <david.weese@fu-berlin.de>
// ==========================================================================

#ifndef SEQAN_MODIFIER_MODIFIER_VIEW_H_
#define SEQAN_MODIFIER_MODIFIER_VIEW_H_

namespace seqan {

/**
.Spec.ModView:
..summary:Transforms the characters of the $THost$ string/iterator using a custom function.
..cat:Modifier
..general:Class.ModifiedIterator
..general:Class.ModifiedString
..signature:ModifiedIterator<THost, ModView<TFunctor> >
..signature:ModifiedString<THost, ModView<TFunctor> >
..param.THost:Original string/iterator.
...type:Concept.RandomAccessIteratorConcept
..param.TFunctor:A unary function (see STL's $unary_function$).
...remarks:The argument type of $TFunctor$ must be $VALUE<THost>::Type$.
..remarks:The @Metafunction.Value@ type of this modifier is the result type of $TFunctor$.
..include:seqan/modifier.h
*/

// Tag selecting the "view" modifier: elements are passed through TFunctor
// on access; the host sequence itself is never changed.
template <typename TFunctor>
struct ModView {};

// Per-modifier state: the functor applied to each element on access.
template <typename TFunctor>
struct ModViewCargo {
    TFunctor func;
};

//////////////////////////////////////////////////////////////////////////////
// view iterator
//////////////////////////////////////////////////////////////////////////////

// The cargo of a ModView iterator is the functor wrapper above.
template <typename THost, typename TFunctor>
struct Cargo< ModifiedIterator<THost, ModView<TFunctor> > > {
    typedef ModViewCargo<TFunctor>	Type;
};

// Iterator adaptor: dereferencing applies cargo.func to the host's value.
template <typename THost, typename TFunctor>
class ModifiedIterator<THost, ModView<TFunctor> > {
public:
    Holder<THost, Simple> data_host;                         // wrapped host iterator
    typename Cargo<ModifiedIterator>::Type data_cargo;       // the view functor
    // Scratch slot so value() can hand out a reference to the transformed
    // element; mutable because const iterators must also materialize it.
    mutable typename Value<ModifiedIterator>::Type tmp_value;

    ModifiedIterator() {}

    // Construct with a functor only (host is set later).
    explicit ModifiedIterator(TFunctor &_func) {
        SEQAN_CHECKPOINT;
        assignModViewFunctor(*this, _func);
    }

    explicit ModifiedIterator(TFunctor const &_func) {
        SEQAN_CHECKPOINT;
        assignModViewFunctor(*this, _func);
    }

    // Copy constructors (non-const overload kept so it wins over the
    // templated constructor below for non-const arguments).
    ModifiedIterator(ModifiedIterator &_origin):
        data_host(_origin.data_host),
        data_cargo(_origin.data_cargo) {
        SEQAN_CHECKPOINT;
    }

    ModifiedIterator(ModifiedIterator const &_origin):
        data_host(_origin.data_host),
        data_cargo(_origin.data_cargo) {
        SEQAN_CHECKPOINT;
    }

    // Conversion constructors: delegate to assign() for any other source.
    template <typename T>
    ModifiedIterator(T & _origin) {
        SEQAN_CHECKPOINT;
        assign(*this, _origin);
    }

    template <typename T>
    ModifiedIterator(T const & _origin) {
        SEQAN_CHECKPOINT;
        assign(*this, _origin);
    }
//____________________________________________________________________________

    template <typename T>
    inline ModifiedIterator const &
    operator = (T & _origin) {
        SEQAN_CHECKPOINT;
        assign(*this, _origin);
        return *this;
    }

    template <typename T>
    inline ModifiedIterator const &
    operator = (T const & _origin) {
        SEQAN_CHECKPOINT;
        assign(*this, _origin);
        return *this;
    }
};

// Value type of the view iterator is the functor's result type, stripped of
// const so it can be stored in tmp_value.
template <typename THost, typename TFunctor>
struct Value< ModifiedIterator<THost, ModView<TFunctor> > > {
    typedef typename TFunctor::result_type			TResult;
    typedef typename RemoveConst_<TResult>::Type	Type;
};

template <typename THost, typename TFunctor>
struct GetValue< ModifiedIterator<THost, ModView<TFunctor> > >:
    Value< ModifiedIterator<THost, ModView<TFunctor> > > {};

// References point into tmp_value, not into the host sequence.
template <typename THost, typename TFunctor>
struct Reference< ModifiedIterator<THost, ModView<TFunctor> > > {
    typedef typename Value< ModifiedIterator<THost, ModView<TFunctor> > >::Type & Type;
};

//////////////////////////////////////////////////////////////////////////////
// value
//////////////////////////////////////////////////////////////////////////////

// Apply the functor to the current host element and return a reference to
// the cached result.  NOTE: the reference is invalidated by the next access
// through this iterator, since all accesses share tmp_value.
template <typename THost, typename TFunctor>
inline typename Reference<ModifiedIterator<THost, ModView<TFunctor> > >::Type
value(ModifiedIterator<THost, ModView<TFunctor> > & me)
{
    SEQAN_CHECKPOINT;
    me.tmp_value = cargo(me).func(getValue(host(me)));
    return me.tmp_value;
}

template <typename THost, typename TFunctor>
inline typename Reference<ModifiedIterator<THost, ModView<TFunctor> > const>::Type
value(ModifiedIterator<THost, ModView<TFunctor> > const & me)
{
    SEQAN_CHECKPOINT;
    me.tmp_value = cargo(me).func(getValue(host(me)));
    return me.tmp_value;
}
//////////////////////////////////////////////////////////////////////////////
// getValue
//////////////////////////////////////////////////////////////////////////////

// Return the transformed element by value (no caching needed).
template <typename THost, typename TFunctor>
inline typename GetValue<ModifiedIterator<THost, ModView<TFunctor> > >::Type
getValue(ModifiedIterator<THost, ModView<TFunctor> > & me)
{
    SEQAN_CHECKPOINT;
    return cargo(me).func(getValue(host(me)));
}

template <typename THost, typename TFunctor>
inline typename GetValue<ModifiedIterator<THost, ModView<TFunctor> > const>::Type
getValue(ModifiedIterator<THost, ModView<TFunctor> > const & me)
{
    SEQAN_CHECKPOINT;
    return cargo(me).func(getValue(host(me)));
}

//////////////////////////////////////////////////////////////////////////////
// assignModViewFunctor
//////////////////////////////////////////////////////////////////////////////

// Install the view functor into the iterator's cargo.
template <typename THost, typename TFunctor>
inline void
assignModViewFunctor(ModifiedIterator<THost, ModView<TFunctor> > & me, TFunctor const & _func)
{
    SEQAN_CHECKPOINT;
    cargo(me).func = _func;
}

//////////////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////////////
// view string
//////////////////////////////////////////////////////////////////////////////

// The cargo of a ModView string is the functor wrapper.
template <typename THost, typename TFunctor>
struct Cargo< ModifiedString<THost, ModView<TFunctor> > > {
    typedef ModViewCargo<TFunctor>	Type;
};

// String adaptor: element access applies cargo.func to the host's element.
template <typename THost, typename TFunctor>
class ModifiedString<THost, ModView<TFunctor> > {
public:
    Holder<THost> data_host;                              // wrapped host sequence
    typename Cargo<ModifiedString>::Type data_cargo;      // the view functor
    // Scratch slot backing the references returned by value()/operator[];
    // mutable so const access can still materialize the transformed value.
    mutable typename Value<ModifiedString>::Type tmp_value;

    ModifiedString() {}

    // Construct with a functor only (host is set later).
    explicit ModifiedString(TFunctor &_func) {
        SEQAN_CHECKPOINT;
        cargo(*this).func = _func;
    }

    explicit ModifiedString(TFunctor const &_func) {
        SEQAN_CHECKPOINT;
        cargo(*this).func = _func;
    }

    // Share _origin's host but use a different functor.
    explicit ModifiedString(ModifiedString const &_origin, TFunctor const &_func):
        data_host(_origin.data_host) {
        SEQAN_CHECKPOINT;
        cargo(*this).func = _func;
    }

    // Copy constructors (non-const overload shields the templated one).
    ModifiedString(ModifiedString &_origin):
        data_host(_origin.data_host),
        data_cargo(_origin.data_cargo) {
        SEQAN_CHECKPOINT;
    }

    ModifiedString(ModifiedString const &_origin):
        data_host(_origin.data_host),
        data_cargo(_origin.data_cargo) {
        SEQAN_CHECKPOINT;
    }

    // Wrap a host sequence directly.
    ModifiedString(THost &_origin) {
        SEQAN_CHECKPOINT;
        setHost(*this, _origin);
    }

    // Wrap any other compatible sequence via setValue().
    template <typename T>
    ModifiedString(T & _origin) {
        SEQAN_CHECKPOINT;
        setValue(*this, _origin);
    }

    template <typename T>
    ModifiedString(T const & _origin) {
        SEQAN_CHECKPOINT;
        setValue(*this, _origin);
    }

    template <typename T>
    inline ModifiedString const &
    operator = (T & _origin) {
        SEQAN_CHECKPOINT;
        assign(*this, _origin);
        return *this;
    }

    template <typename TPos>
    inline typename Reference<ModifiedString>::Type
    operator [] (TPos pos)
    {
        SEQAN_CHECKPOINT;
        return value(*this, pos);
    }

    template <typename TPos>
    inline typename Reference<ModifiedString const>::Type
    operator [] (TPos pos) const
    {
        SEQAN_CHECKPOINT;
        return value(*this, pos);
    }
};

//////////////////////////////////////////////////////////////////////////////
// value
//////////////////////////////////////////////////////////////////////////////

// Apply the functor to element pos and return a reference to the cached
// result.  NOTE: every access overwrites the shared tmp_value, so the
// reference is only valid until the next element access.
template <typename THost, typename TFunctor, typename TPos>
inline typename Reference<ModifiedString<THost, ModView<TFunctor> > >::Type
value(ModifiedString<THost, ModView<TFunctor> > & me, TPos pos)
{
    SEQAN_CHECKPOINT;
    me.tmp_value = cargo(me).func(getValue(host(me), pos));
    return me.tmp_value;
}

template <typename THost, typename TFunctor, typename TPos>
inline typename Reference<ModifiedString<THost, ModView<TFunctor> > const>::Type
value(ModifiedString<THost, ModView<TFunctor> > const & me, TPos pos)
{
    SEQAN_CHECKPOINT;
    me.tmp_value = cargo(me).func(getValue(host(me), pos));
    return me.tmp_value;
}

//////////////////////////////////////////////////////////////////////////////
// getValue
//////////////////////////////////////////////////////////////////////////////

// Return the transformed element by value (no caching needed).
template <typename THost, typename TFunctor, typename TPos>
inline typename GetValue<ModifiedString<THost, ModView<TFunctor> > >::Type
getValue(ModifiedString<THost, ModView<TFunctor> > & me, TPos pos)
{
    SEQAN_CHECKPOINT;
    return cargo(me).func(getValue(host(me), pos));
}

template <typename THost, typename TFunctor, typename TPos>
inline typename GetValue<ModifiedString<THost, ModView<TFunctor> > const>::Type
getValue(ModifiedString<THost, ModView<TFunctor> > const & me, TPos pos)
{
    SEQAN_CHECKPOINT;
    return cargo(me).func(getValue(host(me), pos));
}

//////////////////////////////////////////////////////////////////////////////
// assignModViewFunctor
//////////////////////////////////////////////////////////////////////////////

// Install the view functor into the string's cargo.
template <typename THost, typename TFunctor>
inline void
assignModViewFunctor(ModifiedString<THost, ModView<TFunctor> > & me, TFunctor const & _func)
{
    SEQAN_CHECKPOINT;
    cargo(me).func = _func;
}

//////////////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////////////
// convert
//////////////////////////////////////////////////////////////////////////////

// Apply F to every element of sequence IN PLACE (unlike the view classes
// above, this permanently overwrites the host data).
template < typename TSequence, typename TFunctor >
inline void
convert(TSequence & sequence, TFunctor const &F)
{
    SEQAN_CHECKPOINT;
#if defined (_OPENMP) && defined (SEQAN_PARALLEL)
    // OpenMP does not support for loop with iterators. Therefore use index variables.
    typedef typename Position<TSequence>::Type	TPos;
    typedef typename MakeSigned_<TPos>::Type	TSignedPos;

    // Parallelize only when the sequence is large enough to amortize thread start-up.
    #pragma omp parallel for if(length(sequence) > 1000000)
    for(TSignedPos p = 0; p < (TSignedPos)length(sequence); ++p)
        sequence[p] = F(sequence[p]);
#else
    typedef typename Iterator<TSequence, Standard>::Type	TIter;

    TIter it = begin(sequence, Standard());
    TIter itEnd = end(sequence, Standard());
    for(; it != itEnd; ++it)
        *it = F(*it);
#endif
}

// Const overload; still assigns through sequence[p] / *it.
// NOTE(review): presumably intended for proxy/segment types whose const
// variant still exposes assignable elements -- confirm against callers.
template < typename TSequence, typename TFunctor >
inline void
convert(TSequence const & sequence, TFunctor const &F)
{
    SEQAN_CHECKPOINT;
#if defined (_OPENMP) && defined (SEQAN_PARALLEL)
    // OpenMP does not support for loop with iterators. Therefore use index variables.
    typedef typename Position<TSequence>::Type	TPos;
    typedef typename MakeSigned_<TPos>::Type	TSignedPos;

    #pragma omp parallel for if(length(sequence) > 1000000)
    for(TSignedPos p = 0; p < (TSignedPos)length(sequence); ++p)
        sequence[p] = F(sequence[p]);
#else
    typedef typename Iterator<TSequence, Standard>::Type	TIter;

    TIter it = begin(sequence, Standard());
    TIter itEnd = end(sequence, Standard());
    for(; it != itEnd; ++it)
        *it = F(*it);
#endif
}

}	// namespace seqan

#endif  // SEQAN_MODIFIER_MODIFIER_VIEW_H_
GB_unaryop__ainv_uint16_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_uint16_uint32
// op(A') function:  GB_tran__ainv_uint16_uint32

// C type:   uint16_t
// A type:   uint32_t
// cast:     uint16_t cij = (uint16_t) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (additive inverse, with unsigned wraparound)
#define GB_OP(z, x) \
    z = -x ;

// casting (uint32_t value truncated to uint16_t)
#define GB_CASTING(z, aij) \
    uint16_t z = (uint16_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;  \
    GB_OP (GB_CX (pC), z) ;    \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT16 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Entrywise apply over all anz entries; safe when Cx aliases Ax because each
// entry p is read once and written once.
GrB_Info GB_unop__ainv_uint16_uint32
(
    uint16_t *Cx,       // Cx and Ax may be aliased
    uint32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel itself lives in GB_unaryop_transpose.c; this wrapper
// instantiates it with the macros defined above (phase 2 of 2).
GrB_Info GB_tran__ainv_uint16_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
core64.c
#undef DT32
//#define DT32 //<- This should be the ONLY difference between core32 and core64!
// DT32 selects the single-precision build (flt = float); this translation unit
// leaves it undefined, so every voxel computation below uses double.
#ifdef DT32
#define flt float
#define DT_CALC DT_FLOAT32
#define epsilon FLT_EPSILON
#else
#define flt double
#define DT_CALC DT_FLOAT64
#define epsilon DBL_EPSILON
#endif
#include <stdio.h>
#include <stdlib.h>
#include <nifti2_io.h>
#include <float.h> //FLT_EPSILON
#ifdef __aarch64__
#include "arm_malloc.h"
#else
#include <immintrin.h>
#endif
#include <limits.h>
#include <math.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "core.h"
#define bandpass
#ifdef bandpass
#include "bw.h"
#endif
//#define slicetimer //tensor_decomp support is optional
#ifdef slicetimer
#include "afni.h"
#endif
#define tensor_decomp //tensor_decomp support is optional
#ifdef tensor_decomp
#include "tensor.h"
#endif
//#define TFCE //formerly we used Christian Gaser's tfce, new bespoke code handles connectivity
//#ifdef TFCE //we now use in-built tfce function
// #include "tfce_pthread.h"
//#endif

// Fallback used only if the wrapper fails to supply its own help routine;
// reaching this is a fatal logic error, so it terminates the process.
static int show_helpx( void ) {
	printf("Fatal: show_help shown by wrapper function\n");
	exit(1);
}

// X coordinate where the parabolas rooted at samples p and q intersect
// (Felzenszwalb & Huttenlocher 1D distance transform). An INFINITY sample
// yields an INFINITY intersection so that sample never joins the envelope.
static flt vx(flt * f, int p, int q) {
	if ((f[p] == INFINITY) || (f[q] == INFINITY))
		return INFINITY;
	else
		return ((f[q] + q*q) - (f[p] + p*p)) / (2.0*q - 2.0*p);
}

// 1D squared-distance transform of one row f[0..n-1], in place.
// Computes the lower envelope of parabolas rooted at each sample, then
// evaluates it; f receives SQUARED distances (no sqrt is taken here).
static void edt(flt * f, int n) {
	int q, p, k;
	flt s, dx;
	flt * d = (flt *)_mm_malloc((n)*sizeof(flt), 64);
	flt * z = (flt *)_mm_malloc((n)*sizeof(flt), 64);
	int * v = (int *)_mm_malloc((n)*sizeof(int), 64);
	/*# Find the lower envelope of a sequence of parabolas.
	# f...source data (returns the Y of the parabola vertex at X)
	# d...destination data (final distance values are written here)
	# z...temporary used to store X coords of parabola intersections
	# v...temporary used to store X coords of parabola vertices
	# i...resulting X coords of parabola vertices
	# n...number of pixels in "f" to process
	# Always add the first pixel to the enveloping set since it is
	# obviously lower than all parabolas processed so far.*/
	k = 0;
	v[0] = 0;
	z[0] = -INFINITY;
	z[1] = INFINITY;
	for (q = 1; q < n; q++ ) {
		/* If the new parabola is lower than the right-most parabola in
		# the envelope, remove it from the envelope. To make this
		# determination, find the X coordinate of the intersection (s)
		# between the parabolas with vertices at (q,f[q]) and (p,f[p]).*/
		p = v[k];
		s = vx(f, p,q);
		while (s <= z[k]) {
			k = k - 1;
			p = v[k];
			s = vx(f, p,q);
		}
		//# Add the new parabola to the envelope.
		k = k + 1;
		v[k] = q;
		z[k] = s;
		z[k + 1] = INFINITY;
	}
	/*# Go back through the parabolas in the envelope and evaluate them
	# in order to populate the distance values at each X coordinate.*/
	k = 0;
	for (q = 0; q < n; q++ ) {
		while (z[k + 1] < q)
			k = k + 1;
		dx = (q - v[k]);
		d[q] = dx * dx + f[v[k]];
	}
	for (q = 0; q < n; q++ )
		f[q] = d[q];
	_mm_free (d);
	_mm_free (z);
	_mm_free (v);
}

// 1D squared-distance transform specialized for the first pass, where every
// voxel is either 0 (feature) or INFINITY: a forward then a reverse sweep
// suffices, tracking the nearest feature voxel seen so far.
static void edt1(flt * df, int n) { //first dimension is simple
	int q, prevX;
	flt prevY, v;
	prevX = 0;
	prevY = INFINITY;
	//forward
	for (q = 0; q < n; q++ ) {
		if (df[q] == 0) {
			prevX = q;
			prevY = 0;
		} else
			df[q] = sqr(q-prevX)+prevY;
	}
	//reverse
	prevX = n;
	prevY = INFINITY;
	for (q = (n-1); q >= 0; q-- ) {
		v = sqr(q-prevX)+prevY;
		if (df[q] < v) {
			prevX = q;
			prevY = df[q];
		} else
			df[q] = v;
	}
}

// 3D Euclidean distance transform of every volume in nim, in place.
// Voxels > 0 are background (distance computed to them), voxels <= 0 are
// features. Separable passes: X rows in place, then Y and Z via an explicit
// volume transpose so each pass again works on contiguous rows.
// NOTE(review): the result left in img at this point is the SQUARED distance
// (edt/edt1 never take a sqrt) — presumably the caller finishes the sqrt;
// confirm against the caller.
static int nifti_edt(nifti_image * nim) {
	//https://github.com/neurolabusc/DistanceFields
	if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2) || (nim->nz < 1)) return 1;
	if (nim->datatype != DT_CALC) return 1;
	flt * img = (flt *) nim->data;
	//int nVol = 1;
	//for (int i = 4; i < 8; i++ )
	// nVol *= MAX(nim->dim[i],1);
	int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
	int nVol = nim->nvox / nvox3D;
	if ((nvox3D * nVol) != nim->nvox) return 1;
	int nx = nim->nx;
	int ny = nim->ny;
	int nz = nim->nz;
	//binarize: >0 becomes "unknown distance" (INFINITY), else a feature (0)
	flt threshold = 0.0;
	for (size_t i = 0; i < nim->nvox; i++ ) {
		if (img[i] > threshold)
			img[i] = INFINITY;
		else
			img[i] = 0;
	}
	size_t nRow = 1;
	for (int i = 2; i < 8; i++ )
		nRow *= MAX(nim->dim[i],1);
	//EDT in left-right direction
	for (int r = 0; r < nRow; r++ ) {
		flt * imgRow = img + (r * nx);
		edt1(imgRow, nx);
	}
	//EDT in anterior-posterior direction
	nRow = nim->nx * nim->nz;
	//transpose XYZ to YXZ and blur Y columns with XZ Rows
	// NOTE(review): unlike the Z pass below, this volume loop has no
	// "#pragma omp parallel for" even though the per-volume buffer was
	// allocated to allow one — possibly an oversight; confirm.
	for (int v = 0; v < nVol; v++ ) { //transpose each volume separately
		flt * img3D = (flt *)_mm_malloc(nvox3D*sizeof(flt), 64); //alloc for each volume to allow openmp
		//transpose data
		size_t vo = v * nvox3D; //volume offset
		for (int z = 0; z < nz; z++ ) {
			int zo = z * nx * ny;
			for (int y = 0; y < ny; y++ ) {
				int xo = 0;
				for (int x = 0; x < nx; x++ ) {
					img3D[zo+xo+y] = img[vo];
					vo += 1;
					xo += ny;
				}
			}
		}
		//perform EDT for all rows
		for (int r = 0; r < nRow; r++ ) {
			flt * imgRow = img3D + (r * ny);
			edt(imgRow, ny);
		}
		//transpose data back
		vo = v * nvox3D; //volume offset
		for (int z = 0; z < nz; z++ ) {
			int zo = z * nx * ny;
			for (int y = 0; y < ny; y++ ) {
				int xo = 0;
				for (int x = 0; x < nx; x++ ) {
					img[vo] = img3D[zo+xo+y];
					vo += 1;
					xo += ny;
				}
			}
		}
		_mm_free (img3D);
	} //for each volume
	//EDT in head-foot direction
	nRow = nim->nx * nim->ny;
	//transpose XYZ to ZXY and blur Z columns with XY Rows
	#pragma omp parallel for
	for (int v = 0; v < nVol; v++ ) { //transpose each volume separately
		flt * img3D = (flt *)_mm_malloc(nvox3D*sizeof(flt), 64); //alloc for each volume to allow openmp
		//transpose data
		size_t vo = v * nvox3D; //volume offset
		for (int z = 0; z < nz; z++ ) {
			for (int y = 0; y < ny; y++ ) {
				int yo = y * nz * nx;
				int xo = 0;
				for (int x = 0; x < nx; x++ ) {
					img3D[z+xo+yo] = img[vo];
					vo += 1;
					xo += nz;
				}
			}
		}
		//perform EDT for all "rows"
		for (int
		r = 0; r < nRow; r++ ) {
			flt * imgRow = img3D + (r * nz);
			edt(imgRow, nz);
		}
		//transpose data back
		vo = v * nvox3D; //volume offset
		for (int z = 0; z < nz; z++ ) {
			for (int y = 0; y < ny; y++ ) {
				int yo = y * nz * nx;
				int xo = 0;
				for (int x = 0; x < nx; x++ ) {
					img[vo] = img3D[z+xo+yo];
					vo += 1;
					xo += nz;
				} //x
			} //y
		} //z
		_mm_free (img3D);
	} //for each volume
	return 0;
}

//Gaussian blur, both serial and parallel variants, https://github.com/neurolabusc/niiSmooth
// blurS: serial 1D FIR Gaussian blur applied to each of ny rows of length nx,
// in place. xmm is the voxel size (mm) along the row, Sigmamm the Gaussian
// sigma in mm. Edge rows renormalize the truncated kernel so weights sum to 1.
static void blurS(flt * img, int nx, int ny, flt xmm, flt Sigmamm) {
	//serial blur
	//make kernels
	if ((xmm == 0) || (nx < 2) || (ny < 1) || (Sigmamm <= 0.0)) return;
	//flt sigma = (FWHMmm/xmm)/sqrt(8*log(2));
	flt sigma = (Sigmamm/xmm); //mm to vox
	//round(6*sigma), ceil(4*sigma) seems spot on larger than fslmaths
	//int cutoffvox = round(6*sigma); //filter width to 6 sigma: faster but lower precision AFNI_BLUR_FIRFAC = 2.5
	int cutoffvox = ceil(4*sigma); //filter width to 6 sigma: faster but lower precision AFNI_BLUR_FIRFAC = 2.5
	//printf(".Blur Cutoff (%g) %d\n", 4*sigma, cutoffvox);
	//validated on SPM12's 1.5mm isotropic mask_ICV.nii (discrete jump in number of non-zero voxels)
	//fslmaths mask -s 2.26 f6.nii //Blur Cutoff (6.02667) 7
	//fslmaths mask -s 2.24 f4.nii //Blur Cutoff (5.97333) 6
	cutoffvox = MAX(cutoffvox, 1);
	flt * k = (flt *)_mm_malloc((cutoffvox+1)*sizeof(flt), 64); //FIR Gaussian
	flt expd = 2*sigma*sigma;
	for (int i = 0; i <= cutoffvox; i++ )
		k[i] = exp(-1.0f*(i*i)/expd);
	//calculate start, end for each voxel in
	int * kStart = (int *)_mm_malloc(nx*sizeof(int), 64); //-cutoff except left left columns, e.g. 0, -1, -2... cutoffvox
	int * kEnd = (int *)_mm_malloc(nx*sizeof(int), 64); //+cutoff except right columns
	flt * kWeight = (flt *)_mm_malloc(nx*sizeof(flt), 64); //ensure sum of kernel = 1.0
	for (int i = 0; i < nx; i++ ) {
		kStart[i] = MAX(-cutoffvox, -i);//do not read below 0
		kEnd[i] = MIN(cutoffvox, nx-i-1);//do not read beyond final columnn
		if ((i > 0) && (kStart[i] == (kStart[i-1])) && (kEnd[i] == (kEnd[i-1]))) { //reuse weight
			kWeight[i] = kWeight[i-1];
			continue;
		}
		flt wt = 0.0f;
		for (int j = kStart[i]; j <= kEnd[i]; j++ )
			wt += k[abs(j)];
		kWeight[i] = 1 / wt;
		//printf("%d %d->%d %g\n", i, kStart[i], kEnd[i], kWeight[i]);
	}
	//apply kernel to each row
	flt * tmp = _mm_malloc(nx*sizeof(flt), 64); //input values prior to blur
	for (int y = 0; y < ny; y++ ) {
		//printf("-+ %d:%d\n", y, ny);
		memcpy(tmp, img, nx*sizeof(flt));
		for (int x = 0; x < nx; x++ ) {
			flt sum = 0;
			for (int i = kStart[x]; i <= kEnd[x]; i++ )
				sum += tmp[x+i] * k[abs(i)];
			img[x] = sum * kWeight[x];
		}
		img += nx;
	} //blurX
	//free kernel
	_mm_free (tmp);
	_mm_free (k);
	_mm_free (kStart);
	_mm_free (kEnd);
	_mm_free (kWeight);
}

#if defined(_OPENMP)
// blurP: OpenMP variant of blurS — identical kernel construction, rows
// processed in parallel with a per-thread scratch row.
// NOTE(review): blurP truncates the kernel at round(6*sigma) while blurS uses
// ceil(4*sigma), so serial and parallel runs smooth with slightly different
// support — looks like blurS was retuned and blurP was not; confirm intent.
// (The parameter is named FWHMmm but, as in blurS, it is used as a sigma.)
static void blurP(flt * img, int nx, int ny, flt xmm, flt FWHMmm) {
	//parallel blur
	//make kernels
	if ((xmm == 0) || (nx < 2) || (ny < 1) || (FWHMmm <= 0.0)) return;
	//flt sigma = (FWHMmm/xmm)/sqrt(8*log(2));
	flt sigma = (FWHMmm/xmm); //mm to vox
	int cutoffvox = round(6*sigma); //filter width to 6 sigma: faster but lower precision AFNI_BLUR_FIRFAC = 2.5
	cutoffvox = MAX(cutoffvox, 1);
	flt * k = (flt *)_mm_malloc((cutoffvox+1)*sizeof(flt), 64); //FIR Gaussian
	flt expd = 2*sigma*sigma;
	for (int i = 0; i <= cutoffvox; i++ )
		k[i] = exp(-1.0f*(i*i)/expd);
	//calculate start, end for each voxel in
	int * kStart = (int *)_mm_malloc(nx*sizeof(int), 64); //-cutoff except left left columns, e.g. 0, -1, -2... cutoffvox
	int * kEnd = (int *)_mm_malloc(nx*sizeof(int), 64); //+cutoff except right columns
	flt * kWeight = (flt *)_mm_malloc(nx*sizeof(flt), 64); //ensure sum of kernel = 1.0
	for (int i = 0; i < nx; i++ ) {
		kStart[i] = MAX(-cutoffvox, -i);//do not read below 0
		kEnd[i] = MIN(cutoffvox, nx-i-1);//do not read beyond final columnn
		if ((i > 0) && (kStart[i] == (kStart[i-1])) && (kEnd[i] == (kEnd[i-1]))) { //reuse weight
			kWeight[i] = kWeight[i-1];
			continue;
		}
		flt wt = 0.0f;
		for (int j = kStart[i]; j <= kEnd[i]; j++ )
			wt += k[abs(j)];
		kWeight[i] = 1 / wt;
		//printf("%d %d->%d %g\n", i, kStart[i], kEnd[i], kWeight[i]);
	}
	//apply kernel to each row
	#pragma omp parallel for
	for (int y = 0; y < ny; y++ ) {
		flt * tmp = _mm_malloc(nx*sizeof(flt), 64); //input values prior to blur
		flt * imgx = img;
		imgx += (nx * y);
		memcpy(tmp, imgx, nx*sizeof(flt));
		for (int x = 0; x < nx; x++ ) {
			flt sum = 0;
			for (int i = kStart[x]; i <= kEnd[x]; i++ )
				sum += tmp[x+i] * k[abs(i)];
			imgx[x] = sum * kWeight[x];
		}
		_mm_free (tmp);
	}
	//free kernel
	_mm_free (k);
	_mm_free (kStart);
	_mm_free (kEnd);
	_mm_free (kWeight);
} //blurP
#endif

// Separable 3D Gaussian smoothing of every volume in nim, in place; the three
// sigmas are in mm (<= 0 skips that axis). X is blurred on the native rows;
// Y and Z are blurred by transposing each volume so the target axis becomes
// the contiguous row direction, mirroring nifti_edt above.
static int nifti_smooth_gauss(nifti_image * nim, flt SigmammX, flt SigmammY, flt SigmammZ) {
	//https://github.com/afni/afni/blob/699775eba3c58c816d13947b81cf3a800cec606f/src/edt_blur.c
	if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2) || (nim->nz < 1)) return 1;
	if (nim->datatype != DT_CALC) return 1;
	flt * img = (flt *) nim->data;
	//int nVol = 1;
	//for (int i = 4; i < 8; i++ )
	// nVol *= MAX(nim->dim[i],1);
	int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
	int nVol = nim->nvox / nvox3D;
	if ((nvox3D * nVol) != nim->nvox) return 1;
	int nx = nim->nx;
	int ny = nim->ny;
	int nz = nim->nz;
	if (SigmammX <= 0.0) goto DO_Y_BLUR ;
	//BLUR X
	int nRow = 1;
	for (int i = 2; i < 8; i++ )
		nRow *= MAX(nim->dim[i],1);
#if defined(_OPENMP)
	//printf(">>>%d\n", omp_get_num_threads());
	if (omp_get_max_threads() > 1)
		blurP(img, nim->nx, nRow, nim->dx, SigmammX);
	else
		blurS(img, nim->nx,
		nRow, nim->dx, SigmammX);
#else
	blurS(img, nim->nx, nRow, nim->dx, SigmammX);
#endif
	//blurX(img, nim->nx, nRow, nim->dx, SigmammX);
DO_Y_BLUR:
	//BLUR Y
	if (SigmammY <= 0.0) goto DO_Z_BLUR ;
	nRow = nim->nx * nim->nz;
	//transpose XYZ to YXZ and blur Y columns with XZ Rows
	//(parallelism here is over volumes, so each volume calls serial blurS)
	#pragma omp parallel for
	for (int v = 0; v < nVol; v++ ) { //transpose each volume separately
		flt * img3D = (flt *)_mm_malloc(nvox3D*sizeof(flt), 64); //alloc for each volume to allow openmp
		size_t vo = v * nvox3D; //volume offset
		for (int z = 0; z < nz; z++ ) {
			int zo = z * nx * ny;
			for (int y = 0; y < ny; y++ ) {
				int xo = 0;
				for (int x = 0; x < nx; x++ ) {
					img3D[zo+xo+y] = img[vo];
					vo += 1;
					xo += ny;
				}
			}
		}
		blurS(img3D, nim->ny, nRow, nim->dy, SigmammY);
		vo = v * nvox3D; //volume offset
		for (int z = 0; z < nz; z++ ) {
			int zo = z * nx * ny;
			for (int y = 0; y < ny; y++ ) {
				int xo = 0;
				for (int x = 0; x < nx; x++ ) {
					img[vo] = img3D[zo+xo+y];
					vo += 1;
					xo += ny;
				}
			}
		}
		_mm_free (img3D);
	} //for each volume
DO_Z_BLUR:
	//BLUR Z:
	if ((SigmammZ <= 0.0) || (nim->nz < 2)) return 0; //all done!
	nRow = nim->nx * nim->ny;
	//transpose XYZ to ZXY and blur Z columns with XY Rows
	//#pragma omp parallel
	//#pragma omp for
	#pragma omp parallel for
	for (int v = 0; v < nVol; v++ ) { //transpose each volume separately
		flt * img3D = (flt *)_mm_malloc(nvox3D*sizeof(flt), 64); //alloc for each volume to allow openmp
		size_t vo = v * nvox3D; //volume offset
		for (int z = 0; z < nz; z++ ) {
			for (int y = 0; y < ny; y++ ) {
				int yo = y * nz * nx;
				int xo = 0;
				for (int x = 0; x < nx; x++ ) {
					img3D[z+xo+yo] = img[vo];
					vo += 1;
					xo += nz;
				}
			}
		}
		blurS(img3D, nz, nRow, nim->dz, SigmammZ);
		vo = v * nvox3D; //volume offset
		for (int z = 0; z < nz; z++ ) {
			for (int y = 0; y < ny; y++ ) {
				int yo = y * nz * nx;
				int xo = 0;
				for (int x = 0; x < nx; x++ ) {
					img[vo] = img3D[z+xo+yo];
					vo += 1;
					xo += nz;
				} //x
			} //y
		} //z
		_mm_free (img3D);
	} //for each volume
	return 0;
}

// Binarize the image with Otsu's method (voxels below the computed threshold
// become 0, the rest 1). If ignoreZeroVoxels, exactly-zero voxels are left
// untouched and excluded from the histogram; NaNs are always excluded.
static int nifti_otsu(nifti_image * nim, int ignoreZeroVoxels) {
	//binarize image using Otsu's method
	if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2) || (nim->nz < 1)) return 1;
	if (nim->datatype != DT_CALC) return 1;
	flt * inimg = (flt *) nim->data;
	flt mn = INFINITY; //better that inimg[0] in case NaN
	flt mx = -INFINITY;
	for (int i = 0; i < nim->nvox; i++ ) {
		mn = MIN(mn, inimg[i]);
		mx = MAX(mx, inimg[i]);
	}
	if (mn >= mx) return 0; //no variability
	#define nBins 1001
	flt scl = (nBins-1)/(mx-mn);
	int hist[nBins];
	for (int i = 0; i < nBins; i++ )
		hist[i] = 0;
	if (ignoreZeroVoxels) {
		for (int i = 0; i < nim->nvox; i++ ) {
			if (isnan(inimg[i])) continue;
			if (inimg[i] == 0.0) continue;
			hist[(int)round((inimg[i]-mn)*scl) ]++;
		}
	} else {
		for (int i = 0; i < nim->nvox; i++ ) {
			if (isnan(inimg[i])) continue;
			hist[(int)round((inimg[i]-mn)*scl) ]++;
		}
	}
	//https://en.wikipedia.org/wiki/Otsu%27s_method
	size_t total = 0;
	for (int i = 0; i < nBins; i++ )
		total += hist[i];
	int top = nBins - 1; // NOTE(review): unused local
	int level = 0;
	double sumB = 0;
	double wB = 0;
	double maximum = 0.0;
	double sum1 = 0.0;
	//sum1 = total intensity-weighted count, Sum(i * hist[i])
	for (int i = 0; i < nBins; i++ )
		sum1 += (i * hist[i]);
for (int ii = 0; ii < nBins; ii++ ) { double wF = total - wB; if ((wB > 0) && (wF > 0)) { double mF = (sum1 - sumB) / wF; double val = wB * wF * ((sumB / wB) - mF) * ((sumB / wB) - mF); if ( val >= maximum ) { level = ii; maximum = val; } } wB = wB + hist[ii]; sumB = sumB + (ii-1) * hist[ii]; } double threshold = (level / scl)+mn; if (ignoreZeroVoxels) { for (int i = 0; i < nim->nvox; i++ ) { if (inimg[i] == 0.0) continue; inimg[i] = (inimg[i] < threshold) ? 0.0 : 1.0; } } else { for (int i = 0; i < nim->nvox; i++ ) inimg[i] = (inimg[i] < threshold) ? 0.0 : 1.0; } //fprintf(stderr,"range %g..%g threshold %g bin %d\n", mn, mx, threshold, level); return 0; } static int nifti_unsharp(nifti_image * nim, flt SigmammX, flt SigmammY, flt SigmammZ, flt amount) { //https://github.com/afni/afni/blob/699775eba3c58c816d13947b81cf3a800cec606f/src/edt_blur.c if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2) || (nim->nz < 1)) return 1; if (nim->datatype != DT_CALC) return 1; if (amount == 0.0) return 0; flt * inimg = (flt *) nim->data; void * indat = (void *) nim->data; flt mn = INFINITY; //better that inimg[0] in case NaN flt mx = -INFINITY; for (int i = 0; i < nim->nvox; i++ ) { mn = MIN(mn, inimg[i]); mx = MAX(mx, inimg[i]); } if (mn >= mx) return 0; //no variability size_t nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1); size_t nVol = nim->nvox / nvox3D; if ((nvox3D * nVol) != nim->nvox) return 1; //process each 3D volume independently: reduce memory pressure nim->nvox = nvox3D; void * sdat = (void *)calloc(1,nim->nvox * sizeof(flt)) ; nim->data = sdat; flt * simg = (flt *) sdat; for (int v = 0; v < nVol; v++ ) { memcpy(simg, inimg, nim->nvox*sizeof(flt)); nifti_smooth_gauss(nim, SigmammX, SigmammY, SigmammZ); for (int i = 0; i < nim->nvox; i++ ) { //sharpened = original + (original - blurred) * amount inimg[i] += (inimg[i] - simg[i]) * amount; //keep in original range inimg[i] = MAX(inimg[i], mn); inimg[i] = MIN(inimg[i], mx); } inimg += nim->nvox; } free(sdat); //return 
original data nim->data = indat; nim->nvox = nvox3D * nVol; return 0; } //nifti_unsharp() static int nifti_crop(nifti_image * nim, int tmin, int tsize) { if (tsize == 0) { fprintf(stderr,"tsize must not be 0\n"); return 1; } if (nim->nvox < 1) return 1; if (nim->datatype != DT_CALC) return 1; int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1); if ((nvox3D < 1) || ((nim->nvox % nvox3D) != 0) ) return 1; int nvol = (nim->nvox / nvox3D); //in if (nvol < 2) { fprintf(stderr,"crop only appropriate for 4D volumes"); return 1; } if (tmin >= nvol) { fprintf(stderr,"tmin must be from 0..%d, not %d\n", nvol-1, tmin); return 1; } int tminVol = MAX(0,tmin); int tFinalVol = tminVol+tsize-1; //e.g. if tmin=0 and tsize=1, tFinal=0 if (tsize < 0) { tFinalVol = INT_MAX; } tFinalVol = MIN(tFinalVol, nvol-1); if ((tminVol == 0) && (tFinalVol == (nvol-1)) ) return 0; int nvolOut = tFinalVol-tminVol+1; flt * imgIn = (flt *) nim->data; nim->nvox = nvox3D * nvolOut; void * dat = (void *)calloc(1,nim->nvox * sizeof(flt)) ; flt * imgOut = (flt *) dat; imgIn += tminVol * nvox3D; memcpy(imgOut, imgIn, nim->nvox*sizeof(flt)); free(nim->data); nim->data = dat; if (nvolOut == 1) nim->dim[0] = 3; else nim->dim[0] = 4; nim->ndim = nim->dim[0]; nim->dim[4] = nvolOut; nim->nt = nvolOut; nim->nu = 1; nim->nv = 1; nim->nw = 1; for (int i = 5; i < 8; i++ ) nim->dim[i] = 1; return 0; } static int nifti_rescale ( nifti_image * nim, double scale , double intercept) { //linear transform of data if (nim->nvox < 1) return 1; if (nim->datatype == DT_CALC) { flt scl = scale; flt inter = intercept; flt * f32 = (flt *) nim->data; for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = (f32[i] * scl) + inter; return 0; } fprintf(stderr,"nifti_rescale: Unsupported datatype %d\n", nim->datatype); return 1; } static int nifti_tfceS(nifti_image * nim, double H, double E, int c, int x, int y, int z, double tfce_thresh) { if (nim->nvox < 1) return 1; if (nim->datatype != DT_CALC) return 1; if ((x < 0) || (x >= nim->dim[1]) || 
(y < 0) || (y >= nim->dim[2]) || (z < 0) || (z >= nim->dim[3])) { fprintf(stderr,"tfceS x/y/z must be in range 0..%"PRId64"/0..%"PRId64"/0..%"PRId64"\n", nim->dim[1]-1, nim->dim[2]-1, nim->dim[3]-1); } if (!neg_determ(nim)) x = nim->dim[1] - x - 1; int seed = x + (y * nim->dim[1]) + (z * nim->dim[1] * nim->dim[2]); flt * inimg = (flt *) nim->data; if (inimg[seed] < H) { fprintf(stderr,"it doesn't reach to specified threshold\n"); return 1; } size_t nvox3D = nim->dim[1]*nim->dim[2]*nim->dim[3]; if (nim->nvox > nvox3D) { fprintf(stderr,"tfceS not suitable for 4D data.\n"); return 1; } //printf("peak %g\n", inimg[seed]); int numk = c; if ((c != 6) && (c != 18) && (c != 26)) { fprintf(stderr,"suitable values for c are 6, 18 or 26\n"); numk = 6; } //set up kernel to search for neighbors. Since we already included sides, we do not worry about A<->P and L<->R wrap int32_t * k = (int32_t *)_mm_malloc(3*numk*sizeof(int32_t), 64); //kernel: offset, x, y int mxDx = 1; //connectivity 6: faces only if (numk == 18) mxDx = 2; //connectivity 18: faces+edges if (numk == 26) mxDx = 3; //connectivity 26: faces+edges+corners int j = 0; for (int z = -1; z <= 1; z++ ) for (int y = -1; y <= 1; y++ ) for (int x = -1; x <= 1; x++ ) { int dx = abs(x)+abs(y)+abs(z); if ((dx > mxDx) || (dx == 0)) continue; k[j] = x + (y * nim->nx) + (z * nim->nx * nim->ny); k[j+numk] = x; //avoid left-right wrap k[j+numk+numk] = x; //avoid anterior-posterior wrap j++; } //for x flt mx = (inimg[0]); for (size_t i = 0; i < nvox3D; i++ ) mx = MAX((inimg[i]),mx); double dh = mx/100.0; flt * outimg = (flt *)_mm_malloc(nvox3D*sizeof(flt), 64); //output image int32_t * q = (int32_t *)_mm_malloc(nvox3D*sizeof(int32_t), 64); //queue with untested seed uint8_t * vxs = (uint8_t *)_mm_malloc(nvox3D*sizeof(uint8_t), 64); for (int i = 0; i < nvox3D; i++ ) outimg[i] = 0.0; int n_steps = (int)ceil(mx/dh); //for (int step=0; step<n_steps; step++) { for (int step=n_steps-1; step >= 0; step--) { flt thresh = (step+1)*dh; 
memset(vxs, 0, nvox3D*sizeof(uint8_t)); for (int i = 0; i < nvox3D; i++ ) if (inimg[i] >= thresh) vxs[i] = 1; //survives, unclustered int qlo = 0; int qhi = 0; q[qhi] = seed; //add starting voxel as seed in queue vxs[seed] = 0; //do not find again! while (qhi >= qlo) { //first in, first out queue //retire one seed, add 0..6, 0..18 or 0..26 new ones (depending on connectivity) for (int j = 0; j < numk; j++) { int jj = q[qlo] + k[j]; if ((jj < 0) || (jj >= nvox3D)) continue; //voxel in volume if (vxs[jj] == 0) continue; //already found or did not survive threshold int dx = x+k[j+numk]; if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right int dy = y+k[j+numk+numk]; if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior //add new seed: vxs[jj] = 0; //do not find again! qhi++; q[qhi] = jj; } qlo++; } //while qhi >= qlo: continue until all seeds tested flt valToAdd = pow(qhi+1, E) * pow(thresh, H); //"supporting section", Dark Gray in Figure 1 for (int j = 0; j <= qhi; j++) outimg[q[j]] += valToAdd; //printf("step %d thresh %g\n", step, outimg[seed]); if (outimg[seed] >= tfce_thresh) break; } //for each step if ( outimg[seed] < tfce_thresh) fprintf(stderr,"it doesn't reach to specified threshold (%g < %g)\n", outimg[seed], tfce_thresh); for (size_t i = 0; i < nvox3D; i++ ) if (outimg[i] == 0.0) inimg[i] = 0.0; _mm_free (q); _mm_free (vxs); _mm_free (outimg); _mm_free (k); return 0; } static int nifti_tfce(nifti_image * nim, double H, double E, int c) { //https://www.fmrib.ox.ac.uk/datasets/techrep/tr08ss1/tr08ss1.pdf if (nim->nvox < 1) return 1; if (nim->datatype != DT_CALC) return 1; int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3]; int nvol = nim->nvox / nvox3D; int numk = c; if ((c != 6) && (c != 18) && (c != 26)) { fprintf(stderr,"suitable values for c are 6, 18 or 26\n"); numk = 6; } //set up kernel to search for neighbors. 
Since we already included sides, we do not worry about A<->P and L<->R wrap int32_t * k = (int32_t *)_mm_malloc(3*numk*sizeof(int32_t), 64); //kernel: offset, x, y int mxDx = 1; //connectivity 6: faces only if (numk == 18) mxDx = 2; //connectivity 18: faces+edges if (numk == 26) mxDx = 3; //connectivity 26: faces+edges+corners int j = 0; for (int z = -1; z <= 1; z++ ) for (int y = -1; y <= 1; y++ ) for (int x = -1; x <= 1; x++ ) { int dx = abs(x)+abs(y)+abs(z); if ((dx > mxDx) || (dx == 0)) continue; k[j] = x + (y * nim->nx) + (z * nim->nx * nim->ny); k[j+numk] = x; //avoid left-right wrap k[j+numk+numk] = x; //avoid anterior-posterior wrap j++; } //for x //omp notes: here we compute each volume independently. // Christian Gaser computes the step loop in parallel, which accelerates 3D cases // This code is very quick on 3D, so this does not seem crucial, and avoids critical sections #pragma omp parallel for for (int vol = 0; vol < nvol; vol++ ) { //identify clusters flt * inimg = (flt *) nim->data; inimg += vol * nvox3D; flt mx = (inimg[0]); for (size_t i = 0; i < nvox3D; i++ ) mx = MAX((inimg[i]),mx); double dh = mx/100.0; flt * outimg = (flt *)_mm_malloc(nvox3D*sizeof(flt), 64); //output image int32_t * q = (int32_t *)_mm_malloc(nvox3D*sizeof(int32_t), 64); //queue with untested seed uint8_t * vxs = (uint8_t *)_mm_malloc(nvox3D*sizeof(uint8_t), 64); for (int i = 0; i < nvox3D; i++ ) outimg[i] = 0.0; int n_steps = (int)ceil(mx/dh); for (int step=0; step<n_steps; step++) { flt thresh = (step+1)*dh; memset(vxs, 0, nvox3D*sizeof(uint8_t)); for (int i = 0; i < nvox3D; i++ ) if (inimg[i] >= thresh) vxs[i] = 1; //survives, unclustered int i = 0; for (int z = 0; z < nim->nz; z++ ) for (int y = 0; y < nim->ny; y++ ) for (int x = 0; x < nim->nx; x++ ) { if (vxs[i] == 0) { i++; continue; } //voxel did not survive or already clustered int qlo = 0; int qhi = 0; q[qhi] = i; //add starting voxel as seed in queue vxs[i] = 0; //do not find again! 
						//flood fill this cluster (FIFO queue), then credit
						//every member with extent^E * height^H
						while (qhi >= qlo) { //first in, first out queue
							//retire one seed, add 0..6, 0..18 or 0..26 new ones (depending on connectivity)
							for (int j = 0; j < numk; j++) {
								int jj = q[qlo] + k[j];
								if ((jj < 0) || (jj >= nvox3D)) continue; //voxel in volume
								if (vxs[jj] == 0) continue; //already found or did not survive threshold
								// NOTE(review): dx/dy use the x,y of the cluster's
								// START voxel, not of q[qlo] — approximate wrap test
								int dx = x+k[j+numk];
								if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right
								int dy = y+k[j+numk+numk];
								if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior
								//add new seed:
								vxs[jj] = 0; //do not find again!
								qhi++;
								q[qhi] = jj;
							}
							qlo++;
						} //while qhi >= qlo: continue until all seeds tested
						flt valToAdd = pow(qhi+1, E) * pow(thresh, H); //"supporting section", Dark Gray in Figure 1
						for (int j = 0; j <= qhi; j++)
							outimg[q[j]] += valToAdd;
						i++;
					} //for each voxel
		} //for each step
		for (int i = 0; i < nvox3D; i++ )
			inimg[i] = outimg[i];
		_mm_free (q);
		_mm_free (vxs);
		_mm_free (outimg);
	}
	_mm_free (k);
	return 0;
} //nifti_tfce()

// Draw a grid of value v: every spacing-th column and row of each slice, and
// (for 3D data) every spacing-th whole slice, is set to v.
static int nifti_grid( nifti_image * nim, double v, int spacing) {
	if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2)) return 1;
	if (nim->datatype != DT_CALC) return 1;
	size_t nxy = (nim->nx * nim->ny);
	size_t nzt = nim->nvox / nxy; //number of 2D slices across all volumes
	flt * f32 = (flt *) nim->data;
	flt fv = v;
	#pragma omp parallel for
	for (size_t i = 0; i < nzt; i++ ) { //for each 2D slices
		size_t so = i * nxy; //slice offset
		int z = (i % nim->nz);
		if ((nim->nz > 1) && ((z % spacing) == 0) ) { //whole slice is grid
			for (size_t j = 0; j < nxy; j++ )
				f32[so++] = fv;
			continue;
		}
		//vertical grid lines (every spacing-th column)
		for (size_t y = 0; y < nim->ny; y++ )
			for (size_t x = 0; x < nim->nx; x++ ) {
				if ((x % spacing) == 0) f32[so] = fv;
				so ++;
			}
		so = i * nxy; //slice offset
		//horizontal grid lines (every spacing-th row)
		for (size_t y = 0; y < nim->ny; y++ )
			for(size_t x = 0; x < nim->nx; x++ ) {
				if ((y % spacing) == 0) f32[so] = fv;
				so ++;
			}
	} //for i: each 2D slice
	return 0;
}

// Remainder (modulo) of each voxel by v, emulating fslmaths -rem: when isFrac
// is false the fractional part is truncated (see table below); when true the
// full fmod result is kept.
static int nifti_rem ( nifti_image * nim, double v, int isFrac) {
	//remainder (modulo) : fslmaths
	/*fmod(0.45, 2) = 0.45 : 0
	fmod(0.9, 2) = 0.9 : 0
	fmod(1.35, 2) = 1.35 : 1
	fmod(1.8, 2) = 1.8 : 1
	fmod(-0.45, 2) = -0.45 : 0
	fmod(-0.9, 2) = -0.9 : 0
	fmod(-1.35, 2) = -1.35 : -1
	fmod(-1.8, 2) = -1.8 : -1 */
	if (nim->datatype != DT_CALC) return 1;
	if (nim->nvox < 1) return 1;
	if (v == 0.0) {
		fprintf(stderr,"Exception: '-rem 0' does not make sense\n");
		return 1;
	}
	flt fv = v;
	flt * f32 = (flt *) nim->data;
	if (isFrac) {
		for (size_t i = 0; i < nim->nvox; i++ )
			f32[i] = fmod(f32[i], fv);
	} else {
		for (size_t i = 0; i < nim->nvox; i++ ) {
			//printf("fmod(%g, %g) = %g : %g\n", f32[i], fv, fmod(f32[i],fv), trunc(fmod(f32[i],fv)) );
			f32[i] = trunc(fmod(f32[i], fv));
		}
	}
	return 0;
}

// Threshold: zero voxels below v (or above v when zeroBrightVoxels is set).
static int nifti_thr( nifti_image * nim, double v, int zeroBrightVoxels) {
	if (nim->nvox < 1) return 1;
	if (nim->datatype == DT_CALC) {
		flt fv = v;
		flt * f32 = (flt *) nim->data;
		if (zeroBrightVoxels) {
			for (size_t i = 0; i < nim->nvox; i++ )
				if (f32[i] > fv) f32[i] = 0.0f;
		} else {
			for (size_t i = 0; i < nim->nvox; i++ )
				if (f32[i] < fv) f32[i] = 0.0f;
		}
		return 0;
	}
	fprintf(stderr,"nifti_thr: Unsupported datatype %d\n", nim->datatype);
	return 1;
} // nifti_thr()

// Clamp each voxel to at most v (fmax keeps the larger value) or, when useMin
// is set, to at least v (fmin keeps the smaller value).
static int nifti_max( nifti_image * nim, double v, int useMin) {
	if (nim->nvox < 1) return 1;
	if (nim->datatype == DT_CALC) {
		flt fv = v;
		flt * f32 = (flt *) nim->data;
		if (useMin) {
			for (size_t i = 0; i < nim->nvox; i++ )
				f32[i] = fmin(f32[i], fv);
		} else {
			for (size_t i = 0; i < nim->nvox; i++ )
				f32[i] = fmax(f32[i], fv);
		}
		return 0;
	}
	fprintf(stderr,"nifti_max: Unsupported datatype %d\n", nim->datatype);
	return 1;
} // nifti_max()

// Intensity-normalize each 3D volume to a mean of M (fslmaths -inm): every
// voxel of a volume is scaled by M / mean, with the mean taken over voxels > 0.
static int nifti_inm( nifti_image * nim, double M) {
	//https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=fsl;bf9d21d2.1610
	//With '-inm <value>', every voxel in the input volume is multiplied by <value> / M
	// where M is the mean across all voxels.
	//n.b.: regardless of description, mean appears to only include voxels > 0
	if (nim->nvox < 1) return 1;
	if (nim->datatype != DT_CALC) return 1;
	int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
	if ((nvox3D < 1) || ((nim->nvox % nvox3D) != 0) ) return 1;
	int nvol = nim->nvox / nvox3D;
	flt * f32 = (flt *) nim->data;
	#pragma omp parallel for
	for (int v = 0; v < nvol; v++ ) { //normalize each volume independently
		size_t vi = v * nvox3D;
		double sum = 0.0;
		//gt0 selects the "mean of positive voxels" behavior (matches fslmaths)
		#define gt0
		#ifdef gt0
		int n = 0;
		for (size_t i = 0; i < nvox3D; i++ ) {
			if (f32[vi+i] > 0.0f) {
				n ++;
				sum += f32[vi+i];
			}
		}
		if (sum == 0.0) continue; //nothing positive: leave volume unchanged
		double ave = sum / n;
		#else
		for (int i = 0; i < nvox3D; i++ )
			sum += f32[vi+i];
		if (sum == 0.0) continue;
		double ave = sum / nvox3D;
		#endif
		//printf("%g %g\n", ave, M);
		flt scale = M / ave;
		for (int i = 0; i < nvox3D; i++ )
			f32[vi+i] *= scale;
	}
	return 0;
} // nifti_inm()

// Global intensity normalization: like nifti_inm, but one mean (of voxels > 0)
// is computed over the WHOLE 4D dataset and applied to every voxel.
static int nifti_ing( nifti_image * nim, double M) {
	//https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=fsl;bf9d21d2.1610
	//With '-inm <value>', every voxel in the input volume is multiplied by <value> / M
	// where M is the mean across all voxels.
	//n.b.: regardless of description, mean appears to only include voxels > 0
	if (nim->nvox < 1) return 1;
	if (nim->datatype != DT_CALC) return 1;
	flt * f32 = (flt *) nim->data;
	double sum = 0.0;
	int n = 0;
	for (size_t i = 0; i < nim->nvox; i++ ) {
		if (f32[i] > 0.0f) {
			n ++;
			sum += f32[i];
		}
	}
	if (sum == 0) return 0; //no positive voxels: nothing to scale
	double ave = sum / n;
	flt scale = M / ave;
	#pragma omp parallel for
	for (int i = 0; i < nim->nvox; i++ )
		f32[i] *= scale;
	return 0;
} //nifti_ing()

// Estimate FSL-style "robust range" (~2nd and 98th percentiles) via a
// 1001-bin histogram. NaNs are always excluded; zeros are excluded only when
// ignoreZeroVoxels. Results are returned through pct2/pct98 and also printed.
static int nifti_robust_range(nifti_image * nim, flt * pct2, flt * pct98, int ignoreZeroVoxels) {
	//https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=fsl;31f309c1.1307
	// robust range is essentially the 2nd and 98th percentiles
	// "but ensuring that the majority of the intensity range is captured, even for binary images."
	// fsl uses 1000 bins, also limits for volumes less than 100 voxels taylor.hanayik@ndcn.ox.ac.uk 20190107
	//fslstats trick -r
	// 0.000000 1129.141968
	//niimath >fslstats trick -R
	// 0.000000 2734.000000
	*pct2 = 0.0;
	*pct98 = 1.0;
	if (nim->nvox < 1) return 1;
	if (nim->datatype != DT_CALC) return 1;
	flt * f32 = (flt *) nim->data;
	flt mn = INFINITY;
	flt mx = -INFINITY;
	size_t nZero = 0;
	size_t nNan = 0;
	for (size_t i = 0; i < nim->nvox; i++ ) {
		if (isnan(f32[i])) {
			nNan ++;
			continue;
		}
		if ( f32[i] == 0.0 ) {
			nZero++;
			continue;
		}
		mn = fmin(f32[i],mn);
		mx = fmax(f32[i],mx);
	}
	//zeros extend the range downward unless they are being ignored
	if ((nZero > 0) && (mn > 0.0) && (!ignoreZeroVoxels))
		mn = 0.0;
	if (mn > mx) return 0; //all NaN
	if (mn == mx) {
		*pct2 = mn;
		*pct98 = mx;
		return 0;
	}
	if (!ignoreZeroVoxels) nZero = 0;
	nZero += nNan;
	size_t n2pct = round((nim->nvox - nZero)* 0.02);
	if ((n2pct < 1) || (mn == mx) || ((nim->nvox -nZero) < 100) ) { //T Hanayik mentioned issue with very small volumes
		*pct2 = mn;
		*pct98 = mx;
		return 0;
	}
	//note: nBins is also #defined (identically) in nifti_otsu; an identical
	//re-#define is legal C
	#define nBins 1001
	flt scl = (nBins-1)/(mx-mn);
	int hist[nBins];
	for (int i = 0; i < nBins; i++ )
		hist[i] = 0;
	if (ignoreZeroVoxels) {
		for (int i = 0; i < nim->nvox; i++ ) {
			if (isnan(f32[i])) continue;
			if (f32[i] == 0.0) continue;
			hist[(int)round((f32[i]-mn)*scl) ]++;
		}
	} else {
		for (int i = 0; i < nim->nvox; i++ ) {
			if (isnan(f32[i])) continue;
			hist[(int)round((f32[i]-mn)*scl) ]++;
		}
	}
	//walk in from each end until 2% of the counted voxels are passed
	size_t n = 0;
	size_t lo = 0;
	while (n < n2pct) {
		n += hist[lo];
		//if (lo < 10)
		// printf("%zu %zu %zu %d\n",lo, n, n2pct, ignoreZeroVoxels);
		lo++;
	}
	lo --; //remove final increment
	n = 0;
	int hi = nBins;
	while (n < n2pct) {
		hi--;
		n += hist[hi];
	}
	/*if ((lo+1) < hi) {
	 size_t nGray = 0;
	 for (int i = lo+1; i < hi; i++ ) {
	  nGray += hist[i];
	  //printf("%d %d\n", i, hist[i]);
	 }
	 float fracGray = (float)nGray/(float)(nim->nvox - nZero);
	 printf("histogram[%d..%d] = %zu %g\n", lo, hi, nGray, fracGray);
	}*/
	if (lo == hi) { //MAJORITY are not black or white
		//degenerate (e.g. binary) image: widen until a non-empty bin is found
		int ok = -1;
		while (ok != 0) {
			if (lo > 0) {
				lo--;
				if (hist[lo] > 0) ok = 0;
			}
			if ((ok != 0) && (hi < (nBins-1))) {
				hi++;
				if (hist[hi] > 0) ok = 0;
			}
			if ((lo == 0) && (hi == (nBins-1))) ok = 0;
		} //while not ok
	} //if lo == hi
	*pct2 = (lo)/scl + mn;
	*pct98 = (hi)/scl + mn;
	printf("full range %g..%g (voxels 0 or NaN =%zu) robust range %g..%g\n", mn, mx, nZero, *pct2, *pct98);
	return 0;
}

enum eDimReduceOp{Tmean,Tstd,Tmax,Tmaxn,Tmin,Tmedian,Tperc,Tar1};

// qsort comparator for flt values, ascending; the subtraction-free form
// avoids overflow and returns -1/0/+1.
static int compare (const void * a, const void * b) {
	flt fa = *(const flt*) a;
	flt fb = *(const flt*) b;
	return (fa > fb) - (fa < fb);
}

// Remove a least-squares linear trend from xx[0..npt-1] in place.
// pt0 selects the anchoring: 0 = zero mean, 1 = first point zero,
// 2 = final point zero.
static void dtrend(flt * xx, int npt, int pt0) {
	//linear detrend, first point is set to zero
	// if pt0=0 then mean is zero, pt0=1 then first point is zero, if pt0=2 final point is zero
	double t1,t3,t10 , x0,x1 ;
	int ii ;
	if( npt < 2 || xx == NULL ) return ;
	x0 = xx[0] ;
	x1 = 0.0 ;
	for( ii=1 ; ii < npt ; ii++ ){
		x0 += xx[ii] ;
		x1 += xx[ii] * ii ;
	}
	//closed-form least-squares intercept (f0) and slope (f1)
	t1 = npt*x0;
	t3 = 1.0/npt;
	t10 = npt*npt;
	double f0 = (double)(2.0/(npt+1.0)*t3*(2.0*t1-3.0*x1-x0));
	double f1 = (double)(-6.0/(t10-1.0)*t3*(-x0-2.0*x1+t1));
	//printf("%.8g %.8g %g\n", f0, f1, xx[0]);
	if (pt0 == 1) f0 = xx[0];
	if (pt0 == 2) f0 = xx[npt-1]- (f1*(npt-1));
	for( ii=0 ; ii < npt ; ii++ )
		xx[ii] -= (f0 + f1*ii) ;
}

// Linearly detrend every voxel's timecourse of a 4D image (zero-mean variant).
// NOTE(review): the error message says "at least three volumes" but the guard
// rejects only nvol < 2 — confirm which is intended.
static int nifti_detrend_linear(nifti_image * nim) {
	if (nim->datatype != DT_CALC) return 1;
	size_t nvox3D = nim->nx * nim->ny * MAX(1,nim->nz);
	if (nvox3D < 1) return 1;
	int nvol = nim->nvox / nvox3D;
	if ((nvox3D * nvol) != nim->nvox) return 1;
	if (nvol < 2) {
		fprintf(stderr,"detrend requires a 4D image with at least three volumes\n");
		return 1;
	}
	flt * img = (flt *) nim->data;
	#pragma omp parallel for
	for (size_t i = 0; i < nvox3D; i++) {
		flt * data = (flt *)_mm_malloc(nvol*sizeof(flt), 64);
		//load one voxel across all timepoints
		int j = 0;
		for (size_t v = i; v < nim->nvox; v+= nvox3D) {
			data[j] = img[v];
			j++;
		}
		//detrend
		dtrend(data, nvol, 0);
		//save one voxel across all timepoints
		j = 0;
		for (size_t v = i; v < nim->nvox; v+= nvox3D) {
			img[v] = data[j];
			j++;
		}
		_mm_free (data);
	}
	return 0;
}
#ifdef bandpass
//reference implementations consulted for the IIR band-pass code below:
//https://github.com/QtSignalProcessing/QtSignalProcessing/blob/master/src/iir.cpp
//https://github.com/rkuchumov/day_plot_diagrams/blob/8df48af431dc76b1656a627f1965d83e8693ddd7/data.c
//https://scipy-cookbook.readthedocs.io/items/ButterworthBandpass.html
// Sample rate and desired cutoff frequencies (in Hz).
// double highcut = 1250;
// double lowcut = 500;
// double samp_rate = 5000;
//[b,a] = butter(2, [0.009, 0.08]);
//https://afni.nimh.nih.gov/afni/community/board/read.php?1,84373,137180#msg-137180
//Power 2011, Satterthwaite 2013, Carp 2011, Power's reply to Carp 2012
// https://github.com/lindenmp/rs-fMRI/blob/master/func/ButterFilt.m
//https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.filtfilt.html
/* The function butterworth_filter() emulates Jan Simon's FiltFiltM
 it uses Gustafsson’s method and padding to reduce ringing at start/end
 https://www.mathworks.com/matlabcentral/fileexchange/32261-filterm?focused=5193423&tab=function
 Copyright (c) 2011, Jan Simon
 All rights reserved.
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
 met:
 * Redistributions of source code must retain the above copyright
 notice, this list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above copyright
 notice, this list of conditions and the following disclaimer in
 the documentation and/or other materials provided with the distribution
 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 ARE DISCLAIMED.
 IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY
 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.*/
//zero-phase (forward+reverse, filtfilt-style) Butterworth filter applied along the
//4th dimension of a 4D array stored as nvox3D voxels x nvol volumes.
//fs = sample rate (Hz); highcut/lowcut cutoffs in Hz (<=0 disables that edge).
//Returns 0 on success, 1 on invalid parameters.
static int butterworth_filter(flt * img, int nvox3D, int nvol, double fs, double highcut, double lowcut) {
    //sample rate, low cut and high cut are all in Hz
    //this attempts to emulate performance of https://www.mathworks.com/matlabcentral/fileexchange/32261-filterm
    // specifically, prior to the forward and reverse pass the coefficients are estimated by a forward and reverse pass
    int order = 2;
    if (order <= 0) return 1;
    if ((highcut <= 0.0) && (lowcut <= 0.0)) return 1;
    if (fs <= 0.0) return 1;
    if ((lowcut > 0.0) && (highcut > 0.0))
        printf("butter bandpass lowcut=%g highcut=%g fs=%g order=%d (effectively %d due to filtfilt)\n", lowcut, highcut, fs, order, 2*order);
    else if (highcut > 0.0)
        printf("butter lowpass highcut=%g fs=%g order=%d (effectively %d due to filtfilt)\n", highcut, fs, order, 2*order);
    else if (lowcut > 0.0)
        printf("butter highpass lowcut=%g fs=%g order=%d (effectively %d due to filtfilt)\n", lowcut, fs, order, 2*order);
    else {
        printf("Butterworth parameters do not make sense\n");
        return 1;
    }
    double * a;
    double * b;
    double * IC;
    int nX = nvol;
    int nA = 0;
    //butter_design fills coefficient arrays a, b and initial conditions IC (allocated by callee)
    nA = butter_design(order, 2.0*lowcut/fs, 2.0*highcut/fs, &a, &b, &IC);
    int nEdge = 3 * (nA -1); //length of reflected padding at each end
    if ((nA < 1) || (nX <= nEdge)) {
        printf("filter requires at least %d samples\n", nEdge);
        _mm_free(a);
        _mm_free(b);
        _mm_free(IC);
        return 1;
    }
    #pragma omp parallel for
    for (int vx = 0; vx < nvox3D; vx++) {
        double * X = (double *)_mm_malloc(nX*sizeof(double), 64);
        size_t vo = vx;
        flt mn = INFINITY;
        flt mx = -INFINITY;
        //copy one voxel's timeseries into X, tracking min/max to detect flat series
        for (int j = 0; j < nX; j++) {
            X[j] = img[vo];
            mn = MIN(mn, X[j]);
            mx = MAX(mx, X[j]);
            vo += nvox3D;
        }
        if (mn < mx) { //some variability
            //mirror-pad the head and tail to reduce filtfilt edge ringing
            double * Xi = (double *)_mm_malloc(nEdge * sizeof(double), 64);
            for (int i = 0; i < nEdge; i++)
                Xi[nEdge-i-1] = X[0]-(X[i+1]-X[0]);
            double * CC = (double *)_mm_malloc((nA-1) * sizeof(double), 64);
            for (int i = 0; i < (nA-1); i++)
                CC[i] = IC[i]* Xi[0];
            double * Xf = (double *)_mm_malloc(nEdge * sizeof(double), 64);
            for (int i = 0; i < nEdge; i++)
                Xf[i] = X[nX-1]-(X[nX-2-i]-X[nX-1]);
            Filt(Xi, nEdge, a, b, nA-1, CC); //filter head
            Filt(X, nX, a, b, nA-1, CC); //filter array
            Filt(Xf, nEdge, a, b, nA-1, CC); //filter tail
            //reverse pass (zero-phase)
            for (int i = 0; i < (nA-1); i++)
                CC[i] = IC[i]* Xf[nEdge-1];
            FiltRev(Xf, nEdge, a, b, nA-1, CC); //filter tail
            FiltRev(X, nX, a, b, nA-1, CC); //filter array
            _mm_free (Xi);
            _mm_free (Xf);
            _mm_free (CC);
        } else { //else no variability: set all voxels to zero
            for (int j = 0; j < nX; j++)
                X[j] = 0;
        }
        //save data to 4D array
        vo = vx;
        for (int j = 0; j < nX; j++) {
            img[vo] = X[j];
            vo += nvox3D;
        }
        _mm_free (X);
    } //for vx
    _mm_free(b);
    _mm_free(a);
    _mm_free(IC);
    return 0;
}

//apply butterworth_filter() to a 4D NIfTI image; TRsec <= 0 falls back to pixdim[4].
//NOTE(review): hp_hz is forwarded as 'highcut' and lp_hz as 'lowcut' — confirm this
//mapping of high-pass/low-pass to cutoff arguments is the intended one.
static int nifti_bandpass(nifti_image * nim, double hp_hz, double lp_hz, double TRsec) {
    if (nim->datatype != DT_CALC) return 1;
    size_t nvox3D = nim->nx * nim->ny * MAX(1,nim->nz);
    if (TRsec <= 0.0) TRsec = nim->pixdim[4];
    if (TRsec <= 0) {
        fprintf(stderr,"Unable to determine sample rate\n");
        return 1;
    }
    if (nvox3D < 1) return 1;
    int nvol = nim->nvox / nvox3D;
    if ((nvox3D * nvol) != nim->nvox) return 1;
    if (nvol < 1) {
        fprintf(stderr,"bandpass requires 4D datasets\n");
        return 1;
    }
    return butterworth_filter((flt *) nim->data, nvox3D, nvol, 1/TRsec, hp_hz, lp_hz);
}
#endif

//fslmaths-style "-bptf" Gaussian temporal filter: hp_sigma/lp_sigma are Gaussian
//sigmas in volumes (TRs); <=0 disables that filter. demean!=0 removes the
//timeseries mean after high-pass (FSL >= 5.0.7 behavior). Returns 0 on success.
static int nifti_bptf(nifti_image * nim, double hp_sigma, double lp_sigma, int demean) {
    //Spielberg Matlab code: https://cpb-us-w2.wpmucdn.com/sites.udel.edu/dist/7/4542/files/2016/09/fsl_temporal_filt-15sywxn.m
    //5.0.7 highpass temporal filter removes the mean component https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/WhatsNew#anchor1
    /* http://www.fast.u-psud.fr/ezyfit/html/ezfit.html
     fitting functions are:
     - linear y = m * x
     - affine or poly1 y = a*x + b
     - poly{n} y = a0 + a1 * x + ... + an * x^n
     - power y = c*x^n
     - sin y = a * sin (b * x)
     - cos y = a * cos (b * x)
     - exp y = a * exp (b * x)
     - log y = a * log (b * x)
     - cngauss y = exp(-x^2/(2*s^2))/(2*pi*s^2)^(1/2)
     - cfgauss y = a*exp(-x^2/(2*s^2))
     - ngauss y = exp(-(x-x0)^2/(2*s^2))/(2*pi*s^2)^(1/2)
     - gauss y = a*exp(-(x-x0)^2/(2*s^2)) */
    // y = a*exp(-(x-x0)^2/(2*s^2))
    // regression formula (https://www.mathsisfun.com/data/least-squares-regression.html) modulated by weight
    if (nim->datatype != DT_CALC) return 1;
    if ((hp_sigma <= 0) && (lp_sigma <= 0)) return 0;
    size_t nvox3D = nim->nx * nim->ny * MAX(1,nim->nz);
    if (nvox3D < 1) return 1;
    int nvol = nim->nvox / nvox3D;
    if ((nvox3D * nvol) != nim->nvox) return 1;
    if (nvol < 1) {
        fprintf(stderr,"bptf requires 4D datasets\n");
        return 1;
    }
    int * hpStart, * hpEnd;
    double * hpSumX, * hpDenom, * hpSumWt, * hp, * hp0;
    if (hp_sigma > 0) { //initialize high-pass reusables
        //Spielberg's code uses 8*sigma, does not match current fslmaths:
        //tested with fslmaths freq4d -bptf 10 -1 nhp
        //cutoff ~3: most difference: 4->0.0128902 3->2.98023e-08 2->-0.0455322 1->0.379412
        int cutoffhp = ceil(3*hp_sigma); //to do: check this! ~3
        hp = (double *)_mm_malloc((cutoffhp+1+cutoffhp)*sizeof(double), 64); //-cutoffhp..+cutoffhp
        hp0 = hp + cutoffhp; //convert from 0..(2*cutoffhp) to -cutoffhp..+cutoffhp
        for (int k = -cutoffhp; k <= cutoffhp; k++) //for each index in kernel
            hp0[k] = exp(-sqr(k)/(2 * sqr(hp_sigma)));
        hpStart = (int *)_mm_malloc(nvol*sizeof(int), 64);
        hpEnd = (int *)_mm_malloc(nvol*sizeof(int), 64);
        hpSumX = (double *)_mm_malloc(nvol*sizeof(double), 64); //weighted Sum(x) per volume
        hpDenom = (double *)_mm_malloc(nvol*sizeof(double), 64); // N*Sum(x^2) - (Sum(x))^2
        hpSumWt = (double *)_mm_malloc(nvol*sizeof(double), 64); //sum of weight, N
        for (int v = 0; v < nvol; v++) { //linear regression with "gauss" fitting
            hpStart[v] = MAX(0,v-cutoffhp);
            hpEnd[v] = MIN(nvol-1,v+cutoffhp);
            double sumX = 0.0;
            double sumX2 = 0.0;
            double sumWt = 0.0;
            for (int k = hpStart[v]; k <= hpEnd[v]; k++) { //for each index in kernel
                int x = k-v;
                double wt = hp0[x]; //kernel weight
                sumX += wt * x;
                sumX2 += wt * x * x;
                sumWt += wt;
            }
            hpSumX[v] = sumX;
            hpDenom[v] = (sumWt * sumX2) - sqr(sumX); // N*Sum(x^2) - (Sum(x))^2
            if (hpDenom[v] == 0.0) hpDenom[v] = 1.0; //should never happen, x is known index
            hpDenom[v] = 1.0 / hpDenom[v]; //use reciprocal so we can use faster multiplication later
            hpSumWt[v] = sumWt;
        } //for each volume
    } //high-pass reusables
    //low-pass AFTER high-pass: fslmaths freq4d -bptf 45 5 fbp
    int * lpStart, * lpEnd;
    double * lpSumWt, * lp, * lp0;
    if (lp_sigma > 0) { //initialize low-pass reusables
        //simple Gaussian blur in time domain
        //freq4d -bptf -1 5 flp
        // fslmaths rest -bptf -1 5 flp
        // 3->0.00154053 4->3.5204e-05 5->2.98023e-07, 6->identical
        // Spielberg's code uses 8*sigma, so we will use that, even though precision seems excessive
        int cutofflp = ceil(8*lp_sigma); //to do: check this! at least 6
        lp = (double *)_mm_malloc((cutofflp+1+cutofflp)*sizeof(double), 64); //-cutofflp..+cutofflp
        lp0 = lp + cutofflp; //convert from 0..(2*cutofflp) to -cutofflp..+cutofflp
        for (int k = -cutofflp; k <= cutofflp; k++) //for each index in kernel
            lp0[k] = exp(-sqr(k)/(2 * sqr(lp_sigma)));
        lpStart = (int *)_mm_malloc(nvol*sizeof(int), 64);
        lpEnd = (int *)_mm_malloc(nvol*sizeof(int), 64);
        lpSumWt = (double *)_mm_malloc(nvol*sizeof(double), 64); //sum of weight, N
        for (int v = 0; v < nvol; v++) {
            lpStart[v] = MAX(0,v-cutofflp);
            lpEnd[v] = MIN(nvol-1,v+cutofflp);
            double sumWt = 0.0;
            for (int k = lpStart[v]; k <= lpEnd[v]; k++) //for each index in kernel
                sumWt += lp0[k-v]; //kernel weight
            if (sumWt == 0.0) sumWt = 1.0; //will never happen
            lpSumWt[v] = 1.0 / sumWt; //use reciprocal so we can use faster multiplication later
        } //for each volume
    } //low-pass reusables
    //https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=FSL;5b8cace9.0902
    //if TR=2s and 100 second cutoff is requested choose "-bptf 50 -1"
    //The 'cutoff' is defined as the FWHM of the filter, so if you ask for
    //100s that means 50 Trs, so the sigma, or HWHM, is 25 TRs.
    // -bptf <hp_sigma> <lp_sigma>
    flt * img = (flt *) nim->data;
    #pragma omp parallel for
    for (size_t i = 0; i < nvox3D; i++) {
        //read input data
        flt * imgIn = (flt *)_mm_malloc((nvol)*sizeof(flt), 64);
        flt * imgOut = (flt *)_mm_malloc((nvol)*sizeof(flt), 64);
        int j = 0;
        for (size_t v = i; v < nim->nvox; v+= nvox3D) {
            imgIn[j] = img[v];
            j++;
        }
        if (hp_sigma > 0) {
            //high-pass: subtract the intercept of a weighted local linear fit at each timepoint
            double sumOut = 0.0;
            for (int v = 0; v < nvol; v++) { //each volume
                double sumY = 0.0;
                double sumXY = 0.0;
                for (int k = hpStart[v]; k <= hpEnd[v]; k++) { //for each index in kernel
                    int x = k-v;
                    double wt = hp0[x];
                    flt y = imgIn[k];
                    sumY += wt * y;
                    sumXY += wt * x * y;
                }
                double n = hpSumWt[v];
                double m = ((n*sumXY) - (hpSumX[v] * sumY) ) * hpDenom[v]; //slope
                double b = (sumY - (m * hpSumX[v]))/n; //intercept
                imgOut[v] = imgIn[v] - b;
                sumOut += imgOut[v];
            } //for each volume
            //"fslmaths -bptf removes timeseries mean (for FSL 5.0.7 onward)" n.b. except low-pass
            double mean = sumOut / (double)nvol;
            //de-mean AFTER high-pass
            if (demean) {
                for (int v = 0; v < nvol; v++) //each volume
                    imgOut[v] -= mean;
            }
        } //hp_sigma > 0
        if (lp_sigma > 0) { //low pass does not de-mean data
            //if BOTH low-pass and high-pass, apply low pass AFTER high pass:
            // fslmaths freq4d -bptf 45 5 fbp
            // difference 1.86265e-08
            //still room for improvement:
            // fslmaths /Users/chris/src/rest -bptf 45 5 fbp
            // r=1.0 identical voxels 73% max difference 0.000488281
            if (hp_sigma > 0)
                memcpy(imgIn, imgOut, nvol*sizeof(flt));
            for (int v = 0; v < nvol; v++) { //each volume
                double sum = 0.0;
                for (int k = lpStart[v]; k <= lpEnd[v]; k++) //for each index in kernel
                    sum += imgIn[k] * lp0[k-v];
                imgOut[v] = sum * lpSumWt[v];
            } // for each volume
        } //lp_sigma > 0
        //write filtered data
        j = 0;
        for (size_t v = i; v < nim->nvox; v+= nvox3D) {
            img[v] = imgOut[j];
            j++;
        }
        _mm_free (imgIn);
        _mm_free (imgOut);
    }
    if (hp_sigma > 0) { //free high-pass reusables
        _mm_free (hp);
        _mm_free (hpStart);
        _mm_free (hpEnd);
        _mm_free (hpSumX);
        _mm_free (hpDenom);
        _mm_free (hpSumWt);
    }
    if (lp_sigma > 0) { //free low-pass reusables
        _mm_free (lp);
        _mm_free (lpStart);
        _mm_free (lpEnd);
        _mm_free (lpSumWt);
    }
    return 0;
} // nifti_bptf()

//subtract each voxel's temporal mean from its timeseries (4D input required)
static int nifti_demean(nifti_image * nim) {
    if (nim->datatype != DT_CALC) return 1;
    size_t nvox3D = nim->nx * nim->ny * MAX(1,nim->nz);
    if (nvox3D < 1) return 1;
    int nvol = nim->nvox / nvox3D;
    if ((nvox3D * nvol) != nim->nvox) return 1;
    if (nvol < 1) {
        fprintf(stderr,"demean requires 4D datasets\n");
        return 1;
    }
    flt * img = (flt *) nim->data;
    #pragma omp parallel for
    for (size_t i = 0; i < nvox3D; i++) {
        double sum = 0.0;
        for (size_t v = i; v < nim->nvox; v+= nvox3D)
            sum += img[v];
        double mean = sum / nvol;
        for (size_t v = i; v < nim->nvox; v+= nvox3D)
            img[v] -= mean;
    }
    return 0;
}

//collapse dimension 'dim' (1..4) of the image with reduction 'op';
//'percentage' is only used by Tperc. Header dims are updated in place.
static int nifti_dim_reduce(nifti_image * nim, enum eDimReduceOp op, int dim, int percentage) {
    //e.g. nifti_dim_reduce(nim, Tmean, 4) reduces 4th dimension, saving mean
    int nReduce = nim->dim[dim];
    if ((nReduce <= 1) || (dim < 1) || (dim > 4)) return 0; //nothing to reduce, fslmaths does not generate an error
    if ((nim->nvox < 1) || (nim->nx < 1) || (nim->ny < 1) || (nim->nz < 1)) return 1;
    //size_t nvox3D = nim->nx * nim->ny * nim->nz;
    //int nvol = nim->nvox / nvox3D;
    //if ((nvox3D * nvol) != nim->nvox) return 1;
    if (nim->datatype != DT_CALC) return 1;
    if (nim->dim[0] > 4)
        fprintf(stderr,"dimension reduction collapsing %"PRId64"D into to 4D\n", nim->dim[0]);
    int dims[8], indims[8];
    for (int i = 0; i < 4; i++ )
        dims[i] = MAX(nim->dim[i],1);
    //XYZT limits to 4 dimensions, so collapse dims [4,5,6,7]
    dims[4] = nim->nvox / (dims[1]*dims[2]*dims[3]);
    for (int i = 5; i < 8; i++ )
        dims[i] = 1;
    for (int i = 0; i < 8; i++ )
        indims[i] = dims[i];
    if ((dims[1]*dims[2]*dims[3]*dims[4]) != nim->nvox) return 1; //e.g. data in dim 5..7!
    dims[dim] = 1;
    if (dim == 4) dims[0] = 3; //reduce 4D to 3D
    size_t nvox = dims[1]*dims[2]*dims[3]*dims[4]; //voxels in the reduced output
    flt * i32 = (flt *) nim->data;
    void * dat = (void *)calloc(1,nim->nvox * sizeof(flt)) ;
    flt * o32 = (flt *) dat;
    int collapseStep; //e.g. if we collapse 4th dimension, we will collapse across voxels separated by X*Y*Z
    if (dim == 1) collapseStep = 1; //collapse by columns
    else if (dim == 2) collapseStep = indims[1]; //collapse by rows
    else if (dim == 3) collapseStep = indims[1]*indims[2]; //collapse by slices
    else collapseStep = indims[1]*indims[2]*indims[3]; //collapse by volumes
    int xy = dims[1]*dims[2];
    int xyz = xy * dims[3];
    if ((op == Tmedian) || (op == Tstd) || (op == Tperc) || (op == Tar1)) {
        //these ops need all samples gathered into a scratch array per output voxel
        //for even number of items, two options for median, consider 4 volumes ranked
        // meam of 2nd and 3rd: problem one can return values not in data
        // 2nd value. Representative
        //here we use the latter approach
        //int itm = ((nReduce-1) * 0.5);
        int itm = (nReduce * 0.5); //seems correct tested with odd and even number of volumes
        if (op == Tperc) {
            double frac = ((double)percentage)/100.0;
            //itm = ((nReduce-1) * frac);
            itm = ((nReduce) * frac);
            itm = MAX(itm, 0);
            itm = MIN(itm, nReduce-1);
        }
        #pragma omp parallel for
        for (size_t i = 0; i < nvox; i++ ) {
            flt * vxls = (flt *)_mm_malloc((nReduce)*sizeof(flt), 64);
            size_t inPos = i;
            if (dim < 4) { //i is in output space, convert to input space, allows single loop for OpenMP
                int T = (i / xyz); //volume
                int r = i % (xyz);
                int Z = (r / xy); //slice
                r = r % (xy);
                int Y = (r / dims[1]); //row
                int X = r % dims[1];
                inPos = X+(Y*indims[1])+(Z*indims[1]*indims[2])+(T*indims[1]*indims[2]*indims[3]);
            }
            for (int v = 0; v < nReduce; v++ ) {
                vxls[v] = i32[inPos];
                inPos += collapseStep;
            }
            if ((op == Tstd) || (op == Tar1)) {
                //computed in cache, far fewer operations than Welford
                //note 64-bit double precision even if 32-bit DT_CALC
                //neither precision gives identical results
                // double precision attenuates catastrophic cancellation
                double sum = 0.0;
                for (int v = 0; v < nReduce; v++ )
                    sum += vxls[v];
                double mean = sum / nReduce;
                double sumSqr = 0.0;
                for (int v = 0; v < nReduce; v++ )
                    sumSqr += sqr(vxls[v]- mean);
                if (op == Tstd)
                    o32[i] = sqrt(sumSqr / (nReduce - 1)); //sample standard deviation (n-1)
                else { //Tar1: lag-1 autocorrelation of the demeaned series
                    if (sumSqr == 0.0) {
                        o32[i] = 0.0;
                        continue;
                    }
                    for (int v = 0; v < nReduce; v++ )
                        vxls[v] = vxls[v] - mean; //demean
                    double r = 0.0;
                    for (int v = 1; v < nReduce; v++ )
                        r += (vxls[v] * vxls[v-1])/sumSqr;
                    o32[i] = r;
                }
            } else { //Tperc or Tmedian
                qsort (vxls, nReduce, sizeof(flt), compare);
                o32[i] = vxls[itm];
            }
            _mm_free (vxls);
        } //for i: each voxel
    } else {
        //streaming ops (mean/max/maxn/min): no scratch array needed
        #pragma omp parallel for
        for (size_t i = 0; i < nvox; i++ ) {
            size_t inPos = i; //ok if dim==4
            if (dim < 4) { //i is in output space, convert to input space, allows single loop for OpenMP
                int T = (i / xyz); //volume
                int r = i % (xyz);
                int Z = (r / xy); //slice
                r = r % (xy);
                int Y = (r / dims[1]); //row
                int X = r % dims[1];
                inPos = X+(Y*indims[1])+(Z*indims[1]*indims[2])+(T*indims[1]*indims[2]*indims[3]);
            }
            double sum = 0.0;
            flt mx = i32[inPos];
            flt mn = mx;
            int mxn = 0; //index of the maximum (Tmaxn)
            //flt sd = 0.0;
            //flt mean = 0.0;
            for (int v = 0; v < nReduce; v++ ) {
                flt f = i32[inPos];
                sum += f;
                if (f > mx) {
                    mx = f;
                    mxn = v;
                }
                mn = MIN(mn, f);
                //Welford https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
                // 2-pass method faster
                //flt delta = f - mean;
                //mean = mean + delta / (v+1);
                //sd = sd + delta*(f- mean);
                inPos += collapseStep;
            }
            if (op == Tmean) o32[i] = sum / nReduce; //mean
            else if (op == Tmax) o32[i] = mx; //max
            else if (op == Tmaxn) o32[i] = mxn; //maxn
            else if (op == Tmin) o32[i] = mn; //min
        }
    } //if op
    //update the header to the reduced geometry and swap in the new buffer
    nim->nvox = nvox;
    for (int i = 0; i < 4; i++ )
        nim->dim[i] = dims[i];
    nim->ndim = dims[0];
    nim->nx = dims[1];
    nim->ny = dims[2];
    nim->nz = dims[3];
    nim->nt = dims[4];
    nim->nu = dims[5];
    nim->nv = dims[6];
    nim->nw = dims[7];
    free(nim->data);
    nim->data = dat;
    return 0;
} //nifti_dim_reduce()

//all voxel-wise, scalar, kernel and geometry operations supported by the tool
enum eOp{unknown, add, sub, mul, divX, rem, mod, mas, thr, thrp, thrP, uthr, uthrp, uthrP, max, min,
power, seed, inm, ing, smth, exp1,log1,sin1,cos1,tan1,asin1,acos1,atan1,sqr1,sqrt1,recip1,abs1,bin1,binv1,edge1, index1, nan1, nanm1, rand1, randn1,range1, rank1, ranknorm1, pval1, pval01, cpval1, ztop1, ptoz1, dilMk,dilDk,dilFk,dilallk,erok,eroFk,fmediank,fmeank,fmeanuk, subsamp2,subsamp2offc };

//build a Gaussian kernel (sigma in mm) as an int array of 4*n values:
// [0..n) voxel offsets, [n..2n) x offsets, [2n..3n) y offsets, [3n..4n) weights
// weights are scaled to INT_MAX so they can be stored as integers.
//Returns NULL when sigmamm==0; caller frees with _mm_free().
static int * make_kernel_gauss(nifti_image * nim, int * nkernel, double sigmamm) {
    sigmamm = fabs(sigmamm);
    if (sigmamm == 0.0) return NULL;
    double mmCutoff = sigmamm * 6.0; //maximum extent
    int x = (2*floor(mmCutoff/nim->dx))+1;
    int y = (2*floor(mmCutoff/nim->dy))+1;
    int z = (2*floor(mmCutoff/nim->dz))+1;
    int xlo = (int)(-x / 2);
    int ylo = (int)(-y / 2);
    int zlo = (int)(-z / 2);
    //betterthanfsl
    // fsl computes gaussian for all values in cube
    // from first principles, a spherical filter has less bias
    // since weighting is very low at these edge voxels, it has little impact on
    // "-fmean", however with other filters like "dilM", fsl's solution works like
    // a "box" filter, not a "sphere" filter
    // default is to clone fsl
#ifdef betterthanfsl //true sphere at cutouff
    //first pass: determine number of surviving voxels (n)
    int n = 0;
    for (int zi = zlo; zi < (zlo+z); zi++ )
        for (int yi = ylo; yi < (ylo+y); yi++ )
            for (int xi = xlo; xi < (xlo+x); xi++ ) {
                flt dx = (xi * nim->dx);
                flt dy = (yi * nim->dy);
                flt dz = (zi * nim->dz);
                flt dist = sqrt(dx*dx + dy*dy + dz*dz);
                if (dist > mmCutoff) continue;
                n++;
            }
    *nkernel = n;
    int kernelWeight = (int)((double)INT_MAX/(double)n); //requires <limits.h>
    int * kernel = (int *)_mm_malloc((n*4)*sizeof(int), 64); //4 values: offset, xpos, ypos, weight
    double * wt = (double *)_mm_malloc((n)*sizeof(double), 64); //precess weight: temporary
    //second pass: fill surviving voxels
    int i = 0;
    double expd = 2.0*sigmamm*sigmamm;
    for (int zi = zlo; zi < (zlo+z); zi++ )
        for (int yi = ylo; yi < (ylo+y); yi++ )
            for (int xi = xlo; xi < (xlo+x); xi++ ) {
                flt dx = (xi * nim->dx);
                flt dy = (yi * nim->dy);
                flt dz = (zi * nim->dz);
                flt dist = sqrt(dx*dx + dy*dy + dz*dz);
                if (dist > mmCutoff) continue;
                kernel[i] = xi + (yi * nim->nx) + (zi * nim->nx * nim->ny);
                kernel[i+n] = xi; //left-right wrap detection
                kernel[i+n+n] = yi; //anterior-posterior wrap detection
                //kernel[i+n+n+n] = kernelWeight; //kernel height
                wt[i] = exp(-1.0*(dist*dist)/expd);
                i++;
            }
#else
    //fsl-compatible: fill the whole cube (no spherical cutoff)
    int n = x * y * z;
    *nkernel = n;
    int * kernel = (int *)_mm_malloc((n*4)*sizeof(int), 64); //4 values: offset, xpos, ypos, weight
    double * wt = (double *)_mm_malloc((n)*sizeof(double), 64); //precess weight: temporary
    int i = 0;
    double expd = 2.0*sigmamm*sigmamm;
    for (int zi = zlo; zi < (zlo+z); zi++ )
        for (int yi = ylo; yi < (ylo+y); yi++ )
            for (int xi = xlo; xi < (xlo+x); xi++ ) {
                flt dx = (xi * nim->dx);
                flt dy = (yi * nim->dy);
                flt dz = (zi * nim->dz);
                flt dist = sqrt(dx*dx + dy*dy + dz*dz);
                //if (dist > mmCutoff) continue; //<- fsl fills all
                kernel[i] = xi + (yi * nim->nx) + (zi * nim->nx * nim->ny);
                kernel[i+n] = xi; //left-right wrap detection
                kernel[i+n+n] = yi; //anterior-posterior wrap detection
                //kernel[i+n+n+n] = kernelWeight; //kernel height
                wt[i] = exp(-1.0*(dist*dist)/expd);
                i++;
            }
#endif
    double sum = 0.0;
    for (int i = 0; i < n; i++ )
        sum += wt[i];
    //sum of entire gaussian is 1
    double scale = 1.0 / sum;
    scale *= (double)INT_MAX; //we use integer scaling: in future faster to typecast integer as flt (if int=32bit) or double (if int=64bit)
    for (int i = 0; i < n; i++ )
        kernel[i+n+n+n] = wt[i]*scale;
    _mm_free (wt);
    return kernel;
} //make_kernel_gauss()

//maximum intensity across the whole image (0.0 for empty/wrong datatype)
static flt calmax(nifti_image * nim){
    if ((nim->nvox < 1) || (nim->datatype != DT_CALC)) return 0.0;
    flt * in32 = (flt *) nim->data;
    flt mx = in32[0];
    for (size_t i = 0; i < nim->nvox; i++ )
        mx = MAX(mx, in32[i]);
    return mx;
}

//minimum intensity across the whole image (0.0 for empty/wrong datatype)
static flt calmin(nifti_image * nim){
    if ((nim->nvox < 1) || (nim->datatype != DT_CALC)) return 0.0;
    flt * in32 = (flt *) nim->data;
    flt mn = in32[0];
    for (size_t i = 0; i < nim->nvox; i++ )
        mn = MIN(mn, in32[i]);
    return mn;
}

/*void swapSign(nifti_image * nim){ if ((nim->nvox < 1) ||
(nim->datatype != DT_CALC)) return; flt * in32 = (flt *) nim->data; for (size_t i = 0; i < nim->nvox; i++ ) in32[i] = -in32[i]; }*/

//convert a 6-volume tensor image between lower-triangle (NIfTI SYMMATRIX) and
//FSL's upper-triangle ordering by swapping volumes 3 and 4 and fixing the header.
//lower2upper!=0: lower->upper; otherwise upper->lower. Returns 0 on success.
static int nifti_tensor_2(nifti_image * nim, int lower2upper) {
    int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3];
    if ((nim->datatype != DT_CALC) || (nvox3D < 1)) return 1;
    int nVol = (int)(nim->nvox/nvox3D);
    if (nVol != 6) {
        fprintf(stderr,"nifti_tensor_2: input must have precisely 6 volumes (not %d)\n", nVol);
        return 1;
    }
    //3dAFNItoNIFTI does not set intent_code to NIFTI_INTENT_SYMMATRIX, so check dimensions
    //NOTE(review): both warnings test dim[4]==6, yet the lower-triangle (SYMMATRIX)
    //layout written below sets dim[5]=6 — confirm the second test was not meant to be dim[5]==6.
    if ((lower2upper) && (nim->dim[4] == 6))
        fprintf(stderr,"nifti_tensor_2: check images (header suggests already in upper triangle format)\n");
    if ((!lower2upper) && (nim->dim[4] == 6))
        fprintf(stderr,"nifti_tensor_2: check images (header suggests already in lower triangle format)\n");
    //lower xx xy yy xz yz zz
    //upper xx xy xz yy yz zz
    //swap volumes 3 and 4
    flt * in32 = (flt *) nim->data;
    flt * tmp = (flt *)_mm_malloc(nvox3D*sizeof(flt), 64);
    flt * v3 = in32 + (2 * nvox3D);
    flt * v4 = in32 + (3 * nvox3D);
    memcpy(tmp, v4, nvox3D*sizeof(flt));
    memcpy(v4, v3, nvox3D*sizeof(flt));
    memcpy(v3, tmp, nvox3D*sizeof(flt));
    _mm_free (tmp);
    if (lower2upper) { //FSL uses non-standard upper triangle
        nim->dim[0] = 4;
        for (int i = 4; i < 8; i++)
            nim->dim[i] = 1;
        nim->dim[4] = 6;
        nim->ndim = 4;
        nim->nt = 6;
        nim->nu = 1;
        nim->nv = 1;
        nim->nw = 1;
    } else { //upper2lower
        //lower is NIfTI default, used by AFNI, Camino, ANTS
        nim->intent_code = NIFTI_INTENT_SYMMATRIX;
        /*! To store an NxN symmetric matrix at each voxel:
        - dataset must have a 5th dimension
        - intent_code must be NIFTI_INTENT_SYMMATRIX
        - dim[5] must be N*(N+1)/2
        - intent_p1 must be N (in float format)
        - the matrix values A[i][[j] are stored in row-order:
        - A[0][0]
        - A[1][0] A[1][1]
        - A[2][0] A[2][1] A[2][2]
        - etc.: row-by-row */
        nim->dim[0] = 5;
        for (int i = 4; i < 8; i++)
            nim->dim[i] = 1;
        nim->dim[5] = 6;
        nim->ndim = 5;
        nim->nt = 1;
        nim->nu = 6;
        nim->nv = 1;
        nim->nw = 1;
    }
    return 0;
}

//decompose a 6-volume diffusion tensor into eigenvalues/vectors and scalar maps,
//saving L1,L2,L3,V1,V2,V3,MD,MO,FA images; FA is kept in memory. Only compiled
//when 'tensor_decomp' is defined; otherwise reports an error and returns 1.
static int nifti_tensor_decomp(nifti_image * nim, int isUpperTriangle) {
    // MD= (Dxx+Dyy+Dzz)/3
    //https://github.com/ANTsX/ANTs/wiki/Importing-diffusion-tensor-data-from-other-software
    // dtifit produces upper-triangular order: xx xy xz yy yz zz
    //MD = 1/3*(Dxx+Dyy+Dzz)
    //FA= sqrt(3/2)*sqrt(((Dx-MD)^2+(Dy-MD)^2+(Dz-MD^2))/(Dx^2+Dy^2+Dz^2))
    //fslmaths tensor.nii -tensor_decomp bork.nii
    // 3dDTeig -uddata -sep_dsets -prefix AFNIdwi.nii tensor.nii
    //3dDTeig expects LOWER diagonal order unless -uddata
    // Dxx,Dxy,Dyy,Dxz,Dyz,Dzz
    // https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dDTeig.html
    //dxx, dxy, dyy, dxz, dyz, dzz
    // 3dDTeig -uddata -prefix AFNIdwi.nii tensor.nii
    // fslmaths tensor.nii -tensor_decomp bork.nii
    // Creates 5*3D and 3*4D files for a total of 14 volumes L1,L2,L3,V1(3),V2(3),V3(3),FA,MD
#ifdef tensor_decomp
    if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2) || (nim->nz < 1)) return 1;
    if (nim->datatype != DT_CALC) return 1;
    int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3];
    int nVol = (int)(nim->nvox/nvox3D);
    if (nVol != 6) {
        fprintf(stderr,"nifti_tensor_decomp: input must have precisely 6 volumes (not %d)\n", nVol);
        return 1;
    }
    flt * in32 = (flt *) nim->data;
    //detect if data is upper or lower triangle
    // The "YY" component should be brighter (stronlgy positive) than the off axis XZ
    #define detectUpperOrLower
    #ifdef detectUpperOrLower
    double sumV3 = 0.0; //3rd volume, YY for lower, XZ for upper
    double sumV4 = 0.0; //4th volume, XZ for lower, YY for upper
    flt * v32
    = in32 + (nvox3D * 2); //offset to 3rd volume
    for (size_t i = 0; i < nvox3D; i++ )
        sumV3 += v32[i];
    v32 = in32 + (nvox3D * 3); //offset to 4th volume
    for (size_t i = 0; i < nvox3D; i++ )
        sumV4 += v32[i];
    if ((sumV4 > sumV3) && (!isUpperTriangle))
        fprintf(stderr,"nifti_tensor_decomp: check results, input looks like UPPER triangle.\n");
    if ((sumV4 < sumV3) && (isUpperTriangle))
        fprintf(stderr,"nifti_tensor_decomp: check results, input looks like LOWER triangle.\n");
    #endif
    //per-voxel eigen-decomposition: 6 tensor elements in, 14 outputs
    flt * out32 = (flt *)_mm_malloc(14*nvox3D*sizeof(flt), 64);
    for (size_t i = 0; i < nvox3D; i++ ) {
        //n.b. in6 and out14 are ALWAYS float regradless of DT32, e.g. single even if DT=double
        float * in6 = (float *)_mm_malloc(6*sizeof(float), 64);
        float * out14 = (float *)_mm_malloc(14*sizeof(float), 64);
        size_t iv = i;
        for (int v = 0; v < 6; v++) {
            in6[v] = in32[iv];
            iv += nvox3D;
        }
        EIG_tsfunc(0.0, 0.0, 0, in6, 0.0, 0.0, NULL, 0, out14, isUpperTriangle);
        size_t ov = i;
        for (int v = 0; v < 14; v++) {
            out32[ov] = out14[v];
            ov += nvox3D;
        }
        _mm_free (out14);
        _mm_free (in6);
    }
    free(nim->data);
    // Creates 5*3D and 3*4D files for a total of 14 volumes L1(0),L2(1),L3(2),V1(3,4,5),V2(6,7,8),V3(9,10,11),FA(12),MD(13)
    flt * outv;
    //save 4D images (the three 3-volume eigenvector maps)
    nim->cal_min = -1;
    nim->cal_max = 1;
    nim->nvox = nvox3D * 3;
    nim->ndim = 4;
    nim->nt = 3;
    nim->nu = 1;
    nim->nv = 1;
    nim->nw = 1;
    nim->dim[0] = 4;
    nim->dim[4] = 3;
    for (int i = 5; i < 8; i++)
        nim->dim[i] = 1;
    //void * dat = (void *)calloc(1, 3*nvox3D * sizeof(flt)) ;
    //nim->data = dat;
    //flt * fa32 = (flt *) dat;
    //save V1
    outv = out32 + (nvox3D * 3);
    //memcpy(fa32, outv, 3*nvox3D*sizeof(flt));
    /*for (size_t i = 0; i < (3*nvox3D); i++ )
        if (outv[i] != 0.0) // do not create "-0.0"
            outv[i] = -outv[i]; */
    nim->data = (void *)outv;
    nifti_save(nim, "_V1");
    //save V2
    outv = out32 + (nvox3D * 6);
    //memcpy(fa32, outv, 3*nvox3D*sizeof(flt));
    nim->data = (void *)outv;
    nifti_save(nim, "_V2");
    //save V3
    outv = out32 + (nvox3D * 9);
    //memcpy(fa32, outv, 3*nvox3D*sizeof(flt));
    nim->data = (void *)outv;
    nifti_save(nim, "_V3");
    //release 4D memory
    //free(dat);
    //save 3D images
    nim->cal_min = 0;
    nim->cal_max = 0;
    nim->nvox = nvox3D * 1;
    nim->ndim = 3;
    nim->nt = 1;
    nim->dim[0] = 3;
    nim->dim[4] = 1;
    //save L1
    outv = out32;
    //memcpy(fa32, outv, nvox3D*sizeof(flt));
    nim->data = (void *)outv;
    nim->cal_max = calmax(nim);
    nifti_save(nim, "_L1");
    //save L2
    outv = out32 + (nvox3D * 1);
    //memcpy(fa32, outv, nvox3D*sizeof(flt));
    nim->data = (void *)outv;
    nim->cal_max = calmax(nim);
    nifti_save(nim, "_L2");
    //save L3
    outv = out32 + (nvox3D * 2);
    //memcpy(fa32, outv, nvox3D*sizeof(flt));
    nim->data = (void *)outv;
    nim->cal_max = calmax(nim);
    nifti_save(nim, "_L3");
    //save MD
    outv = out32 + (nvox3D * 13);
    //memcpy(fa32, outv, nvox3D*sizeof(flt));
    nim->data = (void *)outv;
    nim->cal_min = calmin(nim);
    nim->cal_max = calmax(nim);
    nifti_save(nim, "_MD");
    //single volume data
    void * dat = (void *)calloc(1, nvox3D * sizeof(flt)) ;
    nim->data = dat;
    flt * fa32 = (flt *) dat;
    //save MO
    //MODE https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=FSL;4fbed3d1.1103
    // compute MO (MODE) from L1, L2, L3, MD
    //e1=l1-MD, e2=l2-MD, e3=l3-MD;
    //n = (e1 + e2 - 2*e3)*(2*e1 - e2 - e3)*(e1 - 2*e2 + e3);
    //d = (e1*e1 + e2*e2 + e3*e3 - e1*e2 - e2*e3 - e1*e3);
    //d = 2*d*d*d;
    //mode = n/d;
    //something is wrong with this formula.
    // a. Ennis 2006 includes a sqrt that can not be factored out
    // b. results differ from fslmaths
    nim->cal_min = -1;
    nim->cal_max = 1;
    flt * L1 = out32;
    flt * L2 = out32 + (nvox3D * 1);
    flt * L3 = out32 + (nvox3D * 2);
    flt * MD = out32 + (nvox3D * 13);
    for (size_t i = 0; i < nvox3D; i++ ) {
        flt e1 = L1[i] - MD[i];
        flt e2 = L2[i] - MD[i];
        flt e3 = L3[i] - MD[i];
        flt n = (e1 + e2 - 2*e3)*(2*e1 - e2 - e3)*(e1 - 2*e2 + e3);
        flt d = (e1*e1 + e2*e2 + e3*e3 - e1*e2 - e2*e3 - e1*e3);
        d = sqrt(d); //Correlation r = 0.999746
        d = 2*d*d*d;
        //d = sqrt(d); //Correlation r = 0.990319
        if (d != 0)
            d = n / d; //mode
        d = MIN(d, 1.0); //clamp mode to [-1, 1]
        d = MAX(d, -1.0);
        fa32[i] = d;
    }
    nifti_save(nim, "_MO");
    //save FA
    outv = out32 + (nvox3D * 12);
    memcpy(fa32, outv, nvox3D*sizeof(flt));
    nim->cal_min = 0;
    nim->cal_max = 1;
    nifti_save(nim, "_FA"); //keep FA in memory
    nim->cal_max = 0;
    _mm_free (out32);
    return 0;
#else
    fprintf(stderr,"not compiled to support tensor_decomp\n");
    return 1;
#endif
} //nifti_tensor_decomp()

//"-dilall": repeatedly dilate (mean of non-zero neighbors) until every voxel of
//volume 'vol' is filled. kernel holds nkernel offsets/x/y triplets (weights unused).
//NOTE(review): nZero counts every voxel that was zero at the start of a pass even
//if it could not be filled; a volume that is entirely zero (no seed voxels) would
//loop forever — confirm callers guarantee at least one non-zero voxel.
static void kernel3D_dilall( nifti_image * nim, int * kernel, int nkernel, int vol) {
    int nVox3D = nim->dim[1] * nim->dim[2] * nim->dim[3];
    flt * f32 = (flt *) nim->data;
    f32 += (nVox3D * vol); //operate on the selected volume only
    flt * inf32 = (flt *)_mm_malloc(nVox3D*sizeof(flt), 64);
    memcpy(inf32, f32, nVox3D*sizeof(flt)); //snapshot: read previous pass, write current
    int nxy = nim->nx * nim->ny;
    size_t nZero = 1;
    while (nZero > 0) {
        nZero = 0;
        for (int z = 0; z < nim->nz; z++ ) {
            int i = (z * nxy) -1; //offset
            for (int y = 0; y < nim->ny; y++ ) {
                for (int x = 0; x < nim->nx; x++ ) {
                    i++;
                    if (f32[i] != 0.0) continue;
                    int nNot0 = 0;
                    flt sum = 0.0f;
                    for (size_t k = 0; k < nkernel; k++) {
                        //n.b. vx is size_t: "(vx < 0)" is always false; negative offsets
                        //wrap to huge values and are rejected by (vx >= nVox3D)
                        size_t vx = i + kernel[k];
                        if ((vx < 0) || (vx >= nVox3D) || (inf32[vx] == 0.0)) continue;
                        //next handle edge cases
                        int dx = x+kernel[k+nkernel];
                        if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right
                        int dy = y+kernel[k+nkernel+nkernel];
                        if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior
                        nNot0 ++;
                        sum += inf32[vx];
                    } //for k
                    if (nNot0 > 0)
                        f32[i] = sum / nNot0;
                    nZero++;
                } //for x
            } //for y
        } //for z
        memcpy(inf32, f32,
// Apply one spatial kernel operation (op) to a single 3D volume (vol) of nim.
// kernel layout (nkernel entries each, concatenated): [0..nkernel) voxel
// offsets, then x-offsets and y-offsets (used only to reject left/right and
// anterior/posterior wrap-around at volume edges), then weights scaled by
// INT_MAX. All ops read from an unmodified snapshot (inf32) so results do not
// depend on in-place update order. Returns 0 on success, 1 for unknown op.
static int kernel3D( nifti_image * nim, enum eOp op, int * kernel, int nkernel, int vol) {
	int nVox3D = nim->dim[1] * nim->dim[2] * nim->dim[3];
	flt * f32 = (flt *) nim->data;
	f32 += (nVox3D * vol); //point at requested volume
	flt * inf32 = (flt *)_mm_malloc(nVox3D*sizeof(flt), 64); //read-only snapshot of input
	memcpy(inf32, f32, nVox3D*sizeof(flt));
	int nxy = nim->nx * nim->ny;
	if (op == fmediank) { //median filter of valid in-kernel samples
		flt * vxls = (flt *)_mm_malloc((nkernel)*sizeof(flt), 64); //scratch for sorting
		for (int z = 0; z < nim->nz; z++ ) {
			int i = (z * nxy) -1; //offset
			for (int y = 0; y < nim->ny; y++ ) {
				for (int x = 0; x < nim->nx; x++ ) {
					i++;
					int nOK = 0; //count of in-volume, non-wrapped samples
					for (size_t k = 0; k < nkernel; k++) {
						size_t vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D)) continue;
						//next handle edge cases
						int dx = x+kernel[k+nkernel];
						if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right
						int dy = y+kernel[k+nkernel+nkernel];
						if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior
						vxls[nOK] = inf32[vx];
						nOK ++;
					} //for k
					qsort (vxls, nOK, sizeof(flt), compare);
					int itm = (nOK * 0.5); //middle of sorted samples
					f32[i] = vxls[itm]; //NOTE(review): assumes nOK > 0 (kernel includes center?) — confirm
				} //for x
			} //for y
		} //for z
		_mm_free (vxls);
	} else if (op == dilMk) { //mean dilation: fill zero voxels with mean of non-zero neighbors
		for (int z = 0; z < nim->nz; z++ ) {
			int i = (z * nxy) -1; //offset
			for (int y = 0; y < nim->ny; y++ ) {
				for (int x = 0; x < nim->nx; x++ ) {
					i++;
					if (f32[i] != 0.0) continue; //only zero voxels are modified
					int nNot0 = 0;
					flt sum = 0.0f;
					for (size_t k = 0; k < nkernel; k++) {
						size_t vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D) || (inf32[vx] == 0.0)) continue;
						//next handle edge cases
						int dx = x+kernel[k+nkernel];
						if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right
						int dy = y+kernel[k+nkernel+nkernel];
						if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior
						nNot0 ++;
						sum += inf32[vx];
					} //for k
					if (nNot0 > 0) f32[i] = sum / nNot0;
				} //for x
			} //for y
		} //for z
	} else if (op == dilDk){ //maximum - fslmaths 6.0.1 emulation, note really MODE, max non-zero
		for (int z = 0; z < nim->nz; z++ ) {
			int i = (z * nxy) -1; //offset
			for (int y = 0; y < nim->ny; y++ ) {
				for (int x = 0; x < nim->nx; x++ ) {
					i++;
					if (f32[i] != 0.0) continue; //only zero voxels are modified
					//flt mx = -INFINITY;
					flt mx = NAN; //NaN marks "no non-zero neighbor found"
					for (int k = 0; k < nkernel; k++) {
						int vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D)) continue;
						//next handle edge cases
						int dx = x+kernel[k+nkernel];
						if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right
						int dy = y+kernel[k+nkernel+nkernel];
						if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior
						flt v = inf32[vx];
						if (v == 0.0) continue; //zero neighbors never contribute
						mx = MAX(mx,inf32[vx]); //with dilD a input voxel of 0
					} //for k
					//https://stackoverflow.com/questions/570669/checking-if-a-double-or-float-is-nan-in-c
					// f != f will be true only if f is NaN
					if (!(mx != mx)) f32[i] = mx;
				} //for x
			} //for y
		} //for z
	} else if (op == dilFk) { //maximum - fslmaths 6.0.1 appears to use "dilF" when the user requests "dilD"
		for (int z = 0; z < nim->nz; z++ ) {
			int i = (z * nxy) -1; //offset
			for (int y = 0; y < nim->ny; y++ ) {
				for (int x = 0; x < nim->nx; x++ ) {
					i++;
					flt mx = f32[i]; //max filter: every voxel, zero or not
					for (int k = 0; k < nkernel; k++) {
						int vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D) || (inf32[vx] <= mx)) continue;
						//next handle edge cases
						int dx = x+kernel[k+nkernel];
						if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right
						int dy = y+kernel[k+nkernel+nkernel];
						if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior
						mx = MAX(mx,inf32[vx]);
						//if (mx < 0) continue; //with dilF, do not make a zero voxel darker than 0
					} //for k
					f32[i] = mx;
				} //for x
			} //for y
		} //for z
	} else if (op == dilallk) {// -dilall : Apply -dilM repeatedly until the entire FOV is covered");
		kernel3D_dilall(nim, kernel, nkernel, vol);
	} else if (op == eroFk) { //Minimum filtering of all voxels
		for (int z = 0; z < nim->nz; z++ ) {
			int i = (z * nxy) -1; //offset
			for (int y = 0; y < nim->ny; y++ ) {
				for (int x = 0; x < nim->nx; x++ ) {
					i++;
					for (int k = 0; k < nkernel; k++) {
						int vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D)) continue;
						//next handle edge cases
						int dx = x+kernel[k+nkernel];
						if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right
						int dy = y+kernel[k+nkernel+nkernel];
						if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior
						f32[i] = MIN(f32[i], inf32[vx]);
					} //for k
				} //for x
			} //for y
		} //for z
	} else if (op == fmeank) { //weighted mean, normalized by in-volume weight sum
		flt * kwt = (flt *)_mm_malloc(nkernel*sizeof(flt), 64);
		//weights were stored scaled by INT_MAX: recover the fraction
		for (int k = 0; k < nkernel; k++)
			kwt[k] = ((double)kernel[k+nkernel+nkernel+nkernel]/(double)INT_MAX );
		for (int z = 0; z < nim->nz; z++ ) {
			int i = (z * nxy) -1; //offset
			for (int y = 0; y < nim->ny; y++ ) {
				for (int x = 0; x < nim->nx; x++ ) {
					i++;
					flt sum = 0.0f;
					flt wt = 0.0f; //sum of weights actually sampled (edge-aware)
					for (int k = 0; k < nkernel; k++) {
						int vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D)) continue;
						//next handle edge cases
						int dx = x+kernel[k+nkernel];
						if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right
						int dy = y+kernel[k+nkernel+nkernel];
						if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior
						sum += (inf32[vx]* kwt[k]);
						wt += kwt[k];
					} //for k
					f32[i] = sum / wt;
				} //for x
			} //for y
		} //for z
		_mm_free (kwt);
	} else if (op == fmeanuk) { //un-normalized weighted mean (weights not re-scaled at edges)
		flt * kwt = (flt *)_mm_malloc(nkernel*sizeof(flt), 64);
		for (int k = 0; k < nkernel; k++)
			kwt[k] = ((double)kernel[k+nkernel+nkernel+nkernel]/(double)INT_MAX );
		for (int z = 0; z < nim->nz; z++ ) {
			int i = (z * nxy) -1; //offset
			for (int y = 0; y < nim->ny; y++ ) {
				for (int x = 0; x < nim->nx; x++ ) {
					i++;
					flt sum = 0.0f;
					//flt wt = 0.0f;
					for (int k = 0; k < nkernel; k++) {
						int vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D)) continue;
						//next handle edge cases
						int dx = x+kernel[k+nkernel];
						if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right
						int dy = y+kernel[k+nkernel+nkernel];
						if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior
						sum += (inf32[vx]* kwt[k]);
						//wt += kwt[k];
					} //for k
					//f32[i] = sum / wt;
					f32[i] = sum;
				} //for x
			} //for y
		} //for z
		_mm_free (kwt);
	} else if (op == erok) { //erode: zero any non-zero voxel touching a zero neighbor
		for (int z = 0; z < nim->nz; z++ ) {
			int i = (z * nxy) -1; //offset
			for (int y = 0; y < nim->ny; y++ ) {
				for (int x = 0; x < nim->nx; x++ ) {
					i++;
					if (f32[i] == 0.0) continue; //already zero
					for (int k = 0; k < nkernel; k++) {
						int vx = i + kernel[k];
						if ((vx < 0) || (vx >= nVox3D) || (inf32[vx] != 0.0)) continue;
						//next handle edge cases
						int dx = x+kernel[k+nkernel];
						if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right
						int dy = y+kernel[k+nkernel+nkernel];
						if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior
						f32[i] = 0.0; //a zero neighbor erodes this voxel
					} //for k
				} //for x
			} //for y
		} //for z
	} else {
		fprintf(stderr,"kernel3D: Unsupported operation\n");
		_mm_free (inf32);
		return 1;
	}
	_mm_free (inf32);
	return 0;
} //kernel3D
(kwt); } else if (op == erok) { for (int z = 0; z < nim->nz; z++ ) { int i = (z * nxy) -1; //offset for (int y = 0; y < nim->ny; y++ ) { for (int x = 0; x < nim->nx; x++ ) { i++; if (f32[i] == 0.0) continue; for (int k = 0; k < nkernel; k++) { int vx = i + kernel[k]; if ((vx < 0) || (vx >= nVox3D) || (inf32[vx] != 0.0)) continue; //next handle edge cases int dx = x+kernel[k+nkernel]; if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right int dy = y+kernel[k+nkernel+nkernel]; if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior f32[i] = 0.0; } //for k } //for x } //for y } //for z } else { fprintf(stderr,"kernel3D: Unsupported operation\n"); _mm_free (inf32); return 1; } _mm_free (inf32); return 0; } //kernel3D static int nifti_kernel ( nifti_image * nim, enum eOp op, int * kernel, int nkernel) { if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2) || (nim->nz < 1)) return 1; if (nim->datatype != DT_CALC) return 1; int nVox3D = nim->dim[1] * nim->dim[2] * nim->dim[3]; int nVol = (int)(nim->nvox/nVox3D); if (nVol < 1) return 1; if ((nkernel < 1) || (kernel == NULL)) return 1; for (int v = 0; v < nVol; v++ ) { int ok = kernel3D(nim, op, kernel, nkernel, v); if (ok != 0) return ok; } return 0; } static int nifti_roi ( nifti_image * nim, int xmin, int xsize, int ymin, int ysize, int zmin, int zsize, int tmin, int tsize) { // "fslmaths LAS -roi 3 32 0 40 0 40 0 5 f " int nt = nim->nvox / (nim->nx * nim->ny * nim->nz); if ((nim->nvox < 1) || (nt < 1)) return 1; if (nim->datatype != DT_CALC) return 1; flt * f32 = (flt *) nim->data; //if (neg_determ(nim)) // do something profound; //determinants do not seem to influence "-roi"? 
int xmax = xmin + xsize - 1; int ymax = ymin + ysize - 1; int zmax = zmin + zsize - 1; int tmax = tmin + tsize - 1; //printf("%d..%d", zmin, zmax); size_t i = 0; for (int t = 0; t < nt; t++) { int tOK = 1; if ((t < tmin) || (t > tmax)) tOK = 0; for (int z = 0; z < nim->nz; z++) { int zOK = 1; if ((z < zmin) || (z > zmax)) zOK = 0; for (int y = 0; y < nim->ny; y++) { int yOK = 1; if ((y < ymin) || (y > ymax)) yOK = 0; for (int x = 0; x < nim->nx; x++) { int xOK = 1; if ((x < xmin) || (x > xmax)) xOK = 0; if ((xOK == 0) || (yOK == 0) || (zOK == 0) || (tOK == 0)) f32[i] = 0.0; i++; } //x } //y } //z }//t return 0; } static int nifti_sobel( nifti_image * nim, int offc) { //sobel is simply one kernel pass per dimension. // this could be achieved with successive passes of "-kernel" // here it is done in a single pass for cache efficiency // https://en.wikipedia.org/wiki/Sobel_operator int vox3D = nim->nx*nim->ny*MAX(nim->nz,1); if (nim->datatype != DT_CALC) return 1; int nvol = nim->nvox/vox3D; int numk = 6;//center voxel and all its neighbors int * kx = (int *)_mm_malloc((numk*4)*sizeof(int), 64); //4 values: offset, xpos, ypos, weight int * ky = (int *)_mm_malloc((numk*4)*sizeof(int), 64); //4 values: offset, xpos, ypos, weight int * kz = (int *)_mm_malloc((numk*4)*sizeof(int), 64); //4 values: offset, xpos, ypos, weight int i = 0; for (int x = 0; x <= 1; x++) for (int y = -1; y <= 1; y++) { int sgn = (2*x)-1; //-1 or +1 int weight = sgn * (2 - abs(y)); //kx compare left and right kx[i+numk] = (2*x)-1; //left/right wrap kx[i+numk+numk] = y; //anterior/posterior wrap kx[i] = kx[i+numk] + (kx[i+numk+numk] * (nim->nx)); //voxel offset kx[i+numk+numk+numk] = weight; //weight //ky compare anterior and posterior ky[i+numk] = y; //left/right wrap ky[i+numk+numk] = (2*x)-1; //anterior/posterior wrap ky[i] = ky[i+numk] + (ky[i+numk+numk] * (nim->nx)); //voxel offset ky[i+numk+numk+numk] = weight; //weight //kz superior/inferior kz[i+numk] = y; //left/right wrap kz[i+numk+numk] 
// Sobel-style gradient magnitude, done in one pass for cache efficiency.
// Uses a reduced 6-point kernel per axis (2 samples along the gradient axis
// x 3 weighted samples in one transverse direction) rather than the full
// 3x3x2 Sobel stencil. Parameter offc is currently unused.
// https://en.wikipedia.org/wiki/Sobel_operator
static int nifti_sobel( nifti_image * nim, int offc) {
	//sobel is simply one kernel pass per dimension.
	// this could be achieved with successive passes of "-kernel"
	// here it is done in a single pass for cache efficiency
	int vox3D = nim->nx*nim->ny*MAX(nim->nz,1);
	if (nim->datatype != DT_CALC) return 1;
	int nvol = nim->nvox/vox3D;
	int numk = 6;//center voxel and all its neighbors
	//each kernel stores 4 parallel arrays of numk ints: voxel offset,
	// x-offset (left/right wrap test), y-offset (ant/post wrap test), weight
	int * kx = (int *)_mm_malloc((numk*4)*sizeof(int), 64); //4 values: offset, xpos, ypos, weight
	int * ky = (int *)_mm_malloc((numk*4)*sizeof(int), 64); //4 values: offset, xpos, ypos, weight
	int * kz = (int *)_mm_malloc((numk*4)*sizeof(int), 64); //4 values: offset, xpos, ypos, weight
	int i = 0;
	for (int x = 0; x <= 1; x++)
		for (int y = -1; y <= 1; y++) {
			int sgn = (2*x)-1; //-1 or +1
			int weight = sgn * (2 - abs(y)); //Sobel weights: +-2 center, +-1 flanks
			//kx compare left and right
			kx[i+numk] = (2*x)-1; //left/right wrap
			kx[i+numk+numk] = y; //anterior/posterior wrap
			kx[i] = kx[i+numk] + (kx[i+numk+numk] * (nim->nx)); //voxel offset
			kx[i+numk+numk+numk] = weight; //weight
			//ky compare anterior and posterior
			ky[i+numk] = y; //left/right wrap
			ky[i+numk+numk] = (2*x)-1; //anterior/posterior wrap
			ky[i] = ky[i+numk] + (ky[i+numk+numk] * (nim->nx)); //voxel offset
			ky[i+numk+numk+numk] = weight; //weight
			//kz superior/inferior
			kz[i+numk] = y; //left/right wrap
			kz[i+numk+numk] = 0; //anterior/posterior wrap
			kz[i] = y + (((2*x)-1) * nim->nx * nim->ny); //voxel offset
			kz[i+numk+numk+numk] = weight; //weight
			i++;
		} //for y
	flt * i32 = (flt *) nim->data; //input volumes
	#pragma omp parallel for
	for (int v = 0; v < nvol; v++) {
		flt * iv32 = i32 + (v * vox3D);
		flt * imgin = _mm_malloc(vox3D*sizeof(flt), 64); //snapshot: gradients read original values
		memcpy(imgin, iv32, vox3D*sizeof(flt));
		int i = 0;
		for (int z = 0; z < nim->nz; z++ )
			for (int y = 0; y < nim->ny; y++ )
				for (size_t x = 0; x < nim->nx; x++ ) {
					//compute x gradient
					flt gx = 0.0f;
					for (size_t k = 0; k < numk; k++) {
						size_t vx = i + kx[k];
						if ((vx < 0) || (vx >= vox3D)) continue;
						//next handle edge cases
						int dx = x+kx[k+numk];
						if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right
						int dy = y+kx[k+numk+numk];
						if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior
						gx += imgin[vx] * kx[k+numk+numk+numk] ;
					} //for k
					//compute y gradient
					flt gy = 0.0f;
					for (size_t k = 0; k < numk; k++) {
						size_t vx = i + ky[k];
						if ((vx < 0) || (vx >= vox3D)) continue;
						//next handle edge cases
						int dx = x+ky[k+numk];
						if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right
						int dy = y+ky[k+numk+numk];
						if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior
						gy += imgin[vx] * ky[k+numk+numk+numk] ;
					} //for k
					//compute z gradient
					flt gz = 0.0f; //always 0 for 2D, we could add conditional to skip but optimize for 3D
					for (size_t k = 0; k < numk; k++) {
						size_t vx = i + kz[k];
						if ((vx < 0) || (vx >= vox3D)) continue;
						//next handle edge cases
						int dx = x+kz[k+numk];
						if ((dx < 0) || (dx >= nim->nx)) continue; //wrapped left-right
						int dy = y+kz[k+numk+numk];
						if ((dy < 0) || (dy >= nim->ny)) continue; //wrapped anterior-posterior
						gz += imgin[vx] * kz[k+numk+numk+numk] ;
					} //for k
					iv32[i] = sqrt(sqr(gx)+sqr(gy)+sqr(gz)); //gradient magnitude
					i++;
				} //for x
		_mm_free (imgin);
	}
	_mm_free (kx);
	_mm_free (ky);
	_mm_free (kz);
	return 0;
} //nifti_sobel()
// Halve the resolution of each spatial dimension (round up for odd sizes),
// emulating fslmaths -subsamp2 (offc == 0, 27-point weighted kernel) and
// -subsamp2offc (offc != 0, simple 2x2x2 averaging aligned to the old grid).
// Naive downsampling, no anti-aliasing filter (deliberate fslmaths mimicry).
// Updates dims, pixdims, and both sform/qform in place. Returns 0 on success.
static int nifti_subsamp2 ( nifti_image * nim, int offc) {
	//naive downsampling: this is provided purely to mimic the behavior of fslmaths
	// see https://nbviewer.jupyter.org/urls/dl.dropbox.com/s/s0nw827nc4kcnaa/Aliasing.ipynb
	// no anti-aliasing filter https://en.wikipedia.org/wiki/Image_scaling
	int invox3D = nim->nx*nim->ny*MAX(nim->nz,1);
	int indim[5];
	for (int i = 1; i < 5; i++)
		indim[i] = MAX(nim->dim[i],1);
	int nvol = nim->nvox/invox3D;
	int x_odd = indim[1] % 2; //odd x-size changes output grid alignment
	if ((nim->nvox < 1) || (nvol < 1)) return 1;
	if (nim->datatype != DT_CALC) return 1;
	int nx = ceil(nim->nx * 0.5);
	int ny = ceil(nim->ny * 0.5);
	int nz = ceil(nim->nz * 0.5);
	if ((nx == nim->nx) && (ny == nim->ny) && (nz == nim->nz)) return 0; //nothing to do
	int nvox3D = nx*ny*nz;
	flt * i32 = (flt *) nim->data;
	void * dat = (void *)calloc(1, nvox3D * nvol * sizeof(flt)) ;
	flt * o32 = (flt *) dat;
	int x_flip = 0;
	if (!neg_determ(nim)) x_flip = 1; //grid alignment depends on row order (determinant)
	if (offc) { //subsamp2offc: average every input voxel into its output cell
		int * wt = _mm_malloc(nvox3D * nvol *sizeof(int), 64); //weight, just for edges
		for (int i = 0; i < (nvox3D * nvol); i++) {
			wt[i] = 0;
			o32[i] = 0.0;
		}
		int boost = 0;
		if ((x_odd) && (x_flip)) boost = 1; //shift x binning for odd flipped grids
		size_t i = 0;
		for (int v = 0; v < indim[4]; v++) {
			size_t vo = v * nvox3D; //volumes do not get reduced
			for (int z = 0; z < indim[3]; z++) {
				size_t zo = vo + ((z / 2) * ny * nx);
				for (int y = 0; y < indim[2]; y++) {
					size_t yo = zo + ((y / 2) * nx);
					for (int x = 0; x < indim[1]; x++) {
						size_t xo = yo + ((x+boost) / 2) ;
						wt[xo]++;
						o32[xo] += i32[i];
						i++;
					} //x
				}//y
			}//z
		}//vol
		//convert accumulated sums to means (wt varies at odd edges)
		for (int i = 0; i < (nvox3D * nvol); i++)
			if (wt[i] > 0) o32[i] /= wt[i];
		_mm_free (wt);
	} else { //if subsamp2offc else subsamp2
		int numk = 27;//center voxel and all its neighbors
		//4 parallel arrays of numk ints: offset, x-offset, y-offset, weight
		int * kernel = (int *)_mm_malloc((numk*4)*sizeof(int), 64); //4 values: offset, xpos, ypos, weight
		int i = 0;
		for (int z = -1; z <= 1; z++ )
			for (int y = -1; y <= 1; y++ )
				for (int x = -1; x <= 1; x++ ) {
					kernel[i] = x + (y * indim[1]) + (z * indim[1] * indim[2]);
					kernel[i+numk] = x; //left-right wrap detection
					kernel[i+numk+numk] = y; //anterior-posterior wrap detection
					//weight halves per step of distance: 8,4,2,1 (exact after int truncation)
					kernel[i+numk+numk+numk] = 8/(pow(2,sqr(x)+sqr(y)+sqr(z))); //kernel weight
					i++;
				}
		int boost = 0;
		//if ((xflip == 1) && (odd == 0)) boost = 1;
		if ((x_flip == 1) && (x_odd == 0)) boost = 1;
		size_t nvox3Din = indim[1]*indim[2]*indim[3];
		size_t o = 0; //output voxel index
		for (int v = 0; v < nvol; v++) {
			size_t vi = v * nvox3Din;
			for (int z = 0; z < nz; z++) {
				int zi = (2 * z * indim[1] *indim[2]);
				for (int y = 0; y < ny; y++) {
					int yy = y+y; //y*2 input y
					int yi = zi + (yy * indim[1]);
					for (int x = 0; x < nx; x++) {
						//int xx = x+x+xflip; //x*2 input x
						int xx = x+x+boost; //x*2 input x
						int xi = yi + xx;
						double sum = 0.0; //double accumulators for precision
						double wt = 0.0;
						for (int k = 0; k < numk; k++ ) {
							if ((xi+kernel[k]) < 0) continue; //position would be less than 0 - outside volume, avoid negative values in size_t
							size_t pos = xi + kernel[k]; //offset
							if (pos >= nvox3Din) continue; //position outside volume, e.g. slice above top of volume
							int xin = xx+kernel[k+numk];
							if ((xin < 0) || (xin >= indim[1])) continue; //wrap left or right
							int yin = yy+kernel[k+numk+numk];
							if ((yin < 0) || (yin >= indim[2])) continue; //wrap anterior or posterior
							flt w = kernel[k+numk+numk+numk];
							wt += w;
							sum += i32[vi+pos] * w;
						}
						//if (wt > 0.0) //no need to check: every voxel has at least one contributor (itself)
						o32[o] = sum/wt;
						//else {
						//	o32[o] = 666.6;
						o++;
					} //x
				}//y
			}//z
		}//vol
		_mm_free (kernel);
	} //if subsamp2offc else subsamp2
	//halve spatial dimensions, double voxel size
	nim->nvox = nvox3D * nvol;
	nim->nx = nx;
	nim->ny = ny;
	nim->nz = nz;
	nim->dim[1] = nx;
	nim->dim[2] = ny;
	nim->dim[3] = nz;
	nim->dx *= 2;
	nim->dy *= 2;
	nim->dz *= 2;
	nim->pixdim[1] *= 2;
	nim->pixdim[2] *= 2;
	nim->pixdim[3] *= 2;
	//adjust origin: shift by the world-space offset of the new grid center
	mat44 m = xform(nim);
	vec4 vx = setVec4(0,0,0);
	vec4 pos = nifti_vect44mat44_mul(vx, m);
	if (offc) {
		if ((x_odd) && (x_flip))
			vx = setVec4(-0.5,-0.5,-0.5); //subsamp2offc
		else
			vx = setVec4(0.5,0.5,0.5); //subsamp2offc
	} else {
		if (x_odd)
			vx = setVec4(0,0,0); //subsamp2
		else
			vx = setVec4(1,0,0); //subsamp2
		if (!x_flip) vx = setVec4(0,0,0);
	}
	vec4 pos1 = nifti_vect44mat44_mul(vx, m);
	vx = setVec4(pos1.v[0]-pos.v[0], pos1.v[1]-pos.v[1], pos1.v[2]-pos.v[2]);
	m.m[0][3] += vx.v[0];
	m.m[1][3] += vx.v[1];
	m.m[2][3] += vx.v[2];
	//scale spatial transform
	for (int i = 0; i < 3; i++)
		for (int j = 0; j < 3; j++)
			m.m[i][j] *= 2;
	//apply to both sform and qform in case VTK user
	for (int i = 0; i < 4; i++)
		for (int j = 0; j < 4; j++) {
			nim->sto_xyz.m[i][j] = m.m[i][j];
			nim->qto_xyz.m[i][j] = m.m[i][j];
		}
	free(nim->data);
	nim->data = dat;
	return 0;
}
// Resample every volume by zoom factors (zx, zy, zz) using a separable
// three-pass filter (x, then y, then z) built by createFilter() for the given
// interp_method; see AFNI's 3dresample. Unlike fslmaths this handles 4D data.
// Updates dims, pixdims, and both sform/qform in place. Returns 0 on success.
static int nifti_resize ( nifti_image * nim, flt zx, flt zy, flt zz, int interp_method) {
	//see AFNI's 3dresample
	//better than fslmaths: fslmaths can not resample 4D data
	int invox3D = nim->nx*nim->ny*nim->nz;
	int nvol = nim->nvox/invox3D;
	if ((nim->nvox < 1) || (nvol < 1)) return 1;
	if (nim->datatype != DT_CALC) return 1;
	int nx = ceil(nim->nx * zx);
	int ny = ceil(nim->ny * zy);
	int nz = ceil(nim->nz * zz);
	if ((nx == nim->nx) && (ny == nim->ny) && (nz == nim->nz)) return 0; //nothing to do
	int nvox3D = nx*ny*nz;
	flt * i32 = (flt *) nim->data;
	void * dat = (void *)calloc(1, nvox3D * nvol * sizeof(flt)) ;
	flt * o32 = (flt *) dat;
	#pragma omp parallel for
	for (int v = 0; v < nvol; v++) {
		flt * iv32 = i32 + (v * invox3D);
		//pass 1: resample along X into imgx (nx * original ny * original nz)
		flt * imgx = _mm_malloc(nx*nim->ny*nim->nz*sizeof(flt), 64);
		if (nx == nim->nx) //no change in x dimension
			memcpy(imgx, iv32, nx*nim->ny*nim->nz*sizeof(flt));
		else {
			CLIST * contrib = createFilter(nim->nx, nx, interp_method);
			size_t i = 0;
			for (size_t y = 0; y < (nim->ny * nim->nz); y++) { //each input row
				for (int x = 0; x < nx; x++) {
					flt weight = 0.0; //weighted sum of contributing input pixels
					for (int j = 0; j < contrib[x].n; j++)
						weight += iv32[contrib[x].p[j].pixel]* contrib[x].p[j].weight;
					imgx[i++] = weight;
				}
				iv32 += nim->nx; //advance to next input row
			} //for y
			for (i = 0; i < nx; i++)
				free(contrib[i].p);
			free(contrib);
		}
		//pass 2: resample along Y into imgy (nx * ny * original nz)
		flt * imgy = _mm_malloc(nx*ny*nim->nz*sizeof(flt), 64);
		if (ny == nim->ny) //no change in y dimension
			memcpy(imgy, imgx, nx*ny*nim->nz*sizeof(flt));
		else {
			CLIST * contrib = createFilter(nim->ny, ny, interp_method);
			flt * iny = _mm_malloc(nim->ny*sizeof(flt), 64); //one input column, gathered contiguously
			for (int z = 0; z < nim->nz; z++) {
				for (int x = 0; x < nx; x++) {
					int yo = (z * nx * ny) + x; //output
					int yi = (z * nx * nim->ny) + x;//input
					for (int j = 0; j < nim->ny; j++) {
						iny[j] = imgx[yi];
						yi += nx;
					}
					for (int y = 0; y < ny; y++) {
						flt weight = 0.0;
						for (int j = 0; j < contrib[y].n; j++)
							weight += iny[contrib[y].p[j].pixel]* contrib[y].p[j].weight;
						imgy[yo] = weight;
						yo += nx;
					} //y
				} //x
			} //z
			_mm_free (iny);
			for (int i = 0; i < ny; i++)
				free(contrib[i].p);
			free(contrib);
		}
		_mm_free (imgx);
		//pass 3: resample along Z directly into the output volume
		flt * ov32 = o32 + (v * nvox3D);
		if (nz == nim->nz) //no change in z dimension
			memcpy(ov32, imgy, nx*ny*nz*sizeof(flt));
		else {
			CLIST * contrib = createFilter(nim->nz, nz, interp_method);
			flt * inz = _mm_malloc(nim->nz*sizeof(flt), 64); //one input z-column
			int nxy = nx * ny;
			for (int y = 0; y < ny; y++) {
				for (int x = 0; x < nx; x++) {
					int zo = x + (y * nx); //output offset
					int zi = x + (y * nx); //input offset
					for (int j = 0; j < nim->nz; j++) {
						inz[j] = imgy[zi];
						zi += nxy;
					}
					for (int z = 0; z < nz; z++) {
						flt weight = 0.0;
						for (int j = 0; j < contrib[z].n; j++)
							weight += inz[contrib[z].p[j].pixel]* contrib[z].p[j].weight;
						ov32[zo] = weight;
						zo += nx*ny;
					} //for z
				} //for x
			} //for y
			_mm_free (inz);
			for (int i = 0; i < nz; i++)
				free(contrib[i].p);
			free(contrib);
		}
		_mm_free (imgy);
	} //for v
	//update header: new dims, shrunken voxel sizes
	nim->nvox = nvox3D * nvol;
	nim->nx = nx;
	nim->ny = ny;
	nim->nz = nz;
	nim->dim[1] = nx;
	nim->dim[2] = ny;
	nim->dim[3] = nz;
	nim->dx /= zx;
	nim->dy /= zy;
	nim->dz /= zz;
	nim->pixdim[1] /= zx;
	nim->pixdim[2] /= zy;
	nim->pixdim[3] /= zz;
	//adjust origin - again, just like fslmaths (origin translation intentionally unchanged)
	mat44 m = xform(nim);
	m.m[0][0] /= zx; m.m[1][0] /= zx; m.m[2][0] /= zx;
	m.m[0][1] /= zy; m.m[1][1] /= zy; m.m[2][1] /= zy;
	m.m[0][2] /= zz; m.m[1][2] /= zz; m.m[2][2] /= zz;
	for (int i = 0; i < 4; i++) //transform BOTH sform and qform (e.g. ANTs/ITK user)
		for (int j = 0; j < 4; j++) {
			nim->sto_xyz.m[i][j] = m.m[i][j];
			nim->qto_xyz.m[i][j] = m.m[i][j];
		}
	free(nim->data);
	nim->data = dat;
	return 0;
}
= 0; i < 4; i++) //transform BOTH sform and qform (e.g. ANTs/ITK user) for (int j = 0; j < 4; j++) { nim->sto_xyz.m[i][j] = m.m[i][j]; nim->qto_xyz.m[i][j] = m.m[i][j]; } free(nim->data); nim->data = dat; return 0; } static int essentiallyEqual(float a, float b) { if (isnan(a) && isnan(b)) return 1; //surprisingly, with C nan != nan return fabs(a - b) <= ( (fabs(a) > fabs(b) ? fabs(b) : fabs(a)) * epsilon); } static void nifti_compare(nifti_image * nim, char * fin) { if (nim->nvox < 1) exit( 1); if (nim->datatype != DT_CALC) { fprintf(stderr,"nifti_compare: Unsupported datatype %d\n", nim->datatype); exit( 1); } nifti_image * nim2 = nifti_image_read2(fin, 1); if( !nim2 ) { fprintf(stderr,"** failed to read NIfTI image from '%s'\n", fin); exit(2); } if ((nim->nx != nim2->nx) || (nim->ny != nim2->ny) || (nim->nz != nim2->nz) ) { fprintf(stderr,"** Attempted to process images of different sizes %"PRId64"x%"PRId64"x%"PRId64"vs %"PRId64"x%"PRId64"x%"PRId64"\n", nim->nx,nim->ny,nim->nz, nim2->nx,nim2->ny,nim2->nz); nifti_image_free( nim2 ); exit(1); } if (nim->nvox != nim2->nvox) { fprintf(stderr," Number of volumes differ\n"); nifti_image_free( nim2 ); exit(1); } if (max_displacement_mm(nim, nim2) > 0.5) { //fslmaths appears to use mm not voxel difference to determine alignment, threshold ~0.5mm fprintf(stderr,"WARNING:: Inconsistent orientations for individual images in pipeline! 
(%gmm)\n", max_displacement_mm(nim, nim2)); fprintf(stderr," Will use voxel-based orientation which is probably incorrect - *PLEASE CHECK*!\n"); } in_hdr ihdr = set_input_hdr(nim2); if (nifti_image_change_datatype(nim2, nim->datatype, &ihdr) != 0) { nifti_image_free( nim2 ); exit(1); } flt * img = (flt *) nim->data; flt * img2 = (flt *) nim2->data; size_t differentVox = nim->nvox; double sum = 0.0; double sum2 = 0.0; double maxDiff = 0.0; size_t nNotNan = 0; size_t nDifferent = 0; for (size_t i = 0; i < nim->nvox; i++ ) { if (!essentiallyEqual(img[i], img2[i])) { if (fabs(img[i]-img2[i]) > maxDiff) { differentVox = i; maxDiff = fabs(img[i]-img2[i]); } nDifferent ++; } if (isnan(img[i]) || isnan(img[i]) ) continue; nNotNan++; sum += img[i]; sum2 += img2[i]; } if (differentVox >= nim->nvox) { //fprintf(stderr,"Images essentially equal\n"); */ nifti_image_free( nim2 ); exit(0); } //second pass - one pass correlation is inaccurate or slow nNotNan = MAX(1, nNotNan); flt mn = INFINITY; //do not set to item 1, in case it is nan flt mx = -INFINITY; flt sd = 0.0; flt ave = sum / nNotNan; flt mn2 = INFINITY; flt mx2 = -INFINITY; flt sd2 = 0.0; flt ave2 = sum2 / nNotNan; //for i := 0 to (n - 1) do // sd := sd + sqr(y[i] - mn); //sd := sqrt(sd / (n - 1)); double sumDx = 0.0; for (size_t i = 0; i < nim->nvox; i++ ) { if (isnan(img[i]) || isnan(img[i]) ) continue; mn = MIN(mn, img[i]); mx = MAX(mx, img[i]); sd += sqr(img[i] - ave); mn2 = MIN(mn2, img2[i]); mx2 = MAX(mx2, img2[i]); sd2 += sqr(img2[i] - ave2); sumDx += (img[i] - ave)*(img2[i] - ave2); } double r = 0.0; nNotNan = MAX(2, nNotNan); if (nim->nvox < 2) { sd = 0.0; sd2 = 0.0; } else { sd = sqrt(sd / (nNotNan - 1)); //if (sd != 0.0) sd = 1.0/sd; sd2 = sqrt(sd2 / (nNotNan - 1)); //if (sd2 != 0.0) sd2 = 1.0/sd2; if ((sd * sd2) != 0.0) r = sumDx/(sd*sd2*(nNotNan - 1)); //r = r / (nim->nvox - 1); } r = MIN(r,1.0); r = MAX(r, -1.0); fprintf(stderr,"Images Differ: Correlation r = %g, identical voxels %d%%\n", r, 
(int)floor(100.0*(1.0-(double)nDifferent/(double)nim->nvox))); if (nNotNan < nim->nvox) { fprintf(stderr," %"PRId64" voxels have a NaN in at least one image.\n", nim->nvox - nNotNan); fprintf(stderr," Descriptives consider voxels that are numeric in both images.\n"); } fprintf(stderr," Most different voxel %g vs %g (difference %g)\n", img[differentVox], img2[differentVox], maxDiff); int nvox3D = nim->nx * nim->ny * MAX(nim->nz,1); int nVol = nim->nvox/nvox3D; size_t vx[4]; vx[3] = differentVox/nvox3D; vx[2] = (differentVox / (nim->nx*nim->ny)) % nim->nz; vx[1] = (differentVox / nim->nx) % nim->ny; vx[0] = differentVox % nim->nx; fprintf(stderr," Most different voxel locatoin %zux%zux%zu volume %zu\n", vx[0],vx[1],vx[2], vx[3]); fprintf(stderr,"Image 1 Descriptives\n"); fprintf(stderr," Range: %g..%g Mean %g StDev %g\n", mn, mx, ave, sd); fprintf(stderr,"Image 2 Descriptives\n"); fprintf(stderr," Range: %g..%g Mean %g StDev %g\n", mn2, mx2, ave2, sd2); //V1 comparison - EXIT_SUCCESS if all vectors are parallel (for DWI up vector [1 0 0] has same direction as down [-1 0 0]) if (nVol != 3) { nifti_image_free( nim2 ); exit(1); } int allParallel = 1; //niimath ft_V1 -compare nt_V1 for (size_t i = 0; i < nvox3D; i++ ) { //check angle of two vectors... 
assume unit vectors flt v[3]; //vector, image 1 v[0] = img[i]; v[1] = img[i+nvox3D]; v[2] = img[i+nvox3D+nvox3D]; flt v2[3]; //vector, image 2 v2[0] = img2[i]; v2[1] = img2[i+nvox3D]; v2[2] = img2[i+nvox3D+nvox3D]; flt x[3]; //cross product x[0] = (v[1]*v2[2]) - (v[2]*v2[1]); x[1] = (v[2]*v2[0]) - (v[0]*v2[2]); x[2] = (v[0]*v2[1]) - (v[1]*v2[0]); flt len = sqrt((x[0]*x[0])+(x[1]*x[1])+(x[2]*x[2])); if (len > 0.01) { allParallel = 0; //fprintf(stderr,"[%g %g %g] vs [%g %g %g]\n", v[0],v[1], v[2], v2[0], v2[1], v2[2]); break; } } if ( allParallel ) { fprintf(stderr,"Despite polarity differences, all vectors are parallel.\n"); nifti_image_free( nim2 ); exit(0); } nifti_image_free( nim2 ); exit(1); } //nifti_compare() static int nifti_binary_power ( nifti_image * nim, double v) { //clone operations from ANTS ImageMath: power //https://manpages.debian.org/jessie/ants/ImageMath.1.en.html if (nim->nvox < 1) return 1; if (nim->datatype!= DT_CALC) return 1; flt fv = v; flt * f32 = (flt *) nim->data; for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = pow(f32[i], v); return 0; } static int nifti_binary ( nifti_image * nim, char * fin, enum eOp op) { if (nim->nvox < 1) return 1; if (nim->datatype != DT_CALC) { fprintf(stderr,"nifti_binary: Unsupported datatype %d\n", nim->datatype); return 1; } nifti_image * nim2 = nifti_image_read2(fin, 1); if( !nim2 ) { fprintf(stderr,"** failed to read NIfTI image from '%s'\n", fin); return 2; } if ((nim->nx != nim2->nx) || (nim->ny != nim2->ny) || (nim->nz != nim2->nz) ) { fprintf(stderr,"** Attempted to process images of different sizes %"PRId64"x%"PRId64"x%"PRId64" vs %"PRId64"x%"PRId64"x%"PRId64"\n", nim->nx,nim->ny,nim->nz, nim2->nx,nim2->ny,nim2->nz); nifti_image_free( nim2 ); return 1; } if (max_displacement_mm(nim, nim2) > 0.5) { //fslmaths appears to use mm not voxel difference to determine alignment, threshold ~0.5mm fprintf(stderr,"WARNING:: Inconsistent orientations for individual images in pipeline! 
// Apply a voxel-wise binary operation (op) between the loaded image and the
// image in file fin, storing the result in nim. Supports 3D-vs-4D
// broadcasting: when one image is 3D and the other 4D, the 3D volume is
// repeated across volumes and the output is 4D (swap4D). Returns 0 on
// success, 1 on mismatch/unsupported op, 2 when fin cannot be read.
static int nifti_binary ( nifti_image * nim, char * fin, enum eOp op) {
	if (nim->nvox < 1) return 1;
	if (nim->datatype != DT_CALC) {
		fprintf(stderr,"nifti_binary: Unsupported datatype %d\n", nim->datatype);
		return 1;
	}
	nifti_image * nim2 = nifti_image_read2(fin, 1);
	if( !nim2 ) {
		fprintf(stderr,"** failed to read NIfTI image from '%s'\n", fin);
		return 2;
	}
	if ((nim->nx != nim2->nx) || (nim->ny != nim2->ny) || (nim->nz != nim2->nz) ) {
		fprintf(stderr,"** Attempted to process images of different sizes %"PRId64"x%"PRId64"x%"PRId64" vs %"PRId64"x%"PRId64"x%"PRId64"\n", nim->nx,nim->ny,nim->nz, nim2->nx,nim2->ny,nim2->nz);
		nifti_image_free( nim2 );
		return 1;
	}
	//fslmaths appears to use mm not voxel difference to determine alignment, threshold ~0.5mm
	if (max_displacement_mm(nim, nim2) > 0.5) {
		fprintf(stderr,"WARNING:: Inconsistent orientations for individual images in pipeline! (%gmm)\n", max_displacement_mm(nim, nim2));
		fprintf(stderr,"          Will use voxel-based orientation which is probably incorrect - *PLEASE CHECK*!\n");
	}
	in_hdr ihdr = set_input_hdr(nim2);
	if (nifti_image_change_datatype(nim2, nim->datatype, &ihdr) != 0) {
		nifti_image_free( nim2 );
		return 1;
	}
	flt * imga = (flt *) nim->data;
	flt * imgb = (flt *) nim2->data;
	int nvox3D = nim->nx * nim->ny * nim->nz;
	int nvola = nim->nvox / nvox3D;
	int nvolb = nim2->nvox / nvox3D;
	int rem0 = 0; //set when a mod/rem divides by zero (fslmaths warning)
	int swap4D = 0; //if 1: input nim was 3D, but nim2 is 4D: output will be 4D
	if ((nvolb > 1) && (nim->nvox != nim2->nvox) && ((op == uthr) || (op == thr))) {
		//"niimath 3D -uthr 4D out" only uses 1st volume of 4D, only one volume out
		nvolb = 1; //fslmaths
		printf("threshold operation expects 3D mask\n"); //fslmaths makes not modification to image
		if (op == uthr) //strictly for fslmaths compatibility - makes no sense
			for (size_t i = 0; i < nim->nvox; i++ )
				imga[i] = 0;
		nifti_image_free( nim2 );
		return 0;
	} else if (nim->nvox != nim2->nvox) {
		//situation where one input is 3D and the other is 4D
		if ((nvola != 1) && ((nvolb != 1))) {
			fprintf(stderr,"nifti_binary: both images must have the same number of volumes, or one must have a single volume (%d and %d)\n", nvola, nvolb);
			nifti_image_free( nim2 );
			return 1;
		}
		if (nvola == 1) { //swap so imga is always the 4D operand
			imgb = (flt *) nim->data;
			imga = (flt *) nim2->data;
			swap4D = 1;
			nvolb = nim->nvox / nvox3D;
			nvola = nim2->nvox / nvox3D;
		}
	} //make it so imga/novla >= imgb/nvolb
	//apply op volume by volume; image B volumes repeat via modulo
	for (int v = 0; v < nvola; v++ ) {
		int va = v * nvox3D; //start of volume for image A
		int vb = (v % nvolb) * nvox3D; //start of volume for image B
		if (op == add) {
			for (int i = 0; i < nvox3D; i++ )
				imga[va+i] += imgb[vb+i];
		} else if (op == sub) {
			//subtraction is not commutative: honor original operand order when swapped
			if (swap4D) {
				for (int i = 0; i < nvox3D; i++ ) {
					imga[va+i] = imgb[vb+i] - imga[va+i];
				}
			} else {
				for (int i = 0; i < nvox3D; i++ ) {
					imga[va+i] = imga[va+i] - imgb[vb+i];
				}
			}
		} else if (op == mul) {
			for (int i = 0; i < nvox3D; i++ )
				imga[va+i] *= imgb[vb+i];
		} else if (op == max) {
			for (int i = 0; i < nvox3D; i++ )
				imga[va+i] = MAX(imga[va+i], imgb[vb+i]);
		} else if (op == min) {
			for (int i = 0; i < nvox3D; i++ )
				imga[va+i] = MIN(imga[va+i], imgb[vb+i]);
		} else if (op == thr) { //thr : use following number to threshold current image (zero anything below the number)
			for (int i = 0; i < nvox3D; i++ )
				if (imga[va+i] < imgb[vb+i]) imga[va+i] = 0;
		} else if (op == uthr) { //uthr : use following number to upper-threshold current image (zero anything above the number)
			for (int i = 0; i < nvox3D; i++ )
				if (imga[va+i] > imgb[vb+i]) imga[va+i] = 0;
		} else if (op == mas) { //mask: keep A only where B (the mask) is positive
			if (swap4D) {
				for (int i = 0; i < nvox3D; i++ ) {
					if (imga[va+i] > 0)
						imga[va+i] = imgb[vb+i];
					else
						imga[va+i] = 0;
				}
			} else {
				for (int i = 0; i < nvox3D; i++ )
					if (imgb[vb+i] <= 0) imga[va+i] = 0;
			}
		} else if (op == divX) { //division: divide-by-zero yields 0
			if (swap4D) {
				for (int i = 0; i < nvox3D; i++ ) {
					if (imga[va+i] != 0.0f)
						imga[va+i] = imgb[vb+i]/imga[va+i];
				}
			} else {
				for (int i = 0; i < nvox3D; i++ ) {
					if (imgb[vb+i] == 0.0f)
						imga[va+i] = 0.0f;
					else
						imga[va+i] = imga[va+i]/imgb[vb+i];
				}
			}
		} else if (op == mod) { //afni mod function, divide by zero yields 0 (unlike Matlab, see remtest.m)
			//fractional remainder:
			if (swap4D) {
				for (int i = 0; i < nvox3D; i++ ) {
					if (imga[va+i] != 0.0f)
						imga[va+i] = fmod(imgb[vb+i], imga[va+i]);
					else {
						rem0 = 1;
						imga[va+i] = 0;
					}
				}
			} else {
				for (int i = 0; i < nvox3D; i++ ) {
					if (imgb[vb+i] != 0.0f)
						imga[va+i] = fmod(imga[va+i], imgb[vb+i]);
					else {
						rem0 = 1;
						imga[va+i] = 0;
					}
				}
			}
		} else if (op == rem) { //fmod _rem: integer remainder of truncated operands
			if (swap4D) {
				for (int i = 0; i < nvox3D; i++ ) {
					if (trunc(imga[va+i]) != 0.0f)
						imga[va+i] = fmod(trunc(imgb[vb+i]), trunc(imga[va+i]));
					else {
						rem0 = 1;
						imga[va+i] = imgb[vb+i];
					}
				}
			} else {
				for (int i = 0; i < nvox3D; i++ ) {
					if (trunc(imgb[vb+i]) != 0.0f)
						imga[va+i] = fmod(trunc(imga[va+i]), trunc(imgb[vb+i]));
					else
						rem0 = 1;
				}
			}
		} else {
			fprintf(stderr,"nifti_binary: unsupported operation %d\n", op);
			nifti_image_free( nim2 );
			return 1;
		}
	}
	if (swap4D) { //if 1: input nim was 3D, but nim2 is 4D: output will be 4D
		//copy 4D header geometry and steal nim2's (result) buffer
		nim->nvox = nim2->nvox;
		nim->ndim = nim2->ndim;
		nim->nt =nim2->nt;
		nim->nu =nim2->nu;
		nim->nv =nim2->nv;
		nim->nw =nim2->nw;
		for (int i = 4; i < 8; i++ ) {
			nim->dim[i] =nim2->dim[i];
			nim->pixdim[i] =nim2->pixdim[i];
		}
		nim->dt =nim2->dt;
		nim->du =nim2->du;
		nim->dv =nim2->dv;
		nim->dw =nim2->dw;
		free(nim->data);
		nim->data = nim2->data;
		nim2->data = NULL; //prevent double free by nifti_image_free
	}
	nifti_image_free( nim2 );
	if (rem0) {
		fprintf(stderr,"Warning -rem image included zeros (fslmaths exception)\n");
		return 0;
	}
	return 0;
} // nifti_binary()
if (imgb[vb+i] != 0.0f) //imga[va+i] = round(fmod(imga[va+i], imgb[vb+i])); imga[va+i] = fmod(imga[va+i], imgb[vb+i]); else { rem0 = 1; imga[va+i] = 0; } } } } else if (op == rem) { //fmod _rem //fractional remainder: if (swap4D) { for (int i = 0; i < nvox3D; i++ ) { //printf("!>[%d]/[%d] %g/%g = %g\n",vb+i, va+i, imgb[vb+i], imga[va+i], fmod(trunc(imgb[vb+i]), trunc(imga[va+i])) ); if (trunc(imga[va+i]) != 0.0f) imga[va+i] = fmod(trunc(imgb[vb+i]), trunc(imga[va+i])); else { rem0 = 1; imga[va+i] = imgb[vb+i]; } } } else { for (int i = 0; i < nvox3D; i++ ) { //printf("?>[%d]/[%d] %g/%g = %g : %g\n", va+i, vb+i, imga[va+i], imgb[vb+i], fmod(imga[va+i], imgb[vb+i]), fmod(trunc(imga[va+i]), trunc(imgb[vb+i])) ); if (trunc(imgb[vb+i]) != 0.0f) //imga[va+i] = round(fmod(imga[va+i], imgb[vb+i])); imga[va+i] = fmod(trunc(imga[va+i]), trunc(imgb[vb+i])); else rem0 = 1; } } } else { fprintf(stderr,"nifti_binary: unsupported operation %d\n", op); nifti_image_free( nim2 ); return 1; } } if (swap4D) { //if 1: input nim was 3D, but nim2 is 4D: output will be 4D nim->nvox = nim2->nvox; nim->ndim = nim2->ndim; nim->nt =nim2->nt; nim->nu =nim2->nu; nim->nv =nim2->nv; nim->nw =nim2->nw; for (int i = 4; i < 8; i++ ) { nim->dim[i] =nim2->dim[i]; nim->pixdim[i] =nim2->pixdim[i]; } nim->dt =nim2->dt; nim->du =nim2->du; nim->dv =nim2->dv; nim->dw =nim2->dw; free(nim->data); nim->data = nim2->data; nim2->data = NULL; } nifti_image_free( nim2 ); if (rem0) { fprintf(stderr,"Warning -rem image included zeros (fslmaths exception)\n"); return 0; } return 0; } // nifti_binary() struct sortIdx { flt val; int idx; }; static int nifti_roc( nifti_image * nim, double fpThresh, const char * foutfile, const char * fnoise, const char * ftruth) { if (nim->datatype != DT_CALC) return 1; //(nim, thresh, argv[outfile], fnoise, argv[truth]); //fslmaths appears to ignore voxels on edge of image, and will crash with small images: // error: sort(): given object has non-finite elements //therefore, there is a 
margin ("border") around the volume int border = 5; //in voxels int mindim = border + border + 1; //e.g. minimum size has one voxel surrounded by border on each side if ((nim->nx < mindim) || (nim->ny < mindim) || (nim->nz < mindim)) { fprintf(stderr,"volume too small for ROC analyses\n"); return 1; } if (nim->nvox > (nim->nx * nim->ny * nim->nz)) { fprintf(stderr,"ROC input should be 3D image (not 4D)\n"); //fslmaths seg faults return 1; } if ((fpThresh <= 0.0) || (fpThresh >= 1.0)) { fprintf(stderr,"ROC false-positive threshold should be between 0 and 1, not '%g'\n", fpThresh); return 1; } nifti_image * nimTrue = nifti_image_read2(ftruth, 1); if( !nimTrue ) { fprintf(stderr,"** failed to read NIfTI image from '%s'\n", ftruth); exit(2); } if ((nim->nx != nimTrue->nx) || (nim->ny != nimTrue->ny) || (nim->nz != nimTrue->nz) ) { fprintf(stderr,"** Truth image is the wrong size %"PRId64"x%"PRId64"x%"PRId64" vs %"PRId64"x%"PRId64"x%"PRId64"\n", nim->nx,nim->ny,nim->nz, nimTrue->nx,nimTrue->ny,nimTrue->nz); nifti_image_free( nimTrue ); exit(1); } if (nimTrue->nvox > (nimTrue->nx * nimTrue->ny * nimTrue->nz)) { fprintf(stderr,"ROC truth should be 3D image (not 4D)\n"); //fslmaths seg faults return 1; } nifti_image * nimNoise = NULL; //count number of tests //If the truth image contains negative voxels these get excluded from all calculations int nTest = 0; int nTrue = 0; size_t i = 0; flt * imgTrue = (flt *) nimTrue->data; flt * imgObs = (flt *) nim->data; for (int z = 0; z < nim->nz; z++ ) for (int y = 0; y < nim->ny; y++ ) for (int x = 0; x < nim->nx; x++ ) { if ((imgTrue[i] >= 0) && (x >= border) && (y >= border) && (z >= border) && (x < (nim->nx - border)) && (y < (nim->ny - border)) && (z < (nim->nz - border)) ) { nTest++; if (imgTrue[i] > 0) nTrue++; } i++; } if (nTest < 1) { fprintf(stderr,"** All truth voxels inside border are negative\n"); exit(1); } //printf("%d %d = %d\n", nTrue, nFalse, nTest); if (nTest == nTrue) fprintf(stderr,"Warning: All truth voxels 
inside border are the same (all true or all false)\n"); struct sortIdx * k = (struct sortIdx *)_mm_malloc(nTest*sizeof(struct sortIdx), 64); //load the data nTest = 0; i = 0; for (int z = 0; z < nim->nz; z++ ) for (int y = 0; y < nim->ny; y++ ) for (int x = 0; x < nim->nx; x++ ) { if ((imgTrue[i] >= 0) && (x >= border) && (y >= border) && (z >= border) && (x < (nim->nx - border)) && (y < (nim->ny - border)) && (z < (nim->nz - border)) ) { k[nTest].val = imgObs[i]; k[nTest].idx = imgTrue[i] > 0; nTest++; } i++; } qsort(k, nTest, sizeof(struct sortIdx), compare); //for (int v = 0; v < nvol; v++ ) // f32[ k[v].idx ] = v + 1; //printf("%d tests, intensity range %g..%g\n", nTest, k[0].val, k[nTest-1].val); FILE* txt = fopen(foutfile, "w+"); flt threshold = k[nTest-1].val; //maximum observed intensity int bins = 1000; //step size: how often are results reported flt step = (threshold-k[0].val)/bins; //[max-min]/bins int fp = 0; int tp = 0; if (fnoise != NULL) { nimNoise = nifti_image_read2(fnoise, 1); if ((nim->nx != nimNoise->nx) || (nim->ny != nimNoise->ny) || (nim->nz != nimNoise->nz) ) { fprintf(stderr,"** Noise image is the wrong size %"PRId64"x%"PRId64"x%"PRId64" vs %"PRId64"x%"PRId64"x%"PRId64"\n", nim->nx,nim->ny,nim->nz, nimNoise->nx,nimNoise->ny,nimNoise->nz); nifti_image_free( nimTrue ); nifti_image_free( nimNoise ); exit(1); } //Matlab script roc.m generates samples you can process with fslmaths.\ // The fslmaths text file includes two additional columns of output not described by the help documentation // Appears to find maximum signal in each noise volume, regardless of whether it is a hit or false alarm. 
int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3]; int nvol = nimNoise->nvox / nvox3D; if (nvol < 10) fprintf(stderr,"Warning: Noise images should include many volumes for estimating familywise error/\n"); flt * imgNoise = (flt *) nimNoise->data; flt * mxVox = (flt *)_mm_malloc(nvol*sizeof(flt), 64); for (int v = 0; v < nvol; v++ ) { //for each volume mxVox[v] = -INFINITY; size_t vo = v * nvox3D; size_t vi = 0; for (int z = 0; z < nim->nz; z++ ) for (int y = 0; y < nim->ny; y++ ) for (int x = 0; x < nim->nx; x++ ) { if ((imgTrue[vi] >= 0) && (x >= border) && (y >= border) && (z >= border) && (x < (nim->nx - border)) && (y < (nim->ny - border)) && (z < (nim->nz - border)) ) mxVox[v] = MAX(mxVox[v], imgNoise[vo+vi]); vi++; } } //for each volume nifti_image_free( nimNoise ); qsort (mxVox, nvol, sizeof(flt), compare); int idx = nTest - 1; flt mxNoise = mxVox[nvol-1]; while ((idx >= 1) && (k[idx].val > mxNoise)) { tp ++; idx --; if ((k[idx].val != k[idx-1].val) && (k[idx].val <= threshold) ) { fprintf(txt, "%g %g %g\n", (double)fp/(double)nvol, (double)tp/(double)nTrue, threshold); threshold = threshold - step; //delay next report } } //more significant than any noise... 
int fpThreshInt = round(fpThresh * nvol); //stop when number of false positives exceed this for (int i = nvol-1; i >= 1; i--) { fp ++; //false alarm while ((idx >= 1) && (k[idx].val >= mxVox[i])) { tp ++; idx --; if ((k[idx].val != k[idx-1].val) && (k[idx].val <= threshold) ) { fprintf(txt, "%g %g %g\n", (double)fp/(double)nvol, (double)tp/(double)nTrue, threshold); threshold = threshold - step; //delay next report } } //at least as significant as current noise if ((fp > fpThreshInt) || ((k[i].val != k[i-1].val) && (k[i].val <= threshold) ) ) { //printf("%g %g %g\n", (double)fp/(double)nFalse, (double)tp/(double)nTrue, threshold); fprintf(txt, "%g %g %g\n", (double)fp/(double)nvol, (double)tp/(double)nTrue, threshold); threshold = threshold - step; //delay next report } if (fp > fpThreshInt) break; } //inspect all tests... _mm_free (mxVox); exit(1); } else { //if noise image else infer FP/TP from input image int nFalse = nTest - nTrue; int fpThreshInt = ceil(fpThresh * nFalse); //stop when number of false positives exceed this for (int i = nTest-1; i >= 1; i--) { if (k[i].idx == 0) fp ++; //false alarm else tp ++; //hit if ((fp > fpThreshInt) || ((k[i].val != k[i-1].val) && (k[i].val <= threshold) ) ) { //printf("%g %g %g\n", (double)fp/(double)nFalse, (double)tp/(double)nTrue, threshold); fprintf(txt, "%g %g %g\n", (double)fp/(double)nFalse, (double)tp/(double)nTrue, threshold); threshold = threshold - step; //delay next report } if (fp > fpThreshInt) break; } //inspect all tests... } //if noise else... 
fclose(txt); _mm_free (k); nifti_image_free( nimTrue ); return 0; } static int nifti_fillh (nifti_image * nim, int is26) { if (nim->nvox < 1) return 1; if (nim->datatype != DT_CALC) return 1; int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3]; int nvol = nim->nvox / nvox3D; //size_t nxy = nim->nx * nim->ny; //slice increment uint8_t * vx = (uint8_t *)_mm_malloc(nim->nvox*sizeof(uint8_t), 64); memset(vx, 0, nim->nvox*sizeof(uint8_t)); size_t n1 = 0; flt * f32 = (flt *) nim->data; for (size_t i = 0; i < nim->nvox; i++ ) if (f32[i] > 0.0) { n1++; vx[i] = 1; } if ((n1 < 1) || (nim->nx < 3) || (nim->ny < 3) || (nim->nz < 3)) { //if fewer than 3 rows, columns or slices all voxels touch edge. //only a binary threshold, not a flood fill for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = vx[i]; _mm_free (vx); return 1; } //set up kernel to search for neighbors. Since we already included sides, we do not worry about A<->P and L<->R wrap int numk = 6; if (is26) numk = 26; int32_t * k = (int32_t *)_mm_malloc(numk*sizeof(int32_t), 64); //queue with untested seed if (is26) { int j = 0; for (int z = -1; z <= 1; z++ ) for (int y = -1; y <= 1; y++ ) for (int x = -1; x <= 1; x++ ) { k[j] = x + (y * nim->nx) + (z * nim->nx * nim->ny); j++; } //for x } else { //if 26 neighbors else 6.. k[0] = nim->nx * nim->ny; //up k[1] = -k[0]; //down k[2] = nim->nx; //anterior k[3] = -k[2]; //posterior k[4] = 1; //left k[5] = -1; } //https://en.wikipedia.org/wiki/Flood_fill #pragma omp parallel for for (int v = 0; v < nvol; v++ ) { uint8_t * vxv = vx; vxv += (v * nvox3D); uint8_t * vxs = (uint8_t *)_mm_malloc(nim->nvox*sizeof(uint8_t), 64); memcpy(vxs, vxv, nvox3D*sizeof(uint8_t)); //dst, src int32_t * q = (int32_t *)_mm_malloc(nvox3D*sizeof(int32_t), 64); //queue with untested seed int qlo = 0; int qhi = -1; //ints always signed in C! 
//load edges size_t i = 0; for (int z = 0; z < nim->nz; z++ ) { int zedge = 0; if ((z == 0) || (z == (nim->nz-1))) zedge = 1; for (int y = 0; y < nim->ny; y++ ) { int yedge = 0; if ((y == 0) || (y == (nim->ny-1))) yedge = 1; for (int x = 0; x < nim->nx; x++ ) { if ((vxs[i] == 0) && (zedge || yedge || (x == 0) || (x == (nim->nx-1))) ) { //found new seed vxs[i] = 1; //do not find again qhi++; q[qhi] = i; } // new seed i++; } //for x }//y } //z //printf("seeds %d kernel %d\n", qhi+1, numk); //run a 'first in, first out' queue while (qhi >= qlo) { //retire one seed, add 0..6 new ones (fillh) or 0..26 new ones (fillh26) for (int j = 0; j < numk; j++) { int jj = q[qlo] + k[j]; if ((jj < 0) || (jj >= nvox3D)) continue; if (vxs[jj] != 0) continue; //add new seed; vxs[jj] = 1; qhi++; q[qhi] = jj; } qlo++; } //while qhi >= qlo: continue until all seeds tested for (size_t i = 0; i < nvox3D; i++ ) if (vxs[i] == 0) vxv[i] = 1; //hidden internal voxel not found from the fill _mm_free (vxs); _mm_free (q); } //for each volume for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = vx[i]; _mm_free (vx); _mm_free (k); return 0; } static void rand_test() { //https://www.phoronix.com/scan.php?page=news_item&px=Linux-RdRand-Sanity-Check int r0 = rand(); for (int i = 0; i < 7; i++ ) if (rand() != r0) return; fprintf(stderr,"RDRAND gives funky output: update firmware\n"); } static int nifti_unary ( nifti_image * nim, enum eOp op) { if (nim->nvox < 1) return 1; if (nim->datatype != DT_CALC) { fprintf(stderr,"nifti_unary: Unsupported datatype %d\n", nim->datatype); return 1; } flt * f32 = (flt *) nim->data; if (op == exp1) { for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = exp(f32[i]); } else if (op == log1) { for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = log(f32[i]); } else if (op == sin1) { for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = sin(f32[i]); } else if (op == cos1) { for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = cos(f32[i]); } else if (op == tan1) { for (size_t i = 0; i < 
nim->nvox; i++ ) f32[i] = tan(f32[i]);
	} else if (op == asin1) {
		for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = asin(f32[i]);
	} else if (op == acos1) {
		for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = acos(f32[i]);
	} else if (op == atan1) {
		for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = atan(f32[i]);
	} else if (op == sqr1) {
		for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = f32[i]*f32[i]; //<- pow(a,x) uses flt for x
	} else if (op == sqrt1) {
		for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = sqrt(f32[i]);
	} else if (op == recip1) {
		for (size_t i = 0; i < nim->nvox; i++ ) {
			if (f32[i] == 0.0f) continue; //leave zeros untouched: avoids division by zero
			f32[i] = 1.0 / f32[i];
		}
	} else if (op == abs1) {
		for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = fabs(f32[i]);
	} else if (op == bin1) {
		//binarize: positive -> 1, else 0
		for (size_t i = 0; i < nim->nvox; i++ ) {
			if (f32[i] > 0) f32[i] = 1.0f;
			else f32[i] = 0.0f;
		}
	} else if (op == binv1) {
		//inverted binarize: positive -> 0, else 1
		for (size_t i = 0; i < nim->nvox; i++ ) {
			if (f32[i] > 0) f32[i] = 0.0f;
			else f32[i] = 1.0f;
		}
	} else if (op == edge1) {
		//gradient-magnitude edge map via central differences, scaled by voxel size
		if ((nim->dx == 0.0) || (nim->dy == 0.0) || (nim->dz == 0.0)) {
			fprintf(stderr,"edge requires non-zero pixdim1/pixdim2/pixdim3\n");
			return 1;
		}
		flt xscl = 1.0/(sqr(nim->dx));
		flt yscl = 1.0/(sqr(nim->dy));
		flt zscl = 1.0/(sqr(nim->dz));
		flt xyzscl = 1.0/(2.0 * sqrt(xscl+yscl+zscl));
		if (nim->dim[3] < 2) { //no slices 'above' or 'below' for 2D
			size_t nxy = nim->nx * nim->ny; //slice increment
			int nvol = nim->nvox / nxy;
			if ((nvol * nxy) != nim->nvox) return 1;
			#pragma omp parallel for
			for (int v = 0; v < nvol; v++ ) { //find maximum for each entire volume (excepted observed volume 0)
				flt * inp = (flt *)_mm_malloc(nxy*sizeof(flt), 64); //untouched copy of the input slice
				flt *o32 = (flt *) f32;
				o32 += v * nxy;
				memcpy(inp, o32, nxy*sizeof(flt)); //dst, src
				for (int y = 1; (y < (nim->ny -1)); y++ ) { //border voxels are left unchanged
					size_t yo =y * nim->nx;
					for (int x = 1; (x < (nim->nx -1)); x++ ) {
						size_t vx = yo + x;
						flt xv = sqr(inp[vx+1] - inp[vx-1]) * xscl;
						flt yv = sqr(inp[vx+nim->nx] - inp[vx-nim->nx]) * yscl;
						o32[vx] = sqrt(xv+yv)*xyzscl;
					} //x
				} //y
				_mm_free (inp);
			}//for v
			return 1; //NOTE(review): returns 1 even on success; main() ignores nifti_unary's return for these ops - verify intent
		} //edge for 2D volume(s)
		int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3];
		int nvol = nim->nvox / nvox3D;
		if ((nvox3D * nvol) != nim->nvox) return 1;
		size_t nxy = nim->nx * nim->ny; //slice increment
		#pragma omp parallel for
		for (int v = 0; v < nvol; v++ ) { //find maximum for each entire volume (excepted observed volume 0)
			flt * inp = (flt *)_mm_malloc(nvox3D*sizeof(flt), 64); //untouched copy of the input volume
			flt *o32 = (flt *) f32;
			o32 += v * nvox3D;
			memcpy(inp, o32, nvox3D*sizeof(flt)); //dst, src
			for (int z = 1; (z < (nim->nz -1)); z++ ) {
				size_t zo = z * nxy;
				for (int y = 1; (y < (nim->ny -1)); y++ ) {
					size_t yo =y * nim->nx;
					for (int x = 1; (x < (nim->nx -1)); x++ ) {
						size_t vx = zo + yo + x;
						flt xv = sqr(inp[vx+1] - inp[vx-1]) * xscl;
						flt yv = sqr(inp[vx+nim->nx] - inp[vx-nim->nx]) * yscl;
						flt zv = sqr(inp[vx+nxy] - inp[vx-nxy]) * zscl;
						o32[vx] = sqrt(xv+yv+zv)*xyzscl;
					} //x
				} //y
			} //z
			_mm_free (inp);
		}//for v
		return 1; //edge for 3D volume(s)
	} else if (op == index1) {
		//number each nonzero voxel with a sequential index
		//nb FSLmaths flips dim[1] depending on determinant
		size_t idx = 0;
		if (!neg_determ(nim)) { //flip x
			size_t nyzt = nim->nvox / nim->nx;
			if ((nyzt * nim->nx) != nim->nvox) return 1;
			for (size_t i = 0; i <nyzt; i++ ) {
				size_t row = i * nim->nx;;
				int x = nim->nx;
				while (x > 0) {
					x--;
					if (f32[row+x] != 0) f32[row+x] = idx++;
				} //for each column (x)
			} //for each row (yzt)
		} else //don't flip x
			for (size_t i = 0; i < nim->nvox; i++ )
				if (f32[i] != 0) f32[i] = idx++;
	} else if (op == nan1) {
		//zero any not-a-number voxels
		for (size_t i = 0; i < nim->nvox; i++ )
			if (isnan(f32[i])) f32[i] = 0.0;
	} else if (op == nanm1) {
		//NaN mask: 1 where NaN, 0 elsewhere
		for (size_t i = 0; i < nim->nvox; i++ )
			if (isnan(f32[i])) f32[i] = 1.0;
			else f32[i] = 0.0;
	} else if (op == rand1) {
		//add uniform noise in 0..1 to every voxel
		rand_test();
		flt scl = (1.0 / RAND_MAX);
		for (size_t i = 0; i < nim->nvox; i++ ) f32[i] += rand() * scl;
	} else if (op == randn1) {
		//add Gaussian noise (mean 0, SD 1) via the Box-Muller transform
		rand_test();
		//https://en.wikipedia.org/wiki/Box–Muller_transform
		//for SIMD see https://github.com/miloyip/normaldist-benchmark
		static const flt sigma = 1.0f;
static const flt mu = 0.0;
		//static const flt epsilon = FLT_EPSILON;
		static const flt two_pi = 2.0*3.14159265358979323846;
		static const flt scl = (1.0 / RAND_MAX);
		//fill pairs
		//NOTE(review): 'epsilon' is used below but its local declaration is commented out;
		//presumably a file-scope 'epsilon' exists elsewhere, otherwise this will not compile - verify.
		for (size_t i = 0; i < (nim->nvox-1); i += 2 ) {
			flt u1, u2;
			do {
				u1 = rand() * scl;
				u2 = rand() * scl;
			} while (u1 <= epsilon); //reject tiny u1 so log(u1) stays finite
			flt su1 = sqrt(-2.0 * log(u1));
			flt z0 = su1 * cos(two_pi * u2);
			flt z1 = su1 * sin(two_pi * u2);
			f32[i] += z0 * sigma + mu;
			f32[i+1] += z1 * sigma + mu;
		}
		//if odd, fill final voxel
		if ( nim->nvox %2 != 0 ) {
			flt u1, u2;
			do {
				u1 = rand() * scl;
				u2 = rand() * scl;
			} while (u1 <= epsilon);
			flt z0 = sqrt(-2.0 * log(u1)) * cos(two_pi * u2);
			f32[nim->nvox-1] += z0 * sigma + mu;
		}
	} else if (op == range1) {
		//record intensity range in header cal_min/cal_max (voxel data unchanged)
		flt mn = f32[0];
		flt mx = mn;
		for (size_t i = 0; i < nim->nvox; i++ ) {
			mn = fmin(f32[i], mn);
			mx = fmax(f32[i],mx);
		}
		nim->cal_min = mn;
		nim->cal_max = mx;
	} else if (op == rank1) {
		//temporal rank: each voxel's value becomes its 1-based rank across volumes
		int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3];
		int nvol = nim->nvox / nvox3D;
		if ((nvox3D * nvol) != nim->nvox) return 1;
		if (nvol <= 1 ) { //you are always first if you are the only one to show up...
			for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = 1;
		} else {
			#pragma omp parallel for
			for (int i = 0; i < nvox3D; i++ ) { //how do we handle ties?
				struct sortIdx * k = (struct sortIdx *)_mm_malloc(nvol*sizeof(struct sortIdx), 64);
				size_t j = i;
				for (int v = 0; v < nvol; v++ ) { //gather this voxel's timecourse
					k[v].val = f32[j];
					k[v].idx = j;
					j += nvox3D;
				}
				int varies = 0;
				for (int v = 0; v < nvol; v++ ) {
					if (k[v].val != k[0].val) {
						varies = 1;
						break;
					}
				}
				if (varies) {
					qsort (k, nvol, sizeof(struct sortIdx), compare);
					for (int v = 0; v < nvol; v++ )
						f32[ k[v].idx ] = v + 1;
				} else { //constant timecourse: assign ranks in temporal order
					j = i;
					for (int v = 0; v < nvol; v++ ) {
						f32[j] = v + 1;
						j += nvox3D;
					}
				}
				_mm_free (k);
			} //for i
		} //nvol > 1
	} else if ((op == rank1) || (op == ranknorm1)) {
		//NOTE(review): 'op == rank1' here is dead code (the branch above already handles it);
		//only ranknorm1 reaches this branch.
		int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3];
		int nvol = nim->nvox / nvox3D;
		if ((nvox3D * nvol) != nim->nvox) return 1;
		if (nvol <= 1 ) { //you are always first if you are the only one to show up...
			for (int i = 0; i < nim->nvox; i++ ) f32[i] = 0;
		} else {
			#pragma omp parallel for
			for (int i = 0; i < nvox3D; i++ ) {
				struct sortIdx * k = (struct sortIdx *)_mm_malloc(nvol*sizeof(struct sortIdx), 64);
				size_t j = i;
				double sum = 0.0;
				for (int v = 0; v < nvol; v++ ) { //gather timecourse, accumulate mean
					k[v].val = f32[j];
					sum += k[v].val;
					k[v].idx = j;
					j += nvox3D;
				}
				double mean = sum / nvol;
				double sumSqr = 0.0;
				for (int v = 0; v < nvol; v++ )
					sumSqr += sqr(k[v].val- mean);
				double stdev = sqrt(sumSqr / (nvol - 1)); //sample standard deviation
				qsort (k, nvol, sizeof(struct sortIdx), compare);
				//strange formula, but replicates fslmaths, consider nvol=3 rank[2,0,1] will be pval [2.5/3, 1.5/3, 0.5/3]
				for (int v = 0; v < nvol; v++ )
					f32[ k[v].idx ] = (stdev * -qginv((double)(v + 0.5)/(double)nvol)) + mean;
				_mm_free (k);
			} //for i
		} //nvol > 1
		//double qginv( double p )
	} else if (op == ztop1) {
		//z-score to p-value
		for (size_t i = 0; i < nim->nvox; i++ ) f32[i] = qg(f32[i]);
	} else if (op == ptoz1) {
		/*** given p, return x such that Q(x)=p, for 0 < p < 1 ***/
		// #ifdef DT32
		const flt kNaN = NAN; //out-of-range p values become NaN
		//const flt kNaN = 0.0 / 0.0;
		for (size_t i = 0; i < nim->nvox; i++ ) {
			if ((f32[i] < 0.0) || (f32[i] > 1.0))
				f32[i] = kNaN;
			else
				f32[i] = qginv(f32[i]);
		}
	} else if ((op == pval1) || (op ==
pval01)) {
		//permutation p-value: volume 0 holds the observed statistic, remaining volumes the permutations
		int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3];
		int nvol = nim->nvox / nvox3D;
		if ((nvox3D * nvol) != nim->nvox) return 1;
		if (nvol <= 1 ) {
			fprintf(stderr,"permutation tests require 4D datasets.\n");
			return 1;
		}
		void * dat = (void *)calloc(1, nvox3D * sizeof(flt)) ;
		flt *o32 = (flt *) dat;
		#pragma omp parallel for
		for (int i = 0; i < nvox3D; i++ ) {
			size_t vi = i;
			flt obs = f32[vi]; //observed value - see if it is extreme relative to permutations
			int nNotZero = 0;
			int nGreater = 0;
			int nEqual = 0;
			//observation in first volume
			flt f32v0 = f32[vi];
			for (int v = 0; v < nvol; v++ ) {
				if (f32[vi] != 0) nNotZero ++;
				if (f32[vi] == f32v0) nEqual ++;
				if (f32[vi] >= obs) nGreater ++;
				vi += nvox3D;
			}
			if (op == pval1) {
				//if (nEqual == nvol)
				//	o32[i] = 0.0;
				//else
					o32[i] = (double)nGreater / (double) nvol ;
			} else { //pval01: denominator counts only non-zero permutations
				if (nEqual == nvol)
					o32[i] = 0.0;
				else if (obs == 0)
					o32[i] = 1.0;
				else //nZero must be at least 1: the observed data is not zero
					o32[i] = (double)nGreater / (double) (nNotZero) ;
			}
		} //for i
		//output is a single 3D volume: shrink the header and swap in the new buffer
		nim->nvox = nvox3D;
		nim->ndim = 3;
		nim->nt = 1;
		nim->dim[0] = 3;
		nim->dim[4] = 1;
		free(nim->data);
		nim->data = dat;
	} else if (op == cpval1) {
		//familywise-corrected p-value: compare each observed voxel to the maximum of each permutation volume
		int nvox3D = nim->dim[1] * nim->dim[2] * nim->dim[3];
		int nvol = nim->nvox / nvox3D;
		if ((nvox3D * nvol) != nim->nvox) return 1;
		if (nvol <= 1 ) {
			fprintf(stderr,"permutation tests require 4D datasets.\n");
			return 1;
		}
		void * dat = (void *)calloc(1, nvox3D * sizeof(flt)) ;
		flt *o32 = (flt *) dat;
		flt * vmax = (flt *)_mm_malloc(nvol*sizeof(flt), 64);
		#pragma omp parallel for
		for (int v = 1; v < nvol; v++ ) { //find maximum for each entire volume (excepted observed volume 0)
			size_t vo = v * nvox3D;
			flt mx = f32[vo];
			for (int i = 0; i < nvox3D; i++ )
				mx = MAX(mx, f32[vo+i]);
			vmax[v] = mx;
			//printf("%d %g\n", v, mx);
		}
		#pragma omp parallel for
		for (int i = 0; i < nvox3D; i++ ) {
			flt obs = f32[i]; //observed value - see if it is extreme relative to permutations
			int nGreater = 1; //count observation
			for (int v = 1; v < nvol; v++ )
				if (vmax[v] >= obs) nGreater ++;
			o32[i] = (double)nGreater / (double) nvol ;
		} //for i
		_mm_free (vmax);
		//output is a single 3D volume: shrink the header and swap in the new buffer
		nim->nvox = nvox3D;
		nim->ndim = 3;
		nim->nt = 1;
		nim->dim[0] = 3;
		nim->dim[4] = 1;
		free(nim->data);
		nim->data = dat;
	} else {
		fprintf(stderr,"nifti_unary: Unsupported operation\n");
		return 1;
	}
	return 0;
}//nifti_unary()

// Threshold at a percentage of the image's robust intensity range.
static int nifti_thrp(nifti_image * nim, double v, enum eOp op) {
	// -thrp: use following percentage (0-100) of ROBUST RANGE to threshold current image (zero anything below the number)
	// -thrP: use following percentage (0-100) of ROBUST RANGE of non-zero voxels and threshold below
	// -uthrp : use following percentage (0-100) of ROBUST RANGE to upper-threshold current image (zero anything above the number)
	// -uthrP : use following percentage (0-100) of ROBUST RANGE of non-zero voxels and threshold above
	if ((v <= 0.0) || (v >= 100.0)) {
		fprintf(stderr,"nifti_thrp: threshold should be between 0..100\n");
		return 1;
	}
	flt pct2, pct98;
	int ignoreZeroVoxels = 0;
	if ((op == thrP) || (op == uthrP)) ignoreZeroVoxels = 1; //capital P variants ignore zero voxels
	if (nifti_robust_range(nim, &pct2, &pct98, ignoreZeroVoxels) != 0) return 1;
	flt thresh = pct2 + ((v/100.0) * (pct98-pct2));
	int zeroBrightVoxels = 0;
	if ((op == uthrp) || (op == uthrP)) zeroBrightVoxels = 1; //'u' variants zero above the threshold
	nifti_thr(nim, thresh, zeroBrightVoxels);
	return 0;
} //nifti_thrp()

// Entry point: parse the fslmaths-style command line and apply operations in order.
#ifdef DT32
int main32(int argc, char * argv[]) {
#else
int main64(int argc, char * argv[]) {
	printf("beta: Using 64-bit calc\n");
#endif
	char * fin=NULL, * fout=NULL;
	//fslmaths in.nii out.nii changes datatype to flt, here we retain (similar to earlier versions of fslmaths)
	//fslmsths in.nii -rem 10 out.nii uses integer modulus not fmod
	//fslmaths robust range not fully described, this emulation is close
	//fslmaths ing/inm are listed as "unary" but should be listed as binary
	if( argc < 3 ) return show_helpx(); //minimal command has input and output: "niimath in.nii out.nii"
	int dtCalc = DT_FLOAT32; //data type for calculation
	int dtOut = DT_FLOAT32;
//data type for calculation int ac = 1; // '-dt' sets datatype for calculations if( ! strcmp(argv[ac], "-dt") ) { if (! strcmp(argv[ac+1], "double") ) { dtCalc = DT_FLOAT64; } else if (strcmp(argv[ac+1], "float") ) { fprintf(stderr,"'-dt' error: only float or double calculations supported\n"); return 1; } ac += 2; if( argc < (ac+2) ) return 1; //insufficient arguments remain } //special case: pass through // no calculation, simple pass through copy, e.g. "niimaths in.nii out.nii.gz" // note fslmaths would save as flt type... but lossless conversion in native format is faster // note here we use nifti_image_read not nifti_image_read2 to preserve cal_min, cal_max if (ac+2 == argc) { fin = argv[ac]; /* no string copy, just pointer assignment */ ac ++; nifti_image * nim = nifti_image_read(fin, 1); fout = argv[ac]; /* no string copy, just pointer assignment */ ac ++; if (nifti_set_filenames(nim, fout, 0, 1) ) return 1; nifti_save(nim, ""); //nifti_image_write( nim ); nifti_image_free( nim ); return 0; } //end pass through // next argument is input file fin = argv[ac]; /* no string copy, just pointer assignment */ ac ++; //clock_t startTime = clock(); nifti_image * nim = nifti_image_read2(fin, 1); if( !nim ) { fprintf(stderr,"** failed to read NIfTI image from '%s'\n", fin); return 2; } //printf("read time: %ld ms\n", timediff(startTime, clock())); in_hdr ihdr = set_input_hdr(nim); int nkernel = 0; //number of voxels in kernel int * kernel = make_kernel(nim, &nkernel, 3,3,3); //check for "-odt" must be last couplet if ( ! strcmp(argv[argc-2], "-odt") ) { if (! strcmp(argv[argc-1], "double") ) { dtOut = DT_FLOAT64; } else if (! strcmp(argv[argc-1], "flt") ) { dtOut = DT_FLOAT32; } else if (! strcmp(argv[argc-1], "int") ) { dtOut = DT_INT32; } else if (! strcmp(argv[argc-1], "short") ) { dtOut = DT_INT16; } else if (! strcmp(argv[argc-1], "ushort") ) { dtOut = DT_UINT16; } else if (! strcmp(argv[argc-1], "char") ) { dtOut = DT_UINT8; } else if (! 
strcmp(argv[argc-1], "input") ) { dtOut = nim->datatype;//ihdr.datatype; //! } else { fprintf(stderr,"Error: Unknown datatype '%s' - Possible datatypes are: char short ushort int flt double input\n", argv[argc-1]); return 2; } argc = argc - 2; } //odt //convert data to calculation type (-dt) if (nifti_image_change_datatype(nim, dtCalc, &ihdr) != 0) return 1; //check output filename, e.g does file exist fout = argv[argc-1]; /* no string copy, just pointer assignment */ if( nifti_set_filenames(nim, fout, 0, 1) ) return 1; argc = argc - 1; #if defined(_OPENMP) const int maxNumThreads = omp_get_max_threads(); const char *key = "AFNI_COMPRESSOR"; char *value; value = getenv(key); //export AFNI_COMPRESSOR=PIGZ char pigzKey[5] = "PIGZ"; if ((value != NULL) && (strstr(value,pigzKey))) { omp_set_num_threads(maxNumThreads); fprintf(stderr,"Using %d threads\n", maxNumThreads); } else { omp_set_num_threads(1); fprintf(stderr,"Single threaded\n"); } #endif //read operations char* end; int ok = 0; while (ac < argc) { enum eOp op = unknown; if ( ! strcmp(argv[ac], "-add") ) op = add; if ( ! strcmp(argv[ac], "-sub") ) op = sub; if ( ! strcmp(argv[ac], "-mul") ) op = mul; if ( ! strcmp(argv[ac], "-div") ) op = divX; if ( ! strcmp(argv[ac], "-rem") ) op = rem; if ( ! strcmp(argv[ac], "-mod") ) op = mod; if ( ! strcmp(argv[ac], "-mas") ) op = mas; if ( ! strcmp(argv[ac], "-thr") ) op = thr; if ( ! strcmp(argv[ac], "-thrp") ) op = thrp; if ( ! strcmp(argv[ac], "-thrP") ) op = thrP; if ( ! strcmp(argv[ac], "-uthr") ) op = uthr; if ( ! strcmp(argv[ac], "-uthrp") ) op = uthrp; if ( ! strcmp(argv[ac], "-uthrP") ) op = uthrP; if ( ! strcmp(argv[ac], "-max") ) op = max; if ( ! strcmp(argv[ac], "-min") ) op = min; if ( ! strcmp(argv[ac], "-max") ) op = max; //if ( ! strcmp(argv[ac], "-addtozero") ) op = addtozero; //variation of mas //if ( ! strcmp(argv[ac], "-overadd") ) op = overadd; //variation of mas if ( ! strcmp(argv[ac], "power") ) op = power; if ( ! 
strcmp(argv[ac], "-seed") ) op = seed;
		//if ( ! strcmp(argv[ac], "-restart") ) op = restart;
		//if ( ! strcmp(argv[ac], "-save") ) op = save;
		if ( ! strcmp(argv[ac], "-inm") ) op = inm;
		if ( ! strcmp(argv[ac], "-ing") ) op = ing;
		if ( ! strcmp(argv[ac], "-s") ) op = smth;
		//unary operations
		if ( ! strcmp(argv[ac], "-exp") ) op = exp1;
		if ( ! strcmp(argv[ac], "-log") ) op = log1;
		if ( ! strcmp(argv[ac], "-sin") ) op = sin1;
		if ( ! strcmp(argv[ac], "-cos") ) op = cos1;
		if ( ! strcmp(argv[ac], "-tan") ) op = tan1;
		if ( ! strcmp(argv[ac], "-asin") ) op = asin1;
		if ( ! strcmp(argv[ac], "-acos") ) op = acos1;
		if ( ! strcmp(argv[ac], "-atan") ) op = atan1;
		if ( ! strcmp(argv[ac], "-sqr") ) op = sqr1;
		if ( ! strcmp(argv[ac], "-sqrt") ) op = sqrt1;
		if ( ! strcmp(argv[ac], "-recip") ) op = recip1;
		if ( ! strcmp(argv[ac], "-abs") ) op = abs1;
		if ( ! strcmp(argv[ac], "-bin") ) op = bin1;
		if ( ! strcmp(argv[ac], "-binv") ) op = binv1;
		if ( ! strcmp(argv[ac], "-edge") ) op = edge1;
		if ( ! strcmp(argv[ac], "-index") ) op = index1;
		if ( ! strcmp(argv[ac], "-nan") ) op = nan1;
		if ( ! strcmp(argv[ac], "-nanm") ) op = nanm1;
		if ( ! strcmp(argv[ac], "-rand") ) op = rand1;
		if ( ! strcmp(argv[ac], "-randn") ) op = randn1;
		if ( ! strcmp(argv[ac], "-range") ) op = range1;
		if ( ! strcmp(argv[ac], "-rank") ) op = rank1;
		if ( ! strcmp(argv[ac], "-ranknorm") ) op = ranknorm1;
		if ( ! strcmp(argv[ac], "-ztop") ) op = ztop1;
		if ( ! strcmp(argv[ac], "-ptoz") ) op = ptoz1;
		if ( ! strcmp(argv[ac], "-pval") ) op = pval1;
		if ( ! strcmp(argv[ac], "-pval0") ) op = pval01;
		if ( ! strcmp(argv[ac], "-cpval") ) op = cpval1;
		//kernel operations
		if ( ! strcmp(argv[ac], "-dilM") ) op = dilMk;
		if ( ! strcmp(argv[ac], "-dilD") ) op = dilDk;
		if ( ! strcmp(argv[ac], "-dilF") ) op = dilFk;
		if ( ! strcmp(argv[ac], "-dilall") ) op = dilallk;
		if ( ! strcmp(argv[ac], "-ero") ) op = erok;
		if ( ! strcmp(argv[ac], "-eroF") ) op = eroFk;
		if ( ! strcmp(argv[ac], "-fmedian") ) op = fmediank;
		if ( ! strcmp(argv[ac], "-fmean") ) op = fmeank;
		if ( ! strcmp(argv[ac], "-fmeanu") ) op = fmeanuk;
		if ( ! strcmp(argv[ac], "-p") ) { //set number of OpenMP threads
			ac++;
#if defined(_OPENMP)
			int nProcessors = atoi(argv[ac]);
			if (nProcessors < 1) {
				omp_set_num_threads(maxNumThreads);
				fprintf(stderr,"Using %d threads\n", maxNumThreads);
			} else
				omp_set_num_threads(nProcessors);
#else
			fprintf(stderr,"Warning: not compiled for OpenMP: '-p' ignored\n");
#endif
		} else
		//All Dimensionality reduction operations names begin with Capital letter, no other commands do!
		if ((strlen(argv[ac]) > 4) && (argv[ac][0] == '-') && (isupper(argv[ac][1]))) { //isupper
			int dim = 0; //axis along which to reduce (1..4 = X,Y,Z,T)
			switch (argv[ac][1]) {
				case 'X': //
					dim = 1;
					break;
				case 'Y': // code to be executed if n = 2;
					dim = 2;
					break;
				case 'Z': //
					dim = 3;
					break;
				case 'T': // code to be executed if n = 2;
					dim = 4;
					break;
			}
			if (dim == 0) {
				fprintf(stderr,"Error: unknown dimensionality reduction operation: %s\n", argv[ac]);
				goto fail;
			}
			if ( strstr(argv[ac], "mean") ) ok = nifti_dim_reduce(nim, Tmean, dim, 0);
			else if ( strstr(argv[ac], "std") ) ok = nifti_dim_reduce(nim, Tstd, dim, 0);
			else if ( strstr(argv[ac], "maxn") ) ok = nifti_dim_reduce(nim, Tmaxn, dim, 0); //test maxn BEFORE max
			else if ( strstr(argv[ac], "max") ) ok = nifti_dim_reduce(nim, Tmax, dim, 0);
			else if ( strstr(argv[ac], "min") ) ok = nifti_dim_reduce(nim, Tmin, dim, 0 );
			else if ( strstr(argv[ac], "median") ) ok = nifti_dim_reduce(nim, Tmedian, dim, 0);
			else if ( strstr(argv[ac], "perc") ) { //percentile takes an extra argument
				ac++;
				int pct = atoi(argv[ac]);
				ok = nifti_dim_reduce(nim, Tperc, dim, pct);
			} else if ( strstr(argv[ac], "ar1") ) ok = nifti_dim_reduce(nim, Tar1, dim, 0);
			else {
				fprintf(stderr,"Error unknown dimensionality reduction operation: %s\n", argv[ac]);
				ok = 1;
			}
		} else if ( !
strcmp(argv[ac], "-roi") ) { //int , int , int , int , int , int , int , int )
			if ((argc-ac) < 8) {
				fprintf(stderr,"not enough arguments for '-roi'\n"); //start.size for 4 dimensions: user might forget volumes
				goto fail;
			}
			ac ++; int xmin = atoi(argv[ac]);
			ac ++; int xsize = atoi(argv[ac]);
			ac ++; int ymin = atoi(argv[ac]);
			ac ++; int ysize = atoi(argv[ac]);
			ac ++; int zmin = atoi(argv[ac]);
			ac ++; int zsize = atoi(argv[ac]);
			ac ++; int tmin = atoi(argv[ac]);
			ac ++; int tsize = atoi(argv[ac]);
			nifti_roi(nim, xmin, xsize, ymin, ysize, zmin, zsize, tmin, tsize);
		} else if ( ! strcmp(argv[ac], "-bptfm") ) { //temporal filter, variant that keeps the mean (final arg 0)
			ac++; double hp_sigma = strtod(argv[ac], &end);
			ac++; double lp_sigma = strtod(argv[ac], &end);
			ok = nifti_bptf(nim, hp_sigma, lp_sigma, 0);
		} else if ( ! strcmp(argv[ac], "-bptf") ) {
			ac++; double hp_sigma = strtod(argv[ac], &end);
			ac++; double lp_sigma = strtod(argv[ac], &end);
			//ok = nifti_bptf(nim, hp_sigma, lp_sigma);
			ok = nifti_bptf(nim, hp_sigma, lp_sigma, 1);
#ifdef bandpass
		} else if ( ! strcmp(argv[ac], "-bandpass") ) {
			// niimath test4D -bandpass 0.08 0.008 0 c
			ac++; double lp_hz = strtod(argv[ac], &end);
			ac++; double hp_hz = strtod(argv[ac], &end);
			ac++; double TRsec = strtod(argv[ac], &end);
			ok = nifti_bandpass(nim, lp_hz, hp_hz, TRsec);
#endif
		} else if ( ! strcmp(argv[ac], "-roc") ) {
			//-roc <AROC-thresh> <outfile> [4Dnoiseonly] <truth>
			//-roc <AROC-thresh> <outfile> [4Dnoiseonly] <truth>
			//a positive threshold means a 4D noise image follows; the sign is stripped with fabs() below
			ac++; double thresh = strtod(argv[ac], &end);
			ac++; int outfile = ac;
			char * fnoise =NULL;
			if (thresh > 0.0) {
				ac++;
				fnoise = argv[ac];
			}
			ac++; int truth = ac;
			//ok = nifti_bptf(nim, hp_sigma, lp_sigma);
			ok = nifti_roc(nim, fabs(thresh), argv[outfile], fnoise, argv[truth]);
			if (ac >= argc) { //NOTE(review): this check runs after nifti_roc already consumed the arguments - verify ordering
				fprintf(stderr,"Error: no output filename specified!\n"); //e.g. volume size might differ
				goto fail;
			}
		} else if ( ! strcmp(argv[ac], "-unsharp") ) { //unsharp mask: isotropic sigma, strength 'amount'
			ac++; double sigma = strtod(argv[ac], &end);
			ac++; double amount = strtod(argv[ac], &end);
			nifti_unsharp(nim, sigma, sigma, sigma, amount);
		} else if ( ! strcmp(argv[ac], "-otsu") ) ok = nifti_otsu(nim, 0);
		else if ( ! strcmp(argv[ac], "-otsu0") ) ok = nifti_otsu(nim, 1);
		else if ( ! strcmp(argv[ac], "-subsamp2") ) ok = nifti_subsamp2(nim, 0);
		else if ( ! strcmp(argv[ac], "-subsamp2offc") ) ok = nifti_subsamp2(nim, 1);
		else if ( ! strcmp(argv[ac], "-sobel") ) ok = nifti_sobel(nim, 1);
		else if ( ! strcmp(argv[ac], "-demean") ) ok = nifti_demean(nim);
		else if ( ! strcmp(argv[ac], "-detrend") ) ok = nifti_detrend_linear(nim);
		else if ( ! strcmp(argv[ac], "-resize") ) {
			ac++; double X = strtod(argv[ac], &end);
			ac++; double Y = strtod(argv[ac], &end);
			ac++; double Z = strtod(argv[ac], &end);
			ac ++; int interp_method = atoi(argv[ac]);
			ok = nifti_resize(nim, X, Y, Z, interp_method);
		} else if ( ! strcmp(argv[ac], "-crop") ) {
			ac ++; int tmin = atoi(argv[ac]);
			ac ++; int tsize = atoi(argv[ac]);
			ok = nifti_crop(nim, tmin, tsize);
		} else if ( ! strcmp(argv[ac], "--compare") ) { //--function terminates without saving image
			ac ++;
			nifti_compare(nim, argv[ac]); //always terminates
		} else if ( ! strcmp(argv[ac], "-edt") ) ok = nifti_edt(nim);
		else if ( ! strcmp(argv[ac], "-fillh") ) ok = nifti_fillh(nim, 0);
		else if ( ! strcmp(argv[ac], "-fillh26") ) ok = nifti_fillh(nim, 1);
		else if ( ! strcmp(argv[ac], "-kernel") ) { //select kernel used by subsequent dil/ero/filter ops
			ac ++;
			if (kernel != NULL) _mm_free(kernel);
			kernel = NULL;
			if ( ! strcmp(argv[ac], "3D") ) kernel = make_kernel(nim, &nkernel, 3,3,3);
			if ( ! strcmp(argv[ac], "2D") ) kernel = make_kernel(nim, &nkernel, 3,3,1);
			if ( ! strcmp(argv[ac], "boxv") ) { //cube of <size> voxels
				ac++;
				int vx = atoi(argv[ac]);
				kernel = make_kernel(nim, &nkernel, vx,vx,vx);
			}
			if ( ! strcmp(argv[ac], "sphere") ) { //sphere of radius <size> mm
				ac++;
				double mm = strtod(argv[ac], &end);
				kernel = make_kernel_sphere(nim, &nkernel, mm);
			}
			if ( !
strcmp(argv[ac], "file") ) { ac++; kernel = make_kernel_file(nim, &nkernel, argv[ac]); } if ( ! strcmp(argv[ac], "gauss") ) { ac++; double mm = strtod(argv[ac], &end); kernel = make_kernel_gauss(nim, &nkernel, mm); } if ( ! strcmp(argv[ac], "box") ) { //all voxels in a cube of width <size> mm centered on target voxel"); ac++; double mm = strtod(argv[ac], &end); int vx = (2*floor(mm/nim->dx))+1; int vy = (2*floor(mm/nim->dy))+1; int vz = (2*floor(mm/nim->dz))+1; kernel = make_kernel(nim, &nkernel, vx,vy,vz); } if ( ! strcmp(argv[ac], "boxv3") ) { ac++; int vx = atoi(argv[ac]); ac++; int vy = atoi(argv[ac]); ac++; int vz = atoi(argv[ac]); kernel = make_kernel(nim, &nkernel, vx,vy,vz); } if (kernel == NULL){ fprintf(stderr,"Error: '-kernel' option failed.\n"); //e.g. volume size might differ ok = 1; } } else if ( ! strcmp(argv[ac], "-tensor_2lower") ) { ok = nifti_tensor_2(nim, 0); } else if ( ! strcmp(argv[ac], "-tensor_2upper") ) { ok = nifti_tensor_2(nim, 1); } else if ( ! strcmp(argv[ac], "-tensor_decomp") ) { ok = nifti_tensor_decomp(nim,1); } else if ( ! strcmp(argv[ac], "-tensor_decomp_lower") ) { ok = nifti_tensor_decomp(nim,0); } else if ( ! strcmp(argv[ac], "-slicetimer") ) { #ifdef slicetimer ok = nifti_slicetimer(nim); #else fprintf(stderr,"Recompile to support slice timer\n"); //e.g. volume size might differ ok = 1; #endif } else if ( ! strcmp(argv[ac], "-save") ) { ac ++; char * fout2 = argv[ac]; if( nifti_set_filenames(nim, fout2, 1, 1) ) ok = 1; else { nifti_save(nim, ""); //nifti_image_write( nim ); nifti_set_filenames(nim, fout, 1, 1); } } else if ( ! strcmp(argv[ac], "-restart") ) { if (kernel != NULL) fprintf(stderr,"Warning: 'restart' resets the kernel\n"); //e.g. volume size might differ nifti_image_free( nim ); if (kernel != NULL) _mm_free(kernel); kernel = make_kernel(nim, &nkernel, 3,3,3); ac++; nim = nifti_image_read(argv[ac], 1); if( !nim )ok = 1; //error } else if ( ! 
strcmp(argv[ac], "-grid") ) {
			ac++; double v = strtod(argv[ac], &end);
			ac++; int s = atoi(argv[ac]);
			ok = nifti_grid(nim, v, s);
		} else if ( ! strcmp(argv[ac], "-tfce") ) { //threshold-free cluster enhancement
			ac++; double H = strtod(argv[ac], &end);
			ac++; double E = strtod(argv[ac], &end);
			ac++; int c = atoi(argv[ac]);
			ok = nifti_tfce(nim, H, E, c);
		} else if ( ! strcmp(argv[ac], "-tfceS") ) {
			ac++; double H = strtod(argv[ac], &end);
			ac++; double E = strtod(argv[ac], &end);
			ac++; int c = atoi(argv[ac]);
			ac++; int x = atoi(argv[ac]);
			ac++; int y = atoi(argv[ac]);
			ac++; int z = atoi(argv[ac]);
			ac++; double tfce_thresh = strtod(argv[ac], &end);
			ok = nifti_tfceS(nim, H, E, c, x, y, z, tfce_thresh);
		} else if (op == unknown) {
			fprintf(stderr,"!!Error: unsupported operation '%s'\n", argv[ac]);
			goto fail;
		}
		//dispatch operations recognized by the lookup table above, by enum range
		if ((op >= dilMk) && (op <= fmeanuk)) ok = nifti_kernel (nim, op, kernel, nkernel);
		if ((op >= exp1) && (op <= ptoz1)) nifti_unary(nim, op); //NOTE(review): return value ignored; pval/pval0/cpval presumably fall outside exp1..ptoz1 - verify enum order
		if ((op >= add) && (op < exp1)) { //binary operations
			ac++;
			double v = strtod(argv[ac], &end);
			//if (end == argv[ac]) {
			if (strlen(argv[ac]) != (end - argv[ac])) { // "4d" will return numeric "4"
				//argument is not a pure number: treat it as an image filename
				if ((op == power) || (op == thrp) || (op == thrP) || (op == uthrp) || (op == uthrP) || (op == seed) ) {
					fprintf(stderr,"Error: '%s' expects numeric value\n", argv[ac-1]);
					goto fail;
				} else
					ok = nifti_binary(nim, argv[ac], op);
			} else { //numeric operand
				if (op == add) ok = nifti_rescale(nim , 1.0, v);
				if (op == sub) ok = nifti_rescale(nim , 1.0, -v);
				if (op == mul) ok = nifti_rescale(nim , v, 0.0);
				if (op == divX) ok = nifti_rescale(nim , 1.0/v, 0.0);
				if (op == mod) ok = nifti_rem(nim, v, 1);
				if (op == rem) ok = nifti_rem(nim, v, 0);
				if (op == mas) {
					fprintf(stderr,"Error: -mas expects image not number\n");
					goto fail;
				}
				if (op == power) ok = nifti_binary_power(nim, v);
				if (op == thr) ok = nifti_thr(nim, v, 0);
				if ((op == thrp) || (op == thrP) || (op == uthrp) || (op == uthrP)) ok = nifti_thrp(nim, v, op);
				if (op == uthr) ok = nifti_thr(nim, v, 1);
				if (op == max) ok = nifti_max(nim, v, 0);
				if (op == min) ok = nifti_max(nim, v, 1);
				if (op == inm) ok = nifti_inm(nim, v);
				if (op == ing) ok = nifti_ing(nim, v);
				if (op == smth) ok = nifti_smooth_gauss(nim, v, v, v);
				if (op == seed) {
					if ((v > 0) && (v < 1)) v *= RAND_MAX; //fractional seeds are scaled up to integers
					srand((unsigned)fabs(v));
				}
			}
		} //binary operations
		if (ok != 0) goto fail;
		ac ++;
	}
	//convert data to output type (-odt)
	if (nifti_image_change_datatype(nim, dtOut, &ihdr) != 0) return 1;
	/* if we get here, write the output dataset */
	//startTime = clock();
	nifti_save(nim, "");
	//nifti_image_write( nim );
	//printf("write time: %ld ms\n", timediff(startTime, clock()));
	/* and clean up memory */
	nifti_image_free( nim );
	if (kernel != NULL) _mm_free(kernel);
	return 0;
fail: //error path: release the image and kernel before reporting failure
	nifti_image_free( nim );
	if (kernel != NULL) _mm_free(kernel);
	return 1;
} //main()
Segmentation_private.h
#include <algorithm> #include <cassert> #include <cfloat> #include <cmath> #include <list> #include <cstdio> #include <fstream> #include <string> #include <iostream> #define SQUARE(a) ((a) * (a)) namespace ImgClass { // ----- Constructor ----- template <class T> Segmentation<T>::Segmentation(void) { _size = 0; _width = 0; _height = 0; _kernel_spatial = 10.0; _kernel_intensity = 0.1; } template <class T> Segmentation<T>::Segmentation(const ImgVector<T>& image, const double &kernel_spatial_radius, const double &kernel_intensity_radius, const size_t &min_number_of_pixels) { _image.copy(image); _size = _image.size(); _width = _image.width(); _height = _image.height(); _min_pixels = min_number_of_pixels; _kernel_spatial = kernel_spatial_radius; _kernel_intensity = kernel_intensity_radius; if (_kernel_spatial <= 0.0) { _kernel_spatial = 1.0; } if (_kernel_intensity <= 0.0) { _kernel_intensity = 1.0; } _vector_converge_list_map.reset(_width, _height); _color_quantized_image.reset(_width, _height); _shift_vector_spatial.reset(_width, _height); _shift_vector_color.reset(_width, _height); _segmentation_map.reset(_width, _height); // Initial Segmentation Segmentation_MeanShift(); } template <class T> Segmentation<T>::Segmentation(const Segmentation<T>& segmentation) // Copy constructor { _size = segmentation._size; _width = segmentation._width; _height = segmentation._height; _min_pixels = segmentation._min_pixels; _kernel_spatial = segmentation._kernel_spatial; _kernel_intensity = segmentation._kernel_intensity; _image.copy(segmentation._image); _color_quantized_image.copy(segmentation._color_quantized_image); _vector_converge_list_map.copy(segmentation._vector_converge_list_map); _shift_vector_spatial.copy(segmentation._shift_vector_spatial); _shift_vector_color.copy(segmentation._shift_vector_color); _segmentation_map.copy(segmentation._segmentation_map); _regions.assign(segmentation._regions.begin(), segmentation._regions.end()); } template <class T> Segmentation<T> & 
	// Re-initialize from a new image and re-run the segmentation.
	// IterMax > 0 bounds the Mean-Shift iteration count; IterMax <= 0
	// falls back to the default-argument overload.
	// Non-positive kernel radii are clamped to 1.0, as in the constructor.
	Segmentation<T>::reset(const ImgVector<T>& image, const int IterMax, const double &kernel_spatial_radius, const double &kernel_intensity_radius, const size_t &min_number_of_pixels)
	{
		_image.copy(image);
		_size = _image.size();
		_width = _image.width();
		_height = _image.height();
		_min_pixels = min_number_of_pixels;
		_kernel_spatial = kernel_spatial_radius;
		_kernel_intensity = kernel_intensity_radius;
		if (_kernel_spatial <= 0.0) {
			_kernel_spatial = 1.0;
		}
		if (_kernel_intensity <= 0.0) {
			_kernel_intensity = 1.0;
		}
		_color_quantized_image.reset(_width, _height);
		_segmentation_map.reset(_width, _height);
		_regions.clear();
		_vector_converge_list_map.reset(_width, _height);
		_shift_vector_spatial.reset(_width, _height);
		_shift_vector_color.reset(_width, _height);
		// Initial Segmentation
#if defined(OUTPUT_IMG_CLASS) || defined(OUTPUT_IMG_CLASS_SEGMENTATION)
		printf("IterMax = %d\n", IterMax);
#endif
		if (IterMax > 0) {
			Segmentation_MeanShift(IterMax);
		} else {
			Segmentation_MeanShift();
		}
		return *this;
	}

	// Deep-copy assignment helper; copies the same state as the copy
	// constructor and returns *this for chaining.
	template <class T>
	Segmentation<T> &
	Segmentation<T>::copy(const Segmentation<T>& segmentation)
	{
		_size = segmentation._size;
		_width = segmentation._width;
		_height = segmentation._height;
		_min_pixels = segmentation._min_pixels;
		_kernel_spatial = segmentation._kernel_spatial;
		_kernel_intensity = segmentation._kernel_intensity;
		_image.copy(segmentation._image);
		_color_quantized_image.copy(segmentation._color_quantized_image);
		_vector_converge_list_map.copy(segmentation._vector_converge_list_map);
		_shift_vector_spatial.copy(segmentation._shift_vector_spatial);
		_shift_vector_color.copy(segmentation._shift_vector_color);
		_segmentation_map.copy(segmentation._segmentation_map);
		_regions.assign(segmentation._regions.begin(), segmentation._regions.end());
		return *this;
	}

	// ----- Destructor -----
	// All members own their storage; nothing to release manually.
	template <class T>
	Segmentation<T>::~Segmentation(void)
	{
	}

	// ----- Setter ------
	// Set the mean-shift kernel radii. NOTE(review): unlike the
	// constructor/reset, no clamping of non-positive values here — confirm
	// whether that is intentional.
	template <class T>
	void
	Segmentation<T>::set_kernel(const double &kernel_spatial_radius, const double
&kernel_intensity_radius) { _kernel_spatial = kernel_spatial_radius; _kernel_intensity = kernel_intensity_radius; } template <class T> void Segmentation<T>::set_min_pixels(const size_t &min_number_of_pixels) { _min_pixels = min_number_of_pixels; } template <class T> Segmentation<T> & Segmentation<T>::operator=(const Segmentation<T>& rvalue) { _size = rvalue._size; _width = rvalue._width; _height = rvalue._height; _min_pixels = rvalue._min_pixels; _kernel_spatial = rvalue._kernel_spatial; _kernel_intensity = rvalue._kernel_intensity; _image.copy(rvalue._image); _color_quantized_image.copy(rvalue._color_quantized_image); _shift_vector_spatial.copy(rvalue._shift_vector_spatial); _shift_vector_color.copy(rvalue._shift_vector_color); _segmentation_map.copy(rvalue._segmentation_map); _regions.assign(rvalue._regions.begin(), rvalue._regions.end()); return *this; } // ----- Data ----- template <class T> const ImgVector<T> & Segmentation<T>::ref_color_quantized_image(void) const { return _color_quantized_image; } template <class T> const ImgVector<size_t> & Segmentation<T>::ref_segmentation_map(void) const { return _segmentation_map; } template <class T> const ImgVector<std::list<VECTOR_2D<int> > > & Segmentation<T>::ref_vector_converge_list_map(void) const { return _vector_converge_list_map; } template <class T> const ImgVector<VECTOR_2D<double> > & Segmentation<T>::ref_shift_vector_spatial(void) const { return _shift_vector_spatial; } template <class T> const ImgVector<T> & Segmentation<T>::ref_shift_vector_color(void) const { return _shift_vector_color; } template <class T> const std::vector<std::vector<VECTOR_2D<int> > > & Segmentation<T>::ref_regions(void) const { return _regions; } // ----- Accessor ----- template <class T> int Segmentation<T>::width(void) const { return _width; } template <class T> int Segmentation<T>::height(void) const { return _height; } template <class T> size_t Segmentation<T>::size(void) const { return _size; } template <class T> size_t & 
	// Unchecked element access into the segmentation map.
	Segmentation<T>::operator[](size_t n)
	{
		return _segmentation_map[n];
	}

	// Checked element access.
	// NOTE(review): `0 <= n` is always true for unsigned n — the assert
	// effectively only checks n < _size.
	template <class T>
	size_t &
	Segmentation<T>::at(size_t n)
	{
		assert(0 <= n && n < _size);
		return _segmentation_map[n];
	}

	template <class T>
	size_t &
	Segmentation<T>::at(int x, int y)
	{
		return _segmentation_map.at(x, y);
	}

	// Out-of-range coordinates wrap around (periodic repetition).
	template <class T>
	size_t &
	Segmentation<T>::at_repeat(int x, int y)
	{
		return _segmentation_map.at_repeat(x, y);
	}

	// Out-of-range coordinates are mirrored at the borders.
	template <class T>
	size_t &
	Segmentation<T>::at_mirror(int x, int y)
	{
		return _segmentation_map.at_mirror(x, y);
	}

	template <class T>
	size_t
	Segmentation<T>::get(size_t n) const
	{
		return _segmentation_map.get(n);
	}

	template <class T>
	size_t
	Segmentation<T>::get(int x, int y) const
	{
		return _segmentation_map.get(x, y);
	}

	// Out-of-range coordinates read as zero.
	template <class T>
	size_t
	Segmentation<T>::get_zeropad(int x, int y) const
	{
		return _segmentation_map.get_zeropad(x, y);
	}

	template <class T>
	size_t
	Segmentation<T>::get_repeat(int x, int y) const
	{
		return _segmentation_map.get_repeat(x, y);
	}

	template <class T>
	size_t
	Segmentation<T>::get_mirror(int x, int y) const
	{
		return _segmentation_map.get_mirror(x, y);
	}

	// ----- Mean Shift -----
	// Full segmentation pipeline: per-pixel mean-shift filtering, grouping
	// of pixels by convergence point, region collection, small-region
	// elimination, and color quantization.
	template <class T>
	void
	Segmentation<T>::Segmentation_MeanShift(const int Iter_Max)
	{
		// 8-connected neighborhood offsets.
		const VECTOR_2D<int> adjacent[8] = {
		    VECTOR_2D<int>(-1, -1), VECTOR_2D<int>(0, -1), VECTOR_2D<int>(1, -1),
		    VECTOR_2D<int>(-1, 0), VECTOR_2D<int>(1, 0),
		    VECTOR_2D<int>(-1, 1), VECTOR_2D<int>(0, 1), VECTOR_2D<int>(1, 1)};
		const double Decreased_Gray_Max = 255.0;
		std::vector<VECTOR_2D<int> > pel_list;
		if (_width <= 0 || _height <= 0) {
			return;
		}
		// Make pixel list: all integer offsets inside the circular spatial kernel.
		pel_list.reserve(SQUARE(size_t(ceil(2.0 * _kernel_spatial)) + 1));
		{
			for (int m = -int(ceil(_kernel_spatial)); m <= int(ceil(_kernel_spatial)); m++) {
				for (int n = -int(ceil(_kernel_spatial)); n <= int(ceil(_kernel_spatial)); n++) {
					if (n * n + m * m <= SQUARE(_kernel_spatial)) {
						pel_list.push_back(VECTOR_2D<int>(n, m));
					}
				}
			}
		}
		// Compute Mean Shift vector
#if defined(OUTPUT_IMG_CLASS) || defined(OUTPUT_IMG_CLASS_SEGMENTATION)
		size_t finished = 0;
		// NOTE(review): double literal assigned to size_t; value is 0.
		size_t progress = .0;
		printf(" Mean-Shift method: 0.0%%\x1b[1A\n");
#endif
		// Compute Mean Shift
#ifdef _OPENMP
#pragma omp parallel for schedule(dynamic)
#endif
		for (int y = 0; y < _height; y++) {
			for (int x = 0; x < _width; x++) {
				Segmentation<T>::tuple tmp = MeanShift(x, y, pel_list, Iter_Max);
				// Clamp a coordinate to [0, max - 1].
				auto lambda = [](double value, double max) -> double { return value >= 0 ? value < max ? value : max - 1.0 : 0; };
				// Saturation
				tmp.spatial.x = lambda(tmp.spatial.x, _width);
				tmp.spatial.y = lambda(tmp.spatial.y, _height);
				// Set vector
				_shift_vector_spatial.at(x, y) = tmp.spatial;
				_shift_vector_color.at(x, y) = tmp.color;
				// Assign start point to converge list
				VECTOR_2D<int> shift(int(tmp.spatial.x), int(tmp.spatial.y));
#ifdef _OPENMP
#pragma omp critical
#endif
				{
					// The converge-list map is shared across threads.
					_vector_converge_list_map.at(shift.x, shift.y).push_back(VECTOR_2D<int>(x, y));
				}
#if defined(OUTPUT_IMG_CLASS) || defined(OUTPUT_IMG_CLASS_SEGMENTATION)
#ifdef _OPENMP
#pragma omp critical
#endif
				{
					double ratio = double(++finished) / _image.size();
					if (round(ratio * 1000.0) > progress) {
						progress = size_t(round(ratio * 1000.0)); // Take account of Over-Run
						printf("\r Mean-Shift method: %5.1f%%\x1b[1A\n", progress * 0.1);
					}
				}
#endif
			}
		}
		// Concatenate the list of connected regions
#if defined(OUTPUT_IMG_CLASS) || defined(OUTPUT_IMG_CLASS_SEGMENTATION)
		printf(" Mean-Shift method: Concatenation\n");
#endif
		for (int y = 0; y < _height; y++) {
			for (int x = 0; x < _width; x++) {
				if (_vector_converge_list_map.at(x, y).size() > 0) {
					// Breadth-first walk over adjacent non-empty converge points;
					// std::list iterators stay valid while push_back grows the list.
					std::list<VECTOR_2D<int> > tmp_list;
					tmp_list.push_back(VECTOR_2D<int>(x, y));
					for (auto ite = tmp_list.begin(); ite != tmp_list.end(); ++ite) {
						for (size_t i = 0; i < 8; i++) {
							VECTOR_2D<int> r(ite->x + adjacent[i].x, ite->y + adjacent[i].y);
							// NOTE(review): `r.x != x && r.y != y` skips every neighbor
							// sharing a row OR column with (x, y); if the intent was to
							// exclude only (x, y) itself it should be
							// !(r.x == x && r.y == y) — confirm against upstream.
							if (0 <= r.x && r.x < _width && 0 <= r.y && r.y < _height
							    && r.x != x && r.y != y
							    && _vector_converge_list_map.at(r.x, r.y).size() > 0) {
								tmp_list.push_back(r);
_vector_converge_list_map.at(x, y).splice( _vector_converge_list_map.at(x, y).end(), _vector_converge_list_map.at(r.x, r.y)); } } } } } } std::list<std::list<VECTOR_2D<int> > > regions_list; // start_point, shift_vector_spatial, shift_vector_color #if defined(OUTPUT_IMG_CLASS) || defined(OUTPUT_IMG_CLASS_SEGMENTATION) printf(" Mean-Shift method: Concatenate pixels which converge into same connected region"); #endif for (int y = 0; y < _height; y++) { for (int x = 0; x < _width; x++) { // Search converge point if (_vector_converge_list_map.at(x, y).size() > 0) { std::list<Segmentation<T>::Region> tmp_regions_list; for (VECTOR_2D<int>& candidate : _vector_converge_list_map.at(x, y)) { bool found = false; const T& color_cand = color_quantize(_shift_vector_color.get(candidate.x, candidate.y)); for (Segmentation<T>::Region& region : tmp_regions_list) { if (normalized_distance(color_cand, region.color) < 0.5) { region.points.push_back(candidate); found = true; break; } } if (found == false) { Segmentation<T>::Region new_region; new_region.points.push_back(candidate); new_region.color = color_cand; tmp_regions_list.push_back(new_region); } } for (auto region : tmp_regions_list) { regions_list.push_back(region.points); } } } } #if defined(OUTPUT_IMG_CLASS) || defined(OUTPUT_IMG_CLASS_SEGMENTATION) printf("\n Mean-Shift method: Finished\n"); #endif // Make region map _segmentation_map.resize(_width, _height); { size_t num = 1; for (const std::list<VECTOR_2D<int> >& region : regions_list) { for (const VECTOR_2D<int>& r : region) { _segmentation_map.at(r.x, r.y) = num; } num++; } } // Collect connected regions from Mean-Shift filtered image size_t num_region = collect_regions_in_segmentation_map(&regions_list); // Set _segmentation_map by _regions No. 
{ size_t n = 1; for (const std::list<VECTOR_2D<int> >& region : regions_list) { for (const VECTOR_2D<int>& r : region) { _segmentation_map.at(r.x, r.y) = n; } n++; } } #if defined(OUTPUT_IMG_CLASS) || defined(OUTPUT_IMG_CLASS_SEGMENTATION) printf(" Mean-Shift method: Eliminate small regions\n"); #endif // Eliminate small regions small_region_concatenator(&regions_list); small_region_eliminator(&regions_list); #if defined(OUTPUT_IMG_CLASS) || defined(OUTPUT_IMG_CLASS_SEGMENTATION) std::cout << " Mean-Shift method: The number of regions reduced " << num_region << " -> " << regions_list.size() << std::endl; #endif // Copy regions_list to _regions num_region = regions_list.size(); _regions.resize(num_region); { size_t test = 0; std::vector<std::vector<VECTOR_2D<int> > >::iterator ite = _regions.begin(); for (const std::list<VECTOR_2D<int> >& region : regions_list) { ite->assign(region.begin(), region.end()); ++ite; test += region.size(); } } regions_list.clear(); // Reset _segmentation_map by _regions No. 
		// Region numbers are 1-based (0 = unassigned).
		for (size_t n = 0; n < _regions.size(); n++) {
			for (size_t i = 0; i < _regions[n].size(); i++) {
				_segmentation_map.at(_regions[n][i].x, _regions[n][i].y) = n + 1;
			}
		}
		// Make color-quantized image: each region gets its mean color scaled
		// by Decreased_Gray_Max.
		// NOTE(review): the *255 scaling presumes image values normalized to
		// [0, 1], and `+=` presumes the image was zero-initialized (it is
		// reset in the constructor/reset) — confirm against ImgVector.
		for (size_t n = 0; n < _regions.size(); n++) {
			T sum_color = T();
			for (size_t k = 0; k < _regions[n].size(); k++) {
				sum_color += _image.get(_regions[n][k].x, _regions[n][k].y);
			}
			T quantized_color = sum_color * Decreased_Gray_Max / double(_regions[n].size());
			for (size_t k = 0; k < _regions[n].size(); k++) {
				_color_quantized_image.at(_regions[n][k].x, _regions[n][k].y) += quantized_color;
			}
		}
	}

	// Flood-fill the segmentation map into connected regions (8-adjacency,
	// same region number) and rebuild *regions_list from scratch.
	// Returns the number of regions found.
	template <class T>
	size_t
	Segmentation<T>::collect_regions_in_segmentation_map(std::list<std::list<VECTOR_2D<int> > >* regions_list)
	{
		const VECTOR_2D<int> adjacent[8] = {
		    VECTOR_2D<int>(-1, -1), VECTOR_2D<int>(0, -1), VECTOR_2D<int>(1, -1),
		    VECTOR_2D<int>(-1, 0), VECTOR_2D<int>(1, 0),
		    VECTOR_2D<int>(-1, 1), VECTOR_2D<int>(0, 1), VECTOR_2D<int>(1, 1)};
		ImgVector<bool> collected(_width, _height, false);
		regions_list->clear();
		size_t num_region = 0;
		for (int y = 0; y < _height; y++) {
			for (int x = 0; x < _width; x++) {
				if (collected.get(x, y)) {
					continue;
				}
				VECTOR_2D<int> r(x, y);
				collected.at(x, y) = true;
				size_t N = _segmentation_map.get(r.x, r.y);
				regions_list->push_back(std::list<VECTOR_2D<int> >(1, r));
				num_region++;
				// Search connected regions with 8-adjacent
				// (std::list iterators stay valid while the back list grows).
				for (const VECTOR_2D<int>& element : regions_list->back()) {
					for (int k = 0; k < 8; k++) {
						r.x = element.x + adjacent[k].x;
						r.y = element.y + adjacent[k].y;
						if (0 <= r.x && r.x < _width && 0 <= r.y && r.y < _height
						    && collected.get(r.x, r.y) == false
						    && _segmentation_map.get(r.x, r.y) == N) {
							collected.at(r.x, r.y) = true;
							regions_list->back().push_back(r);
						}
					}
				}
			}
		}
		return num_region;
	}

	// Merge clusters of adjacent small regions (size < _min_pixels) into
	// larger regions. Returns the number of small regions encountered.
	template <class T>
	size_t
	Segmentation<T>::small_region_concatenator(std::list<std::list<VECTOR_2D<int> > >* region_list)
	{
		const VECTOR_2D<int> adjacent[8] = {
		    VECTOR_2D<int>(-1, -1), VECTOR_2D<int>(0, -1), VECTOR_2D<int>(1, -1),
		    VECTOR_2D<int>(-1, 0), VECTOR_2D<int>(1, 0),
		    VECTOR_2D<int>(-1, 1), VECTOR_2D<int>(0, 1), VECTOR_2D<int>(1, 1)};
		std::list<std::list<VECTOR_2D<int> > > small_region_list;
		ImgVector<bool> small_region_map(_width, _height, false);
		ImgVector<bool> dense_small_region_map(_width, _height, false);
		// Per-pixel pointer to the region (list) owning that pixel.
		ImgVector<std::list<VECTOR_2D<int> >*> region_list_map(_width, _height);
		size_t num_small_region = 0;
		// Make region_list_map
		for (std::list<std::list<VECTOR_2D<int> > >::iterator ite = region_list->begin(); ite != region_list->end(); ++ite) {
			for (const VECTOR_2D<int>& r : *ite) {
				region_list_map.at(r.x, r.y) = &(*ite);
			}
		}
		// Search small regions: move them out of *region_list into
		// small_region_list and mark their pixels.
		{
			std::list<std::list<VECTOR_2D<int> > >::iterator ite = region_list->begin();
			while (ite != region_list->end()) {
				if (ite->size() < _min_pixels) {
					// Count the number of small regions here because some regions may become larger on this routine
					num_small_region++;
					small_region_list.emplace_back();
					small_region_list.back().assign(ite->begin(), ite->end());
					for (const VECTOR_2D<int>& r : *ite) {
						small_region_map.at(r.x, r.y) = true;
						region_list_map.at(r.x, r.y) = &(small_region_list.back());
					}
					ite = region_list->erase(ite);
				} else {
					++ite;
				}
			}
		}
		// Search densely small regions: a small region is "dense" when every
		// pixel's entire 8-neighborhood also belongs to small regions.
		for (std::list<std::list<VECTOR_2D<int> > >::const_iterator ite_region = small_region_list.begin(); ite_region != small_region_list.end(); ++ite_region) {
			bool dense = true;
			for (const VECTOR_2D<int>& r : *ite_region) {
				for (size_t i = 0; i < 8; i++) {
					if (small_region_map.get_zeropad(r.x + adjacent[i].x, r.y + adjacent[i].y) == false) {
						dense = false;
						break;
					}
				}
				if (dense == false) {
					break;
				}
			}
			if (dense) {
				for (const VECTOR_2D<int>& r : *ite_region) {
					dense_small_region_map.at(r.x, r.y) = true;
				}
			}
		}
		// Concatenate dense small regions and small regions connecting to dense small regions
		for (int y = 0; y < _height; y++) {
			for (int x = 0; x < _width; x++) {
				if (dense_small_region_map.get(x, y) == false) {
					continue;
				}
				// Search connected region of the dense small region and its
				// neighborhood (flood fill over small-region pixels; dense
				// pixels go to the back, border small pixels to the front).
				std::list<VECTOR_2D<int> > tmp_list(1, VECTOR_2D<int>(x, y));
				dense_small_region_map.at(x, y) = false;
				small_region_map.at(x, y) = false;
				for (std::list<VECTOR_2D<int> >::iterator ite = tmp_list.begin(); ite != tmp_list.end(); ++ite) {
					for (size_t i = 0; i < 8; i++) {
						VECTOR_2D<int> r(ite->x + adjacent[i].x, ite->y + adjacent[i].y);
						if (0 <= r.x && r.x < _width && 0 <= r.y && r.y < _height
						    && small_region_map.get(r.x, r.y)) {
							small_region_map.at(r.x, r.y) = false;
							if (dense_small_region_map.get(r.x, r.y)) {
								dense_small_region_map.at(r.x, r.y) = false;
								tmp_list.push_back(r);
							} else {
								tmp_list.push_front(r);
							}
						}
					}
				}
				// Append new region to region_list: splice together every
				// small region touched by the flood fill (each spliced list
				// is left empty and its map entries nulled).
				region_list->emplace_back(std::list<VECTOR_2D<int> >(0));
				for (const VECTOR_2D<int>& r : tmp_list) {
					if (region_list_map.get(r.x, r.y) != nullptr) {
						std::list<VECTOR_2D<int> >* small_region = region_list_map.get(r.x, r.y);
						for (const VECTOR_2D<int>& r_tmp : *small_region) {
							region_list_map.at(r_tmp.x, r_tmp.y) = nullptr;
						}
						region_list->back().splice(region_list->back().end(), *small_region);
					}
				}
			}
		}
		// Append the rest of small regions which is not next to densely one to region_list
		for (std::list<std::list<VECTOR_2D<int> > >::iterator ite = small_region_list.begin(); ite != small_region_list.end(); ++ite) {
			if (ite->size() > 0) {
				region_list->emplace_back();
				region_list->back().assign(ite->begin(), ite->end());
			}
		}
		// Remove empty region from region_list (regions drained by splice above)
		region_list->remove_if([](std::list<VECTOR_2D<int> >& region) { return region.size() == 0; });
		return num_small_region;
	}

	// Dissolve remaining small regions pixel by pixel: each pixel of a small
	// region is attached to the adjacent surviving region whose image color
	// is nearest.
	template <class T>
	void
	Segmentation<T>::small_region_eliminator(std::list<std::list<VECTOR_2D<int> > >* region_list)
	{
		const VECTOR_2D<int> adjacent[8] = {
		    VECTOR_2D<int>(-1, -1), VECTOR_2D<int>(0, -1), VECTOR_2D<int>(1, -1),
		    VECTOR_2D<int>(-1, 0), VECTOR_2D<int>(1, 0),
		    VECTOR_2D<int>(-1, 1), VECTOR_2D<int>(0, 1), VECTOR_2D<int>(1, 1)};
		std::list<VECTOR_2D<int> > small_region;
		ImgVector<std::list<VECTOR_2D<int> >*>
		region_list_map(_width, _height);
		// Search small regions: drain every region below _min_pixels into one
		// flat pixel list.
		{
			std::list<std::list<VECTOR_2D<int> > >::iterator ite = region_list->begin();
			while (ite != region_list->end()) {
				if (ite->size() < _min_pixels) {
					// Count the number of small regions here because some regions may become larger on this routine
					small_region.splice(small_region.end(), *ite);
					ite = region_list->erase(ite);
				} else {
					++ite;
				}
			}
		}
		// Make region_list_map
		for (std::list<std::list<VECTOR_2D<int> > >::iterator ite = region_list->begin(); ite != region_list->end(); ++ite) {
			for (const VECTOR_2D<int>& r : *ite) {
				region_list_map.at(r.x, r.y) = &(*ite);
			}
		}
		// Concatenate dense small regions and small regions connecting to dense small regions
		{
			while (small_region.size() > 0) {
				VECTOR_2D<int> r(small_region.front());
				VECTOR_2D<int> r_new(r);
				T color = _image.get(r.x, r.y);
				double min = DBL_MAX;
				// Pick the adjacent pixel already owned by a surviving region
				// whose color is closest to this pixel's color.
				for (size_t i = 0; i < 8; i++) {
					VECTOR_2D<int> r_tmp(r + adjacent[i]);
					if (0 <= r_tmp.x && r_tmp.x < _width && 0 <= r_tmp.y && r_tmp.y < _height
					    && region_list_map.get(r_tmp.x, r_tmp.y) != nullptr) {
						double dist = norm_squared(color - _image.get(r_tmp.x, r_tmp.y));
						if (dist < min) {
							min = dist;
							r_new = r_tmp;
						}
					}
				}
				// Concatenate the pixel to nearest region; otherwise defer it
				// to the back of the queue and retry after its neighbors have
				// been attached.
				// NOTE(review): if a pixel never gains an attached neighbor
				// (fully isolated group), this loop would not terminate —
				// confirm that cannot happen after small_region_concatenator.
				if (region_list_map.get(r_new.x, r_new.y) != nullptr) {
					region_list_map.at(r_new.x, r_new.y)->push_back(r);
					region_list_map.at(r.x, r.y) = region_list_map.get(r_new.x, r_new.y);
				} else {
					small_region.push_back(r);
				}
				small_region.pop_front();
			}
		}
	}

	// Distance in any space
	// Generic scalar distance; specialized below for RGB and L*a*b*.
	template <class T>
	double
	Segmentation<T>::distance(const T& lvalue, const T& rvalue)
	{
		return fabs(lvalue - rvalue);
	}

	template <>
	double
	Segmentation<ImgClass::RGB>::distance(const ImgClass::RGB& lvalue, const ImgClass::RGB& rvalue);

	template <>
	double
	Segmentation<ImgClass::Lab>::distance(const ImgClass::Lab& lvalue, const ImgClass::Lab& rvalue);

	/*
	 * std::vector<double> kernel has kernel radius for each dimensions.
	 * The values it needs are below:
	 *	_kernel_spatial : the spatial radius of mean shift kernel
	 *	_kernel_intensity : the intensity threshold of mean shift kernel
	 */
	// Run the mean-shift iteration for one start pixel (x, y): repeatedly
	// move the (spatial, color) estimate toward the weighted mean of the
	// kernel neighborhood until the displacement is small or Iter_Max is hit.
	template <class T>
	const typename Segmentation<T>::tuple
	Segmentation<T>::MeanShift(const int x, const int y, std::vector<VECTOR_2D<int> >& pel_list, int Iter_Max)
	{
		const double radius_spatial_squared = SQUARE(_kernel_spatial);
		const double radius_intensity_squared = SQUARE(_kernel_intensity);
		const double displacement_min = SQUARE(0.01); // convergence threshold
		// Initialize
		Segmentation<T>::tuple tuple;
		tuple.spatial = VECTOR_2D<double>(static_cast<double>(x), static_cast<double>(y));
		tuple.color = _image.get(x, y);
		// Iterate until it converge
		for (int i = 0; i < Iter_Max; i++) {
			double N = 0.0;
			double sum_intensity_diff = 0.0;
			VECTOR_2D<double> sum_d(0.0, 0.0);
			// Weighted mean over all kernel offsets that fall inside the image
			// and inside both the spatial and intensity radii.
			for (size_t n = 0; n < pel_list.size(); n++) {
				VECTOR_2D<int> r(
				    static_cast<int>(round(tuple.spatial.x) + pel_list[n].x),
				    static_cast<int>(round(tuple.spatial.y) + pel_list[n].y));
				if (0 <= r.x && r.x < _width && 0 <= r.y && r.y < _height) {
					double intensity_diff = _image.get(r.x, r.y) - tuple.color;
					VECTOR_2D<double> d(r.x - tuple.spatial.x, r.y - tuple.spatial.y);
					double ratio_intensity = SQUARE(intensity_diff) / radius_intensity_squared;
					double ratio_spatial = norm_squared(d) / radius_spatial_squared;
					if (ratio_intensity <= 1.0 && ratio_spatial <= 1.0) {
						// Epanechnikov-like weight, larger near the center.
						double coeff = 1.0 - (ratio_intensity * ratio_spatial);
						N += coeff;
						sum_intensity_diff += intensity_diff * coeff;
						sum_d += d * coeff;
					}
				}
			}
			// NOTE(review): no explicit guard for N == 0; this relies on at
			// least the center pixel always satisfying both ratios — confirm
			// near the image border where the estimate may round outside.
			tuple.color += sum_intensity_diff / N;
			VECTOR_2D<double> displacement(sum_d.x / N, sum_d.y / N);
			tuple.spatial += displacement;
			if (norm_squared(sum_intensity_diff / N) * norm_squared(displacement) < displacement_min) {
				break;
			}
		}
		return tuple;
	}

	/*
	 * std::vector<double> kernel has kernel radius for each dimensions.
	 * The values it needs are below:
	 *	_kernel_spatial : the spatial radius of mean shift kernel
	 *	_kernel_intensity : the norm threshold of mean shift kernel in L*a*b* space
	 */
	// Declaration only; the RGB specialization is defined elsewhere.
	template <> // Specialized for ImgClass::RGB<double>
	const Segmentation<ImgClass::RGB>::tuple
	Segmentation<ImgClass::RGB>::MeanShift(const int x, const int y, std::vector<VECTOR_2D<int> >& pel_list, int Iter_Max);

	/*
	 * std::vector<double> kernel has kernel radius for each dimensions.
	 * The values it needs are below:
	 *	_kernel_spatial : the spatial radius of mean shift kernel
	 *	_kernel_intensity : the norm threshold of mean shift kernel in L*a*b* space
	 */
	// Declaration only; the L*a*b* specialization is defined elsewhere.
	template <> // Specialized for ImgClass::Lab
	const Segmentation<ImgClass::Lab>::tuple
	Segmentation<ImgClass::Lab>::MeanShift(const int x, const int y, std::vector<VECTOR_2D<int> >& pel_list, int Iter_Max);
}
periodic_function.h
// All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that // the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the // following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions // and the following disclaimer in the documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A // PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /** \file periodic_function.h * * \brief Contains declaration and partial implementation of sirius::Periodic_function class. */ #ifndef __PERIODIC_FUNCTION_H__ #define __PERIODIC_FUNCTION_H__ #include "simulation_context.h" #include "spheric_function.h" #include "smooth_periodic_function.h" #include "mixer.h" namespace sirius { /// Representation of the periodical function on the muffin-tin geometry. 
/** Inside each muffin-tin the spherical expansion is used:
 *  \f[
 *      f({\bf r}) = \sum_{\ell m} f_{\ell m}(r) Y_{\ell m}(\hat {\bf r})
 *  \f]
 *  or
 *  \f[
 *      f({\bf r}) = \sum_{\ell m} f_{\ell m}(r) R_{\ell m}(\hat {\bf r})
 *  \f]
 *  In the interstitial region function is stored on the real-space grid or as a Fourier series:
 *  \f[
 *      f({\bf r}) = \sum_{{\bf G}} f({\bf G}) e^{i{\bf G}{\bf r}}
 *  \f]
 */
template<typename T>
class Periodic_function: public Smooth_periodic_function<T>
{
    protected:

        /* forbid copy constructor */
        Periodic_function(const Periodic_function<T>& src) = delete;

        /* forbid assignment operator */
        Periodic_function<T>& operator=(const Periodic_function<T>& src) = delete;

    private:

        /// Complex counterpart for a given type T.
        typedef typename type_wrapper<T>::complex_t complex_t;

        /// Simulation parameters (non-owning reference).
        Simulation_parameters const& parameters_;

        /// Unit cell (non-owning reference).
        Unit_cell const& unit_cell_;

        /// Step function of the unit cell (non-owning reference).
        Step_function const& step_function_;

        /// Communicator used for the muffin-tin collectives.
        Communicator const& comm_;

        /// Local part of muffin-tin functions.
        mdarray<Spheric_function<spectral, T>, 1> f_mt_local_;

        /// Global muffin-tin array
        mdarray<T, 3> f_mt_;

        /// G-vectors (non-owning reference).
        Gvec const& gvec_;

        /// Plane-wave expansion coefficients
        mdarray<complex_t, 1> f_pw_;

        /// Angular domain size.
        int angular_domain_size_;

        /// Set pointer to local part of muffin-tin functions
        /** Each local Spheric_function aliases a slice of the global f_mt_
         *  array; f_mt_ must be allocated before calling this. */
        void set_local_mt_ptr()
        {
            for (int ialoc = 0; ialoc < unit_cell_.spl_num_atoms().local_size(); ialoc++) {
                int ia = unit_cell_.spl_num_atoms(ialoc);
                f_mt_local_(ialoc) = Spheric_function<spectral, T>(&f_mt_(0, 0, ia), angular_domain_size_, unit_cell_.atom(ia).radial_grid());
            }
        }

        /// True if plane wave part is allocated.
        bool is_f_pw_allocated_{false};

    public:

        /// Constructor
        /** The plane-wave array is allocated only when allocate_pw__ is
         *  non-zero; the muffin-tin part is prepared only in the
         *  full-potential case (allocated later via allocate_mt()). */
        Periodic_function(Simulation_context& ctx__,
                          int angular_domain_size__,
                          int allocate_pw__)
            : Smooth_periodic_function<T>(ctx__.fft(), ctx__.gvec())
            , parameters_(ctx__)
            , unit_cell_(ctx__.unit_cell())
            , step_function_(ctx__.step_function())
            , comm_(ctx__.comm())
            , gvec_(ctx__.gvec())
            , angular_domain_size_(angular_domain_size__)
        {
            if (allocate_pw__) {
                allocate_pw();
            }
            if (parameters_.full_potential()) {
                f_mt_local_ = mdarray<Spheric_function<spectral, T>, 1>(unit_cell_.spl_num_atoms().local_size());
            }
        }

        ///// Check if PW array is allocated.
        //bool is_f_pw_allocated() const
        //{
        //    return is_f_pw_allocated_;
        //}

        /// Allocate memory for the plane-wave expansion coefficients.
        /** Idempotent: a second call is a no-op. The base-class local PW
         *  array is set up to alias this rank's FFT slab of f_pw_.
         *  NOTE(review): f_pw_ is constructed as mdarray<double_complex, 1>
         *  rather than the complex_t typedef — fine for T = double, but
         *  confirm for other instantiations. */
        void allocate_pw()
        {
            if (is_f_pw_allocated_) {
                return;
            }
            f_pw_ = mdarray<double_complex, 1>(gvec_.num_gvec());
            this->f_pw_local_ = mdarray<double_complex, 1>(&f_pw_[this->gvec().partition().gvec_offset_fft()], this->fft_->local_size());
            is_f_pw_allocated_ = true;
        }

        /// Allocate memory for muffin-tin part.
        /** With allocate_global__ the full f_mt_ array is allocated and the
         *  local functions alias it; otherwise each local function owns its
         *  own storage. No-op unless full-potential. */
        void allocate_mt(bool allocate_global__)
        {
            if (parameters_.full_potential()) {
                if (allocate_global__) {
                    f_mt_ = mdarray<T, 3>(angular_domain_size_, unit_cell_.max_num_mt_points(), unit_cell_.num_atoms());
                    set_local_mt_ptr();
                } else {
                    for (int ialoc = 0; ialoc < unit_cell_.spl_num_atoms().local_size(); ialoc++) {
                        int ia = unit_cell_.spl_num_atoms(ialoc);
                        f_mt_local_(ialoc) = Spheric_function<spectral, T>(angular_domain_size_, unit_cell_.atom(ia).radial_grid());
                    }
                }
            }
        }

        /// Synchronize global muffin-tin array.
        /** All-gathers the atom slices of f_mt_ over the splitting of atoms;
         *  requires the global array (allocate_mt(true)). */
        void sync_mt()
        {
            PROFILE("sirius::Periodic_function::sync_mt");
            assert(f_mt_.size() != 0);

            int ld = angular_domain_size_ * unit_cell_.max_num_mt_points();
            comm_.allgather(&f_mt_(0, 0, 0),
                            ld * unit_cell_.spl_num_atoms().global_offset(),
                            ld * unit_cell_.spl_num_atoms().local_size());
        }

        /// Zero the function.
void zero() { f_mt_.zero(); this->f_rg_.zero(); f_pw_.zero(); if (parameters_.full_potential()) { for (int ialoc = 0; ialoc < unit_cell_.spl_num_atoms().local_size(); ialoc++) { f_mt_local_(ialoc).zero(); } } } inline void copy_to_global_ptr(T* f_mt__, T* f_it__) const { std::memcpy(f_it__, this->f_rg_.template at<CPU>(), this->fft_->local_size() * sizeof(T)); if (parameters_.full_potential()) { mdarray<T, 3> f_mt(f_mt__, angular_domain_size_, unit_cell_.max_num_mt_points(), unit_cell_.num_atoms()); for (int ialoc = 0; ialoc < unit_cell_.spl_num_atoms().local_size(); ialoc++) { int ia = unit_cell_.spl_num_atoms(ialoc); std::memcpy(&f_mt(0, 0, ia), &f_mt_local_(ialoc)(0, 0), f_mt_local_(ialoc).size() * sizeof(T)); } int ld = angular_domain_size_ * unit_cell_.max_num_mt_points(); comm_.allgather(f_mt__, ld * unit_cell_.spl_num_atoms().global_offset(), ld * unit_cell_.spl_num_atoms().local_size()); } } /// Add the function void add(Periodic_function<T>* g) { PROFILE("sirius::Periodic_function::add"); #pragma omp parallel for schedule(static) for (int irloc = 0; irloc < this->fft_->local_size(); irloc++) { this->f_rg_(irloc) += g->f_rg(irloc); } if (parameters_.full_potential()) { for (int ialoc = 0; ialoc < unit_cell_.spl_num_atoms().local_size(); ialoc++) f_mt_local_(ialoc) += g->f_mt(ialoc); } } T integrate(std::vector<T>& mt_val, T& it_val) { PROFILE("sirius::Periodic_function::integrate"); it_val = 0; if (!parameters_.full_potential()) { #pragma omp parallel { T it_val_t = 0; #pragma omp for schedule(static) for (int irloc = 0; irloc < this->fft_->local_size(); irloc++) { it_val_t += this->f_rg_(irloc); } #pragma omp critical it_val += it_val_t; } } else { for (int irloc = 0; irloc < this->fft_->local_size(); irloc++) { it_val += this->f_rg_(irloc) * step_function_.theta_r(irloc); } } it_val *= (unit_cell_.omega() / this->fft_->size()); this->fft_->comm().allreduce(&it_val, 1); T total = it_val; if (parameters_.full_potential()) { mt_val = 
std::vector<T>(unit_cell_.num_atoms(), 0); for (int ialoc = 0; ialoc < unit_cell_.spl_num_atoms().local_size(); ialoc++) { int ia = unit_cell_.spl_num_atoms(ialoc); mt_val[ia] = f_mt_local_(ialoc).component(0).integrate(2) * fourpi * y00; } comm_.allreduce(&mt_val[0], unit_cell_.num_atoms()); for (int ia = 0; ia < unit_cell_.num_atoms(); ia++) { total += mt_val[ia]; } } return total; } template <index_domain_t index_domain> inline T& f_mt(int idx0, int ir, int ia) { switch (index_domain) { case index_domain_t::local: { return f_mt_local_(ia)(idx0, ir); } case index_domain_t::global: { return f_mt_(idx0, ir, ia); } } } /** \todo write and read distributed functions */ void hdf5_write(HDF5_tree h5f) { if (parameters_.full_potential()) { h5f.write("f_mt", f_mt_); } h5f.write("f_pw", f_pw_); //h5f.write("f_rg", this->f_rg_); } void hdf5_read(HDF5_tree h5f) { if (parameters_.full_potential()) { h5f.read("f_mt", f_mt_); } h5f.read("f_pw", f_pw_); //h5f.read_mdarray("f_rg", this->f_rg_); } size_t size() const { //size_t size = this->fft_->local_size(); size_t size = gvec_.num_gvec() * 2; if (parameters_.full_potential()) { for (int ic = 0; ic < unit_cell_.num_atom_symmetry_classes(); ic++) { size += angular_domain_size_ * unit_cell_.atom_symmetry_class(ic).atom_type().num_mt_points() * unit_cell_.atom_symmetry_class(ic).num_atoms(); } } return size; } size_t pack(size_t offset__, Mixer<double>& mixer__) { PROFILE("sirius::Periodic_function::pack"); size_t n = 0; if (parameters_.full_potential()) { for (int ia = 0; ia < unit_cell_.num_atoms(); ia++) { for (int i1 = 0; i1 < unit_cell_.atom(ia).num_mt_points(); i1++) { for (int i0 = 0; i0 < angular_domain_size_; i0++) { mixer__.input(offset__ + n++, f_mt_(i0, i1, ia)); } } } } double* pw = reinterpret_cast<double*>(this->f_pw_.template at<CPU>()); for (int ig = 0; ig < gvec_.num_gvec() * 2; ig++) { mixer__.input(offset__ + n++, pw[ig]); } //for (int ir = 0; ir < this->fft_->local_size(); ir++) { // mixer__->input(offset__ + 
n++, this->f_rg_(ir)); //} return n; } size_t unpack(T const* array__) { PROFILE("sirius::Periodic_function::unpack"); size_t n = 0; if (parameters_.full_potential()) { for (int ia = 0; ia < unit_cell_.num_atoms(); ia++) { for (int i1 = 0; i1 < unit_cell_.atom(ia).num_mt_points(); i1++) { for (int i0 = 0; i0 < angular_domain_size_; i0++) { f_mt_(i0, i1, ia) = array__[n++]; } } } } double* pw = reinterpret_cast<double*>(this->f_pw_.template at<CPU>()); for (int ig = 0; ig < gvec_.num_gvec() * 2; ig++) { pw[ig] = array__[n++]; } this->fft_transform(1); //for (int ir = 0; ir < this->fft_->local_size(); ir++) { // this->f_rg_(ir) = array__[n++]; //} return n; } /// Set the global pointer to the muffin-tin part void set_mt_ptr(T* mt_ptr__) { f_mt_ = mdarray<T, 3>(mt_ptr__, angular_domain_size_, unit_cell_.max_num_mt_points(), unit_cell_.num_atoms()); set_local_mt_ptr(); } /// Set the pointer to the interstitial part void set_rg_ptr(T* rg_ptr__) { this->f_rg_ = mdarray<T, 1>(rg_ptr__, this->fft_->local_size()); } inline Spheric_function<spectral, T> const& f_mt(int ialoc__) const { return f_mt_local_(ialoc__); } inline complex_t& f_pw(int ig__) { return f_pw_(ig__); } inline complex_t const& f_pw(int ig__) const { return f_pw_(ig__); } inline complex_t& f_pw(vector3d<int> const& G__) { return f_pw_(this->gvec().index_by_gvec(G__)); } double value(vector3d<double>& vc) { int ja, jr; double dr, tp[2]; if (unit_cell_.is_point_in_mt(vc, ja, jr, dr, tp)) { int lmax = Utils::lmax_by_lmmax(angular_domain_size_); std::vector<double> rlm(angular_domain_size_); SHT::spherical_harmonics(lmax, tp[0], tp[1], &rlm[0]); double p = 0.0; for (int lm = 0; lm < angular_domain_size_; lm++) { double d = (f_mt_(lm, jr + 1, ja) - f_mt_(lm, jr, ja)) / unit_cell_.atom(ja).type().radial_grid().dx(jr); p += rlm[lm] * (f_mt_(lm, jr, ja) + d * dr); } return p; } else { double p = 0.0; for (int ig = 0; ig < gvec_.num_gvec(); ig++) { vector3d<double> vgc = gvec_.gvec_cart(ig); p += std::real(f_pw_(ig) 
* std::exp(double_complex(0.0, vc * vgc))); } return p; } } inline T checksum_rg() const { T cs = this->f_rg_.checksum(); this->fft_->comm().allreduce(&cs, 1); return cs; } inline complex_t checksum_pw() const { return f_pw_.checksum(); } //int64_t hash() //{ // STOP(); // int64_t h = this->f_rg_.hash(); // h += f_pw_.hash(); // return h; //} void fft_transform(int direction__) { Smooth_periodic_function<T>::fft_transform(direction__); /* collect all PW coefficients */ if (direction__ == -1) { sddk::timer t("sirius::Periodic_function::fft_transform|comm"); this->fft_->comm().allgather(&f_pw_(0), this->gvec().partition().gvec_offset_fft(), this->gvec().partition().gvec_count_fft()); } } mdarray<T, 3>& f_mt() { return f_mt_; } /// Compute inner product <f|g> T inner(Periodic_function<T> const* g__) const { PROFILE("sirius::Periodic_function::inner"); assert(this->fft_ == g__->fft_); assert(&step_function_ == &g__->step_function_); assert(&unit_cell_ == &g__->unit_cell_); assert(&comm_ == &g__->comm_); T result_rg{0}; if (!parameters_.full_potential()) { #pragma omp parallel { T rt{0}; #pragma omp for schedule(static) for (int irloc = 0; irloc < this->fft_->local_size(); irloc++) { rt += type_wrapper<T>::conjugate(this->f_rg(irloc)) * g__->f_rg(irloc); } #pragma omp critical result_rg += rt; } } else { for (int irloc = 0; irloc < this->fft_->local_size(); irloc++) { result_rg += type_wrapper<T>::conjugate(this->f_rg(irloc)) * g__->f_rg(irloc) * this->step_function_.theta_r(irloc); } } result_rg *= (unit_cell_.omega() / this->fft_->size()); this->fft_->comm().allreduce(&result_rg, 1); T result_mt{0}; if (parameters_.full_potential()) { for (int ialoc = 0; ialoc < unit_cell_.spl_num_atoms().local_size(); ialoc++) { auto r = sirius::inner(f_mt(ialoc), g__->f_mt(ialoc)); result_mt += r; } comm_.allreduce(&result_mt, 1); } return result_mt + result_rg; } /// Compute inner product <f|g> T xinner(Periodic_function<T> const* g__) const { 
PROFILE("sirius::Periodic_function::inner"); assert(this->fft_ == g__->fft_); assert(&step_function_ == &g__->step_function_); assert(&unit_cell_ == &g__->unit_cell_); assert(&comm_ == &g__->comm_); T result_rgx{0}; if (!parameters_.full_potential()) { #pragma omp parallel { T rtx{0}; #pragma omp for schedule(static) for (int irloc = 0; irloc < this->fft_->local_size(); irloc++) { rtx += type_wrapper<T>::conjugate(this->f_rg(irloc)) * g__->f_rg(irloc); } #pragma omp critical result_rgx += rtx; } } else { for (int irloc = 0; irloc < this->fft_->local_size(); irloc++) { result_rgx += type_wrapper<T>::conjugate(this->f_rg(irloc)) * g__->f_rg(irloc) * this->step_function_.theta_r(irloc); } } result_rgx *= (unit_cell_.omega() / this->fft_->size()); this->fft_->comm().allreduce(&result_rgx, 1); T result_mtx{0}; if (parameters_.full_potential()) { for (int ialoc = 0; ialoc < unit_cell_.spl_num_atoms().local_size(); ialoc++) { /// auto r = sirius::inner(f_mt(ialoc), g__->f_mt(ialoc)); /// result_mtx += r; } comm_.allreduce(&result_mtx, 1); } return result_mtx + result_rgx; } }; }; #endif // __PERIODIC_FUNCTION_H__
munit.c
#pragma GCC diagnostic ignored "-Wsign-conversion"
/* Copyright (c) 2013-2018 Evan Nemerson <evan@nemerson.com>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE. */

/*** Configuration ***/

/* This is just where the output from the test goes.  It's really just
 * meant to let you choose stdout or stderr, but if anyone really want
 * to direct it to a file let me know, it would be fairly easy to
 * support. */
#if !defined(MUNIT_OUTPUT_FILE)
#  define MUNIT_OUTPUT_FILE stdout
#endif

/* This is a bit more useful; it tells µnit how to format the seconds in
 * timed tests.  If your tests run for longer you might want to reduce
 * it, and if your computer is really fast and your tests are tiny you
 * can increase it. */
#if !defined(MUNIT_TEST_TIME_FORMAT)
#  define MUNIT_TEST_TIME_FORMAT "0.8f"
#endif

/* If you have long test names you might want to consider bumping
 * this.  The result information takes 43 characters. */
#if !defined(MUNIT_TEST_NAME_LEN)
#  define MUNIT_TEST_NAME_LEN 37
#endif

/* If you don't like the timing information, you can disable it by
 * defining MUNIT_DISABLE_TIMING. */
#if !defined(MUNIT_DISABLE_TIMING)
#  define MUNIT_ENABLE_TIMING
#endif

/*** End configuration ***/

/* Request POSIX.1-2008 APIs; undo a weaker request made elsewhere. */
#if defined(_POSIX_C_SOURCE) && (_POSIX_C_SOURCE < 200809L)
#  undef _POSIX_C_SOURCE
#endif
#if !defined(_POSIX_C_SOURCE)
#  define _POSIX_C_SOURCE 200809L
#endif

/* Solaris freaks out if you try to use a POSIX or SUS standard without
 * the "right" C standard. */
#if defined(_XOPEN_SOURCE)
#  undef _XOPEN_SOURCE
#endif
#if defined(__STDC_VERSION__)
#  if __STDC_VERSION__ >= 201112L
#    define _XOPEN_SOURCE 700
#  elif __STDC_VERSION__ >= 199901L
#    define _XOPEN_SOURCE 600
#  endif
#endif

/* Because, according to Microsoft, POSIX is deprecated.  You've got
 * to appreciate the chutzpah. */
#if defined(_MSC_VER) && !defined(_CRT_NONSTDC_NO_DEPRECATE)
#  define _CRT_NONSTDC_NO_DEPRECATE
#endif

#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
#  include <stdbool.h>
#elif defined(_WIN32)
/* https://msdn.microsoft.com/en-us/library/tf4dy80a.aspx */
#endif

#include <limits.h>
#include <time.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <setjmp.h>

#if !defined(MUNIT_NO_NL_LANGINFO) && !defined(_WIN32)
#define MUNIT_NL_LANGINFO
#include <locale.h>
#include <langinfo.h>
#include <strings.h>
#endif

#if !defined(_WIN32)
#  include <unistd.h>
#  include <sys/types.h>
#  include <sys/wait.h>
#else
#  include <windows.h>
#  include <io.h>
#  include <fcntl.h>
#  if !defined(STDERR_FILENO)
#    define STDERR_FILENO _fileno(stderr)
#  endif
#endif

#include "munit.h"

#define MUNIT_STRINGIFY(x) #x
#define MUNIT_XSTRINGIFY(x) MUNIT_STRINGIFY(x)

/* Pick a thread-local storage keyword for this compiler, if any. */
#if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_CC) || defined(__IBMCPP__)
#  define MUNIT_THREAD_LOCAL __thread
#elif (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201102L)) || defined(_Thread_local)
#  define MUNIT_THREAD_LOCAL _Thread_local
#elif defined(_WIN32)
#  define MUNIT_THREAD_LOCAL __declspec(thread)
#endif

/* MSVC 12.0 will emit a warning at /W4 for code like 'do { ... }
 * while (0)', or 'do { ... } while (1)'.  I'm pretty sure nobody
 * at Microsoft compiles with /W4. */
#if defined(_MSC_VER) && (_MSC_VER <= 1800)
#pragma warning(disable: 4127)
#endif

#if defined(_WIN32) || defined(__EMSCRIPTEN__)
#  define MUNIT_NO_FORK
#endif

#if defined(__EMSCRIPTEN__)
#  define MUNIT_NO_BUFFER
#endif

/*** Logging ***/

/* Messages below the visible level are dropped; at or above the fatal
 * level they abort (or longjmp back to the test harness). */
static MunitLogLevel munit_log_level_visible = MUNIT_LOG_INFO;
static MunitLogLevel munit_log_level_fatal = MUNIT_LOG_ERROR;

#if defined(MUNIT_THREAD_LOCAL)
static MUNIT_THREAD_LOCAL munit_bool munit_error_jmp_buf_valid = 0;
static MUNIT_THREAD_LOCAL jmp_buf munit_error_jmp_buf;
#endif

/* At certain warning levels, mingw will trigger warnings about
 * suggesting the format attribute, which we've explicitly *not* set
 * because it will then choke on our attempts to use the MS-specific
 * I64 modifier for size_t (which we have to use since MSVC doesn't
 * support the C99 z modifier). */
#if defined(__MINGW32__) || defined(__MINGW64__)
#  pragma GCC diagnostic push
#  pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
#endif

/* Core log sink: prefix with level (and file:line when known), then
 * print the formatted message to fp.  Invalid levels are reported
 * recursively as errors. */
MUNIT_PRINTF(5,0)
static void
munit_logf_exv(MunitLogLevel level, FILE* fp, const char* filename, int line, const char* format, va_list ap) {
  if (level < munit_log_level_visible)
    return;

  switch (level) {
    case MUNIT_LOG_DEBUG:
      fputs("Debug", fp);
      break;
    case MUNIT_LOG_INFO:
      fputs("Info", fp);
      break;
    case MUNIT_LOG_WARNING:
      fputs("Warning", fp);
      break;
    case MUNIT_LOG_ERROR:
      fputs("Error", fp);
      break;
    default:
      munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Invalid log level (%d)", level);
      return;
  }

  fputs(": ", fp);
  if (filename != NULL)
    fprintf(fp, "%s:%d: ", filename, line);
  vfprintf(fp, format, ap);
  fputc('\n', fp);
}

/* Internal printf-style logger (no file/line, no fatal handling). */
MUNIT_PRINTF(3,4)
static void
munit_logf_internal(MunitLogLevel level, FILE* fp, const char* format, ...) {
  va_list ap;

  va_start(ap, format);
  munit_logf_exv(level, fp, NULL, 0, format, ap);
  va_end(ap);
}

/* Internal logger for a pre-formatted message. */
static void
munit_log_internal(MunitLogLevel level, FILE* fp, const char* message) {
  munit_logf_internal(level, fp, "%s", message);
}

/* Public logger: writes to stderr; at or above the fatal level it
 * longjmps back to the harness when possible, otherwise aborts. */
void
munit_logf_ex(MunitLogLevel level, const char* filename, int line, const char* format, ...) {
  va_list ap;

  va_start(ap, format);
  munit_logf_exv(level, stderr, filename, line, format, ap);
  va_end(ap);

  if (level >= munit_log_level_fatal) {
#if defined(MUNIT_THREAD_LOCAL)
    if (munit_error_jmp_buf_valid)
      longjmp(munit_error_jmp_buf, 1);
#endif
    abort();
  }
}

/* Report a test failure: always error-level and always fatal. */
void
munit_errorf_ex(const char* filename, int line, const char* format, ...) {
  va_list ap;

  va_start(ap, format);
  munit_logf_exv(MUNIT_LOG_ERROR, stderr, filename, line, format, ap);
  va_end(ap);

#if defined(MUNIT_THREAD_LOCAL)
  if (munit_error_jmp_buf_valid)
    longjmp(munit_error_jmp_buf, 1);
#endif
  abort();
}

#if defined(__MINGW32__) || defined(__MINGW64__)
#pragma GCC diagnostic pop
#endif

#if !defined(MUNIT_STRERROR_LEN)
#  define MUNIT_STRERROR_LEN 80
#endif

/* Log msg together with strerror(errno), using whichever thread-safe
 * strerror variant the platform provides. */
static void
munit_log_errno(MunitLogLevel level, FILE* fp, const char* msg) {
#if defined(MUNIT_NO_STRERROR_R) || (defined(__MINGW32__) && !defined(MINGW_HAS_SECURE_API))
  munit_logf_internal(level, fp, "%s: %s (%d)", msg, strerror(errno), errno);
#else
  char munit_error_str[MUNIT_STRERROR_LEN];
  munit_error_str[0] = '\0';

#if !defined(_WIN32)
  strerror_r(errno, munit_error_str, MUNIT_STRERROR_LEN);
#else
  strerror_s(munit_error_str, MUNIT_STRERROR_LEN, errno);
#endif

  munit_logf_internal(level, fp, "%s: %s (%d)", msg, munit_error_str, errno);
#endif
}

/*** Memory allocation ***/

/* Zero-initialized allocation that logs (fatally) on failure.
 * Returns NULL for a zero-byte request. */
void*
munit_malloc_ex(const char* filename, int line, size_t size) {
  void* ptr;

  if (size == 0)
    return NULL;

  ptr = calloc(1, size);
  if (MUNIT_UNLIKELY(ptr == NULL)) {
    munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Failed to allocate %" MUNIT_SIZE_MODIFIER "u bytes.", size);
  }

  return ptr;
}

/*** Timer code ***/

#if defined(MUNIT_ENABLE_TIMING)

#define psnip_uint64_t munit_uint64_t
#define psnip_uint32_t munit_uint32_t

/* Code copied from portable-snippets
 * <https://github.com/nemequ/portable-snippets/>.  If you need to
 * change something, please do it there so we can keep the code in
 * sync. */

/* Clocks (v1)
 * Portable Snippets - https://github.com/nemequ/portable-snippets
 * Created by Evan Nemerson <evan@nemerson.com>
 *
 * To the extent possible under law, the authors have waived all
 * copyright and related or neighboring rights to this code.
For * details, see the Creative Commons Zero 1.0 Universal license at * https://creativecommons.org/publicdomain/zero/1.0/ */ #if !defined(PSNIP_CLOCK_H) #define PSNIP_CLOCK_H #if !defined(psnip_uint64_t) # include "../exact-int/exact-int.h" #endif #if !defined(PSNIP_CLOCK_STATIC_INLINE) # if defined(__GNUC__) # define PSNIP_CLOCK__COMPILER_ATTRIBUTES __attribute__((__unused__)) # else # define PSNIP_CLOCK__COMPILER_ATTRIBUTES # endif # define PSNIP_CLOCK__FUNCTION PSNIP_CLOCK__COMPILER_ATTRIBUTES static #endif enum PsnipClockType { /* This clock provides the current time, in units since 1970-01-01 * 00:00:00 UTC not including leap seconds. In other words, UNIX * time. Keep in mind that this clock doesn't account for leap * seconds, and can go backwards (think NTP adjustments). */ PSNIP_CLOCK_TYPE_WALL = 1, /* The CPU time is a clock which increases only when the current * process is active (i.e., it doesn't increment while blocking on * I/O). */ PSNIP_CLOCK_TYPE_CPU = 2, /* Monotonic time is always running (unlike CPU time), but it only ever moves forward unless you reboot the system. Things like NTP adjustments have no effect on this clock. 
*/ PSNIP_CLOCK_TYPE_MONOTONIC = 3 }; struct PsnipClockTimespec { psnip_uint64_t seconds; psnip_uint64_t nanoseconds; }; /* Methods we support: */ #define PSNIP_CLOCK_METHOD_CLOCK_GETTIME 1 #define PSNIP_CLOCK_METHOD_TIME 2 #define PSNIP_CLOCK_METHOD_GETTIMEOFDAY 3 #define PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER 4 #define PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME 5 #define PSNIP_CLOCK_METHOD_CLOCK 6 #define PSNIP_CLOCK_METHOD_GETPROCESSTIMES 7 #define PSNIP_CLOCK_METHOD_GETRUSAGE 8 #define PSNIP_CLOCK_METHOD_GETSYSTEMTIMEPRECISEASFILETIME 9 #define PSNIP_CLOCK_METHOD_GETTICKCOUNT64 10 #include <assert.h> #if defined(HEDLEY_UNREACHABLE) # define PSNIP_CLOCK_UNREACHABLE() HEDLEY_UNREACHABLE() #else # define PSNIP_CLOCK_UNREACHABLE() assert(0) #endif /* Choose an implementation */ /* #undef PSNIP_CLOCK_WALL_METHOD */ /* #undef PSNIP_CLOCK_CPU_METHOD */ /* #undef PSNIP_CLOCK_MONOTONIC_METHOD */ /* We want to be able to detect the libc implementation, so we include <limits.h> (<features.h> isn't available everywhere). */ #if defined(__unix__) || defined(__unix) || defined(__linux__) # include <limits.h> # include <unistd.h> #endif #if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0) /* These are known to work without librt. If you know of others * please let us know so we can add them. 
*/ # if \ (defined(__GLIBC__) && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 17))) || \ (defined(__FreeBSD__)) # define PSNIP_CLOCK_HAVE_CLOCK_GETTIME # elif !defined(PSNIP_CLOCK_NO_LIBRT) # define PSNIP_CLOCK_HAVE_CLOCK_GETTIME # endif #endif #if defined(_WIN32) # if !defined(PSNIP_CLOCK_CPU_METHOD) # define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_GETPROCESSTIMES # endif # if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) # define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER # endif #endif #if defined(__MACH__) && !defined(__gnu_hurd__) # if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) # define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME # endif #endif #if defined(PSNIP_CLOCK_HAVE_CLOCK_GETTIME) # include <time.h> # if !defined(PSNIP_CLOCK_WALL_METHOD) # if defined(CLOCK_REALTIME_PRECISE) # define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_WALL CLOCK_REALTIME_PRECISE # elif !defined(__sun) # define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_WALL CLOCK_REALTIME # endif # endif # if !defined(PSNIP_CLOCK_CPU_METHOD) # if defined(_POSIX_CPUTIME) || defined(CLOCK_PROCESS_CPUTIME_ID) # define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_CPU CLOCK_PROCESS_CPUTIME_ID # elif defined(CLOCK_VIRTUAL) # define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_CPU CLOCK_VIRTUAL # endif # endif # if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) # if defined(CLOCK_MONOTONIC_RAW) # define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC # elif defined(CLOCK_MONOTONIC_PRECISE) # define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC_PRECISE # elif defined(_POSIX_MONOTONIC_CLOCK) || defined(CLOCK_MONOTONIC) # 
define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME # define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC # endif # endif #endif #if defined(_POSIX_VERSION) && (_POSIX_VERSION >= 200112L) # if !defined(PSNIP_CLOCK_WALL_METHOD) # define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_GETTIMEOFDAY # endif #endif #if !defined(PSNIP_CLOCK_WALL_METHOD) # define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_TIME #endif #if !defined(PSNIP_CLOCK_CPU_METHOD) # define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK #endif /* Primarily here for testing. */ #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) && defined(PSNIP_CLOCK_REQUIRE_MONOTONIC) # error No monotonic clock found. #endif /* Implementations */ #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_TIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_TIME)) # include <time.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) # include 
<sys/time.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) # include <windows.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) # include <sys/time.h> # include <sys/resource.h> #endif #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) # include <CoreServices/CoreServices.h> # include <mach/mach.h> # include <mach/mach_time.h> #endif /*** Implementations ***/ #define PSNIP_CLOCK_NSEC_PER_SEC ((psnip_uint32_t) (1000000000ULL)) #if \ (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \ (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock__clock_getres 
(clockid_t clk_id) { struct timespec res; int r; r = clock_getres(clk_id, &res); if (r != 0) return 0; return (psnip_uint32_t) (PSNIP_CLOCK_NSEC_PER_SEC / res.tv_nsec); } PSNIP_CLOCK__FUNCTION int psnip_clock__clock_gettime (clockid_t clk_id, struct PsnipClockTimespec* res) { struct timespec ts; if (clock_gettime(clk_id, &ts) != 0) return -10; res->seconds = (psnip_uint64_t) (ts.tv_sec); res->nanoseconds = (psnip_uint64_t) (ts.tv_nsec); return 0; } #endif PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_wall_get_precision (void) { #if !defined(PSNIP_CLOCK_WALL_METHOD) return 0; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_WALL); #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY return 1000000; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME return 1; #else return 0; #endif } PSNIP_CLOCK__FUNCTION int psnip_clock_wall_get_time (struct PsnipClockTimespec* res) { (void) res; #if !defined(PSNIP_CLOCK_WALL_METHOD) return -2; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_WALL, res); #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME res->seconds = time(NULL); res->nanoseconds = 0; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY struct timeval tv; if (gettimeofday(&tv, NULL) != 0) return -6; res->seconds = tv.tv_sec; res->nanoseconds = tv.tv_usec * 1000; #else return -2; #endif return 0; } PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_cpu_get_precision (void) { #if !defined(PSNIP_CLOCK_CPU_METHOD) return 0; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return 
psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_CPU); #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK return CLOCKS_PER_SEC; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES return PSNIP_CLOCK_NSEC_PER_SEC / 100; #else return 0; #endif } PSNIP_CLOCK__FUNCTION int psnip_clock_cpu_get_time (struct PsnipClockTimespec* res) { #if !defined(PSNIP_CLOCK_CPU_METHOD) (void) res; return -2; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_CPU, res); #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK clock_t t = clock(); if (t == ((clock_t) -1)) return -5; res->seconds = t / CLOCKS_PER_SEC; res->nanoseconds = (t % CLOCKS_PER_SEC) * (PSNIP_CLOCK_NSEC_PER_SEC / CLOCKS_PER_SEC); #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES FILETIME CreationTime, ExitTime, KernelTime, UserTime; LARGE_INTEGER date, adjust; if (!GetProcessTimes(GetCurrentProcess(), &CreationTime, &ExitTime, &KernelTime, &UserTime)) return -7; /* http://www.frenk.com/2009/12/convert-filetime-to-unix-timestamp/ */ date.HighPart = UserTime.dwHighDateTime; date.LowPart = UserTime.dwLowDateTime; adjust.QuadPart = 11644473600000 * 10000; date.QuadPart -= adjust.QuadPart; res->seconds = date.QuadPart / 10000000; res->nanoseconds = (date.QuadPart % 10000000) * (PSNIP_CLOCK_NSEC_PER_SEC / 100); #elif PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE struct rusage usage; if (getrusage(RUSAGE_SELF, &usage) != 0) return -8; res->seconds = usage.ru_utime.tv_sec; res->nanoseconds = tv.tv_usec * 1000; #else (void) res; return -2; #endif return 0; } PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_monotonic_get_precision (void) { #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) return 0; #elif 
defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC); #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME static mach_timebase_info_data_t tbi = { 0, }; if (tbi.denom == 0) mach_timebase_info(&tbi); return (psnip_uint32_t) (tbi.numer / tbi.denom); #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64 return 1000; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER LARGE_INTEGER Frequency; QueryPerformanceFrequency(&Frequency); return (psnip_uint32_t) ((Frequency.QuadPart > PSNIP_CLOCK_NSEC_PER_SEC) ? PSNIP_CLOCK_NSEC_PER_SEC : Frequency.QuadPart); #else return 0; #endif } PSNIP_CLOCK__FUNCTION int psnip_clock_monotonic_get_time (struct PsnipClockTimespec* res) { #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) (void) res; return -2; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC, res); #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME psnip_uint64_t nsec = mach_absolute_time(); static mach_timebase_info_data_t tbi = { 0, }; if (tbi.denom == 0) mach_timebase_info(&tbi); nsec *= ((psnip_uint64_t) tbi.numer) / ((psnip_uint64_t) tbi.denom); res->seconds = nsec / PSNIP_CLOCK_NSEC_PER_SEC; res->nanoseconds = nsec % PSNIP_CLOCK_NSEC_PER_SEC; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER LARGE_INTEGER t, f; if (QueryPerformanceCounter(&t) == 0) return -12; QueryPerformanceFrequency(&f); res->seconds = t.QuadPart / f.QuadPart; res->nanoseconds = t.QuadPart % f.QuadPart; if (f.QuadPart > 
PSNIP_CLOCK_NSEC_PER_SEC) res->nanoseconds /= f.QuadPart / PSNIP_CLOCK_NSEC_PER_SEC; else res->nanoseconds *= PSNIP_CLOCK_NSEC_PER_SEC / f.QuadPart; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64 const ULONGLONG msec = GetTickCount64(); res->seconds = msec / 1000; res->nanoseconds = sec % 1000; #else return -2; #endif return 0; } /* Returns the number of ticks per second for the specified clock. * For example, a clock with millisecond precision would return 1000, * and a clock with 1 second (such as the time() function) would * return 1. * * If the requested clock isn't available, it will return 0. * Hopefully this will be rare, but if it happens to you please let us * know so we can work on finding a way to support your system. * * Note that different clocks on the same system often have a * different precisions. */ PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_get_precision (enum PsnipClockType clock_type) { switch (clock_type) { case PSNIP_CLOCK_TYPE_MONOTONIC: return psnip_clock_monotonic_get_precision (); case PSNIP_CLOCK_TYPE_CPU: return psnip_clock_cpu_get_precision (); case PSNIP_CLOCK_TYPE_WALL: return psnip_clock_wall_get_precision (); } PSNIP_CLOCK_UNREACHABLE(); return 0; } /* Set the provided timespec to the requested time. Returns 0 on * success, or a negative value on failure. 
*/
PSNIP_CLOCK__FUNCTION
int
psnip_clock_get_time (enum PsnipClockType clock_type, struct PsnipClockTimespec* res) {
  assert(res != NULL);

  /* Dispatch to the clock-specific getter; each returns 0 on success
   * or a negative value on failure. */
  switch (clock_type) {
    case PSNIP_CLOCK_TYPE_MONOTONIC:
      return psnip_clock_monotonic_get_time (res);
    case PSNIP_CLOCK_TYPE_CPU:
      return psnip_clock_cpu_get_time (res);
    case PSNIP_CLOCK_TYPE_WALL:
      return psnip_clock_wall_get_time (res);
  }

  /* Unknown clock type. */
  return -1;
}

#endif /* !defined(PSNIP_CLOCK_H) */

/* Elapsed nanoseconds from `start` to `end`; `end` is expected to be
 * the later of the two timestamps. */
static psnip_uint64_t
munit_clock_get_elapsed(struct PsnipClockTimespec* start, struct PsnipClockTimespec* end) {
  psnip_uint64_t r = (end->seconds - start->seconds) * PSNIP_CLOCK_NSEC_PER_SEC;
  /* The nanosecond fields are sub-second remainders, so the difference
   * may have to borrow from the seconds term. */
  if (end->nanoseconds < start->nanoseconds) {
    r -= (start->nanoseconds - end->nanoseconds);
  } else {
    r += (end->nanoseconds - start->nanoseconds);
  }
  return r;
}

#else
# include <time.h>
#endif /* defined(MUNIT_ENABLE_TIMING) */

/*** PRNG stuff ***/

/* This is (unless I screwed up, which is entirely possible) the
 * version of PCG with 32-bit state. It was chosen because it has a
 * small enough state that we should reliably be able to use CAS
 * instead of requiring a lock for thread-safety.
 *
 * If I did screw up, I probably will not bother changing it unless
 * there is a significant bias. It's really not important this be
 * particularly strong, as long as it is fairly random it's much more
 * important that it be reproducible, so bug reports have a better
 * chance of being reproducible.
*/ #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__) && !defined(__EMSCRIPTEN__) && (!defined(__GNUC_MINOR__) || (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ > 8)) # define HAVE_STDATOMIC #elif defined(__clang__) # if __has_extension(c_atomic) # define HAVE_CLANG_ATOMICS # endif #endif /* Workaround for http://llvm.org/bugs/show_bug.cgi?id=26911 */ #if defined(__clang__) && defined(_WIN32) # undef HAVE_STDATOMIC # if defined(__c2__) # undef HAVE_CLANG_ATOMICS # endif #endif #if defined(_OPENMP) # define ATOMIC_UINT32_T uint32_t # define ATOMIC_UINT32_INIT(x) (x) #elif defined(HAVE_STDATOMIC) # include <stdatomic.h> # define ATOMIC_UINT32_T _Atomic uint32_t # define ATOMIC_UINT32_INIT(x) ATOMIC_VAR_INIT(x) #elif defined(HAVE_CLANG_ATOMICS) # define ATOMIC_UINT32_T _Atomic uint32_t # define ATOMIC_UINT32_INIT(x) (x) #elif defined(_WIN32) # define ATOMIC_UINT32_T volatile LONG # define ATOMIC_UINT32_INIT(x) (x) #else # define ATOMIC_UINT32_T volatile uint32_t # define ATOMIC_UINT32_INIT(x) (x) #endif static ATOMIC_UINT32_T munit_rand_state = ATOMIC_UINT32_INIT(42); #if defined(_OPENMP) static inline void munit_atomic_store(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T value) { #pragma omp critical (munit_atomics) *dest = value; } static inline uint32_t munit_atomic_load(ATOMIC_UINT32_T* src) { int ret; #pragma omp critical (munit_atomics) ret = *src; return ret; } static inline uint32_t munit_atomic_cas(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T* expected, ATOMIC_UINT32_T desired) { munit_bool ret; #pragma omp critical (munit_atomics) { if (*dest == *expected) { *dest = desired; ret = 1; } else { ret = 0; } } return ret; } #elif defined(HAVE_STDATOMIC) # define munit_atomic_store(dest, value) atomic_store(dest, value) # define munit_atomic_load(src) atomic_load(src) # define munit_atomic_cas(dest, expected, value) atomic_compare_exchange_weak(dest, expected, value) #elif defined(HAVE_CLANG_ATOMICS) # define 
munit_atomic_store(dest, value) __c11_atomic_store(dest, value, __ATOMIC_SEQ_CST) # define munit_atomic_load(src) __c11_atomic_load(src, __ATOMIC_SEQ_CST) # define munit_atomic_cas(dest, expected, value) __c11_atomic_compare_exchange_weak(dest, expected, value, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) #elif defined(__GNUC__) && (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7) # define munit_atomic_store(dest, value) __atomic_store_n(dest, value, __ATOMIC_SEQ_CST) # define munit_atomic_load(src) __atomic_load_n(src, __ATOMIC_SEQ_CST) # define munit_atomic_cas(dest, expected, value) __atomic_compare_exchange_n(dest, expected, value, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) #elif defined(__GNUC__) && (__GNUC__ >= 4) # define munit_atomic_store(dest,value) do { *(dest) = (value); } while (0) # define munit_atomic_load(src) (*(src)) # define munit_atomic_cas(dest, expected, value) __sync_bool_compare_and_swap(dest, *expected, value) #elif defined(_WIN32) /* Untested */ # define munit_atomic_store(dest,value) do { *(dest) = (value); } while (0) # define munit_atomic_load(src) (*(src)) # define munit_atomic_cas(dest, expected, value) InterlockedCompareExchange((dest), (value), *(expected)) #else # warning No atomic implementation, PRNG will not be thread-safe # define munit_atomic_store(dest, value) do { *(dest) = (value); } while (0) # define munit_atomic_load(src) (*(src)) static inline munit_bool munit_atomic_cas(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T* expected, ATOMIC_UINT32_T desired) { if (*dest == *expected) { *dest = desired; return 1; } else { return 0; } } #endif #define MUNIT_PRNG_MULTIPLIER (747796405U) #define MUNIT_PRNG_INCREMENT (1729U) static munit_uint32_t munit_rand_next_state(munit_uint32_t state) { return state * MUNIT_PRNG_MULTIPLIER + MUNIT_PRNG_INCREMENT; } static munit_uint32_t munit_rand_from_state(munit_uint32_t state) { munit_uint32_t res = ((state >> ((state >> 28) + 4)) ^ state) * (277803737U); res ^= res >> 22; return res; } void 
munit_rand_seed(munit_uint32_t seed) { munit_uint32_t state = munit_rand_next_state(seed + MUNIT_PRNG_INCREMENT); munit_atomic_store(&munit_rand_state, state); } static munit_uint32_t munit_rand_generate_seed(void) { munit_uint32_t seed, state; #if defined(MUNIT_ENABLE_TIMING) struct PsnipClockTimespec wc = { 0, }; psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wc); seed = (munit_uint32_t) wc.nanoseconds; #else seed = (munit_uint32_t) time(NULL); #endif state = munit_rand_next_state(seed + MUNIT_PRNG_INCREMENT); return munit_rand_from_state(state); } static munit_uint32_t munit_rand_state_uint32(munit_uint32_t* state) { const munit_uint32_t old = *state; *state = munit_rand_next_state(old); return munit_rand_from_state(old); } munit_uint32_t munit_rand_uint32(void) { munit_uint32_t old, state; do { old = munit_atomic_load(&munit_rand_state); state = munit_rand_next_state(old); } while (!munit_atomic_cas(&munit_rand_state, &old, state)); return munit_rand_from_state(old); } static void munit_rand_state_memory(munit_uint32_t* state, size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)]) { size_t members_remaining = size / sizeof(munit_uint32_t); size_t bytes_remaining = size % sizeof(munit_uint32_t); munit_uint8_t* b = data; munit_uint32_t rv; while (members_remaining-- > 0) { rv = munit_rand_state_uint32(state); memcpy(b, &rv, sizeof(munit_uint32_t)); b += sizeof(munit_uint32_t); } if (bytes_remaining != 0) { rv = munit_rand_state_uint32(state); memcpy(b, &rv, bytes_remaining); } } void munit_rand_memory(size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)]) { munit_uint32_t old, state; do { state = old = munit_atomic_load(&munit_rand_state); munit_rand_state_memory(&state, size, data); } while (!munit_atomic_cas(&munit_rand_state, &old, state)); } static munit_uint32_t munit_rand_state_at_most(munit_uint32_t* state, munit_uint32_t salt, munit_uint32_t max) { /* We want (UINT32_MAX + 1) % max, which in unsigned arithmetic is the same * as (UINT32_MAX + 1 - max) 
% max = -max % max. We compute -max using not
 * to avoid compiler warnings. */
  const munit_uint32_t min = (~max + 1U) % max;
  munit_uint32_t x;

  /* When max is UINT32_MAX every 32-bit value is in range; no rejection
   * (and no modulo) needed. */
  if (max == (~((munit_uint32_t) 0U)))
    return munit_rand_state_uint32(state) ^ salt;

  max++;

  /* Rejection sampling: discard draws below `min` so the final modulo
   * is unbiased. */
  do {
    x = munit_rand_state_uint32(state) ^ salt;
  } while (x < min);

  return x % max;
}

/* Like munit_rand_state_at_most(), but reads and (via CAS) atomically
 * updates the global PRNG state. */
static munit_uint32_t
munit_rand_at_most(munit_uint32_t salt, munit_uint32_t max) {
  munit_uint32_t old, state;
  munit_uint32_t retval;

  do {
    state = old = munit_atomic_load(&munit_rand_state);
    retval = munit_rand_state_at_most(&state, salt, max);
  } while (!munit_atomic_cas(&munit_rand_state, &old, state));

  return retval;
}

/* Uniform random int in [min, max] (inclusive); arguments are swapped
 * if given in the wrong order, and the span is clamped to UINT32_MAX. */
int
munit_rand_int_range(int min, int max) {
  munit_uint64_t range = (munit_uint64_t) max - (munit_uint64_t) min;

  if (min > max)
    return munit_rand_int_range(max, min);

  if (range > (~((munit_uint32_t) 0U)))
    range = (~((munit_uint32_t) 0U));

  return min + munit_rand_at_most(0, (munit_uint32_t) range);
}

/* Uniform random double in [0, 1). */
double
munit_rand_double(void) {
  munit_uint32_t old, state;
  double retval = 0.0;

  do {
    state = old = munit_atomic_load(&munit_rand_state);

    /* See http://mumble.net/~campbell/tmp/random_real.c for how to do
     * this right. Patches welcome if you feel that this is too
     * biased.
*/ retval = munit_rand_state_uint32(&state) / ((~((munit_uint32_t) 0U)) + 1.0); } while (!munit_atomic_cas(&munit_rand_state, &old, state)); return retval; } /*** Test suite handling ***/ typedef struct { unsigned int successful; unsigned int skipped; unsigned int failed; unsigned int errored; #if defined(MUNIT_ENABLE_TIMING) munit_uint64_t cpu_clock; munit_uint64_t wall_clock; #endif } MunitReport; typedef struct { const char* prefix; const MunitSuite* suite; const char** tests; munit_uint32_t seed; unsigned int iterations; MunitParameter* parameters; munit_bool single_parameter_mode; void* user_data; MunitReport report; munit_bool colorize; munit_bool fork; munit_bool show_stderr; munit_bool fatal_failures; } MunitTestRunner; const char* munit_parameters_get(const MunitParameter params[], const char* key) { const MunitParameter* param; for (param = params ; param != NULL && param->name != NULL ; param++) if (strcmp(param->name, key) == 0) return param->value; return NULL; } #if defined(MUNIT_ENABLE_TIMING) static void munit_print_time(FILE* fp, munit_uint64_t nanoseconds) { fprintf(fp, "%" MUNIT_TEST_TIME_FORMAT, ((double) nanoseconds) / ((double) PSNIP_CLOCK_NSEC_PER_SEC)); } #endif /* Add a paramter to an array of parameters. */ static MunitResult munit_parameters_add(size_t* params_size, MunitParameter* params[MUNIT_ARRAY_PARAM(*params_size)], char* name, char* value) { *params = realloc(*params, sizeof(MunitParameter) * (*params_size + 2)); if (*params == NULL) return MUNIT_ERROR; (*params)[*params_size].name = name; (*params)[*params_size].value = value; (*params_size)++; (*params)[*params_size].name = NULL; (*params)[*params_size].value = NULL; return MUNIT_OK; } /* Concatenate two strings, but just return one of the components * unaltered if the other is NULL or "". */ static char* munit_maybe_concat(size_t* len, char* prefix, char* suffix) { char* res; size_t res_l; const size_t prefix_l = prefix != NULL ? 
strlen(prefix) : 0; const size_t suffix_l = suffix != NULL ? strlen(suffix) : 0; if (prefix_l == 0 && suffix_l == 0) { res = NULL; res_l = 0; } else if (prefix_l == 0 && suffix_l != 0) { res = suffix; res_l = suffix_l; } else if (prefix_l != 0 && suffix_l == 0) { res = prefix; res_l = prefix_l; } else { res_l = prefix_l + suffix_l; res = malloc(res_l + 1); memcpy(res, prefix, prefix_l); memcpy(res + prefix_l, suffix, suffix_l); res[res_l] = 0; } if (len != NULL) *len = res_l; return res; } /* Possbily free a string returned by munit_maybe_concat. */ static void munit_maybe_free_concat(char* s, const char* prefix, const char* suffix) { if (prefix != s && suffix != s) free(s); } /* Cheap string hash function, just used to salt the PRNG. */ static munit_uint32_t munit_str_hash(const char* name) { const char *p; munit_uint32_t h = 5381U; for (p = name; *p != '\0'; p++) h = (h << 5) + h + *p; return h; } static void munit_splice(int from, int to) { munit_uint8_t buf[1024]; #if !defined(_WIN32) ssize_t len; ssize_t bytes_written; ssize_t write_res; #else int len; int bytes_written; int write_res; #endif do { len = read(from, buf, sizeof(buf)); if (len > 0) { bytes_written = 0; do { write_res = write(to, buf + bytes_written, len - bytes_written); if (write_res < 0) break; bytes_written += write_res; } while (bytes_written < len); } else break; } while (1); } /* This is the part that should be handled in the child process */ static MunitResult munit_test_runner_exec(MunitTestRunner* runner, const MunitTest* test, const MunitParameter params[], MunitReport* report) { unsigned int iterations = runner->iterations; MunitResult result = MUNIT_FAIL; #if defined(MUNIT_ENABLE_TIMING) struct PsnipClockTimespec wall_clock_begin = { 0, }, wall_clock_end = { 0, }; struct PsnipClockTimespec cpu_clock_begin = { 0, }, cpu_clock_end = { 0, }; #endif unsigned int i = 0; if ((test->options & MUNIT_TEST_OPTION_SINGLE_ITERATION) == MUNIT_TEST_OPTION_SINGLE_ITERATION) iterations = 1; else if 
(iterations == 0) iterations = runner->suite->iterations; munit_rand_seed(runner->seed); do { void* data = (test->setup == NULL) ? runner->user_data : test->setup(params, runner->user_data); #if defined(MUNIT_ENABLE_TIMING) psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wall_clock_begin); psnip_clock_get_time(PSNIP_CLOCK_TYPE_CPU, &cpu_clock_begin); #endif result = test->test(params, data); #if defined(MUNIT_ENABLE_TIMING) psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wall_clock_end); psnip_clock_get_time(PSNIP_CLOCK_TYPE_CPU, &cpu_clock_end); #endif if (test->tear_down != NULL) test->tear_down(data); if (MUNIT_LIKELY(result == MUNIT_OK)) { report->successful++; #if defined(MUNIT_ENABLE_TIMING) report->wall_clock += munit_clock_get_elapsed(&wall_clock_begin, &wall_clock_end); report->cpu_clock += munit_clock_get_elapsed(&cpu_clock_begin, &cpu_clock_end); #endif } else { switch ((int) result) { case MUNIT_SKIP: report->skipped++; break; case MUNIT_FAIL: report->failed++; break; case MUNIT_ERROR: report->errored++; break; default: break; } break; } } while (++i < iterations); return result; } #if defined(MUNIT_EMOTICON) # define MUNIT_RESULT_STRING_OK ":)" # define MUNIT_RESULT_STRING_SKIP ":|" # define MUNIT_RESULT_STRING_FAIL ":(" # define MUNIT_RESULT_STRING_ERROR ":o" # define MUNIT_RESULT_STRING_TODO ":/" #else # define MUNIT_RESULT_STRING_OK "OK " # define MUNIT_RESULT_STRING_SKIP "SKIP " # define MUNIT_RESULT_STRING_FAIL "FAIL " # define MUNIT_RESULT_STRING_ERROR "ERROR" # define MUNIT_RESULT_STRING_TODO "TODO " #endif static void munit_test_runner_print_color(const MunitTestRunner* runner, const char* string, char color) { if (runner->colorize) fprintf(MUNIT_OUTPUT_FILE, "\x1b[3%cm%s\x1b[39m", color, string); else fputs(string, MUNIT_OUTPUT_FILE); } #if !defined(MUNIT_NO_BUFFER) static int munit_replace_stderr(FILE* stderr_buf) { if (stderr_buf != NULL) { const int orig_stderr = dup(STDERR_FILENO); int errfd = fileno(stderr_buf); if (MUNIT_UNLIKELY(errfd == -1)) 
{ exit(EXIT_FAILURE); } dup2(errfd, STDERR_FILENO); return orig_stderr; } return -1; } static void munit_restore_stderr(int orig_stderr) { if (orig_stderr != -1) { dup2(orig_stderr, STDERR_FILENO); close(orig_stderr); } } #endif /* !defined(MUNIT_NO_BUFFER) */ /* Run a test with the specified parameters. */ static void munit_test_runner_run_test_with_params(MunitTestRunner* runner, const MunitTest* test, const MunitParameter params[]) { MunitResult result = MUNIT_OK; MunitReport report = { 0, 0, 0, 0, #if defined(MUNIT_ENABLE_TIMING) 0, 0 #endif }; unsigned int output_l; munit_bool first; const MunitParameter* param; FILE* stderr_buf; #if !defined(MUNIT_NO_FORK) int pipefd[2]; pid_t fork_pid; int orig_stderr; ssize_t bytes_written = 0; ssize_t write_res; ssize_t bytes_read = 0; ssize_t read_res; int status = 0; pid_t changed_pid; #endif if (params != NULL) { output_l = 2; fputs(" ", MUNIT_OUTPUT_FILE); first = 1; for (param = params ; param != NULL && param->name != NULL ; param++) { if (!first) { fputs(", ", MUNIT_OUTPUT_FILE); output_l += 2; } else { first = 0; } output_l += fprintf(MUNIT_OUTPUT_FILE, "%s=%s", param->name, param->value); } while (output_l++ < MUNIT_TEST_NAME_LEN) { fputc(' ', MUNIT_OUTPUT_FILE); } } fflush(MUNIT_OUTPUT_FILE); stderr_buf = NULL; #if !defined(_WIN32) || defined(__MINGW32__) stderr_buf = tmpfile(); #else tmpfile_s(&stderr_buf); #endif if (stderr_buf == NULL) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create buffer for stderr"); result = MUNIT_ERROR; goto print_result; } #if !defined(MUNIT_NO_FORK) if (runner->fork) { pipefd[0] = -1; pipefd[1] = -1; if (pipe(pipefd) != 0) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create pipe"); result = MUNIT_ERROR; goto print_result; } fork_pid = fork(); if (fork_pid == 0) { close(pipefd[0]); orig_stderr = munit_replace_stderr(stderr_buf); munit_test_runner_exec(runner, test, params, &report); /* Note that we don't restore stderr. 
This is so we can buffer * things written to stderr later on (such as by * asan/tsan/ubsan, valgrind, etc.) */ close(orig_stderr); do { write_res = write(pipefd[1], ((munit_uint8_t*) (&report)) + bytes_written, sizeof(report) - bytes_written); if (write_res < 0) { if (stderr_buf != NULL) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to write to pipe"); } exit(EXIT_FAILURE); } bytes_written += write_res; } while ((size_t) bytes_written < sizeof(report)); if (stderr_buf != NULL) fclose(stderr_buf); close(pipefd[1]); exit(EXIT_SUCCESS); } else if (fork_pid == -1) { close(pipefd[0]); close(pipefd[1]); if (stderr_buf != NULL) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to fork"); } report.errored++; result = MUNIT_ERROR; } else { close(pipefd[1]); do { read_res = read(pipefd[0], ((munit_uint8_t*) (&report)) + bytes_read, sizeof(report) - bytes_read); if (read_res < 1) break; bytes_read += read_res; } while (bytes_read < (ssize_t) sizeof(report)); changed_pid = waitpid(fork_pid, &status, 0); if (MUNIT_LIKELY(changed_pid == fork_pid) && MUNIT_LIKELY(WIFEXITED(status))) { if (bytes_read != sizeof(report)) { munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited unexpectedly with status %d", WEXITSTATUS(status)); report.errored++; } else if (WEXITSTATUS(status) != EXIT_SUCCESS) { munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited with status %d", WEXITSTATUS(status)); report.errored++; } } else { if (WIFSIGNALED(status)) { #if defined(_XOPEN_VERSION) && (_XOPEN_VERSION >= 700) munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d (%s)", WTERMSIG(status), strsignal(WTERMSIG(status))); #else munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d", WTERMSIG(status)); #endif } else if (WIFSTOPPED(status)) { munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child stopped by signal %d", WSTOPSIG(status)); } report.errored++; } close(pipefd[0]); waitpid(fork_pid, NULL, 0); } } else #endif { #if 
!defined(MUNIT_NO_BUFFER) const volatile int orig_stderr = munit_replace_stderr(stderr_buf); #endif #if defined(MUNIT_THREAD_LOCAL) if (MUNIT_UNLIKELY(setjmp(munit_error_jmp_buf) != 0)) { result = MUNIT_FAIL; report.failed++; } else { munit_error_jmp_buf_valid = 1; result = munit_test_runner_exec(runner, test, params, &report); } #else result = munit_test_runner_exec(runner, test, params, &report); #endif #if !defined(MUNIT_NO_BUFFER) munit_restore_stderr(orig_stderr); #endif /* Here just so that the label is used on Windows and we don't get * a warning */ goto print_result; } print_result: fputs("[ ", MUNIT_OUTPUT_FILE); if ((test->options & MUNIT_TEST_OPTION_TODO) == MUNIT_TEST_OPTION_TODO) { if (report.failed != 0 || report.errored != 0 || report.skipped != 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_TODO, '3'); result = MUNIT_OK; } else { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1'); if (MUNIT_LIKELY(stderr_buf != NULL)) munit_log_internal(MUNIT_LOG_ERROR, stderr_buf, "Test marked TODO, but was successful."); runner->report.failed++; result = MUNIT_ERROR; } } else if (report.failed > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_FAIL, '1'); runner->report.failed++; result = MUNIT_FAIL; } else if (report.errored > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1'); runner->report.errored++; result = MUNIT_ERROR; } else if (report.skipped > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_SKIP, '3'); runner->report.skipped++; result = MUNIT_SKIP; } else if (report.successful > 1) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2'); #if defined(MUNIT_ENABLE_TIMING) fputs(" ] [ ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock / report.successful); fputs(" / ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock / report.successful); fprintf(MUNIT_OUTPUT_FILE, " CPU ]\n %-" 
MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s Total: [ ", ""); munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock); fputs(" / ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock); fputs(" CPU", MUNIT_OUTPUT_FILE); #endif runner->report.successful++; result = MUNIT_OK; } else if (report.successful > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2'); #if defined(MUNIT_ENABLE_TIMING) fputs(" ] [ ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock); fputs(" / ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock); fputs(" CPU", MUNIT_OUTPUT_FILE); #endif runner->report.successful++; result = MUNIT_OK; } fputs(" ]\n", MUNIT_OUTPUT_FILE); if (stderr_buf != NULL) { if (result == MUNIT_FAIL || result == MUNIT_ERROR || runner->show_stderr) { fflush(MUNIT_OUTPUT_FILE); rewind(stderr_buf); munit_splice(fileno(stderr_buf), STDERR_FILENO); fflush(stderr); } fclose(stderr_buf); } } static void munit_test_runner_run_test_wild(MunitTestRunner* runner, const MunitTest* test, const char* test_name, MunitParameter* params, MunitParameter* p) { const MunitParameterEnum* pe; char** values; MunitParameter* next; for (pe = test->parameters ; pe != NULL && pe->name != NULL ; pe++) { if (p->name == pe->name) break; } if (pe == NULL) return; for (values = pe->values ; *values != NULL ; values++) { next = p + 1; p->value = *values; if (next->name == NULL) { munit_test_runner_run_test_with_params(runner, test, params); } else { munit_test_runner_run_test_wild(runner, test, test_name, params, next); } if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0)) break; } } /* Run a single test, with every combination of parameters * requested. 
*/ static void munit_test_runner_run_test(MunitTestRunner* runner, const MunitTest* test, const char* prefix) { char* test_name = munit_maybe_concat(NULL, (char*) prefix, (char*) test->name); /* The array of parameters to pass to * munit_test_runner_run_test_with_params */ MunitParameter* params = NULL; size_t params_l = 0; /* Wildcard parameters are parameters which have possible values * specified in the test, but no specific value was passed to the * CLI. That means we want to run the test once for every * possible combination of parameter values or, if --single was * passed to the CLI, a single time with a random set of * parameters. */ MunitParameter* wild_params = NULL; size_t wild_params_l = 0; const MunitParameterEnum* pe; const MunitParameter* cli_p; munit_bool filled; unsigned int possible; char** vals; size_t first_wild; const MunitParameter* wp; int pidx; munit_rand_seed(runner->seed); fprintf(MUNIT_OUTPUT_FILE, "%-" MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s", test_name); if (test->parameters == NULL) { /* No parameters. Simple, nice. */ munit_test_runner_run_test_with_params(runner, test, NULL); } else { fputc('\n', MUNIT_OUTPUT_FILE); for (pe = test->parameters ; pe != NULL && pe->name != NULL ; pe++) { /* Did we received a value for this parameter from the CLI? */ filled = 0; for (cli_p = runner->parameters ; cli_p != NULL && cli_p->name != NULL ; cli_p++) { if (strcmp(cli_p->name, pe->name) == 0) { if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, cli_p->value) != MUNIT_OK)) goto cleanup; filled = 1; break; } } if (filled) continue; /* Nothing from CLI, is the enum NULL/empty? We're not a * fuzzer… */ if (pe->values == NULL || pe->values[0] == NULL) continue; /* If --single was passed to the CLI, choose a value from the * list of possibilities randomly. 
*/ if (runner->single_parameter_mode) { possible = 0; for (vals = pe->values ; *vals != NULL ; vals++) possible++; /* We want the tests to be reproducible, even if you're only * running a single test, but we don't want every test with * the same number of parameters to choose the same parameter * number, so use the test name as a primitive salt. */ pidx = munit_rand_at_most(munit_str_hash(test_name), possible - 1); if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, pe->values[pidx]) != MUNIT_OK)) goto cleanup; } else { /* We want to try every permutation. Put in a placeholder * entry, we'll iterate through them later. */ if (MUNIT_UNLIKELY(munit_parameters_add(&wild_params_l, &wild_params, pe->name, NULL) != MUNIT_OK)) goto cleanup; } } if (wild_params_l != 0) { first_wild = params_l; for (wp = wild_params ; wp != NULL && wp->name != NULL ; wp++) { for (pe = test->parameters ; pe != NULL && pe->name != NULL && pe->values != NULL ; pe++) { if (strcmp(wp->name, pe->name) == 0) { if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, pe->values[0]) != MUNIT_OK)) goto cleanup; } } } munit_test_runner_run_test_wild(runner, test, test_name, params, params + first_wild); } else { munit_test_runner_run_test_with_params(runner, test, params); } cleanup: free(params); free(wild_params); } munit_maybe_free_concat(test_name, prefix, test->name); } /* Recurse through the suite and run all the tests. If a list of * tests to run was provied on the command line, run only those * tests. */ static void munit_test_runner_run_suite(MunitTestRunner* runner, const MunitSuite* suite, const char* prefix) { size_t pre_l; char* pre = munit_maybe_concat(&pre_l, (char*) prefix, (char*) suite->prefix); const MunitTest* test; const char** test_name; const MunitSuite* child_suite; /* Run the tests. 
*/ for (test = suite->tests ; test != NULL && test->test != NULL ; test++) { if (runner->tests != NULL) { /* Specific tests were requested on the CLI */ for (test_name = runner->tests ; test_name != NULL && *test_name != NULL ; test_name++) { if ((pre_l == 0 || strncmp(pre, *test_name, pre_l) == 0) && strncmp(test->name, *test_name + pre_l, strlen(*test_name + pre_l)) == 0) { munit_test_runner_run_test(runner, test, pre); if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0)) goto cleanup; } } } else { /* Run all tests */ munit_test_runner_run_test(runner, test, pre); } } if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0)) goto cleanup; /* Run any child suites. */ for (child_suite = suite->suites ; child_suite != NULL && child_suite->prefix != NULL ; child_suite++) { munit_test_runner_run_suite(runner, child_suite, pre); } cleanup: munit_maybe_free_concat(pre, prefix, suite->prefix); } static void munit_test_runner_run(MunitTestRunner* runner) { munit_test_runner_run_suite(runner, runner->suite, NULL); } static void munit_print_help(int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)], void* user_data, const MunitArgument arguments[]) { const MunitArgument* arg; (void) argc; printf("USAGE: %s [OPTIONS...] [TEST...]\n\n", argv[0]); puts(" --seed SEED\n" " Value used to seed the PRNG. Must be a 32-bit integer in decimal\n" " notation with no separators (commas, decimals, spaces, etc.), or\n" " hexidecimal prefixed by \"0x\".\n" " --iterations N\n" " Run each test N times. 0 means the default number.\n" " --param name value\n" " A parameter key/value pair which will be passed to any test with\n" " takes a parameter of that name. 
If not provided, the test will be\n" " run once for each possible parameter value.\n" " --list Write a list of all available tests.\n" " --list-params\n" " Write a list of all available tests and their possible parameters.\n" " --single Run each parameterized test in a single configuration instead of\n" " every possible combination\n" " --log-visible debug|info|warning|error\n" " --log-fatal debug|info|warning|error\n" " Set the level at which messages of different severities are visible,\n" " or cause the test to terminate.\n" #if !defined(MUNIT_NO_FORK) " --no-fork Do not execute tests in a child process. If this option is supplied\n" " and a test crashes (including by failing an assertion), no further\n" " tests will be performed.\n" #endif " --fatal-failures\n" " Stop executing tests as soon as a failure is found.\n" " --show-stderr\n" " Show data written to stderr by the tests, even if the test succeeds.\n" " --color auto|always|never\n" " Colorize (or don't) the output.\n" /* 12345678901234567890123456789012345678901234567890123456789012345678901234567890 */ " --help Print this help message and exit.\n"); #if defined(MUNIT_NL_LANGINFO) setlocale(LC_ALL, ""); fputs((strcasecmp("UTF-8", nl_langinfo(CODESET)) == 0) ? 
"µnit" : "munit", stdout); #else puts("munit"); #endif printf(" %d.%d.%d\n" "Full documentation at: https://nemequ.github.io/munit/\n", (MUNIT_CURRENT_VERSION >> 16) & 0xff, (MUNIT_CURRENT_VERSION >> 8) & 0xff, (MUNIT_CURRENT_VERSION >> 0) & 0xff); for (arg = arguments ; arg != NULL && arg->name != NULL ; arg++) arg->write_help(arg, user_data); } static const MunitArgument* munit_arguments_find(const MunitArgument arguments[], const char* name) { const MunitArgument* arg; for (arg = arguments ; arg != NULL && arg->name != NULL ; arg++) if (strcmp(arg->name, name) == 0) return arg; return NULL; } static void munit_suite_list_tests(const MunitSuite* suite, munit_bool show_params, const char* prefix) { size_t pre_l; char* pre = munit_maybe_concat(&pre_l, (char*) prefix, (char*) suite->prefix); const MunitTest* test; const MunitParameterEnum* params; munit_bool first; char** val; const MunitSuite* child_suite; for (test = suite->tests ; test != NULL && test->name != NULL ; test++) { if (pre != NULL) fputs(pre, stdout); puts(test->name); if (show_params) { for (params = test->parameters ; params != NULL && params->name != NULL ; params++) { fprintf(stdout, " - %s: ", params->name); if (params->values == NULL) { puts("Any"); } else { first = 1; for (val = params->values ; *val != NULL ; val++ ) { if(!first) { fputs(", ", stdout); } else { first = 0; } fputs(*val, stdout); } putc('\n', stdout); } } } } for (child_suite = suite->suites ; child_suite != NULL && child_suite->prefix != NULL ; child_suite++) { munit_suite_list_tests(child_suite, show_params, pre); } munit_maybe_free_concat(pre, prefix, suite->prefix); } static munit_bool munit_stream_supports_ansi(FILE *stream) { #if !defined(_WIN32) return isatty(fileno(stream)); #else #if !defined(__MINGW32__) size_t ansicon_size = 0; #endif if (isatty(fileno(stream))) { #if !defined(__MINGW32__) getenv_s(&ansicon_size, NULL, 0, "ANSICON"); return ansicon_size != 0; #else return getenv("ANSICON") != NULL; #endif } return 0; 
#endif
}

/* Entry point with custom-argument support: parse the command line into a
 * MunitTestRunner, run the suite, and print a pass/skip summary.
 *
 * Recognized options: --seed, --iterations, --param NAME VALUE, --color,
 * --help, --single, --show-stderr, --no-fork (non-Windows), --fatal-failures,
 * --log-visible LEVEL, --log-fatal LEVEL, --list, --list-params.  Anything
 * else starting with "--" is looked up in `arguments` (optional,
 * NULL-terminated custom options); bare words select tests to run.
 *
 * Returns EXIT_SUCCESS when no test failed or errored (and on --help/--list),
 * EXIT_FAILURE otherwise, including argument-parsing errors. */
int
munit_suite_main_custom(const MunitSuite* suite, void* user_data,
                        int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)],
                        const MunitArgument arguments[]) {
  int result = EXIT_FAILURE;
  MunitTestRunner runner;
  size_t parameters_size = 0;
  size_t tests_size = 0;
  int arg;

  char* envptr;
  unsigned long ts;
  char* endptr;
  unsigned long long iterations;
  MunitLogLevel level;
  const MunitArgument* argument;
  const char** runner_tests;
  unsigned int tests_run;
  unsigned int tests_total;

  /* Zero the runner before applying defaults and CLI overrides. */
  runner.prefix = NULL;
  runner.suite = NULL;
  runner.tests = NULL;
  runner.seed = 0;
  runner.iterations = 0;
  runner.parameters = NULL;
  runner.single_parameter_mode = 0;
  runner.user_data = NULL;

  runner.report.successful = 0;
  runner.report.skipped = 0;
  runner.report.failed = 0;
  runner.report.errored = 0;
#if defined(MUNIT_ENABLE_TIMING)
  runner.report.cpu_clock = 0;
  runner.report.wall_clock = 0;
#endif

  runner.colorize = 0;
#if !defined(_WIN32)
  /* Fork-per-test is the default everywhere except Windows. */
  runner.fork = 1;
#else
  runner.fork = 0;
#endif
  runner.show_stderr = 0;
  runner.fatal_failures = 0;
  runner.suite = suite;
  runner.user_data = user_data;
  runner.seed = munit_rand_generate_seed();
  runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE);

  for (arg = 1 ; arg < argc ; arg++) {
    if (strncmp("--", argv[arg], 2) == 0) {
      if (strcmp("seed", argv[arg] + 2) == 0) {
        if (arg + 1 >= argc) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
          goto cleanup;
        }

        /* strtoul with base 0 accepts decimal, 0x-hex, and octal; reject
         * trailing garbage and values that overflow 32 bits. */
        envptr = argv[arg + 1];
        ts = strtoul(argv[arg + 1], &envptr, 0);
        if (*envptr != '\0' || ts > (~((munit_uint32_t) 0U))) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
          goto cleanup;
        }

        runner.seed = (munit_uint32_t) ts;

        arg++;
      } else if (strcmp("iterations", argv[arg] + 2) == 0) {
        if (arg + 1 >= argc) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
          goto cleanup;
        }

        endptr = argv[arg + 1];
        iterations = strtoul(argv[arg + 1], &endptr, 0);
        if (*endptr != '\0' || iterations > UINT_MAX) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
          goto cleanup;
        }

        runner.iterations = (unsigned int) iterations;

        arg++;
      } else if (strcmp("param", argv[arg] + 2) == 0) {
        /* --param consumes two arguments: a name and a value.  The list is
         * grown by one entry and kept NULL-terminated. */
        if (arg + 2 >= argc) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires two arguments", argv[arg]);
          goto cleanup;
        }

        runner.parameters = realloc(runner.parameters, sizeof(MunitParameter) * (parameters_size + 2));
        if (runner.parameters == NULL) {
          munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory");
          goto cleanup;
        }
        runner.parameters[parameters_size].name = (char*) argv[arg + 1];
        runner.parameters[parameters_size].value = (char*) argv[arg + 2];
        parameters_size++;
        runner.parameters[parameters_size].name = NULL;
        runner.parameters[parameters_size].value = NULL;
        arg += 2;
      } else if (strcmp("color", argv[arg] + 2) == 0) {
        if (arg + 1 >= argc) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
          goto cleanup;
        }

        if (strcmp(argv[arg + 1], "always") == 0)
          runner.colorize = 1;
        else if (strcmp(argv[arg + 1], "never") == 0)
          runner.colorize = 0;
        else if (strcmp(argv[arg + 1], "auto") == 0)
          runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE);
        else {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
          goto cleanup;
        }

        arg++;
      } else if (strcmp("help", argv[arg] + 2) == 0) {
        munit_print_help(argc, argv, user_data, arguments);
        result = EXIT_SUCCESS;
        goto cleanup;
      } else if (strcmp("single", argv[arg] + 2) == 0) {
        runner.single_parameter_mode = 1;
      } else if (strcmp("show-stderr", argv[arg] + 2) == 0) {
        runner.show_stderr = 1;
#if !defined(_WIN32)
      } else if (strcmp("no-fork", argv[arg] + 2) == 0) {
        runner.fork = 0;
#endif
      } else if (strcmp("fatal-failures", argv[arg] + 2) == 0) {
        runner.fatal_failures = 1;
      } else if (strcmp("log-visible", argv[arg] + 2) == 0 ||
                 strcmp("log-fatal", argv[arg] + 2) == 0) {
        /* Both options share the level-parsing code; the option name is
         * re-checked below to decide which global gets the level. */
        if (arg + 1 >= argc) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
          goto cleanup;
        }

        if (strcmp(argv[arg + 1], "debug") == 0)
          level = MUNIT_LOG_DEBUG;
        else if (strcmp(argv[arg + 1], "info") == 0)
          level = MUNIT_LOG_INFO;
        else if (strcmp(argv[arg + 1], "warning") == 0)
          level = MUNIT_LOG_WARNING;
        else if (strcmp(argv[arg + 1], "error") == 0)
          level = MUNIT_LOG_ERROR;
        else {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
          goto cleanup;
        }

        if (strcmp("log-visible", argv[arg] + 2) == 0)
          munit_log_level_visible = level;
        else
          munit_log_level_fatal = level;

        arg++;
      } else if (strcmp("list", argv[arg] + 2) == 0) {
        munit_suite_list_tests(suite, 0, NULL);
        result = EXIT_SUCCESS;
        goto cleanup;
      } else if (strcmp("list-params", argv[arg] + 2) == 0) {
        munit_suite_list_tests(suite, 1, NULL);
        result = EXIT_SUCCESS;
        goto cleanup;
      } else {
        /* Unknown built-in: defer to the caller's custom arguments.  The
         * parse callback may consume additional argv entries via &arg. */
        argument = munit_arguments_find(arguments, argv[arg] + 2);
        if (argument == NULL) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "unknown argument ('%s')", argv[arg]);
          goto cleanup;
        }

        if (!argument->parse_argument(suite, user_data, &arg, argc, argv))
          goto cleanup;
      }
    } else {
      /* Non-option word: add it to the NULL-terminated test-filter list. */
      runner_tests = realloc((void*) runner.tests, sizeof(char*) * (tests_size + 2));
      if (runner_tests == NULL) {
        munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory");
        goto cleanup;
      }
      runner.tests = runner_tests;
      runner.tests[tests_size++] = argv[arg];
      runner.tests[tests_size] = NULL;
    }
  }

  fflush(stderr);
  fprintf(MUNIT_OUTPUT_FILE, "Running test suite with seed 0x%08" PRIx32 "...\n",
          runner.seed);

  munit_test_runner_run(&runner);

  tests_run = runner.report.successful + runner.report.failed + runner.report.errored;
  tests_total = tests_run + runner.report.skipped;
  if (tests_run == 0) {
    fprintf(stderr, "No tests run, %d (100%%) skipped.\n", runner.report.skipped);
  } else {
    /* NOTE(review): "test skipped" in this format string looks like it
     * should read "tests skipped"; left byte-identical here. */
    fprintf(MUNIT_OUTPUT_FILE, "%d of %d (%0.0f%%) tests successful, %d (%0.0f%%) test skipped.\n",
            runner.report.successful, tests_run,
            (((double) runner.report.successful) / ((double) tests_run)) * 100.0,
            runner.report.skipped,
            (((double) runner.report.skipped) / ((double) tests_total)) * 100.0);
  }

  if (runner.report.failed == 0 && runner.report.errored == 0) {
    result = EXIT_SUCCESS;
  }

 cleanup:
  free(runner.parameters);
  free((void*) runner.tests);

  return result;
}

/* Convenience wrapper: run the suite with no custom arguments. */
int
munit_suite_main(const MunitSuite* suite, void* user_data,
                 int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)]) {
  return munit_suite_main_custom(suite, user_data, argc, argv, NULL);
}
convertCode.c
#include <dlfcn.h>
#include <stdio.h>
#include <stdlib.h> /* This needs to be included every time you use PAPI */
#include <time.h>
#include <pthread.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <errno.h>
#include <stdbool.h>
#include <math.h>
#include <string.h>
#include <iostream>
#include <sstream>
#include <vector>
#include <list>
#include <map>
#include <stdarg.h>

/* Capacity limits for the trace-analysis tables below. */
#define MAXMEMACCESSINLOOP 20
#define TOTALTYPESOFEVENTS 25
#define MAXFRAMESIZE 10
#define TOTALTHREAD 100
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))

/* The PERF_MEM_* constants mirror the Linux perf_event data-source
 * encoding (union perf_mem_data_src) used by PEBS samples. */

/* type of opcode (load/store/prefetch,code) */
#define PERF_MEM_OP_NA 0x01 /* not available */
#define PERF_MEM_OP_LOAD 0x02 /* load instruction */
#define PERF_MEM_OP_STORE 0x04 /* store instruction */
#define PERF_MEM_OP_PFETCH 0x08 /* prefetch */
#define PERF_MEM_OP_EXEC 0x10 /* code (execution) */

/* memory hierarchy (memory level, hit or miss) */
#define PERF_MEM_LVL_NA 0x01 /* not available */
#define PERF_MEM_LVL_HIT 0x02 /* hit level */
#define PERF_MEM_LVL_MISS 0x04 /* miss level */
#define PERF_MEM_LVL_L1 0x08 /* L1 */
#define PERF_MEM_LVL_LFB 0x10 /* Line Fill Buffer */
#define PERF_MEM_LVL_L2 0x20 /* L2 hit */
#define PERF_MEM_LVL_L3 0x40 /* L3 hit */
#define PERF_MEM_LVL_LOC_RAM 0x80 /* Local DRAM */
#define PERF_MEM_LVL_REM_RAM1 0x100 /* Remote DRAM (1 hop) */
#define PERF_MEM_LVL_REM_RAM2 0x200 /* Remote DRAM (2 hops) */
#define PERF_MEM_LVL_REM_CCE1 0x400 /* Remote Cache (1 hop) */
#define PERF_MEM_LVL_REM_CCE2 0x800 /* Remote Cache (2 hops) */
#define PERF_MEM_LVL_IO 0x1000 /* I/O memory */
#define PERF_MEM_LVL_UNC 0x2000 /* Uncached memory */

/* snoop mode */
#define PERF_MEM_SNOOP_NA 0x01 /* not available */
#define PERF_MEM_SNOOP_NONE 0x02 /* no snoop */
#define PERF_MEM_SNOOP_HIT 0x04 /* snoop hit */
#define PERF_MEM_SNOOP_MISS 0x08 /* snoop miss */
#define PERF_MEM_SNOOP_HITM 0x10 /* snoop hit modified */

/* locked instruction */
#define PERF_MEM_LOCK_NA 0x01 /* not available */
#define PERF_MEM_LOCK_LOCKED 0x02
/* locked transaction */

/* TLB access */
#define PERF_MEM_TLB_NA 0x01 /* not available */
#define PERF_MEM_TLB_HIT 0x02 /* hit level */
#define PERF_MEM_TLB_MISS 0x04 /* miss level */
#define PERF_MEM_TLB_L1 0x08 /* L1 */
#define PERF_MEM_TLB_L2 0x10 /* L2 */
#define PERF_MEM_TLB_WK 0x20 /* Hardware Walker*/
#define PERF_MEM_TLB_OS 0x40 /* OS fault handler */

/* Which input file to read in main(): 1 = malloc trace, else PEBS trace. */
int datafile = 1;
/* Maps a raw thread id (or event set) to a dense index into ThreadEventMap. */
std::map<unsigned long, int> threadIndexMap;

/* Record kinds that appear in the serialized trace files. */
typedef enum {
  MALLOC = 0,
  FREE = 1,
  PEBSOVERFLOW = 2,
  LOOP = 3
}EVENTTYPE;

/* Bit-field view of the perf data-source word carried by a PEBS sample. */
union perf_mem_data_src {
  unsigned long long val;
  struct {
    unsigned long long mem_op:5, /* type of opcode */
      mem_lvl:14, /* memory hierarchy level */
      mem_snoop:5, /* snoop mode */
      mem_lock:2, /* lock instr */
      mem_dtlb:7, /* tlb access */
      mem_rsvd:31;
  };
};

/* Which bit-field of perf_mem_data_src a decode_bits entry refers to. */
enum { OP, LVL, SNP, LCK, TLB };

/* Table mapping each data-source bit to a printable label; consumed by
 * printDataSRC(). */
static const struct {
  unsigned long long bit;
  int64_t field;
  const char *name;
} decode_bits[] = {
  { PERF_MEM_OP_LOAD, OP, "LOAD" },
  { PERF_MEM_OP_STORE, OP, "STORE" },
  { PERF_MEM_OP_NA, OP, "OP_NA" },
  { PERF_MEM_OP_PFETCH, OP, "PFETCH" },
  { PERF_MEM_OP_EXEC, OP, "EXEC" },
  { PERF_MEM_LVL_LFB, LVL, "LFB" },
  { PERF_MEM_LVL_L1, LVL, "L1" },
  { PERF_MEM_LVL_L2, LVL, "L2" },
  { PERF_MEM_LVL_L3, LVL, "LCL_LLC" },
  { PERF_MEM_LVL_LOC_RAM, LVL, "LCL_RAM" },
  { PERF_MEM_LVL_REM_RAM1, LVL, "RMT_RAM" },
  { PERF_MEM_LVL_REM_RAM2, LVL, "RMT_RAM" },
  { PERF_MEM_LVL_REM_CCE1, LVL, "RMT_LLC" },
  { PERF_MEM_LVL_REM_CCE2, LVL, "RMT_LLC" },
  { PERF_MEM_LVL_IO, LVL, "I/O" },
  { PERF_MEM_LVL_UNC, LVL, "UNCACHED" },
  { PERF_MEM_LVL_NA, LVL, "LVL_N" },
  { PERF_MEM_LVL_HIT, LVL, "HIT" },
  { PERF_MEM_LVL_MISS, LVL, "MISS" },
  { PERF_MEM_SNOOP_NONE, SNP, "SNP NONE" },
  { PERF_MEM_SNOOP_HIT, SNP, "SNP HIT" },
  { PERF_MEM_SNOOP_MISS, SNP, "SNP MISS" },
  { PERF_MEM_SNOOP_HITM, SNP, "SNP HITM" },
  { PERF_MEM_SNOOP_NA, SNP, "SNP NA" },
  { PERF_MEM_LOCK_LOCKED, LCK, "LOCKED" },
  { PERF_MEM_LOCK_NA, LCK, "LOCK_NA" },
  { PERF_MEM_TLB_NA, TLB, "TLB_N" },
  { PERF_MEM_TLB_HIT, TLB, "TLB_HIT" },
  { PERF_MEM_TLB_MISS, TLB, "TLB_MISS" },
  { PERF_MEM_TLB_L1, TLB, "TLB_L1" },
  { PERF_MEM_TLB_L2, TLB, "TLB_L2" },
  { PERF_MEM_TLB_WK, TLB, "TLB_WK" },
  { PERF_MEM_TLB_OS, TLB, "TLB_OS" },
};

/* One malloc/free record as serialized in the trace file. */
struct MallocEventData {
  EVENTTYPE eventtype;
  int ThreadId;
  size_t size;               /* allocation size in bytes */
  unsigned long handle;      /* returned pointer value */
  char array[MAXFRAMESIZE][300]; /* captured backtrace frames */
  int frameSize;
  struct timespec tmstop_alloc, tmstart_alloc;
  unsigned int MallocCodeAddress; /* call-site address of the malloc */
};

/* One PEBS overflow sample as serialized in the trace file. */
struct OverFlowEventData {
  EVENTTYPE eventtype;
  int EventSet;
  unsigned long address;    /* instruction address */
  unsigned long data_addr;  /* sampled data address */
  unsigned long weight;
  unsigned long data_src;   /* perf_mem_data_src word */
  unsigned long cpu;
  unsigned long phys;
  unsigned long cacheSet;
  int freq;
  //struct timespec tmevent;
  unsigned long tmevent;
};

/* Source-level loop descriptor with its code-address range. */
struct loopStruct {
  int loopId;
  int startLineLoop;
  int endLineLoop;
  long int MinAddRange;
  long int MaxAddRange;
};

/* Doubly-linked list node for loopStruct records. */
struct loopStructLinkedList {
  struct loopStruct *loopitself;
  struct loopStructLinkedList *nextLoop;
  struct loopStructLinkedList *prevLoop;
};

/* PEBS sample node; `loop` is filled in by FindLoopWhereItBelongs(). */
struct OverFlowEventLinkedList {
  struct OverFlowEventData *this_Elem;
  struct loopStruct *loop;
  struct OverFlowEventLinkedList *prev;
  struct OverFlowEventLinkedList *next;
};//*overFlowEventLinkedList;

/* Malloc/free record node. */
struct MallocEventLinkedList {
  struct MallocEventData *this_Elem;
  struct MallocEventLinkedList *prev;
  struct MallocEventLinkedList *next;
};

/* Associates one allocation with the PEBS samples that fall inside it. */
struct MallocEventPEBSEventMap {
  struct MallocEventData *this_Elem;
  struct OverFlowEventLinkedList *startPtr;
  int totalPEBSEvents;
  struct MallocEventPEBSEventMap *prev;
  struct MallocEventPEBSEventMap *next;
  unsigned int *sorted_data_src;
  unsigned int *Inter_data_src_distance;
  double *Normalized_data_src;
};
//*mallocEventLinkedList;
//int mallocEventCounter = 0;
//int OverFlowEventCounter = 0;

/* Highest dense thread index in use; -1 until the first record is read. */
int ThreadEventMapCurrentPtr = -1;

/* Per-thread bookkeeping: event-set ids plus the per-thread linked lists
 * of malloc and PEBS records. */
typedef struct {
  pid_t threadid;
  int PAPI_event; /* a place holder for an event preset */
  int EventSet;
  int total; /* we use total to track the amount of overflows that occured */
  struct MallocEventLinkedList *mallocEventLinkedList;
  int mallocEventCounter;
  struct OverFlowEventLinkedList *overFlowEventLinkedList;
  int
OverFlowEventCounter;
  struct MallocEventPEBSEventMap *mallocPEBSMap;
  int mallocPEBSMapCounter;
} ThreadEvent;

/* Global per-thread tables, indexed by the dense thread index. */
ThreadEvent ThreadEventMap[TOTALTHREAD];
bool doesThreadIdExist[TOTALTHREAD];
int *mallocAccessed[TOTALTHREAD];
/* Head of the global list of loop descriptors read from the trace. */
struct loopStructLinkedList *loopLinkedListHead;

/* Print human-readable labels for every bit set in a perf data-source
 * word, using the decode_bits table (e.g. "LOAD, L1, HIT, ..."). */
void printDataSRC(unsigned long long val)
{
  union perf_mem_data_src dsrc;
  dsrc.val = val;
  int i;
  printf("\t");
  for (i = 0; i < ARRAY_SIZE(decode_bits); i++) {
    int bitval;

    /* Mask the table bit against the matching bit-field of the sample. */
    switch (decode_bits[i].field) {
    case OP: bitval = decode_bits[i].bit & dsrc.mem_op; break;
    case LVL: bitval = decode_bits[i].bit & dsrc.mem_lvl; break;
    case SNP: bitval = decode_bits[i].bit & dsrc.mem_snoop; break;
    case LCK: bitval = decode_bits[i].bit & dsrc.mem_lock; break;
    case TLB: bitval = decode_bits[i].bit & dsrc.mem_dtlb; break;
    default: bitval = 0; break;
    }
    if (!bitval)
      continue;
    printf("%s, ",decode_bits[i].name);
    //first_present = false;
  }
  //printf("\n");
}

/* Attach the PEBS sample to the *narrowest* loop whose code-address range
 * contains the sample's instruction address (sets overflowEvent->loop). */
void FindLoopWhereItBelongs(struct OverFlowEventLinkedList *overflowEvent)
{
  struct loopStructLinkedList *temp = loopLinkedListHead;
  int range = -1; /* width of the best (smallest) enclosing range so far */
  while(NULL != temp) {
    struct OverFlowEventData* a = ((struct OverFlowEventData*)overflowEvent->this_Elem);
    int b = ((struct loopStruct *)temp->loopitself)->MinAddRange;
    int c = ((struct loopStruct *)temp->loopitself)->MaxAddRange;
    if(b <= ((struct OverFlowEventData*)overflowEvent->this_Elem)->address &&
       c >= ((struct OverFlowEventData*)overflowEvent->this_Elem)->address) {
      /* Prefer the tightest enclosing loop (smallest address range). */
      if((-1==range)||(range>(c-b))) {
        range = c-b;
        overflowEvent->loop = temp->loopitself;
      }
      //return;
    }
    temp = temp->nextLoop;
  }
}

/* Placeholder: intended to map a sampled data address back to the
 * allocation containing it; currently always returns NULL. */
struct MallocEvent *findMemoryAllocForMemLocation(struct OverFlowEventData *overflowEvent)
{
  return NULL;
}

/* Walk every thread's PEBS samples and (via the stub above) try to match
 * each one to a malloc region; also zeroes the mallocAccessed counters. */
void findPEBSInsideMallocRegion()
{
  struct MallocEventData *mallocEvent;
  struct OverFlowEventData *overflowEvent;
  struct MallocEventLinkedList* mallocEventPtr;
  struct OverFlowEventLinkedList* overFlowEventptr;
  int index, i;

  /* One zeroed access-counter slot per malloc record, per thread.
   * NOTE(review): looks like these allocations are never freed. */
#pragma omp for
  for(index = 0; index<=ThreadEventMapCurrentPtr;index++) {
    mallocAccessed[index] = (int *)malloc(sizeof(int)*ThreadEventMap[index].mallocEventCounter);
    int k=0;
    for(k=0;k<ThreadEventMap[index].mallocEventCounter;k++)
      mallocAccessed[index][k]= 0;
  }

#if 1
  for(index = 0; index<=ThreadEventMapCurrentPtr;index++) {
    for(i =0;i<ThreadEventMap[index].OverFlowEventCounter;i++ ) {
      /* First iteration starts at the list head; afterwards follow next. */
      if(0==i) {
        overFlowEventptr = (struct OverFlowEventLinkedList *)ThreadEventMap[index].overFlowEventLinkedList;
      } else {
        if(NULL!=overFlowEventptr->next) {
          overFlowEventptr = overFlowEventptr->next;
        } else {
          /* Counter says more events than the list holds — bail out. */
          printf("\ngoing out probir************************> \n");
          return;
        }
      }
      overflowEvent = overFlowEventptr->this_Elem;
      if(NULL!=overflowEvent) {
        findMemoryAllocForMemLocation(overflowEvent);//overflowEvent->data_addr
        fflush(stdout);
      }
    }
  }
#endif
}

/* Next unused dense thread index handed out while reading the trace. */
int threadIndex_c = 0;

/* Obtain a backtrace and print it to stdout. */
/* Deserialize fixed-size records of the given kind from `input` until EOF,
 * linking each one (newest first) into the per-thread lists and echoing a
 * tab-separated summary line per record. */
void restoreData (FILE* input, EVENTTYPE eventType)
{
  int threadIndex = 0;
  switch(eventType) {
  case MALLOC:
    {
      struct MallocEventData tempData;
      while(0!=fread(&tempData, sizeof(tempData), 1, input)) {
        struct MallocEventLinkedList *tempMallocEventLinkedList;
        struct MallocEventData *data = (struct MallocEventData *)malloc(sizeof(struct MallocEventData));
        tempMallocEventLinkedList = (struct MallocEventLinkedList *)malloc(sizeof(struct MallocEventLinkedList));
        memcpy(data,&tempData,sizeof(tempData));//why did I do this_Elem copying?
        /* Map the raw thread id to a dense index, creating a new slot
         * (and bumping ThreadEventMapCurrentPtr) the first time it is seen. */
        std::map<unsigned long,int>::iterator it_c;
        it_c = threadIndexMap.find(data->ThreadId);
        if(it_c != threadIndexMap.end()) {
          threadIndex = threadIndexMap.find(data->ThreadId)->second;
        } else {
          threadIndex = threadIndex_c;
          threadIndexMap.insert( std::pair<unsigned long, int>(data->ThreadId, threadIndex));
          threadIndex_c++;
          ThreadEventMapCurrentPtr++;
        }
        tempMallocEventLinkedList->this_Elem = data;
        /* Push the new node at the head of this thread's malloc list. */
        if(0==ThreadEventMap[threadIndex].mallocEventCounter) {
          tempMallocEventLinkedList->prev = NULL;
          tempMallocEventLinkedList->next = NULL;
          ThreadEventMap[threadIndex].mallocEventLinkedList = tempMallocEventLinkedList;
        } else {
          ((struct MallocEventLinkedList *)ThreadEventMap[threadIndex].mallocEventLinkedList)->prev = tempMallocEventLinkedList;
          tempMallocEventLinkedList->next = ((struct MallocEventLinkedList *)ThreadEventMap[threadIndex].mallocEventLinkedList);
          tempMallocEventLinkedList->prev = NULL;
          ThreadEventMap[threadIndex].mallocEventLinkedList = tempMallocEventLinkedList;
        }
        ThreadEventMap[threadIndex].mallocEventCounter++;
        /* Echo one tab-separated line per record; "444" marks a free,
         * "111" a malloc. */
        if( data->eventtype == FREE)
          printf("33\t444\t%d\t%d\t%lu\t%d\t%lu\t%d\n",threadIndexMap.find(data->ThreadId)->second, data->size,data->handle,data->frameSize,data->MallocCodeAddress,(data->tmstart_alloc).tv_sec);
        else
          printf("33\t111\t%d\t%d\t%lu\t%d\t%lu\t%d\n", threadIndexMap.find(data->ThreadId)->second, data->size,data->handle,data->frameSize,data->MallocCodeAddress,(data->tmstart_alloc).tv_sec);
      }
      if(0!=feof(input)) {
        return;
      }
    }
    break;
  case PEBSOVERFLOW:
    {
      struct OverFlowEventData tempData;
      while(0!=fread(&tempData, sizeof(tempData), 1, input)) {
        struct OverFlowEventLinkedList *tempOverFlowEventLinkedList;
        struct OverFlowEventData *data = (struct OverFlowEventData *)malloc(sizeof(struct OverFlowEventData));
        tempOverFlowEventLinkedList = (struct OverFlowEventLinkedList *)malloc(sizeof(struct OverFlowEventLinkedList));
        memcpy(data,&tempData,sizeof(tempData));
        /* PEBS records are keyed by EventSet rather than thread id. */
        threadIndex = data->EventSet;
        std::map<unsigned long,int>::iterator it_c;
        it_c = threadIndexMap.find(data->EventSet);
        if(it_c != threadIndexMap.end()) {
          threadIndex = threadIndexMap.find(data->EventSet)->second;
        } else {
          threadIndex = threadIndex_c;
          threadIndexMap.insert( std::pair<unsigned long, int>(data->EventSet, threadIndex));
          threadIndex_c++;
        }
        tempOverFlowEventLinkedList->this_Elem = data;
        /* Head-insert into this thread's PEBS list. */
        if(0==ThreadEventMap[threadIndex].OverFlowEventCounter) {
          tempOverFlowEventLinkedList->prev = NULL;
          tempOverFlowEventLinkedList->next = NULL;
          ThreadEventMap[threadIndex].overFlowEventLinkedList = tempOverFlowEventLinkedList;
        } else {
          ((struct OverFlowEventLinkedList *)ThreadEventMap[threadIndex].overFlowEventLinkedList)->prev = tempOverFlowEventLinkedList;
          tempOverFlowEventLinkedList->next = ((struct OverFlowEventLinkedList *)ThreadEventMap[threadIndex].overFlowEventLinkedList);
          tempOverFlowEventLinkedList->prev = NULL;
          ThreadEventMap[threadIndex].overFlowEventLinkedList = tempOverFlowEventLinkedList;
        }
        ThreadEventMap[threadIndex].OverFlowEventCounter++;
        printf("32\t%d\t%lu\t%lu\t%d\t%lu\t%d\t%lu\t%lu\t%lu\n",threadIndexMap.find(data->EventSet)->second, data->address,data->data_addr,data->weight,data->data_src,data->cpu,data->phys, data->cacheSet,data->tmevent);
        //printf("\n\nindex:%d EventType %d, EventSet %d, address %p, data_addr %p weight %d data_src %p cpu %d\n",data->EventSet, data->eventtype,data->EventSet,data->address,data->data_addr,data->weight,data->data_src,data->cpu);
      }
      if(0!=feof(input)) {
        return;
      }
    }
    break;
  case LOOP:
    {
      /* NOTE(review): format string has a %d but no argument — undefined
       * behavior; looks like a leftover debug print. */
      printf("%d loop loop : \n");
      struct loopStruct tempData;
      while(0!=fread(&tempData, sizeof(tempData), 1, input)) {
        struct loopStruct *data = (struct loopStruct *)malloc(sizeof(struct loopStruct));
        struct loopStructLinkedList *dataList = (struct loopStructLinkedList *)malloc(sizeof(struct loopStructLinkedList));
        memcpy(data,&tempData,sizeof(tempData));
        dataList->loopitself = data;
        //printf("%d loop Start %x end %x loop : %d,%d\n",data->loopId,data->MinAddRange,data->MaxAddRange, data->startLineLoop, data->endLineLoop);
        /* Head-insert into the single global loop list. */
        if(NULL == loopLinkedListHead)//0==ThreadEventMap[threadIndex].OverFlowEventCounter)
        {
          dataList->prevLoop = NULL;
          dataList->nextLoop = NULL;
          loopLinkedListHead = dataList;
        } else {
          ((struct loopStructLinkedList *)loopLinkedListHead)->prevLoop = dataList;
          dataList->nextLoop = ((struct loopStructLinkedList*)loopLinkedListHead);
          dataList->prevLoop = NULL;
          loopLinkedListHead = dataList;
        }
      }
      if(0!=feof(input)) {
        return;
      }
    }
    break;
  }
}

/* Reset all per-thread counters before a trace file is loaded. */
void initThreadEventMap()
{
  int threadIndex=0;
#pragma omp for
  for(threadIndex=0;threadIndex<TOTALTHREAD;threadIndex++) {
    ThreadEventMap[threadIndex].mallocEventCounter = 0;
    ThreadEventMap[threadIndex].OverFlowEventCounter = 0;
    ThreadEventMap[threadIndex].mallocPEBSMapCounter = 0;
    doesThreadIdExist[threadIndex] = false;
  }
}

/* Fill Inter_data_src_distance_list[m] = list[m] - base for m in [0,count). */
void calculateDistanceFromBase(unsigned int base, unsigned int *list, unsigned int * Inter_data_src_distance_list, int count)
{
  int m=0;
  while(count-m>0) {
    Inter_data_src_distance_list[m]=list[m]-base;
    m++;
  }
  //printf("\ncount %d \n",count);
  // m=0;
  // while(count-m>1)
  // {
  //printf(" %u",Inter_data_src_distance_list[m]);
  // m++;
  // }
  //printf("\n",count);
}

/* Absolute difference between each adjacent pair of addresses.
 * NOTE(review): the loop runs while count-m>0 but reads list[m+1], so the
 * last iteration (m == count-1) reads one past the end; the condition
 * presumably should be count-m>1 — confirm against callers, which pass
 * count and then use count-1 distances. */
void calculateDistance(unsigned int *list, unsigned int * Inter_data_src_distance_list, int count)
{
  int m=0;
  while(count-m>0) {
    if(list[m]>list[m+1])
      Inter_data_src_distance_list[m]=list[m]-list[m+1];
    else
      Inter_data_src_distance_list[m]=list[m+1] - list[m];
    m++;
  }
  //printf("\ncount %d \n",count);
  // m=0;
  // while(count-m>1)
  // {
  //printf(" %u",Inter_data_src_distance_list[m]);
  // m++;
  // }
  //printf("\n",count);
}

/* Hand-rolled in-place ascending sort (heap-sort-like; note the float
 * loop counters and repeated floor() index math). */
void sort(unsigned int *list, int count)
{
  float length , index;
  length = count;
  int m = 0;
  //printf("\n before sorting :\n");
  //m=0;
  // while(count-m>0)
  // printf(" %u",list[m++]);
  //printf("\n ");
  //m=0;
  while(length>1) {
    bool DoneSorting = false;
    while(!DoneSorting) //max Heapyfy
    {
      index = length;
      DoneSorting = true;
      while(index>1) {
        float nextIndex = index - 1;
        if(0!=((int)index)%2) {
          /* Odd index: order the two children of node index/2 first. */
          if(list[(int)(2*floor(index/2))-1]>list[(int)(2*floor(index/2)+1)-1]) {
            unsigned int temp = list[(int)(2*floor(index/2))-1];
            list[(int)(2*floor(index/2))-1] = list[(int)(2*floor(index/2)+1)-1];
            list[(int)(2*floor(index/2)+1)-1] = temp;
          }
          nextIndex = index - 2;
        }
        /* Swap child below parent to restore the heap property. */
        if(list[(int)(2*floor(index/2)-1)]<list[(int)(floor(index/2))-1]) {
          //printf("came here %d < %d \n",list[(int)(2*floor(index/2)-1)],list[(int)(floor(index/2))-1]);
          unsigned int temp = list[(int)(2*floor(index/2))-1];
          list[(int)(2*floor(index/2))-1] = list[(int)(floor(index/2))-1];
          list[(int)(floor(index/2))-1] = temp;
          DoneSorting = false;
          //printf("came here \n");
        }
        index = nextIndex;
      }
    }
    /* Move the max (root) to the end of the active range and shrink it. */
    unsigned int temp1 = list[1-1];
    list[1-1] = list[(int)length-1];
    list[(int)length-1] = temp1;
    length=length-1;
  }
  // printf("\n sorted :\n");
  //m=0;
  //while(count-m>0)
  // printf(" %u",list[m++]);
  //printf("\n ");
  return;
}

/* Euclid's algorithm; returns gcd(a, b) (b when a is 0). */
int gcd ( unsigned int a, unsigned int b )
{
  unsigned int c;
  while ( a != 0 ) {
    c = a; a = b%a;  b = c;
  }
  return b;
}

/* Fold gcd() over the first `count` entries of the distance list. */
unsigned int calculateGCD(unsigned int * Inter_data_src_distance_list, int count)
{
  int m=0;
  unsigned int a = Inter_data_src_distance_list[m];
  if(count>1) {
    unsigned int b = Inter_data_src_distance_list[m+1];
    do {
      b = Inter_data_src_distance_list[m+1];
      a = gcd(a,b);
      m++;
    }while(count-m>1);
  }
  return a;
}

/* Normalize each address into [0,1] relative to [start,end] and print. */
void calculateNormal(unsigned int *list, int count,unsigned int start,unsigned int end, double *listNormal)
{
  int m = 0;
  printf("\n Normalized\n");
  while(count-m>0) {
    listNormal[m] = (double)(list[m]-start)/(double)(end-start);
    printf("%f ",listNormal[m]);
    m++;
  }
  printf("\n");
}

/* Offset of `handle` below `lowestAddress`, modulo the stride gcd;
 * -1 when the gcd is zero. */
int calulatePosition(unsigned int handle,unsigned int lowestAddress, int gcdValue)
{
  unsigned int sub = lowestAddress - handle;
  if(gcdValue == 0)
    return -1;
  else
    return (int)(((int)sub) % gcdValue);
}

#define MAXMALLOC 10000
/* De-duplicated list of malloc call-site addresses (see ListMallocPositions). */
unsigned int visitedList[MAXMALLOC];
int nextEntry = 0;

/* Per-allocation access statistics gathered within one loop. */
struct mallocRecordsInLoop {
  unsigned int* listOfAccessPerAllocAddr;
  unsigned int* distListOfAccessPerAllocAddr;
  int accessCount;
  int count;
  unsigned int handle;
  unsigned int lowestAddr;
  unsigned long long weightInLoop;
  unsigned int address;
  bool visited;
  struct mallocRecordsInLoop* prev;
};

/* Walk every thread's PEBS samples and attach each to its narrowest
 * enclosing loop via FindLoopWhereItBelongs(). */
void findPEBSInsideLoopRegion()
{
  struct OverFlowEventData *overflowEvent;
  struct OverFlowEventLinkedList* overFlowEventptr;
  int index, i;
#if 1
  for(index = 0; index<=ThreadEventMapCurrentPtr;index++) {
    for(i =0;i<ThreadEventMap[index].OverFlowEventCounter;i++ ) {
      /* Start at the head, then follow the list; bail out if the stored
       * counter exceeds the actual list length. */
      if(0==i) {
        overFlowEventptr = (struct OverFlowEventLinkedList *)ThreadEventMap[index].overFlowEventLinkedList;
      } else {
        if(NULL!=overFlowEventptr->next) {
          overFlowEventptr = overFlowEventptr->next;
        } else {
          printf("\ngoing out probir************************> \n");
          return;
        }
      }
      overflowEvent = overFlowEventptr->this_Elem;
      if(NULL!=overflowEvent) {
        FindLoopWhereItBelongs(overFlowEventptr);
        fflush(stdout);
      }
    }
  }
#endif
}

/* For every loop and every thread, print the PEBS samples attributed to
 * that loop, grouped by data-source transitions, and report the GCD of
 * the inter-access distances (a stride estimate).
 * NOTE(review): the temp arrays malloc'd here are never freed. */
void printLoopPEBSMAP()
{
  int index=0;
  struct loopStructLinkedList *tempLoopList = loopLinkedListHead;
  while(tempLoopList) {
    printf("\n\n Loop: Line %d-%d: \n",tempLoopList->loopitself->startLineLoop,tempLoopList->loopitself->endLineLoop);
    for(index = 0; index<=ThreadEventMapCurrentPtr;index++) {
      struct OverFlowEventLinkedList *tempPEBS = ThreadEventMap[index].overFlowEventLinkedList;
      if(0==index)
        printf("\t Process :\n");
      else
        printf("\t Thread Index %d:\n",index);
      unsigned int* tempEventsList = (unsigned int*)malloc(sizeof(unsigned int) * ThreadEventMap[index].OverFlowEventCounter);
      unsigned int* tempEventsListPerCache = NULL;//malloc(sizeof(unsigned int) * ThreadEventMap[index].OverFlowEventCounter);
      int count = 0;
      int perCachePEBSCount = 0;
      bool hasEvent = false;
      unsigned long data_src = 0;
      while(NULL!=tempPEBS) {
        if(tempPEBS->loop) {
          //printf("Loop Ids %d --- %d\n",tempLoopList->loopitself->loopId,tempPEBS->loop->loopId);
          if(tempLoopList->loopitself->loopId == tempPEBS->loop->loopId) {
            if(!hasEvent) {
              printf("\n\n\t\t\t PEBS: ");
            }
            /* Data source changed: flush the per-source batch (print its
             * stride GCD) and start a new one. */
            if(data_src != tempPEBS->this_Elem->data_src) {
              if(perCachePEBSCount!=0) {
                printf("\n\n");
                unsigned
                int* tempdistanceList = (unsigned int*) malloc(sizeof(unsigned int) * (perCachePEBSCount));
                calculateDistance(tempEventsListPerCache, tempdistanceList, perCachePEBSCount);
                printf("\nGCD With in cache: %d \n",calculateGCD(tempdistanceList, perCachePEBSCount-1));
                perCachePEBSCount = 0;
              }
              tempEventsListPerCache = (unsigned int*)malloc(sizeof(unsigned int) * ThreadEventMap[index].OverFlowEventCounter);
              data_src = tempPEBS->this_Elem->data_src;
              printf("\n\t\t\t data_src: %p\n",data_src);
              printDataSRC(data_src);
            } else {
              /* Same source as the previous sample: extend the batch. */
              tempEventsListPerCache[perCachePEBSCount] = tempPEBS->this_Elem->data_addr;
              perCachePEBSCount++;
            }
            printf("0x%x : 0x%x, ",tempPEBS->this_Elem->address, tempPEBS->this_Elem->data_addr);
            tempEventsList[count] = tempPEBS->this_Elem->data_addr;
            hasEvent = true;
            count++;
          }
        }
        tempPEBS = tempPEBS->next;
      }
      //GCD
      /* Overall stride estimate across all samples for this thread/loop. */
      if(hasEvent) {
        printf("\n\n");
        unsigned int* tempdistanceList = (unsigned int*)malloc(sizeof(unsigned int) * (count));
        calculateDistance(tempEventsList, tempdistanceList, count);
        printf("\nGCD: %d \n",calculateGCD(tempdistanceList, count-1));
      }
    }
    tempLoopList = tempLoopList->nextLoop;
  }
}

/* Collect and print the distinct malloc call-site addresses seen on any
 * thread, de-duplicated through the global visitedList[] array. */
void ListMallocPositions()
{
  struct MallocEventLinkedList *mallocEventPtr;
  int k = 0;
  int i = 0;
  for(k = 0; k<=ThreadEventMapCurrentPtr;k++) {
    mallocEventPtr = (struct MallocEventLinkedList *)ThreadEventMap[k].mallocEventLinkedList;
    i=0;
    bool found = false;
    do {
      //printf("mallocEventPtr 0x%x\n",mallocEventPtr->this_Elem->MallocCodeAddress);
      found = false;
      for(i=0;i<nextEntry;i++) {
        //printf("Malloc Position : 0x%x, number 0x%x i%d\n",visitedList[i],number,i);
        if(visitedList[i]==mallocEventPtr->this_Elem->MallocCodeAddress) {
          found = true;
          break;
        }
      }
      if(!found) {
        visitedList[i] = mallocEventPtr->this_Elem->MallocCodeAddress;
        nextEntry++;
      }
      mallocEventPtr = mallocEventPtr->next;
    }while(mallocEventPtr!=NULL);
  }
  i=0;
  while(i<nextEntry) {
    printf("i:%d visitedList[i]: 0x%x\n",i,visitedList[i]);
    i++;
  }
}

/* Histogram the PEBS samples by data-source word across all threads and
 * print each source's count and its share of the total. */
void CountTotalEvents()
{
  struct OverFlowEventData *overflowEvent;
  struct OverFlowEventLinkedList* overFlowEventptr;
  unsigned long tempEventTypesIndex[TOTALTYPESOFEVENTS];
  int tempEventTypesCount[TOTALTYPESOFEVENTS];
  int eventIndex = 0;
  int index, i;
  int total = 0;
#if 1
  for(index = 0; index<=ThreadEventMapCurrentPtr;index++) {
    overFlowEventptr = (struct OverFlowEventLinkedList *)ThreadEventMap[index].overFlowEventLinkedList;
    while(NULL!=overFlowEventptr){
      bool found = false;
      overflowEvent = overFlowEventptr->this_Elem;
      /* Linear scan of the histogram; `i` lands at eventIndex when the
       * data_src was not seen before, and a new bucket is appended. */
      for( i =0;i<eventIndex;i++) {
        //printf("Count : %d \n",i);
        if(tempEventTypesIndex[i]==overflowEvent->data_src) {
          found = true;
          tempEventTypesCount[i]++;
        }
      }
      if(found==false) {
        tempEventTypesIndex[i] = overflowEvent->data_src;
        tempEventTypesCount[i] = 1;
        eventIndex++;
      }
      total++;
      overFlowEventptr = overFlowEventptr->next;
    }
  }
  printf("\n------------------------- Counting ---------------------- \n");
  for(i =0;i<eventIndex;i++) {
    float ratio = (float )((float)tempEventTypesCount[i]/total);
    printDataSRC(tempEventTypesIndex[i]);
    printf("Count : %d ratio: %f and data_src %d \n",tempEventTypesCount[i], ratio, tempEventTypesIndex[i]);
  }
#endif
}

/* A dummy function to make the backtrace more interesting. */
/* Load one trace file depending on the global `datafile` switch:
 * 1 -> the malloc trace, otherwise -> the PEBS trace. */
void readFromFile (char *DataMalloc,char *DataPEBS)
{
  //ThreadEventMapCurrentPtr = 0;
  int i =0;
  initThreadEventMap();
  FILE* input;
  if(datafile == 1) {
    input = fopen(DataMalloc, "rb");
    restoreData(input,MALLOC);
    fclose(input);
  } else {
    input = fopen(DataPEBS, "rb");
    restoreData(input,PEBSOVERFLOW);
    fclose(input);
  }
}

int main (int argc, char* argv[])
{
  if (argc == 4) {
    // We expect 3 arguments: the program name, the malloc data file and the overflow data path
    datafile = atoi(argv[3]);
    readFromFile(argv[1],argv[2]);
  }
  return 0;
}
host_function.c
/* Demonstrates synchronously calling a host function from an OpenMP target
 * region via hostrpc_fptr0 (a host-RPC trampoline implemented elsewhere).
 * Prints "Success" and returns EXIT_SUCCESS when the target region copied
 * b[] into a[]; otherwise prints the mismatches and returns EXIT_FAILURE. */
#include <stdio.h>
#include <stdlib.h> /* EXIT_SUCCESS / EXIT_FAILURE — was missing; they are
                       declared in <stdlib.h>, not <stdio.h> */
#include <omp.h>

/* Prototype must be visible on the device so the target region can call it. */
#pragma omp declare target
void hostrpc_fptr0(void* fun_ptr);
#pragma omp end declare target

// A host function will synchronously call from a device as a function pointer
void myfun() {
  fprintf(stderr, " This is myfun writing to stderr \n");
}

int main() {
  int N = 10;
  int a[N];
  int b[N];
  int i;
  for (i = 0; i < N; i++) {
    a[i] = 0;
    b[i] = i;
  }

  //void (*fun_ptr)(int) = &myfun;
  void (*fun_ptr)() = &myfun;

  /* Sanity check: call the function pointer directly on the host. */
  printf("Testing myfun execution as a function pointer \n");
  (*fun_ptr)();

  /* Each device iteration copies one element and asks the host to run
   * myfun; fun_ptr is passed through untranslated via is_device_ptr. */
  printf("Testing myfun execution from device using hostrpc_fptr0\n");
#pragma omp target parallel for map(from: a[0:N]) map(to: b[0:N]) is_device_ptr(fun_ptr)
  for (int j = 0; j < N; j++) {
    a[j] = b[j];
    hostrpc_fptr0(fun_ptr);
  }

  printf("Testing the host fallback of hostrpc_fptr0 \n");
  hostrpc_fptr0(fun_ptr);

  /* Verify the target region wrote b[] into a[]. */
  int rc = 0;
  for (i = 0; i < N; i++)
    if (a[i] != b[i]) {
      rc++;
      printf ("Wrong value: a[%d]=%d\n", i, a[i]);
    }

  if (!rc) {
    printf("Success\n");
    return EXIT_SUCCESS;
  } else {
    printf("Failure\n");
    return EXIT_FAILURE;
  }
}
stereo_sgm.h
#ifndef RECONSTRUCTION_BASE_SEMI_GLOBAL_MATCHING_ #define RECONSTRUCTION_BASE_SEMI_GLOBAL_MATCHING_ #include <iostream> #include <cassert> #include <opencv2/core/core.hpp> //#define COST_CENSUS #define COST_ZSAD namespace recon { struct StereoSGMParams { int disp_range; int window_sz; int penalty1; int penalty2; }; #ifdef COST_SAD // SAD typedef uint16_t CostType; typedef uint32_t ACostType; #endif #ifdef COST_ZSAD // ZSAD typedef float CostType; // for ZSAD typedef float ACostType; // accumulated cost type #endif #ifdef COST_CENSUS // Census typedef uint8_t CostType; // for 1x1 SAD, 5x5 Census typedef uint32_t ACostType; // accumulated cost type, Census #endif //typedef uint8_t ACostType; // accumulated cost type - byte for Census? // if we want to use raw arrays //typedef CostType* CostArray1D; //typedef CostType*** CostArray3D; //typedef ACostType* ACostArray1D; //typedef ACostType*** ACostArray3D; typedef std::vector<CostType> CostArray1D; typedef std::vector<ACostType> ACostArray1D; typedef std::vector<std::vector<CostArray1D>> CostArray; typedef std::vector<std::vector<ACostArray1D>> ACostArray; class StereoSGM { public: StereoSGM(StereoSGMParams& params) : params_(params) {} void compute(cv::Mat& left_img, cv::Mat& right_img, cv::Mat& disp); protected: void aggregate_costs(const cv::Mat& img, CostArray const& costs, int DIRX, int DIRY, ACostArray& aggr_costs); void sum_costs(const ACostArray& costs1, ACostArray& costs2); template<typename T1, typename T2> void copy_vector(const std::vector<T1>& vec1, std::vector<T2>& vec2); template<typename T1, typename T2> void sum_vectors(const std::vector<T1>& vec1, std::vector<T2>& vec2); template<typename T> T get_min(const std::vector<T>& vec); int FindMinDisp(const std::vector<CostType>& costs); int find_min_disp(const std::vector<ACostType>& costs); int find_min_disp_right(const std::vector<std::vector<ACostType>>& costs, int x); void init_costs(ACostType init_val, ACostArray& costs); cv::Mat GetDisparityImage(const 
CostArray& costs, int msz); cv::Mat get_disparity_matrix_float(const ACostArray& costs, int msz); cv::Mat get_disparity_image_uint16(const ACostArray& costs, int msz); cv::Mat get_disparity_image(const ACostArray& costs, int msz); void aggregate_path(const std::vector<ACostType>& prior, const std::vector<CostType>& local, std::vector<ACostType>& costs, int gradient); //inline getCensusCost(); StereoSGMParams params_; }; template<typename T1, typename T2> inline void StereoSGM::sum_vectors(const std::vector<T1>& vec1, std::vector<T2>& vec2) { assert(vec1.size() == vec2.size()); for(size_t i = 0; i < vec2.size(); i++) vec2[i] += (T2)vec1[i]; } template<typename T1, typename T2> inline void StereoSGM::copy_vector(const std::vector<T1>& vec1, std::vector<T2>& vec2) { assert(vec1.size() == vec2.size()); for(size_t i = 0; i < vec1.size(); i++) { vec2[i] = (T2)vec1[i]; //std::cout << "d = " << i << " - " << (T2)vec1[i] << " == " << vec2[i] << "\n"; } } inline void StereoSGM::sum_costs(const ACostArray& costs1, ACostArray& costs2) { size_t height = costs1.size(); size_t width = costs1[0].size(); assert(costs1.size() == costs2.size()); for(size_t y = 0; y < height; y++) { assert(costs1[y].size() == costs2[y].size()); for(size_t x = 0; x < width; x++) { sum_vectors<ACostType,ACostType>(costs1[y][x], costs2[y][x]); } } } inline void StereoSGM::aggregate_path(const std::vector<ACostType>& prior, const std::vector<CostType>& local, std::vector<ACostType>& costs, int gradient) { assert(params_.disp_range == costs.size()); int P1 = params_.penalty1; int P2 = params_.penalty2; copy_vector<CostType,ACostType>(local, costs); int max_disp = params_.disp_range; ACostType min_prior = get_min<ACostType>(prior); // decrease the P2 error if the gradient is big which is a clue for the discontinuites // TODO: works very bad on KITTI... //P2 = std::max(P1, gradient ? 
(int)std::round(static_cast<float>(P2/gradient)) : P2); for(int d = 0; d < max_disp; d++) { ACostType error = min_prior + P2; error = std::min(error, prior[d]); if(d > 0) error = std::min(error, prior[d-1] + P1); if(d < (max_disp - 1)) error = std::min(error, prior[d+1] + P1); // ACostType can be uint8_t and e_smooth int // Normalize by subtracting min of prior cost // Now we have upper limit on cost: e_smooth <= C_max + P2 // LR check won't work without this normalization also costs[d] += (error - min_prior); } } //inline //std::vector<ACostType> StereoSGM::aggregate_path(const std::vector<ACostType>& prior, // const std::vector<CostType>& local) //{ // int P1 = params_.penalty1; // int P2 = params_.penalty2; // std::vector<ACostType> curr_cost; // copy_vector<CostType,ACostType>(local, curr_cost); // for(int d = 0; d < params_.disp_range; d++) { // //int e_smooth = std::numeric_limits<int>::max(); // ACostType e_smooth = std::numeric_limits<ACostType>::max(); // for(int d_p = 0; d_p < params_.disp_range; d_p++) { // if(d_p - d == 0) { // // No penality // e_smooth = std::min(e_smooth, prior[d_p]); // } // else if(std::abs(d_p - d) == 1) { // // Small penality // e_smooth = std::min(e_smooth, prior[d_p] + P1); // } else { // // Large penality // //e_smooth = std::min(e_smooth, prior[d_p] + std::max(P1, path_gradient ? 
P2/path_gradient : P2)); // e_smooth = std::min(e_smooth, prior[d_p] + P2); // } // } // curr_cost[d] += e_smooth; // } // // // TODO: why // // Normalize by subtracting min of prior cost // //ACostType min = get_min<ACostType>(prior); // //for(size_t i = 0; i < curr_cost.size(); i++) // // curr_cost[i] -= min; // return curr_cost; //} inline void StereoSGM::init_costs(ACostType init_val, ACostArray& costs) { size_t disp_range = costs[0][0].size(); size_t width = costs[0].size(); size_t height = costs.size(); for(size_t i = 0; i < height; i++) for(size_t j = 0; j < width; j++) for(size_t k = 0; k < disp_range; k++) costs[i][j][k] = init_val; } template<typename T> inline T StereoSGM::get_min(const std::vector<T>& vec) { T min = vec[0]; for(size_t i = 1; i < vec.size(); i++) { if(vec[i] < min) min = vec[i]; } return min; } inline int StereoSGM::find_min_disp(const std::vector<ACostType>& costs) { int d = 0; for(size_t i = 1; i < costs.size(); i++) { if(costs[i] < costs[d]) d = i; } return d; } inline int StereoSGM::FindMinDisp(const std::vector<CostType>& costs) { int d = 0; for(size_t i = 1; i < costs.size(); i++) { if(costs[i] < costs[d]) d = i; } return d; } inline int StereoSGM::find_min_disp_right(const std::vector<std::vector<ACostType>>& costs, int x) { int d = 0; //ACostType min_cost = costs[x+d][d]; int width = costs.size(); int max_disp = std::min(params_.disp_range, (width - x)); for(int i = 1; i < max_disp; i++) { if(costs[x+i][i] < costs[x+d][d]) d = i; } return d; } inline cv::Mat StereoSGM::GetDisparityImage(const CostArray& costs, int msz) { int height = costs.size(); int width = costs[0].size(); cv::Mat img = cv::Mat::zeros(height + 2*msz, width + 2*msz, CV_8U); for(int y = 0; y < height; y++) { for(int x = 0; x < width; x++) { int d = FindMinDisp(costs[y][x]); //img.at<uint8_t>(y,x) = 4 * d; img.at<uint8_t>(msz+y, msz+x) = d; } } return img; } inline cv::Mat StereoSGM::get_disparity_image(const ACostArray& costs, int msz) { int height = 
costs.size(); int width = costs[0].size(); cv::Mat img = cv::Mat::zeros(height + 2*msz, width + 2*msz, CV_8U); for(int y = 0; y < height; y++) { for(int x = 0; x < width; x++) { int d = find_min_disp(costs[y][x]); //img.at<uint8_t>(y,x) = 4 * d; img.at<uint8_t>(msz+y, msz+x) = d; } } return img; } inline cv::Mat StereoSGM::get_disparity_image_uint16(const ACostArray& costs, int msz) { int height = costs.size(); int width = costs[0].size(); cv::Mat img = cv::Mat::zeros(height + 2*msz, width + 2*msz, CV_16U); for(int y = 0; y < height; y++) { for(int x = 0; x < width; x++) { // find minimum cost disparity int d = find_min_disp(costs[y][x]); // TODO: do the fast LR check if((x-d) >= 0) { int d_right = find_min_disp_right(costs[y], x-d); //std::cout << "d = " << d << " , " << " d_r = " << d_right << "\n"; if(std::abs(d - d_right) > 2) { img.at<uint16_t>(msz+y, msz+x) = 0; continue; } } else { img.at<uint16_t>(msz+y, msz+x) = 0; continue; } // perform equiangular subpixel interpolation if(d >= 1 && d < (params_.disp_range-1)) { float C_left = costs[y][x][d-1]; float C_center = costs[y][x][d]; float C_right = costs[y][x][d+1]; float d_s = 0; if(C_right < C_left) d_s = 0.5f * (C_right - C_left) / (C_center - C_left); else d_s = 0.5f * (C_right - C_left) / (C_center - C_right); //std::cout << d << " -- " << d+d_s << "\n"; img.at<uint16_t>(msz+y, msz+x) = static_cast<uint16_t>(std::round(256.0 * (d + d_s))); } else { img.at<uint16_t>(msz+y, msz+x) = static_cast<uint16_t>(std::round(256.0 * d)); } } } return img; } inline cv::Mat StereoSGM::get_disparity_matrix_float(const ACostArray& costs, int msz) { int height = costs.size(); int width = costs[0].size(); cv::Mat img = cv::Mat::zeros(height + 2*msz, width + 2*msz, CV_32F); //#pragma omp parallel for for(int y = 0; y < height; y++) { for(int x = 0; x < width; x++) { // find minimum cost disparity int d = find_min_disp(costs[y][x]); // TODO: do the fast LR check if((x-d) >= 0) { int d_right = find_min_disp_right(costs[y], 
x-d); //std::cout << "d = " << d << " , " << " d_r = " << d_right << "\n"; if(std::abs(d - d_right) > 2) { img.at<float>(msz+y, msz+x) = -1.0f; continue; } } else { img.at<float>(msz+y, msz+x) = -1.0f; continue; } // perform equiangular subpixel interpolation if(d >= 1 && d < (params_.disp_range-1)) { float C_left = costs[y][x][d-1]; float C_center = costs[y][x][d]; float C_right = costs[y][x][d+1]; float d_s = 0; if(C_right < C_left) d_s = 0.5f * (C_right - C_left) / (C_center - C_left); else d_s = 0.5f * (C_right - C_left) / (C_center - C_right); //std::cout << d << " -- " << d+d_s << "\n"; img.at<float>(msz+y, msz+x) = static_cast<float>(d + d_s); } else img.at<float>(msz+y, msz+x) = static_cast<float>(d); } } return img; } } #endif
GxB_Scalar_wait.c
//------------------------------------------------------------------------------ // GxB_Scalar_wait: wait for a scalar to complete //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Finishes all work on a scalar, followed by an OpenMP flush. #include "GB.h" #define GB_FREE_ALL ; GrB_Info GxB_Scalar_wait // finish all work on a scalar ( GxB_Scalar *s ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- #pragma omp flush GB_WHERE ((*s), "GxB_Scalar_wait (&s)") ; GB_RETURN_IF_NULL (s) ; GB_RETURN_IF_NULL_OR_FAULTY (*s) ; //-------------------------------------------------------------------------- // finish all pending work on the scalar //-------------------------------------------------------------------------- if (GB_ANY_PENDING_WORK (*s)) { GrB_Info info ; GB_BURBLE_START ("GxB_Scalar_wait") ; GB_OK (GB_Matrix_wait ((GrB_Matrix) (*s), "scalar", Context)) ; GB_BURBLE_END ; } //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- #pragma omp flush return (GrB_SUCCESS) ; }
2018-static-origin-yes.c
// Declared in a scope inside the construct: private by default // but if static storage: it is shared #include <stdlib.h> #include <stdio.h> int main(int argc, char* argv[]) { int i; int len=100; int a[100]; for (i=0;i<len;i++) a[i]=i; #pragma omp parallel for for (i=0;i<len;i++) { static int tmp = 0; tmp = a[i]+i; a[i] = tmp; } printf("a[50]=%d\n", a[50]); return 0; }
5-4.c
#include <omp.h>
#include <stdio.h>

// OpenMP reduction example: each thread gets a private copy of w initialized
// to 0 (the + identity); the private copies and the original value (10) are
// summed when the loop finishes.
int main() {
  int w = 10;
#pragma omp parallel num_threads(2)
#pragma omp for reduction(+ : w)
  for (int i = 0; i < 100; i++) {
    int id = omp_get_thread_num();
    // printf reads the private w before the post-increment; each iteration
    // contributes exactly 1 to the thread's private sum.
    printf("T%d:ai%d w=%d\n", id, i, w++);
  }
  // 100 iterations x 1 added to the original 10 -> prints W=110.
  printf("W=%d\n", w);
}
pr26171.c
/* PR c/26171 */
/* { dg-do run } */
/* { dg-options "-fopenmp" } */
/* { dg-require-effective-target tls_runtime } */

/* Regression test: a file-scope variable marked threadprivate must be
   assignable from the initial thread without ICE or miscompilation.  */
int thrv = 0;
#pragma omp threadprivate (thrv)

int
main ()
{
  thrv = 1;
  return 0;
}
timestep_opt3.c
#include <math.h> #include "timestep.h" #define REAL_CELL 1 double timestep(int ncells, double g, double sigma, int* celltype, double* H, double* U, double* V, double* dx, double* dy){ double mymindt = 1.0e20; #pragma omp simd reduction(min:mymindt) for (int ic=0; ic<ncells ; ic++) { if (celltype[ic] == REAL_CELL) { double wavespeed = sqrt(g*H[ic]); double xspeed = (fabs(U[ic])+wavespeed)/dx[ic]; double yspeed = (fabs(V[ic])+wavespeed)/dy[ic]; double dt=sigma/(xspeed+yspeed); if (dt < mymindt) mymindt = dt; } } return(mymindt); }
paint.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP AAA IIIII N N TTTTT % % P P A A I NN N T % % PPPP AAAAA I N N N T % % P A A I N NN T % % P A A IIIII N N T % % % % % % Methods to Paint on an Image % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/resource_.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l o o d f i l l P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FloodfillPaintImage() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % By default target must match a particular pixel color exactly. However, % in many cases two colors may differ by a small amount. The fuzz member of % image defines how much tolerance is acceptable to consider two colors as % the same. For example, set fuzz to 10 and the color red at intensities of % 100 and 102 respectively are now interpreted as the same color for the % purposes of the floodfill. 
%
%  The format of the FloodfillPaintImage method is:
%
%      MagickBooleanType FloodfillPaintImage(Image *image,
%        const DrawInfo *draw_info,const PixelInfo *target,
%        const ssize_t x_offset,const ssize_t y_offset,
%        const MagickBooleanType invert,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o target: the RGB value of the target color.
%
%    o x_offset,y_offset: the starting location of the operation.
%
%    o invert: paint any pixel that does not match the target color.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType FloodfillPaintImage(Image *image,
  const DrawInfo *draw_info,const PixelInfo *target,const ssize_t x_offset,
  const ssize_t y_offset,const MagickBooleanType invert,
  ExceptionInfo *exception)
{
#define MaxStacksize  524288UL
/*
  Push a horizontal segment [left,right] on row `up` whose neighbor row
  up+delta still needs scanning.  On stack overflow, release all working
  resources and throw (the macro returns from the function).
*/
#define PushSegmentStack(up,left,right,delta) \
{ \
  if (s >= (segment_stack+MaxStacksize)) \
    { \
      segment_info=RelinquishVirtualMemory(segment_info); \
      image_view=DestroyCacheView(image_view); \
      floodplane_view=DestroyCacheView(floodplane_view); \
      floodplane_image=DestroyImage(floodplane_image); \
      ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \
    } \
  else \
    { \
      if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \
        { \
          s->x1=(double) (left); \
          s->y1=(double) (up); \
          s->x2=(double) (right); \
          s->y2=(double) (delta); \
          s++; \
        } \
    } \
}

  CacheView
    *floodplane_view,
    *image_view;

  Image
    *floodplane_image;  /* gray mask image: nonzero marks flooded pixels */

  MagickBooleanType
    skip,
    status;

  MemoryInfo
    *segment_info;

  PixelInfo
    fill_color,
    pixel;

  SegmentInfo
    *s;

  SegmentInfo
    *segment_stack;

  ssize_t
    offset,
    start,
    x1,
    x2,
    y;

  /*
    Check boundary conditions.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
    return(MagickFalse);
  if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  if ((image->alpha_trait == UndefinedPixelTrait) &&
      (draw_info->fill.alpha_trait != UndefinedPixelTrait))
    (void) SetImageAlpha(image,OpaqueAlpha,exception);
  /*
    Set floodfill state: a black (all zero) gray clone records which pixels
    have been visited/accepted by the fill.
  */
  floodplane_image=CloneImage(image,0,0,MagickTrue,exception);
  if (floodplane_image == (Image *) NULL)
    return(MagickFalse);
  floodplane_image->alpha_trait=UndefinedPixelTrait;
  floodplane_image->colorspace=GRAYColorspace;
  (void) QueryColorCompliance("#000",AllCompliance,
    &floodplane_image->background_color,exception);
  (void) SetImageBackgroundColor(floodplane_image,exception);
  segment_info=AcquireVirtualMemory(MaxStacksize,sizeof(*segment_stack));
  if (segment_info == (MemoryInfo *) NULL)
    {
      floodplane_image=DestroyImage(floodplane_image);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  segment_stack=(SegmentInfo *) GetVirtualMemoryBlob(segment_info);
  /*
    Push initial segment on stack.
  */
  status=MagickTrue;
  start=0;
  s=segment_stack;
  GetPixelInfo(image,&pixel);
  image_view=AcquireVirtualCacheView(image,exception);
  floodplane_view=AcquireAuthenticCacheView(floodplane_image,exception);
  PushSegmentStack(y_offset,x_offset,x_offset,1);
  PushSegmentStack(y_offset+1,x_offset,x_offset,-1);
  while (s > segment_stack)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    /*
      Pop segment off stack.
    */
    s--;
    x1=(ssize_t) s->x1;
    x2=(ssize_t) s->x2;
    offset=(ssize_t) s->y2;
    y=(ssize_t) s->y1+offset;
    /*
      Recolor neighboring pixels: first scan left from x1, marking matching
      pixels in the floodplane until a non-match or visited pixel stops us.
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception);
    q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    p+=x1*GetPixelChannels(image);
    q+=x1*GetPixelChannels(floodplane_image);
    for (x=x1; x >= 0; x--)
    {
      if (GetPixelGray(floodplane_image,q) != 0)
        break;
      GetPixelInfoPixel(image,p,&pixel);
      if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert)
        break;
      SetPixelGray(floodplane_image,QuantumRange,q);
      p-=GetPixelChannels(image);
      q-=GetPixelChannels(floodplane_image);
    }
    if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse)
      break;
    /* skip: the left scan accepted nothing at x1 itself */
    skip=x >= x1 ? MagickTrue : MagickFalse;
    if (skip == MagickFalse)
      {
        start=x+1;
        if (start < x1)
          PushSegmentStack(y,start,x1-1,-offset);
        x=x1+1;
      }
    do
    {
      if (skip == MagickFalse)
        {
          /*
            Scan right, flooding matching pixels and pushing neighbor rows.
          */
          if (x < (ssize_t) image->columns)
            {
              p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1,
                exception);
              q=GetCacheViewAuthenticPixels(floodplane_view,x,y,image->columns-
                x,1,exception);
              if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
                break;
              for ( ; x < (ssize_t) image->columns; x++)
              {
                if (GetPixelGray(floodplane_image,q) != 0)
                  break;
                GetPixelInfoPixel(image,p,&pixel);
                if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert)
                  break;
                SetPixelGray(floodplane_image,QuantumRange,q);
                p+=GetPixelChannels(image);
                q+=GetPixelChannels(floodplane_image);
              }
              status=SyncCacheViewAuthenticPixels(floodplane_view,exception);
              if (status == MagickFalse)
                break;
            }
          PushSegmentStack(y,start,x-1,offset);
          if (x > (x2+1))
            PushSegmentStack(y,x2+1,x-1,-offset);
        }
      skip=MagickFalse;
      x++;
      if (x <= x2)
        {
          /*
            Advance past non-matching pixels within [x,x2].
          */
          p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1,
            exception);
          q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),1,
            exception);
          if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
            break;
          for ( ; x <= x2; x++)
          {
            if (GetPixelGray(floodplane_image,q) != 0)
              break;
            GetPixelInfoPixel(image,p,&pixel);
            if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert)
              break;
            p+=GetPixelChannels(image);
            q+=GetPixelChannels(floodplane_image);
          }
        }
      start=x;
    } while (x <= x2);
  }
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    /*
      Tile fill color onto floodplane: every marked pixel gets the fill.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelGray(floodplane_image,p) != 0)
        {
          GetFillColor(draw_info,x,y,&fill_color,exception);
          SetPixelViaPixelInfo(image,&fill_color,q);
        }
      p+=GetPixelChannels(floodplane_image);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  floodplane_view=DestroyCacheView(floodplane_view);
  image_view=DestroyCacheView(image_view);
  segment_info=RelinquishVirtualMemory(segment_info);
  floodplane_image=DestroyImage(floodplane_image);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     G r a d i e n t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GradientImage() applies a continuously smooth color transitions along a
%  vector from one color to another.
%
%  Note, the interface of this method will change in the future to support
%  more than one transition.
%
%  The format of the GradientImage method is:
%
%      MagickBooleanType GradientImage(Image *image,const GradientType type,
%        const SpreadMethod method,const StopInfo *stops,
%        const size_t number_stops,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: the gradient type: linear or radial.
%
%    o spread: the gradient spread method: pad, reflect, or repeat.
%
%    o stops: the color stops to interpolate between.
%
%    o number_stops: the number of color stops.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GradientImage(Image *image,
  const GradientType type,const SpreadMethod method,const StopInfo *stops,
  const size_t number_stops,ExceptionInfo *exception)
{
  const char
    *artifact;

  DrawInfo
    *draw_info;

  GradientInfo
    *gradient;

  MagickBooleanType
    status;

  /*
    Set gradient start-stop end points.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(stops != (const StopInfo *) NULL);
  assert(number_stops > 0);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  draw_info=AcquireDrawInfo();
  gradient=(&draw_info->gradient);
  gradient->type=type;
  gradient->bounding_box.width=image->columns;
  gradient->bounding_box.height=image->rows;
  artifact=GetImageArtifact(image,"gradient:bounding-box");
  if (artifact != (const char *) NULL)
    (void) ParseAbsoluteGeometry(artifact,&gradient->bounding_box);
  /* default vector: top-left to bottom-right of the image */
  gradient->gradient_vector.x2=(double) image->columns-1;
  gradient->gradient_vector.y2=(double) image->rows-1;
  artifact=GetImageArtifact(image,"gradient:direction");
  if (artifact != (const char *) NULL)
    {
      GravityType
        direction;

      /*
        The "gradient:direction" artifact maps a gravity name to a vector.
      */
      direction=(GravityType) ParseCommandOption(MagickGravityOptions,
        MagickFalse,artifact);
      switch (direction)
      {
        case NorthWestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case NorthGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case NorthEastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case WestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case EastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case SouthWestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        case SouthGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          /* NOTE(review): columns used for a vertical vector here — the
             other cases use rows; verify this is intentional. */
          gradient->gradient_vector.y2=(double) image->columns-1;
          break;
        }
        case SouthEastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        default:
          break;
      }
    }
  artifact=GetImageArtifact(image,"gradient:angle");
  if (artifact != (const char *) NULL)
    gradient->angle=StringToDouble(artifact,(char **) NULL);
  artifact=GetImageArtifact(image,"gradient:vector");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf%*[ ,]%lf%*[ ,]%lf",
      &gradient->gradient_vector.x1,&gradient->gradient_vector.y1,
      &gradient->gradient_vector.x2,&gradient->gradient_vector.y2);
  /* with no explicit direction, a linear gradient defaults to vertical */
  if ((GetImageArtifact(image,"gradient:angle") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:direction") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:extent") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:vector") == (const char *) NULL))
    if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0))
      gradient->gradient_vector.x2=0.0;
  gradient->center.x=(double) gradient->gradient_vector.x2/2.0;
  gradient->center.y=(double) gradient->gradient_vector.y2/2.0;
  artifact=GetImageArtifact(image,"gradient:center");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->center.x,
      &gradient->center.y);
  artifact=GetImageArtifact(image,"gradient:angle");
  if ((type == LinearGradient) && (artifact != (const char *) NULL))
    {
      double
        sine,
        cosine,
        distance;

      /*
        Reference https://drafts.csswg.org/css-images-3/#linear-gradients.
      */
      sine=sin((double) DegreesToRadians(gradient->angle-90.0));
      cosine=cos((double) DegreesToRadians(gradient->angle-90.0));
      distance=fabs((double) (image->columns-1.0)*cosine)+
        fabs((double) (image->rows-1.0)*sine);
      gradient->gradient_vector.x1=0.5*((image->columns-1.0)-distance*cosine);
      gradient->gradient_vector.y1=0.5*((image->rows-1.0)-distance*sine);
      gradient->gradient_vector.x2=0.5*((image->columns-1.0)+distance*cosine);
      gradient->gradient_vector.y2=0.5*((image->rows-1.0)+distance*sine);
    }
  gradient->radii.x=(double) MagickMax((image->columns-1.0),(image->rows-1.0))/
    2.0;
  gradient->radii.y=gradient->radii.x;
  artifact=GetImageArtifact(image,"gradient:extent");
  if (artifact != (const char *) NULL)
    {
      if (LocaleCompare(artifact,"Circle") == 0)
        {
          gradient->radii.x=(double) MagickMax((image->columns-1.0),
            (image->rows-1.0))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Diagonal") == 0)
        {
          gradient->radii.x=(double) (sqrt((double) (image->columns-1.0)*
            (image->columns-1.0)+(image->rows-1.0)*(image->rows-1.0)))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Ellipse") == 0)
        {
          gradient->radii.x=(double) (image->columns-1.0)/2.0;
          gradient->radii.y=(double) (image->rows-1.0)/2.0;
        }
      if (LocaleCompare(artifact,"Maximum") == 0)
        {
          gradient->radii.x=(double) MagickMax((image->columns-1.0),
            (image->rows-1.0))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Minimum") == 0)
        {
          gradient->radii.x=(double) (MagickMin((image->columns-1.0),
            (image->rows-1.0)))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
    }
  artifact=GetImageArtifact(image,"gradient:radii");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->radii.x,
      &gradient->radii.y);
  gradient->radius=MagickMax(gradient->radii.x,gradient->radii.y);
  gradient->spread=method;
  /*
    Define the gradient to fill between the stops.
  */
  gradient->number_stops=number_stops;
  gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops,
    sizeof(*gradient->stops));
  if (gradient->stops == (StopInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memcpy(gradient->stops,stops,(size_t) number_stops*sizeof(*stops));
  /*
    Draw a gradient on the image.
  */
  status=DrawGradientImage(image,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O i l P a i n t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OilPaintImage() applies a special effect filter that simulates an oil
%  painting.  Each pixel is replaced by the most frequent color occurring
%  in a circular region defined by radius.
%
%  The format of the OilPaintImage method is:
%
%      Image *OilPaintImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the circular neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
% */ static size_t **DestroyHistogramTLS(size_t **histogram) { ssize_t i; assert(histogram != (size_t **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (histogram[i] != (size_t *) NULL) histogram[i]=(size_t *) RelinquishMagickMemory(histogram[i]); histogram=(size_t **) RelinquishMagickMemory(histogram); return(histogram); } static size_t **AcquireHistogramTLS(const size_t count) { ssize_t i; size_t **histogram, number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); histogram=(size_t **) AcquireQuantumMemory(number_threads,sizeof(*histogram)); if (histogram == (size_t **) NULL) return((size_t **) NULL); (void) memset(histogram,0,number_threads*sizeof(*histogram)); for (i=0; i < (ssize_t) number_threads; i++) { histogram[i]=(size_t *) AcquireQuantumMemory(count,sizeof(**histogram)); if (histogram[i] == (size_t *) NULL) return(DestroyHistogramTLS(histogram)); } return(histogram); } MagickExport Image *OilPaintImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { #define NumberPaintBins 256 #define OilPaintImageTag "OilPaint/Image" CacheView *image_view, *paint_view; Image *linear_image, *paint_image; MagickBooleanType status; MagickOffsetType progress; size_t **histograms, width; ssize_t center, y; /* Initialize painted image attributes. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); width=GetOptimalKernelWidth2D(radius,sigma); linear_image=CloneImage(image,0,0,MagickTrue,exception); paint_image=CloneImage(image,0,0,MagickTrue,exception); if ((linear_image == (Image *) NULL) || (paint_image == (Image *) NULL)) { if (linear_image != (Image *) NULL) linear_image=DestroyImage(linear_image); if (paint_image != (Image *) NULL) linear_image=DestroyImage(paint_image); return((Image *) NULL); } if (SetImageStorageClass(paint_image,DirectClass,exception) == MagickFalse) { linear_image=DestroyImage(linear_image); paint_image=DestroyImage(paint_image); return((Image *) NULL); } histograms=AcquireHistogramTLS(NumberPaintBins); if (histograms == (size_t **) NULL) { linear_image=DestroyImage(linear_image); paint_image=DestroyImage(paint_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Oil paint image. 
*/ status=MagickTrue; progress=0; center=(ssize_t) GetPixelChannels(linear_image)*(linear_image->columns+width)* (width/2L)+GetPixelChannels(linear_image)*(width/2L); image_view=AcquireVirtualCacheView(linear_image,exception); paint_view=AcquireAuthenticCacheView(paint_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(linear_image,paint_image,linear_image->rows,1) #endif for (y=0; y < (ssize_t) linear_image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; size_t *histogram; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) (width/2L),linear_image->columns+width,width,exception); q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } histogram=histograms[GetOpenMPThreadId()]; for (x=0; x < (ssize_t) linear_image->columns; x++) { ssize_t i, u; size_t count; ssize_t j, k, n, v; /* Assign most frequent color. 
*/ k=0; j=0; count=0; (void) memset(histogram,0,NumberPaintBins* sizeof(*histogram)); for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { n=(ssize_t) ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity( linear_image,p+GetPixelChannels(linear_image)*(u+k)))); histogram[n]++; if (histogram[n] > count) { j=k+u; count=histogram[n]; } } k+=(ssize_t) (linear_image->columns+width); } for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++) { PixelChannel channel = GetPixelChannelChannel(linear_image,i); PixelTrait traits = GetPixelChannelTraits(linear_image,channel); PixelTrait paint_traits=GetPixelChannelTraits(paint_image,channel); if ((traits == UndefinedPixelTrait) || (paint_traits == UndefinedPixelTrait)) continue; if ((paint_traits & CopyPixelTrait) != 0) { SetPixelChannel(paint_image,channel,p[center+i],q); continue; } SetPixelChannel(paint_image,channel,p[j*GetPixelChannels(linear_image)+ i],q); } p+=GetPixelChannels(linear_image); q+=GetPixelChannels(paint_image); } if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse) status=MagickFalse; if (linear_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(linear_image,OilPaintImageTag,progress, linear_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } paint_view=DestroyCacheView(paint_view); image_view=DestroyCacheView(image_view); histograms=DestroyHistogramTLS(histograms); linear_image=DestroyImage(linear_image); if (status == MagickFalse) paint_image=DestroyImage(paint_image); return(paint_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % O p a q u e P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpaquePaintImage() changes any pixel that matches color with the color % defined by fill argument. 
%
%  By default color must match a particular pixel color exactly.  However, in
%  many cases two colors may differ by a small amount.  Fuzz defines how much
%  tolerance is acceptable to consider two colors as the same.  For example,
%  set fuzz to 10 and the color red at intensities of 100 and 102 respectively
%  are now interpreted as the same color.
%
%  The format of the OpaquePaintImage method is:
%
%      MagickBooleanType OpaquePaintImage(Image *image,const PixelInfo *target,
%        const PixelInfo *fill,const MagickBooleanType invert,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o target: the RGB value of the target color.
%
%    o fill: the replacement color.
%
%    o invert: paint any pixel that does not match the target color.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType OpaquePaintImage(Image *image,
  const PixelInfo *target,const PixelInfo *fill,const MagickBooleanType invert,
  ExceptionInfo *exception)
{
#define OpaquePaintImageTag  "Opaque/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    conform_fill,
    conform_target,
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(target != (PixelInfo *) NULL);
  assert(fill != (PixelInfo *) NULL);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* adapt fill and target to this image's colorspace/channel layout */
  ConformPixelInfo(image,fill,&conform_fill,exception);
  ConformPixelInfo(image,target,&conform_target,exception);
  /*
    Make image color opaque: every pixel fuzzily matching the target (or, with
    invert, every pixel NOT matching) has its updatable channels replaced by
    the fill color.
  */
  status=MagickTrue;
  progress=0;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      if (IsFuzzyEquivalencePixelInfo(&pixel,&conform_target) != invert)
        {
          PixelTrait
            traits;

          /* only channels flagged UpdatePixelTrait receive the fill value */
          traits=GetPixelChannelTraits(image,RedPixelChannel);
          if ((traits & UpdatePixelTrait) != 0)
            SetPixelRed(image,(Quantum) conform_fill.red,q);
          traits=GetPixelChannelTraits(image,GreenPixelChannel);
          if ((traits & UpdatePixelTrait) != 0)
            SetPixelGreen(image,(Quantum) conform_fill.green,q);
          traits=GetPixelChannelTraits(image,BluePixelChannel);
          if ((traits & UpdatePixelTrait) != 0)
            SetPixelBlue(image,(Quantum) conform_fill.blue,q);
          traits=GetPixelChannelTraits(image,BlackPixelChannel);
          if ((traits & UpdatePixelTrait) != 0)
            SetPixelBlack(image,(Quantum) conform_fill.black,q);
          traits=GetPixelChannelTraits(image,AlphaPixelChannel);
          if ((traits & UpdatePixelTrait) != 0)
            SetPixelAlpha(image,(Quantum) conform_fill.alpha,q);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,OpaquePaintImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T r a n s p a r e n t P a i n t I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransparentPaintImage() changes the opacity value associated with any pixel
%  that matches color to the value defined by opacity.
%
%  By default color must match a particular pixel color exactly.  However, in
%  many cases two colors may differ by a small amount.  Fuzz defines how much
%  tolerance is acceptable to consider two colors as the same.  For example,
%  set fuzz to 10 and the color red at intensities of 100 and 102 respectively
%  are now interpreted as the same color.
%
%  The format of the TransparentPaintImage method is:
%
%      MagickBooleanType TransparentPaintImage(Image *image,
%        const PixelInfo *target,const Quantum opacity,
%        const MagickBooleanType invert,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o target: the target color.
%
%    o opacity: the replacement opacity value.
%
%    o invert: paint any pixel that does not match the target color.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransparentPaintImage(Image *image,
  const PixelInfo *target,const Quantum opacity,const MagickBooleanType invert,
  ExceptionInfo *exception)
{
#define TransparentPaintImageTag  "Transparent/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(target != (PixelInfo *) NULL);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* ensure the image carries an alpha channel before we write opacities */
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Make image color transparent: set the alpha of every pixel fuzzily
    matching the target (or, with invert, every non-matching pixel).
  */
  status=MagickTrue;
  progress=0;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert)
        SetPixelAlpha(image,opacity,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T r a n s p a r e n t P a i n t I m a g e C h r o m a                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransparentPaintImageChroma() changes the opacity value associated with any
%  pixel that matches color to the value defined by opacity.
%
%  As there is one fuzz value for the all the channels, TransparentPaintImage()
%  is not suitable for the operations like chroma, where the tolerance for
%  similarity of two color component (RGB) can be different.  Thus we define
%  this method to take two target pixels (one low and one high) and all the
%  pixels of an image which are lying between these two pixels are made
%  transparent.
%
%  The format of the TransparentPaintImageChroma method is:
%
%      MagickBooleanType TransparentPaintImageChroma(Image *image,
%        const PixelInfo *low,const PixelInfo *high,const Quantum opacity,
%        const MagickBooleanType invert,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o low: the low target color.
%
%    o high: the high target color.
%
%    o opacity: the replacement opacity value.
%
%    o invert: paint any pixel that does not match the target color.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image,
  const PixelInfo *low,const PixelInfo *high,const Quantum opacity,
  const MagickBooleanType invert,ExceptionInfo *exception)
{
#define TransparentPaintImageTag  "Transparent/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(high != (PixelInfo *) NULL);
  assert(low != (PixelInfo *) NULL);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* ensure the image carries an alpha channel before we write opacities */
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Make image color transparent: a pixel matches when each of its RGB
    components lies inside the inclusive [low, high] per-channel range.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      match;

    PixelInfo
      pixel;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      match=((pixel.red >= low->red) && (pixel.red <= high->red) &&
        (pixel.green >= low->green) && (pixel.green <= high->green) &&
        (pixel.blue >= low->blue) && (pixel.blue <= high->blue)) ?
        MagickTrue : MagickFalse;
      if (match != invert)
        SetPixelAlpha(image,opacity,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
core_zlacpy.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> c d s
 *
 **/

#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"

/***************************************************************************//**
 *
 * @ingroup core_lacpy
 *
 *  Copies all or part of a two-dimensional matrix A to another matrix B.
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          - PlasmaGeneral: entire A,
 *          - PlasmaUpper:   upper triangle,
 *          - PlasmaLower:   lower triangle.
 *
 * @param[in] transa
 *          - PlasmaNoTrans:   A is not transposed,
 *          - PlasmaTrans:     A is transposed,
 *          - PlasmaConjTrans: A is conjugate transposed.
 *
 * @param[in] m
 *          The number of rows of the matrices A and B.
 *          m >= 0.
 *
 * @param[in] n
 *          The number of columns of the matrices A and B.
 *          n >= 0.
 *
 * @param[in] A
 *          The m-by-n matrix to copy.
 *
 * @param[in] lda
 *          The leading dimension of the array A.
 *          lda >= max(1,m).
 *
 * @param[out] B
 *          The m-by-n copy of the matrix A.
 *          On exit, B = A ONLY in the locations specified by uplo.
 *          NOTE(review): when transa != PlasmaNoTrans, B is written as an
 *          n-by-m matrix (B(j,i) = op(A(i,j))), so ldb must then satisfy
 *          ldb >= max(1,n) -- confirm against callers.
 *
 * @param[in] ldb
 *          The leading dimension of the array B.
 *          ldb >= max(1,m).
 *
 ******************************************************************************/
__attribute__((weak))
void plasma_core_zlacpy(plasma_enum_t uplo, plasma_enum_t transa,
                        int m, int n,
                        const plasma_complex64_t *A, int lda,
                        plasma_complex64_t *B, int ldb)
{
    if (transa == PlasmaNoTrans) {
        // Non-transposed copy is delegated to LAPACK.
        LAPACKE_zlacpy_work(LAPACK_COL_MAJOR,
                            lapack_const(uplo),
                            m, n,
                            A, lda,
                            B, ldb);
    }
    else if (transa == PlasmaTrans) {
        // B(j,i) = A(i,j) for the selected part of A.
        switch (uplo) {
        case PlasmaUpper:
            for (int i = 0; i < imin(m, n); i++)
                for (int j = i; j < n; j++)
                    B[j + i*ldb] = A[i + j*lda];
            break;
        case PlasmaLower:
            // FIX: bound was imin(i, n), which for m > n and i >= n read
            // A[i + n*lda] and wrote B[n + i*ldb] out of bounds; the last
            // valid column of A (and row of B) is n-1.
            for (int i = 0; i < m; i++)
                for (int j = 0; j <= imin(i, n-1); j++)
                    B[j + i*ldb] = A[i + j*lda];
            break;
        case PlasmaGeneral:
            for (int i = 0; i < m; i++)
                for (int j = 0; j < n; j++)
                    B[j + i*ldb] = A[i + j*lda];
            break;
        }
    }
    else {
        // PlasmaConjTrans: B(j,i) = conj(A(i,j)) for the selected part of A.
        switch (uplo) {
        case PlasmaUpper:
            for (int i = 0; i < imin(m, n); i++)
                for (int j = i; j < n; j++)
                    B[j + i*ldb] = conj(A[i + j*lda]);
            break;
        case PlasmaLower:
            // FIX: same off-by-one as the PlasmaTrans/PlasmaLower case above.
            for (int i = 0; i < m; i++)
                for (int j = 0; j <= imin(i, n-1); j++)
                    B[j + i*ldb] = conj(A[i + j*lda]);
            break;
        case PlasmaGeneral:
            for (int i = 0; i < m; i++)
                for (int j = 0; j < n; j++)
                    B[j + i*ldb] = conj(A[i + j*lda]);
            break;
        }
    }
}

/******************************************************************************/
// OpenMP task wrapper: runs the copy as a task with dataflow dependences on
// the full A (in) and B (out) tiles; skipped if the sequence already failed.
void plasma_core_omp_zlacpy(plasma_enum_t uplo, plasma_enum_t transa,
                            int m, int n,
                            const plasma_complex64_t *A, int lda,
                            plasma_complex64_t *B, int ldb,
                            plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(in:A[0:lda*n]) \
                     depend(out:B[0:ldb*n])
    {
        if (sequence->status == PlasmaSuccess)
            plasma_core_zlacpy(uplo, transa,
                               m, n,
                               A, lda,
                               B, ldb);
    }
}
simple_env.c
// RUN: %libomp-compile // RUN: env OMP_DISPLAY_AFFINITY=true OMP_AFFINITY_FORMAT='TESTER-ENV: tl:%L tn:%n nt:%N' OMP_NUM_THREADS=8 %libomp-run | %python %S/check.py -c 'CHECK-8' %s // REQUIRES: !abt #include <stdio.h> #include <stdlib.h> #include <omp.h> int main(int argc, char** argv) { #pragma omp parallel { } #pragma omp parallel { } return 0; } // CHECK-8: num_threads=8 TESTER-ENV: tl:1 tn:[0-7] nt:8$
LCC04HCPTraversal.h
/**
 * @file LCC04HCPTraversal.h
 * @author sabrinakrallmann
 * @date 30.03.2020
 */

#pragma once

#include "autopas/containers/cellPairTraversals/C08BasedTraversal.h"
#include "autopas/containers/linkedCells/traversals/LCC08CellHandler.h"
#include "autopas/containers/linkedCells/traversals/LCTraversalInterface.h"
#include "autopas/utils/ArrayUtils.h"
#include "autopas/utils/ThreeDimensionalMapping.h"

namespace autopas {

/**
 * This class provides the c04 hcp traversal. The traversal is based on Tchipev, N. Algorithmic and Implementational
 * Optimizations of Molecular Dynamics Simulations for Process Engineering, Chapter 8 Outlook.
 *
 * The traversal uses the c04 base step performed on every single cell. Since
 * these steps overlap a domain coloring with four colors is applied. It differs from c04 in the shape of the colored
 * blocks. The chosen block-shape for the c08-base-steps in the lc_c04_HCP traversal is a 2x1x3-shape cuboid.
 *
 * @tparam ParticleCell the type of cells
 * @tparam PairwiseFunctor The functor that defines the interaction of two particles.
 * @tparam useSoA
 * @tparam useNewton3
 */
template <class ParticleCell, class PairwiseFunctor, DataLayoutOption::Value dataLayout, bool useNewton3>
class LCC04HCPTraversal : public C08BasedTraversal<ParticleCell, PairwiseFunctor, dataLayout, useNewton3>,
                          public LCTraversalInterface<ParticleCell> {
 public:
  /**
   * Constructor of c04hcp
   * @param dims The dimensions of the cellblock, i.e. the number of cells in x,
   * y and z direction.
   * @param pairwiseFunctor The functor that defines the interaction of two particles.
   * @param interactionLength Interaction length.
   * @param cellLength cell length.
   */
  LCC04HCPTraversal(const std::array<unsigned long, 3> &dims, PairwiseFunctor *pairwiseFunctor,
                    const double interactionLength, const std::array<double, 3> &cellLength)
      : C08BasedTraversal<ParticleCell, PairwiseFunctor, dataLayout, useNewton3>(dims, pairwiseFunctor,
                                                                                interactionLength, cellLength),
        _cellHandler(pairwiseFunctor, this->_cellsPerDimension, interactionLength, cellLength, this->_overlap),
        // _end = last valid cell index per dimension (cellsPerDimension - 1)
        _end(utils::ArrayMath::subScalar(utils::ArrayUtils::static_cast_array<long>(this->_cellsPerDimension), 1l)) {}

  void traverseParticlePairs() override;

  [[nodiscard]] TraversalOption getTraversalType() const override { return TraversalOption::lc_c04_HCP; }

  [[nodiscard]] DataLayoutOption getDataLayout() const override { return dataLayout; }

  [[nodiscard]] bool getUseNewton3() const override { return useNewton3; }

  [[nodiscard]] bool isApplicable() const override {
    // The cellsize cannot be smaller than the cutoff, if OpenMP is used.
    // Also see: https://github.com/AutoPas/AutoPas/issues/464
    const double minLength = *std::min_element(this->_cellLength.cbegin(), this->_cellLength.cend());
    return minLength >= this->_interactionLength;
  }

 private:
  // Processes all cuboids of one color; called once per color from traverseParticlePairs().
  void traverseSingleColor(std::vector<ParticleCell> &cells, int color);

  // Applies the c08 base step to the six cells of one 2x1x3 cuboid anchored at base3DIndex.
  void processBasePack6(std::vector<ParticleCell> &cells, const std::array<long, 3> &base3DIndex);

  LCC08CellHandler<ParticleCell, PairwiseFunctor, dataLayout, useNewton3> _cellHandler;

  // Last valid cell index in each dimension.
  const std::array<long, 3> _end;
};

template <class ParticleCell, class PairwiseFunctor, DataLayoutOption::Value dataLayout, bool useNewton3>
void LCC04HCPTraversal<ParticleCell, PairwiseFunctor, dataLayout, useNewton3>::processBasePack6(
    std::vector<ParticleCell> &cells, const std::array<long, 3> &base3DIndex) {
  using utils::ThreeDimensionalMapping::threeToOneD;
  std::array<long, 3> index{};
  const std::array<long, 3> signedDims = utils::ArrayUtils::static_cast_array<long>(this->_cellsPerDimension);

  // go through the six cells (2 in x times 3 in z, 1 in y)
  for (long z = 0; z < 3; ++z) {
    for (long x = 0; x < 2; ++x) {
      index[0] = base3DIndex[0] + x;
      index[1] = base3DIndex[1];
      index[2] = base3DIndex[2] + z;

      bool isIn = true;
      for (int d = 0; d < 3; ++d) {
        // prevent using overlapping cells and cells outside the boundaries
        isIn &= (index[d] >= 0l) and (index[d] <= (_end[d] - this->_overlap[d]));
      }

      // skip cells outside radius
      if (isIn) {
        const unsigned long ulIndex = threeToOneD(index, signedDims);
        _cellHandler.processBaseCell(cells, ulIndex);
      }
    }
  }
}

template <class ParticleCell, class PairwiseFunctor, DataLayoutOption::Value dataLayout, bool useNewton3>
void LCC04HCPTraversal<ParticleCell, PairwiseFunctor, dataLayout, useNewton3>::traverseParticlePairs() {
  auto &cells = *(this->_cells);
#if defined(AUTOPAS_OPENMP)
#pragma omp parallel
#endif
  {
    // Colors are processed strictly one after another; the barrier between
    // colors keeps threads from touching cells of the next color early.
    for (int color = 0; color < 4; ++color) {
      traverseSingleColor(cells, color);

#if defined(AUTOPAS_OPENMP)
      if (color < 3) {
#pragma omp barrier
      }
#endif
    }
  }  // close parallel region
}

/**
 * Go through one color and search for cuboids belonging to the specified color.
 * Uses shifts to go through the different dimensions and prevent overlapping of the cuboids.
 *
 * @tparam ParticleCell
 * @tparam PairwiseFunctor
 * @tparam dataLayout
 * @tparam useNewton3
 * @param cells
 * @param color
 */
template <class ParticleCell, class PairwiseFunctor, DataLayoutOption::Value dataLayout, bool useNewton3>
void LCC04HCPTraversal<ParticleCell, PairwiseFunctor, dataLayout, useNewton3>::traverseSingleColor(
    std::vector<ParticleCell> &cells, int color) {
  // determine a starting point of one of the grids
  std::array<long, 3> startOfThisColor{};  // coordinates: {x,y,z}

  // different starting points for different colors
  // some colors are starting outside the grid because only part of their cuboids are part of the grid
  // this way the starting points of sticking out cuboids can be determined as well
  switch (color) {
    case 0:
      startOfThisColor = {0l, 0l, 0l};
      break;
    case 1:
      startOfThisColor = {-4l, 0l, 1l};
      break;
    case 2:
      startOfThisColor = {-4l, 0l, -2l};
      break;
    case 3:
      startOfThisColor = {-2l, 0l, -1l};
      break;
    default:
      autopas::utils::ExceptionHandler::exception("LCC04HCPTraversal::traverseSingleColor: invalid color ({})", color);
  }

  // to fix intel64 icpc compiler complaints about perfectly nested loop.
  const long startX = startOfThisColor[0], endX = _end[0];
  const long startY = startOfThisColor[1], endY = _end[1];
  const long startZ = startOfThisColor[2], endZ = _end[2];

  // iterate over cartesian grid
#if defined(AUTOPAS_OPENMP)
#pragma omp for schedule(dynamic, 1) collapse(3) nowait
#endif
  for (long z = startZ; z < endZ; z += 4) {
    for (long y = startY; y < endY; y++) {
      /* color starts every 6th column again, the +4 is needed to prevent ending too early, since it
      will be shifted back inside the loop */
      for (long x = startX; x < (endX + 4); x += 6) {
        long x_index = x;
        /*
        shift on x-axis according to z-value: shift two times and then go back to original x-value
        first: no shift
        second: -4 shift
        third: -2 shift
        fourth: go back to first
        every 12th z, the shifting pattern repeats again at the origin of x without shift,
        because z is shifted by 4 in every loop run and every third z-shift the pattern repeats
        */
        switch ((z - startZ) % 12 / 4) {
          case 0:
            break;
          case 1:
            x_index -= 4;
            break;
          case 2:
            x_index -= 2;
            break;
          default:
            break;
        }
        // shift x-axis every second y-row
        if ((y - startY) % 2 != 0) {
          x_index += 3;
        }
        processBasePack6(cells, {x_index, y, z});
      }
    }
  }
}

}  // namespace autopas
KDTree.h
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #ifndef _SPTAG_COMMON_KDTREE_H_ #define _SPTAG_COMMON_KDTREE_H_ #include <iostream> #include <vector> #include <string> #include <shared_mutex> #include "../VectorIndex.h" #include "CommonUtils.h" #include "QueryResultSet.h" #include "WorkSpace.h" #pragma warning(disable:4996) // 'fopen': This function or variable may be unsafe. Consider using fopen_s instead. To disable deprecation, use _CRT_SECURE_NO_WARNINGS. See online help for details. namespace SPTAG { namespace COMMON { // node type for storing KDT struct KDTNode { SizeType left; SizeType right; DimensionType split_dim; float split_value; }; class KDTree { public: KDTree() : m_iTreeNumber(2), m_numTopDimensionKDTSplit(5), m_iSamples(1000), m_lock(new std::shared_timed_mutex) {} KDTree(const KDTree& other) : m_iTreeNumber(other.m_iTreeNumber), m_numTopDimensionKDTSplit(other.m_numTopDimensionKDTSplit), m_iSamples(other.m_iSamples), m_lock(new std::shared_timed_mutex) {} ~KDTree() {} inline const KDTNode& operator[](SizeType index) const { return m_pTreeRoots[index]; } inline KDTNode& operator[](SizeType index) { return m_pTreeRoots[index]; } inline SizeType size() const { return (SizeType)m_pTreeRoots.size(); } inline SizeType sizePerTree() const { std::shared_lock<std::shared_timed_mutex> lock(*m_lock); return (SizeType)m_pTreeRoots.size() - m_pTreeStart.back(); } template <typename T> void Rebuild(VectorIndex* p_index) { COMMON::KDTree newTrees(*this); newTrees.BuildTrees<T>(p_index, nullptr, 1); std::unique_lock<std::shared_timed_mutex> lock(*m_lock); m_pTreeRoots.swap(newTrees.m_pTreeRoots); m_pTreeStart.swap(newTrees.m_pTreeStart); } template <typename T> void BuildTrees(VectorIndex* p_index, std::vector<SizeType>* indices = nullptr, int numOfThreads = omp_get_num_threads()) { std::vector<SizeType> localindices; if (indices == nullptr) { localindices.resize(p_index->GetNumSamples()); for (SizeType i = 0; i < 
localindices.size(); i++) localindices[i] = i; } else { localindices.assign(indices->begin(), indices->end()); } m_pTreeRoots.resize(m_iTreeNumber * localindices.size()); m_pTreeStart.resize(m_iTreeNumber, 0); #pragma omp parallel for num_threads(numOfThreads) for (int i = 0; i < m_iTreeNumber; i++) { Sleep(i * 100); std::srand(clock()); std::vector<SizeType> pindices(localindices.begin(), localindices.end()); std::random_shuffle(pindices.begin(), pindices.end()); m_pTreeStart[i] = i * (SizeType)pindices.size(); std::cout << "Start to build KDTree " << i + 1 << std::endl; SizeType iTreeSize = m_pTreeStart[i]; DivideTree<T>(p_index, pindices, 0, (SizeType)pindices.size() - 1, m_pTreeStart[i], iTreeSize); std::cout << i + 1 << " KDTree built, " << iTreeSize - m_pTreeStart[i] << " " << pindices.size() << std::endl; } } inline std::uint64_t BufferSize() const { return sizeof(int) + sizeof(SizeType) * m_iTreeNumber + sizeof(SizeType) + sizeof(KDTNode) * m_pTreeRoots.size(); } bool SaveTrees(std::ostream& p_outstream) const { std::shared_lock<std::shared_timed_mutex> lock(*m_lock); p_outstream.write((char*)&m_iTreeNumber, sizeof(int)); p_outstream.write((char*)m_pTreeStart.data(), sizeof(SizeType) * m_iTreeNumber); SizeType treeNodeSize = (SizeType)m_pTreeRoots.size(); p_outstream.write((char*)&treeNodeSize, sizeof(SizeType)); p_outstream.write((char*)m_pTreeRoots.data(), sizeof(KDTNode) * treeNodeSize); std::cout << "Save KDT (" << m_iTreeNumber << "," << treeNodeSize << ") Finish!" 
<< std::endl; return true; } bool SaveTrees(std::string sTreeFileName) const { std::cout << "Save KDT to " << sTreeFileName << std::endl; std::ofstream output(sTreeFileName, std::ios::binary); if (!output.is_open()) return false; SaveTrees(output); output.close(); return true; } bool LoadTrees(char* pKDTMemFile) { m_iTreeNumber = *((int*)pKDTMemFile); pKDTMemFile += sizeof(int); m_pTreeStart.resize(m_iTreeNumber); memcpy(m_pTreeStart.data(), pKDTMemFile, sizeof(SizeType) * m_iTreeNumber); pKDTMemFile += sizeof(SizeType)*m_iTreeNumber; SizeType treeNodeSize = *((SizeType*)pKDTMemFile); pKDTMemFile += sizeof(SizeType); m_pTreeRoots.resize(treeNodeSize); memcpy(m_pTreeRoots.data(), pKDTMemFile, sizeof(KDTNode) * treeNodeSize); std::cout << "Load KDT (" << m_iTreeNumber << "," << treeNodeSize << ") Finish!" << std::endl; return true; } bool LoadTrees(std::string sTreeFileName) { std::cout << "Load KDT From " << sTreeFileName << std::endl; std::ifstream input(sTreeFileName, std::ios::binary); if (!input.is_open()) return false; input.read((char*)&m_iTreeNumber, sizeof(int)); m_pTreeStart.resize(m_iTreeNumber); input.read((char*)m_pTreeStart.data(), sizeof(SizeType) * m_iTreeNumber); SizeType treeNodeSize; input.read((char*)&treeNodeSize, sizeof(SizeType)); m_pTreeRoots.resize(treeNodeSize); input.read((char*)m_pTreeRoots.data(), sizeof(KDTNode) * treeNodeSize); input.close(); std::cout << "Load KDT (" << m_iTreeNumber << "," << treeNodeSize << ") Finish!" 
<< std::endl; return true; } template <typename T> void InitSearchTrees(const VectorIndex* p_index, const COMMON::QueryResultSet<T> &p_query, COMMON::WorkSpace &p_space) const { for (int i = 0; i < m_iTreeNumber; i++) { KDTSearch(p_index, p_query, p_space, m_pTreeStart[i], 0); } } template <typename T> void SearchTrees(const VectorIndex* p_index, const COMMON::QueryResultSet<T> &p_query, COMMON::WorkSpace &p_space, const int p_limits) const { while (!p_space.m_SPTQueue.empty() && p_space.m_iNumberOfCheckedLeaves < p_limits) { auto& tcell = p_space.m_SPTQueue.pop(); KDTSearch(p_index, p_query, p_space, tcell.node, tcell.distance); } } private: template <typename T> void KDTSearch(const VectorIndex* p_index, const COMMON::QueryResultSet<T> &p_query, COMMON::WorkSpace& p_space, const SizeType node, const float distBound) const { if (node < 0) { SizeType index = -node - 1; if (index >= p_index->GetNumSamples()) return; #ifdef PREFETCH const char* data = (const char *)(p_index->GetSample(index)); _mm_prefetch(data, _MM_HINT_T0); _mm_prefetch(data + 64, _MM_HINT_T0); #endif if (p_space.CheckAndSet(index)) return; ++p_space.m_iNumberOfTreeCheckedLeaves; ++p_space.m_iNumberOfCheckedLeaves; p_space.m_NGQueue.insert(COMMON::HeapCell(index, p_index->ComputeDistance((const void*)p_query.GetTarget(), (const void*)data))); return; } auto& tnode = m_pTreeRoots[node]; float diff = (p_query.GetTarget())[tnode.split_dim] - tnode.split_value; float distanceBound = distBound + diff * diff; SizeType otherChild, bestChild; if (diff < 0) { bestChild = tnode.left; otherChild = tnode.right; } else { otherChild = tnode.left; bestChild = tnode.right; } p_space.m_SPTQueue.insert(COMMON::HeapCell(otherChild, distanceBound)); KDTSearch(p_index, p_query, p_space, bestChild, distBound); } template <typename T> void DivideTree(VectorIndex* p_index, std::vector<SizeType>& indices, SizeType first, SizeType last, SizeType index, SizeType &iTreeSize) { ChooseDivision<T>(p_index, m_pTreeRoots[index], 
indices, first, last); SizeType i = Subdivide<T>(p_index, m_pTreeRoots[index], indices, first, last); if (i - 1 <= first) { m_pTreeRoots[index].left = -indices[first] - 1; } else { iTreeSize++; m_pTreeRoots[index].left = iTreeSize; DivideTree<T>(p_index, indices, first, i - 1, iTreeSize, iTreeSize); } if (last == i) { m_pTreeRoots[index].right = -indices[last] - 1; } else { iTreeSize++; m_pTreeRoots[index].right = iTreeSize; DivideTree<T>(p_index, indices, i, last, iTreeSize, iTreeSize); } } template <typename T> void ChooseDivision(VectorIndex* p_index, KDTNode& node, const std::vector<SizeType>& indices, const SizeType first, const SizeType last) { std::vector<float> meanValues(p_index->GetFeatureDim(), 0); std::vector<float> varianceValues(p_index->GetFeatureDim(), 0); SizeType end = min(first + m_iSamples, last); SizeType count = end - first + 1; // calculate the mean of each dimension for (SizeType j = first; j <= end; j++) { const T* v = (const T*)p_index->GetSample(indices[j]); for (DimensionType k = 0; k < p_index->GetFeatureDim(); k++) { meanValues[k] += v[k]; } } for (DimensionType k = 0; k < p_index->GetFeatureDim(); k++) { meanValues[k] /= count; } // calculate the variance of each dimension for (SizeType j = first; j <= end; j++) { const T* v = (const T*)p_index->GetSample(indices[j]); for (DimensionType k = 0; k < p_index->GetFeatureDim(); k++) { float dist = v[k] - meanValues[k]; varianceValues[k] += dist*dist; } } // choose the split dimension as one of the dimension inside TOP_DIM maximum variance node.split_dim = SelectDivisionDimension(varianceValues); // determine the threshold node.split_value = meanValues[node.split_dim]; } DimensionType SelectDivisionDimension(const std::vector<float>& varianceValues) const { // Record the top maximum variances std::vector<DimensionType> topind(m_numTopDimensionKDTSplit); int num = 0; // order the variances for (DimensionType i = 0; i < (DimensionType)varianceValues.size(); i++) { if (num < 
m_numTopDimensionKDTSplit || varianceValues[i] > varianceValues[topind[num - 1]]) { if (num < m_numTopDimensionKDTSplit) { topind[num++] = i; } else { topind[num - 1] = i; } int j = num - 1; // order the TOP_DIM variances while (j > 0 && varianceValues[topind[j]] > varianceValues[topind[j - 1]]) { std::swap(topind[j], topind[j - 1]); j--; } } } // randomly choose a dimension from TOP_DIM return topind[COMMON::Utils::rand(num)]; } template <typename T> SizeType Subdivide(VectorIndex* p_index, const KDTNode& node, std::vector<SizeType>& indices, const SizeType first, const SizeType last) const { SizeType i = first; SizeType j = last; // decide which child one point belongs while (i <= j) { SizeType ind = indices[i]; const T* v = (const T*)p_index->GetSample(ind); float val = v[node.split_dim]; if (val < node.split_value) { i++; } else { std::swap(indices[i], indices[j]); j--; } } // if all the points in the node are equal,equally split the node into 2 if ((i == first) || (i == last + 1)) { i = (first + last + 1) / 2; } return i; } private: std::vector<SizeType> m_pTreeStart; std::vector<KDTNode> m_pTreeRoots; public: std::unique_ptr<std::shared_timed_mutex> m_lock; int m_iTreeNumber, m_numTopDimensionKDTSplit, m_iSamples; }; } } #endif
parallel_queue_first_n_push.c
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdbool.h>
#include <omp.h>

#define MAX 20

/* Fixed-capacity circular FIFO queue shared between the two OpenMP
 * threads; all accesses happen inside `omp critical` sections. */
int intArray[MAX];
int front = 0;
int rear = -1;
int itemCount = 0;

/* true when the queue holds no elements */
bool isEmpty() {
   return itemCount == 0;
}

/* true when the queue holds MAX elements */
bool isFull() {
   return itemCount == MAX;
}

/* current number of queued elements */
int size() {
   return itemCount;
}

/* Enqueue `data` at the rear; silently drops the item when full. */
void insert(int data) {
   if (!isFull()) {
      if (rear == MAX - 1) {
         rear = -1; /* wrap around */
      }
      intArray[++rear] = data;
      itemCount++;
   }
}

/* Dequeue and return the front element.
 * Precondition: queue is non-empty (callers check isEmpty() first). */
int removeData() {
   int data = intArray[front++];
   if (front == MAX) {
      front = 0; /* wrap around */
   }
   itemCount--;
   return data;
}

/*
 * Producer/consumer demo: thread 0 pushes the first N natural numbers,
 * thread 1 pops them.  Each thread blocks on fgetc(stdin) between
 * operations so the interleaving can be stepped through interactively.
 */
int main() {
   int num = 1; /* next natural number to insert; shared, guarded by the critical sections */
   int n;

   printf("Enter value of N = ");
   if (scanf("%d", &n) != 1) {
      /* bug fix: previously n was used uninitialized on bad input */
      fprintf(stderr, "Invalid input\n");
      return EXIT_FAILURE;
   }

   omp_set_dynamic(0);
   #pragma omp parallel num_threads(2)
   {
      /* bug fix: `id` used to be declared in main() and was therefore
       * shared by the parallel region; both threads wrote it and could
       * observe the other thread's value, so both could take the same
       * branch.  Declaring it inside the region makes it private. */
      int id = omp_get_thread_num();
      if (id == 0) {
         /* producer */
         while (1) {
            #pragma omp critical
            {
               if (num <= n) {
                  insert(num);
                  printf("Inserted %d to Queue\n", num);
                  num++;
               } else {
                  printf("Already entered first %d natural numbers\n", n);
               }
               /* for infinite insertion
               if(!isFull()){
                  insert(num);
                  printf("Inserted %d to Queue\n", num);
                  num++;
               } else {
                  printf("Queue Full\n");
               } */
               fgetc(stdin); /* pause until ENTER */
            }
         }
      } else {
         /* consumer */
         while (1) {
            #pragma omp critical
            {
               if (!isEmpty()) {
                  printf("Deleted item = %d\n", removeData());
               } else {
                  printf("Queue is empty\n");
                  num = 1; /* restart the sequence once drained */
               }
               fgetc(stdin); /* pause until ENTER */
            }
         }
      }
   }
   return 0;
}
GB_unop__sinh_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__sinh_fp64_fp64)
// op(A') function:  GB (_unop_tran__sinh_fp64_fp64)

// C type:   double
// A type:   double
// cast:     double cij = aij
// unaryop:  cij = sinh (aij)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = sinh (x) ;

// casting
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ;       \
    Cx [pC] = sinh (z) ;   \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
// (controlled at compile time via GB_control.h)
#define GB_DISABLE \
    (GxB_NO_SINH || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__sinh_fp64_fp64)
(
    double *Cx,             // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // dense/sparse/hyper case: all anz entries are present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
            GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
        #else
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (p = 0 ; p < anz ; p++)
            {
                double aij = Ax [p] ;
                double z = aij ;
                Cx [p] = sinh (z) ;
            }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = sinh (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__sinh_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared by all unary ops via textual include
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
nodal_two_step_v_p_strategy.h
// // Project Name: KratosPFEMFluidDynamicsApplication $ // Last modified by: $Author: AFranci $ // Date: $Date: June 2018 $ // Revision: $Revision: 0.0 $ // // #ifndef KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_H #define KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_H #include "includes/define.h" #include "includes/model_part.h" #include "includes/deprecated_variables.h" #include "includes/cfd_variables.h" #include "utilities/openmp_utils.h" #include "processes/process.h" #include "solving_strategies/schemes/scheme.h" #include "solving_strategies/strategies/solving_strategy.h" #include "custom_utilities/mesher_utilities.hpp" #include "custom_utilities/boundary_normals_calculation_utilities.hpp" #include "geometries/geometry.h" #include "utilities/geometry_utilities.h" #include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h" #include "custom_strategies/builders_and_solvers/nodal_residualbased_elimination_builder_and_solver.h" #include "custom_strategies/builders_and_solvers/nodal_residualbased_elimination_builder_and_solver_continuity.h" #include "custom_strategies/builders_and_solvers/nodal_residualbased_block_builder_and_solver.h" #include "custom_utilities/solver_settings.h" #include "custom_strategies/strategies/gauss_seidel_linear_strategy.h" #include "pfem_fluid_dynamics_application_variables.h" #include <stdio.h> #include <math.h> #include <iostream> #include <fstream> namespace Kratos { ///@addtogroup PFEMFluidDynamicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ template <class TSparseSpace, class TDenseSpace, class TLinearSolver> class NodalTwoStepVPStrategy : public SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION(NodalTwoStepVPStrategy); /// Counted pointer of NodalTwoStepVPStrategy //typedef boost::shared_ptr< 
NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> > Pointer; typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TDataType TDataType; /// Node type (default is: Node<3>) typedef Node<3> NodeType; /// Geometry type (using with given NodeType) typedef Geometry<NodeType> GeometryType; typedef std::size_t SizeType; //typedef typename BaseType::DofSetType DofSetType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::ElementsArrayType ElementsArrayType; typedef typename SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer StrategyPointerType; typedef TwoStepVPSolverSettings<TSparseSpace, TDenseSpace, TLinearSolver> SolverSettingsType; typedef GeometryType::ShapeFunctionsGradientsType ShapeFunctionDerivativesArrayType; typedef GlobalPointersVector<Node<3>> NodeWeakPtrVectorType; ///@} ///@name Life Cycle ///@{ NodalTwoStepVPStrategy(ModelPart &rModelPart, SolverSettingsType &rSolverConfig) : BaseType(rModelPart) { InitializeStrategy(rSolverConfig); } NodalTwoStepVPStrategy(ModelPart &rModelPart, /*SolverConfiguration<TSparseSpace, TDenseSpace, TLinearSolver>& rSolverConfig,*/ typename TLinearSolver::Pointer pVelocityLinearSolver, typename TLinearSolver::Pointer pPressureLinearSolver, bool ReformDofSet = true, double VelTol = 0.0001, double PresTol = 0.0001, int MaxPressureIterations = 1, // Only for predictor-corrector unsigned int TimeOrder = 2, unsigned int DomainSize = 2) : BaseType(rModelPart), // Move Mesh flag, pass as input? 
mVelocityTolerance(VelTol), mPressureTolerance(PresTol), mMaxPressureIter(MaxPressureIterations), mDomainSize(DomainSize), mTimeOrder(TimeOrder), mReformDofSet(ReformDofSet) { KRATOS_TRY; BaseType::SetEchoLevel(1); // Check that input parameters are reasonable and sufficient. this->Check(); bool CalculateNormDxFlag = true; bool ReformDofAtEachIteration = false; // DofSet modifiaction is managed by the fractional step strategy, auxiliary strategies should not modify the DofSet directly. // Additional Typedefs typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer; typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; //initializing fractional velocity solution step typedef Scheme<TSparseSpace, TDenseSpace> SchemeType; typename SchemeType::Pointer pScheme; typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme<TSparseSpace, TDenseSpace>()); /* typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new IncrementalUpdateStaticScheme< TSparseSpace, TDenseSpace > ()); */ pScheme.swap(Temp); //CONSTRUCTION OF VELOCITY BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new NodalResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pVelocityLinearSolver)); /* BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (pVelocityLinearSolver)); */ this->mpMomentumStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pVelocityLinearSolver, vel_build, ReformDofAtEachIteration, CalculateNormDxFlag)); this->mpMomentumStrategy->SetEchoLevel(BaseType::GetEchoLevel()); vel_build->SetCalculateReactionsFlag(false); /* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new 
ResidualBasedEliminationBuilderAndSolverComponentwise<TSparseSpace, TDenseSpace, TLinearSolver, Variable<double> >(pPressureLinearSolver, PRESSURE)); */ /* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver >(pPressureLinearSolver)); */ BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new NodalResidualBasedEliminationBuilderAndSolverContinuity<TSparseSpace, TDenseSpace, TLinearSolver>(pPressureLinearSolver)); /* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new NodalResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver >(pPressureLinearSolver)); */ this->mpPressureStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pPressureLinearSolver, pressure_build, ReformDofAtEachIteration, CalculateNormDxFlag)); this->mpPressureStrategy->SetEchoLevel(BaseType::GetEchoLevel()); pressure_build->SetCalculateReactionsFlag(false); KRATOS_CATCH(""); } /// Destructor. virtual ~NodalTwoStepVPStrategy() {} int Check() override { KRATOS_TRY; // Check elements and conditions in the model part int ierr = BaseType::Check(); if (ierr != 0) return ierr; if (DELTA_TIME.Key() == 0) KRATOS_THROW_ERROR(std::runtime_error, "DELTA_TIME Key is 0. Check that the application was correctly registered.", ""); if (BDF_COEFFICIENTS.Key() == 0) KRATOS_THROW_ERROR(std::runtime_error, "BDF_COEFFICIENTS Key is 0. 
Check that the application was correctly registered.", ""); ModelPart &rModelPart = BaseType::GetModelPart(); if (mTimeOrder == 2 && rModelPart.GetBufferSize() < 3) KRATOS_THROW_ERROR(std::invalid_argument, "Buffer size too small for fractional step strategy (BDF2), needed 3, got ", rModelPart.GetBufferSize()); if (mTimeOrder == 1 && rModelPart.GetBufferSize() < 2) KRATOS_THROW_ERROR(std::invalid_argument, "Buffer size too small for fractional step strategy (Backward Euler), needed 2, got ", rModelPart.GetBufferSize()); const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); for (ModelPart::ElementIterator itEl = rModelPart.ElementsBegin(); itEl != rModelPart.ElementsEnd(); ++itEl) { ierr = itEl->Check(rCurrentProcessInfo); if (ierr != 0) break; } for (ModelPart::ConditionIterator itCond = rModelPart.ConditionsBegin(); itCond != rModelPart.ConditionsEnd(); ++itCond) { ierr = itCond->Check(rCurrentProcessInfo); if (ierr != 0) break; } return ierr; KRATOS_CATCH(""); } bool SolveSolutionStep() override { // Initialize BDF2 coefficients ModelPart &rModelPart = BaseType::GetModelPart(); this->SetTimeCoefficients(rModelPart.GetProcessInfo()); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); double currentTime = rCurrentProcessInfo[TIME]; double timeInterval = rCurrentProcessInfo[DELTA_TIME]; bool timeIntervalChanged = rCurrentProcessInfo[TIME_INTERVAL_CHANGED]; bool converged = false; // bool momentumAlreadyConverged=false; // bool continuityAlreadyConverged=false; unsigned int maxNonLinearIterations = mMaxPressureIter; std::cout << "\n Solve with nodally_integrated_two_step_vp strategy at t=" << currentTime << "s" << std::endl; if (timeIntervalChanged == true && currentTime > 10 * timeInterval) { maxNonLinearIterations *= 2; } if (currentTime < 10 * timeInterval) { if (BaseType::GetEchoLevel() > 1) std::cout << "within the first 10 time steps, I consider the given iteration number x3" << std::endl; maxNonLinearIterations *= 3; } if 
(currentTime < 20 * timeInterval && currentTime >= 10 * timeInterval) { if (BaseType::GetEchoLevel() > 1) std::cout << "within the second 10 time steps, I consider the given iteration number x2" << std::endl; maxNonLinearIterations *= 2; } bool momentumConverged = true; bool continuityConverged = false; bool fixedTimeStep = false; double pressureNorm = 0; double velocityNorm = 0; /* boost::timer solve_step_time; */ this->InitializeSolutionStep(); for (unsigned int it = 0; it < maxNonLinearIterations; ++it) { if (BaseType::GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) std::cout << "----- > iteration: " << it << std::endl; if (it == 0) { this->ComputeNodalVolume(); this->InitializeNonLinearIterations(); } this->CalcNodalStrainsAndStresses(); momentumConverged = this->SolveMomentumIteration(it, maxNonLinearIterations, fixedTimeStep, velocityNorm); this->UpdateTopology(rModelPart, BaseType::GetEchoLevel()); this->ComputeNodalVolume(); this->InitializeNonLinearIterations(); this->CalcNodalStrains(); if (fixedTimeStep == false) { continuityConverged = this->SolveContinuityIteration(it, maxNonLinearIterations, pressureNorm); } // if((momentumConverged==true || it==maxNonLinearIterations-1) && momentumAlreadyConverged==false){ // std::ofstream myfile; // myfile.open ("momentumConvergedIteration.txt",std::ios::app); // myfile << currentTime << "\t" << it << "\n"; // myfile.close(); // momentumAlreadyConverged=true; // } // if((continuityConverged==true || it==maxNonLinearIterations-1) && continuityAlreadyConverged==false){ // std::ofstream myfile; // myfile.open ("continuityConvergedIteration.txt",std::ios::app); // myfile << currentTime << "\t" << it << "\n"; // myfile.close(); // continuityAlreadyConverged=true; // } if (it == maxNonLinearIterations - 1 || ((continuityConverged && momentumConverged) && it > 1)) { //this->ComputeErrorL2NormCaseImposedG(); //this->ComputeErrorL2NormCasePoiseuille(); this->CalculateAccelerations(); // std::ofstream myfile; 
// myfile.open ("maxConvergedIteration.txt",std::ios::app); // myfile << currentTime << "\t" << it << "\n"; // myfile.close(); } if ((continuityConverged && momentumConverged) && it > 1) { rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false); rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false); converged = true; std::cout << "nodal V-P strategy converged in " << it + 1 << " iterations." << std::endl; break; } } if (!continuityConverged && !momentumConverged && BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) std::cout << "Convergence tolerance not reached." << std::endl; if (mReformDofSet) this->Clear(); /* std::cout << "solve_step_time : " << solve_step_time.elapsed() << std::endl; */ return converged; } void FinalizeSolutionStep() override { /* this->UpdateStressStrain(); */ } void Initialize() override { std::cout << " Initialize in nodal_two_step_v_p_strategy" << std::endl; ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); unsigned int sizeStrains = 3 * (dimension - 1); // #pragma omp parallel // { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); unsigned int neighbourNodes = neighb_nodes.size(); unsigned int sizeSDFNeigh = neighbourNodes * dimension; if (itNode->SolutionStepsDataHas(NODAL_CAUCHY_STRESS)) { Vector &rNodalStress = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS); if (rNodalStress.size() != sizeStrains) { rNodalStress.resize(sizeStrains, false); } noalias(rNodalStress) = ZeroVector(sizeStrains); } else { std::cout << "THIS node does not have NODAL_CAUCHY_STRESS... 
" << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_DEVIATORIC_CAUCHY_STRESS)) { Vector &rNodalStress = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS); if (rNodalStress.size() != sizeStrains) { rNodalStress.resize(sizeStrains, false); } noalias(rNodalStress) = ZeroVector(sizeStrains); } else { std::cout << "THIS node does not have NODAL_DEVIATORIC_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_VOLUME)) { itNode->FastGetSolutionStepValue(NODAL_VOLUME) = 0; } else { std::cout << "THIS node does not have NODAL_VOLUME... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_MEAN_MESH_SIZE)) { itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0; } else { std::cout << "THIS node does not have NODAL_MEAN_MESH_SIZE... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_FREESURFACE_AREA)) { itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0; } else { std::cout << "THIS node does not have NODAL_FREESURFACE_AREA... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_SFD_NEIGHBOURS)) { Vector &rNodalSFDneighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS); if (rNodalSFDneighbours.size() != sizeSDFNeigh) { rNodalSFDneighbours.resize(sizeSDFNeigh, false); } noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh); } else { std::cout << "THIS node does not have NODAL_SFD_NEIGHBOURS... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_SPATIAL_DEF_RATE)) { Vector &rSpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE); if (rSpatialDefRate.size() != sizeStrains) { rSpatialDefRate.resize(sizeStrains, false); } noalias(rSpatialDefRate) = ZeroVector(sizeStrains); } else { std::cout << "THIS node does not have NODAL_SPATIAL_DEF_RATE... 
" << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD)) { Matrix &rFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD); if (rFgrad.size1() != dimension) { rFgrad.resize(dimension, dimension, false); } noalias(rFgrad) = ZeroMatrix(dimension, dimension); } else { std::cout << "THIS node does not have NODAL_DEFORMATION_GRAD... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD_VEL)) { Matrix &rFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL); if (rFgradVel.size1() != dimension) { rFgradVel.resize(dimension, dimension, false); } noalias(rFgradVel) = ZeroMatrix(dimension, dimension); } else { std::cout << "THIS node does not have NODAL_DEFORMATION_GRAD_VEL... " << itNode->X() << " " << itNode->Y() << std::endl; } this->AssignFluidMaterialToEachNode(itNode); } // } } void UnactiveSliverElements() { KRATOS_TRY; ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); MesherUtilities MesherUtils; double ModelPartVolume = MesherUtils.ComputeModelPartVolume(rModelPart); double CriticalVolume = 0.001 * ModelPartVolume / double(rModelPart.Elements().size()); double ElementalVolume = 0; #pragma omp parallel { ModelPart::ElementIterator ElemBegin; ModelPart::ElementIterator ElemEnd; OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd); for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { unsigned int numNodes = itElem->GetGeometry().size(); if (numNodes == (dimension + 1)) { if (dimension == 2) { ElementalVolume = (itElem)->GetGeometry().Area(); } else if (dimension == 3) { ElementalVolume = (itElem)->GetGeometry().Volume(); } if (ElementalVolume < CriticalVolume) { // std::cout << "sliver element: it has Volume: " << ElementalVolume << " vs CriticalVolume(meanVol/1000): " << 
CriticalVolume<< std::endl; (itElem)->Set(ACTIVE, false); } else { (itElem)->Set(ACTIVE, true); } } } } KRATOS_CATCH(""); } void AssignFluidMaterialToEachNode(ModelPart::NodeIterator itNode) { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); const double timeInterval = rCurrentProcessInfo[DELTA_TIME]; double deviatoricCoeff = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY); double volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS); double currFirstLame = volumetricCoeff - 2.0 * deviatoricCoeff / 3.0; itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT) = currFirstLame; itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT) = deviatoricCoeff; } void ComputeNodalVolume() { ModelPart &rModelPart = BaseType::GetModelPart(); ElementsArrayType &pElements = rModelPart.Elements(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif vector<unsigned int> element_partition; OpenMPUtils::CreatePartition(number_of_threads, pElements.size(), element_partition); // #pragma omp parallel // { int k = OpenMPUtils::ThisThread(); typename ElementsArrayType::iterator ElemBegin = pElements.begin() + element_partition[k]; typename ElementsArrayType::iterator ElemEnd = pElements.begin() + element_partition[k + 1]; for (typename ElementsArrayType::iterator itElem = ElemBegin; itElem != ElemEnd; itElem++) //MSI: To be parallelized { Element::GeometryType &geometry = itElem->GetGeometry(); double elementalVolume = 0; if (dimension == 2) { elementalVolume = geometry.Area() / 3.0; } else if (dimension == 3) { elementalVolume = geometry.Volume() * 0.25; } // index = 0; unsigned int numNodes = geometry.size(); for (unsigned int i = 0; i < numNodes; i++) { double &nodalVolume = geometry(i)->FastGetSolutionStepValue(NODAL_VOLUME); nodalVolume += 
elementalVolume; } } // } } void InitializeSolutionStep() override { this->FillNodalSFDVector(); } void FillNodalSFDVector() { ModelPart &rModelPart = BaseType::GetModelPart(); // #pragma omp parallel // { // ModelPart::NodeIterator NodesBegin; // ModelPart::NodeIterator NodesEnd; // OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); // for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) // { for (ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); itNode++) { InitializeNodalVariablesForRemeshedDomain(itNode); SetNeighboursOrderToNode(itNode); // it assigns neighbours to inner nodes, filling NODAL_SFD_NEIGHBOURS_ORDER } } void SetNeighboursOrderToNode(ModelPart::NodeIterator itNode) { NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); unsigned int neighbourNodes = neighb_nodes.size() + 1; // +1 becausealso the node itself must be considered as nieghbor node Vector &rNodeOrderedNeighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); if (rNodeOrderedNeighbours.size() != neighbourNodes) rNodeOrderedNeighbours.resize(neighbourNodes, false); noalias(rNodeOrderedNeighbours) = ZeroVector(neighbourNodes); rNodeOrderedNeighbours[0] = itNode->Id(); if (neighbourNodes > 1) { for (unsigned int k = 0; k < neighbourNodes - 1; k++) { rNodeOrderedNeighbours[k + 1] = neighb_nodes[k].Id(); } } } void InitializeNodalVariablesForRemeshedDomain(ModelPart::NodeIterator itNode) { ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); unsigned int sizeStrains = 3 * (dimension - 1); NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); unsigned int neighbourNodes = neighb_nodes.size() + 1; unsigned int sizeSDFNeigh = neighbourNodes * dimension; if (itNode->SolutionStepsDataHas(NODAL_CAUCHY_STRESS)) { Vector &rNodalStress = 
itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS); if (rNodalStress.size() != sizeStrains) rNodalStress.resize(sizeStrains, false); noalias(rNodalStress) = ZeroVector(sizeStrains); } if (itNode->SolutionStepsDataHas(NODAL_DEVIATORIC_CAUCHY_STRESS)) { Vector &rNodalDevStress = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS); if (rNodalDevStress.size() != sizeStrains) rNodalDevStress.resize(sizeStrains, false); noalias(rNodalDevStress) = ZeroVector(sizeStrains); } if (itNode->SolutionStepsDataHas(NODAL_SFD_NEIGHBOURS_ORDER)) { Vector &rNodalSFDneighboursOrder = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); if (rNodalSFDneighboursOrder.size() != neighbourNodes) rNodalSFDneighboursOrder.resize(neighbourNodes, false); noalias(rNodalSFDneighboursOrder) = ZeroVector(neighbourNodes); } if (itNode->SolutionStepsDataHas(NODAL_SFD_NEIGHBOURS)) { Vector &rNodalSFDneighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS); if (rNodalSFDneighbours.size() != sizeSDFNeigh) rNodalSFDneighbours.resize(sizeSDFNeigh, false); noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh); } if (itNode->SolutionStepsDataHas(NODAL_SPATIAL_DEF_RATE)) { Vector &rSpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE); if (rSpatialDefRate.size() != sizeStrains) rSpatialDefRate.resize(sizeStrains, false); noalias(rSpatialDefRate) = ZeroVector(sizeStrains); } if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD)) { Matrix &rFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD); if (rFgrad.size1() != dimension) rFgrad.resize(dimension, dimension, false); noalias(rFgrad) = ZeroMatrix(dimension, dimension); } if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD_VEL)) { Matrix &rFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL); if (rFgradVel.size1() != dimension) rFgradVel.resize(dimension, dimension, false); noalias(rFgradVel) = ZeroMatrix(dimension, dimension); } if 
(itNode->SolutionStepsDataHas(NODAL_VOLUME)) { itNode->FastGetSolutionStepValue(NODAL_VOLUME) = 0; } if (itNode->SolutionStepsDataHas(NODAL_MEAN_MESH_SIZE)) { itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0; } if (itNode->SolutionStepsDataHas(NODAL_FREESURFACE_AREA)) { itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0; } if (itNode->SolutionStepsDataHas(NODAL_VOLUMETRIC_DEF_RATE)) { itNode->FastGetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = 0; } if (itNode->SolutionStepsDataHas(NODAL_EQUIVALENT_STRAIN_RATE)) { itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = 0; } } void InitializeNonLinearIterations() { ModelPart &rModelPart = BaseType::GetModelPart(); ElementsArrayType &pElements = rModelPart.Elements(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif vector<unsigned int> element_partition; OpenMPUtils::CreatePartition(number_of_threads, pElements.size(), element_partition); // #pragma omp parallel // { int k = OpenMPUtils::ThisThread(); typename ElementsArrayType::iterator ElemBegin = pElements.begin() + element_partition[k]; typename ElementsArrayType::iterator ElemEnd = pElements.begin() + element_partition[k + 1]; for (typename ElementsArrayType::iterator itElem = ElemBegin; itElem != ElemEnd; itElem++) //MSI: To be parallelized { itElem->InitializeNonLinearIteration(rCurrentProcessInfo); } // } } void CalcNodalStrainsAndStresses() { ModelPart &rModelPart = BaseType::GetModelPart(); // #pragma omp parallel // { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME); double theta = 0.5; if (nodalVolume > 0) { this->ComputeAndStoreNodalDeformationGradient(itNode, 
theta);
                this->CalcNodalStrainsAndStressesForNode(itNode);
            }
            else
            {
                // if nodalVolume==0: node was just (re)created by remeshing, reset its variables
                InitializeNodalVariablesForRemeshedDomain(itNode);
            }
        }
        // }
    }

    /// Computes, at a single node, the spatial deformation-rate tensor (Voigt form)
    /// and the total and deviatoric Cauchy stresses from the stored nodal deformation
    /// gradients (NODAL_DEFORMATION_GRAD / NODAL_DEFORMATION_GRAD_VEL).
    /// Writes NODAL_SPATIAL_DEF_RATE, NODAL_VOLUMETRIC_DEF_RATE, NODAL_CAUCHY_STRESS
    /// and NODAL_DEVIATORIC_CAUCHY_STRESS on the node.
    void CalcNodalStrainsAndStressesForNode(ModelPart::NodeIterator itNode)
    {
        ModelPart &rModelPart = BaseType::GetModelPart();
        const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();

        // Nodal material parameters: volumetric (first Lame) and deviatoric coefficients.
        double currFirstLame = itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT);
        double deviatoricCoeff = itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT);

        Matrix Fgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
        Matrix FgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
        double detFgrad = 1.0;
        Matrix InvFgrad = ZeroMatrix(dimension, dimension);
        Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);

        if (dimension == 2)
        {
            MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad);
        }
        else if (dimension == 3)
        {
            MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad);
        }

        //it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
        SpatialVelocityGrad = prod(FgradVel, InvFgrad);

        if (dimension == 2)
        {
            // Symmetric part of the velocity gradient, Voigt ordering (xx, yy, xy).
            auto &r_stain_tensor2D = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
            r_stain_tensor2D[0] = SpatialVelocityGrad(0, 0);
            r_stain_tensor2D[1] = SpatialVelocityGrad(1, 1);
            r_stain_tensor2D[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));

            double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
            if (yieldShear > 0)
            {
                // Yield-stress (Bingham-type) fluid: equivalent strain rate and
                // Papanastasiou-regularized apparent viscosity.
                itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) =
                    sqrt((2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] +
                          2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] +
                          4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2]));
                double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
                double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
                double exponent = -adaptiveExponent * equivalentStrainRate;
                if (equivalentStrainRate != 0)
                {
                    deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
                }
                if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
                {
                    // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
                    deviatoricCoeff = adaptiveExponent * yieldShear;
                }
            }

            // Volumetric deformation rate = trace of the strain-rate tensor.
            double DefVol = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];

            itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol;

            // Total Cauchy stress: volumetric part + 2*mu*(strain rate).
            double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0];
            double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
            double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];

            // Deviatoric Cauchy stress: 2*mu*(strain rate - trace/3).
            double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
            double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
            double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];

            auto &r_stress_tensor2D = itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0);
            r_stress_tensor2D[0] = nodalSigmaTot_xx;
            r_stress_tensor2D[1] = nodalSigmaTot_yy;
            r_stress_tensor2D[2] = nodalSigmaTot_xy;

            auto &r_dev_stress_tensor2D = itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0);
            r_dev_stress_tensor2D[0] = nodalSigmaDev_xx;
            r_dev_stress_tensor2D[1] = nodalSigmaDev_yy;
            r_dev_stress_tensor2D[2] = nodalSigmaDev_xy;
        }
        else if (dimension == 3)
        {
            // Voigt ordering (xx, yy, zz, xy, xz, yz).
            auto &r_stain_tensor3D = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
            r_stain_tensor3D[0] = SpatialVelocityGrad(0, 0);
            r_stain_tensor3D[1] = SpatialVelocityGrad(1, 1);
            r_stain_tensor3D[2] = SpatialVelocityGrad(2, 2);
            r_stain_tensor3D[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
            r_stain_tensor3D[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
            r_stain_tensor3D[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));

            double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
            if (yieldShear > 0)
            {
                // Same Papanastasiou regularization as in 2D, with the 3D invariant.
                itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) =
                    sqrt(2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] +
                         2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] +
                         2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] +
                         4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] +
                         4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] +
                         4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5]);
                double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
                double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
                double exponent = -adaptiveExponent * equivalentStrainRate;
                if (equivalentStrainRate != 0)
                {
                    deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
                }
                if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
                {
                    // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
                    deviatoricCoeff = adaptiveExponent * yieldShear;
                }
            }

            double DefVol = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];

            itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol;

            double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0];
            double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
            double nodalSigmaTot_zz = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
            double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3];
            double nodalSigmaTot_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4];
            double nodalSigmaTot_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5];

            double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
            double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
            double nodalSigmaDev_zz = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] - DefVol / 3.0);
            double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3];
            double nodalSigmaDev_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4];
            double nodalSigmaDev_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5];

            auto &r_stress_tensor3D = itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0);
            r_stress_tensor3D[0] = nodalSigmaTot_xx;
            r_stress_tensor3D[1] = nodalSigmaTot_yy;
            r_stress_tensor3D[2] = nodalSigmaTot_zz;
            r_stress_tensor3D[3] = nodalSigmaTot_xy;
            r_stress_tensor3D[4] = nodalSigmaTot_xz;
            r_stress_tensor3D[5] = nodalSigmaTot_yz;

            auto &r_dev_stress_tensor3D = itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0);
            r_dev_stress_tensor3D[0] = nodalSigmaDev_xx;
            r_dev_stress_tensor3D[1] = nodalSigmaDev_yy;
            r_dev_stress_tensor3D[2] = nodalSigmaDev_zz;
            r_dev_stress_tensor3D[3] = nodalSigmaDev_xy;
            r_dev_stress_tensor3D[4] = nodalSigmaDev_xz;
            r_dev_stress_tensor3D[5] = nodalSigmaDev_yz;
        }
    }

    /// Computes, at a single node, only the spatial deformation-rate tensor, the
    /// equivalent strain rate and the volumetric deformation rate (no stresses).
    /// Reads the stored nodal deformation gradients; writes NODAL_SPATIAL_DEF_RATE,
    /// NODAL_EQUIVALENT_STRAIN_RATE and NODAL_VOLUMETRIC_DEF_RATE.
    void CalcNodalStrainsForNode(ModelPart::NodeIterator itNode)
    {
        /* std::cout << "Calc Nodal Strains " << std::endl; */
        ModelPart &rModelPart = BaseType::GetModelPart();
        const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();

        // Matrix Fgrad=itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
        // Matrix FgradVel=itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
        // double detFgrad=1.0;
        // Matrix InvFgrad=ZeroMatrix(dimension,dimension);
        // Matrix SpatialVelocityGrad=ZeroMatrix(dimension,dimension);

        double detFgrad = 1.0;
        Matrix nodalFgrad = ZeroMatrix(dimension, dimension);
        Matrix FgradVel = ZeroMatrix(dimension, dimension);
        Matrix InvFgrad = ZeroMatrix(dimension, dimension);
        Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);

        nodalFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
        FgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);

        //Inverse
        if (dimension == 2)
        {
            MathUtils<double>::InvertMatrix2(nodalFgrad, InvFgrad, detFgrad);
        }
        else if (dimension == 3)
        {
            MathUtils<double>::InvertMatrix3(nodalFgrad, InvFgrad, detFgrad);
        }

        //it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
        SpatialVelocityGrad = prod(FgradVel, InvFgrad);

        if (dimension == 2)
        {
            // Voigt ordering (xx, yy, xy).
            itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
            itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
            itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));

            itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) =
                sqrt((2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] +
                      2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] +
                      4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2]));

            double DefX = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0];
            double DefY = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
            double DefVol = DefX + DefY;
            itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
        }
        else if (dimension == 3)
        {
            // Voigt ordering (xx, yy, zz, xy, xz, yz).
            itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
            itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
            itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] = SpatialVelocityGrad(2, 2);
            itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
            itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
            itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));

            itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) =
                sqrt(2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] +
                     2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] +
                     2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] +
                     4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] +
                     4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] +
                     4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5]);

            double DefX = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0];
            double DefY = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
            double DefZ = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
            double DefVol = DefX + DefY + DefZ;
            itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
        }
    }

    /// Loops over all nodes and recomputes the nodal deformation gradients and the
    /// strain-rate quantities (no stresses). Nodes with zero NODAL_VOLUME are
    /// treated as freshly remeshed and get their nodal variables reinitialized.
    void CalcNodalStrains()
    {
        /* std::cout << "Calc Nodal Strains " << std::endl; */
        ModelPart &rModelPart = BaseType::GetModelPart();
        // #pragma omp parallel
        // {
        ModelPart::NodeIterator NodesBegin;
        ModelPart::NodeIterator NodesEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
        for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
        {
            double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
            // theta = 1.0 -> gradients evaluated with end-of-step velocities.
            double theta = 1.0;
            if (nodalVolume > 0)
            {
                this->ComputeAndStoreNodalDeformationGradient(itNode, theta);
                this->CalcNodalStrainsForNode(itNode);
            }
            else
            {
                // if nodalVolume==0
                InitializeNodalVariablesForRemeshedDomain(itNode);
            }
        }
        // }
        /* std::cout << "Calc Nodal Strains And Stresses DONE " << std::endl; */
    }

    /// Assembles the nodal deformation gradient F and its rate dF/dt from the
    /// shape-function-derivative (SFD) contributions of the node itself and its
    /// neighbours, using theta-weighted velocities (theta in [0,1] blends current
    /// and previous step). Stores the results in NODAL_DEFORMATION_GRAD and
    /// NODAL_DEFORMATION_GRAD_VEL.
    void ComputeAndStoreNodalDeformationGradient(ModelPart::NodeIterator itNode, double theta)
    {

        KRATOS_TRY;

        ModelPart &rModelPart = BaseType::GetModelPart();
        const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
        Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
        Vector rNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
        /* unsigned int idThisNode=nodalSFDneighboursId[0]; */
        const unsigned int neighSize = nodalSFDneighboursId.size();
        Matrix Fgrad = ZeroMatrix(dimension, dimension);
        Matrix FgradVel = ZeroMatrix(dimension, dimension);
        NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);

        if (dimension == 2)
        {
            // Contribution of the node itself (first pair of SFD entries).
            double dNdXi = rNodalSFDneigh[0];
            double dNdYi = rNodalSFDneigh[1];

            Fgrad(0, 0) += dNdXi * itNode->X();
            Fgrad(0, 1) += dNdYi * itNode->X();
            Fgrad(1, 0) += dNdXi * itNode->Y();
            Fgrad(1, 1) += dNdYi * itNode->Y();

            // theta-weighted velocity: theta*v(n+1) + (1-theta)*v(n).
            double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
            double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);

            FgradVel(0, 0) += dNdXi * VelocityX;
            FgradVel(0, 1) += dNdYi * VelocityX;
            FgradVel(1, 0) += dNdXi * VelocityY;
            FgradVel(1, 1) += dNdYi * VelocityY;

            unsigned int firstRow = 2;

            if (neighSize > 0)
            {
                for (unsigned int i = 0; i < neighSize - 1; i++) //neigh_nodes has one cell less than nodalSFDneighboursId because this has also the considered node ID at the beginning
                {
                    dNdXi = rNodalSFDneigh[firstRow];
                    dNdYi = rNodalSFDneigh[firstRow + 1];
                    // Consistency check: neighbour ordering must match NODAL_SFD_NEIGHBOURS_ORDER.
                    unsigned int neigh_nodes_id = neighb_nodes[i].Id();
                    unsigned int other_neigh_nodes_id = nodalSFDneighboursId[i + 1];
                    if (neigh_nodes_id != other_neigh_nodes_id)
                    {
                        std::cout << "node (x,y)=(" << itNode->X() << "," << itNode->Y() << ") with neigh_nodes_id " << neigh_nodes_id << " different than other_neigh_nodes_id " << other_neigh_nodes_id << std::endl;
                    }
                    Fgrad(0, 0) += dNdXi * neighb_nodes[i].X();
                    Fgrad(0, 1) += dNdYi * neighb_nodes[i].X();
                    Fgrad(1, 0) += dNdXi * neighb_nodes[i].Y();
                    Fgrad(1, 1) += dNdYi * neighb_nodes[i].Y();

                    VelocityX = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
                    VelocityY = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);

                    FgradVel(0, 0) += dNdXi * VelocityX;
                    FgradVel(0, 1) += dNdYi * VelocityX;
                    FgradVel(1, 0) += dNdXi * VelocityY;
                    FgradVel(1, 1) += dNdYi * VelocityY;

                    firstRow += 2;
                }
            }
        }
        else
        {
            // 3D: three SFD entries per node (d/dx, d/dy, d/dz).
            double dNdXi = rNodalSFDneigh[0];
            double dNdYi = rNodalSFDneigh[1];
            double dNdZi = rNodalSFDneigh[2];

            double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
            double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
            double VelocityZ = itNode->FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta);

            Fgrad(0, 0) += dNdXi * itNode->X();
            Fgrad(0, 1) += dNdYi * itNode->X();
            Fgrad(0, 2) += dNdZi * itNode->X();

            Fgrad(1, 0) += dNdXi * itNode->Y();
            Fgrad(1, 1) += dNdYi * itNode->Y();
            Fgrad(1, 2) += dNdZi * itNode->Y();

            Fgrad(2, 0) += dNdXi * itNode->Z();
            Fgrad(2, 1) += dNdYi * itNode->Z();
            Fgrad(2, 2) += dNdZi * itNode->Z();

            FgradVel(0, 0) += dNdXi * VelocityX;
            FgradVel(0, 1) += dNdYi * VelocityX;
            FgradVel(0, 2) += dNdZi * VelocityX;

            FgradVel(1, 0) += dNdXi * VelocityY;
            FgradVel(1, 1) += dNdYi * VelocityY;
            FgradVel(1, 2) += dNdZi * VelocityY;

            FgradVel(2, 0) += dNdXi * VelocityZ;
            FgradVel(2, 1) += dNdYi * VelocityZ;
            FgradVel(2, 2) += dNdZi * VelocityZ;

            unsigned int firstRow = 3;

            if (neighSize > 0)
            {
                for (unsigned int i = 0; i < neighSize - 1; i++)
                {
                    dNdXi = rNodalSFDneigh[firstRow];
                    dNdYi = rNodalSFDneigh[firstRow + 1];
                    dNdZi = rNodalSFDneigh[firstRow + 2];

                    VelocityX = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
                    VelocityY = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
                    VelocityZ = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta);

                    Fgrad(0, 0) += dNdXi * neighb_nodes[i].X();
                    Fgrad(0, 1) += dNdYi * neighb_nodes[i].X();
                    Fgrad(0, 2) += dNdZi * neighb_nodes[i].X();

                    Fgrad(1, 0) += dNdXi * neighb_nodes[i].Y();
                    Fgrad(1, 1) += dNdYi * neighb_nodes[i].Y();
                    Fgrad(1, 2) += dNdZi * neighb_nodes[i].Y();

                    Fgrad(2, 0) += dNdXi * neighb_nodes[i].Z();
                    Fgrad(2, 1) += dNdYi * neighb_nodes[i].Z();
                    Fgrad(2, 2) += dNdZi * neighb_nodes[i].Z();

                    FgradVel(0, 0) += dNdXi * VelocityX;
                    FgradVel(0, 1) += dNdYi * VelocityX;
                    FgradVel(0, 2) += dNdZi * VelocityX;

                    FgradVel(1, 0) += dNdXi * VelocityY;
                    FgradVel(1, 1) += dNdYi * VelocityY;
                    FgradVel(1, 2) += dNdZi * VelocityY;

                    FgradVel(2, 0) += dNdXi * VelocityZ;
                    FgradVel(2, 1) += dNdYi * VelocityZ;
                    FgradVel(2, 2) += dNdZi * VelocityZ;

                    firstRow += 3;
                }
            }
        }

        itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD) = Fgrad;
        itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL) = FgradVel;
        KRATOS_CATCH("");
    }

    /// Moves the mesh to the updated displacements, resets the per-node assembled
    /// quantities, and recomputes weighted boundary normals for the new geometry.
    void UpdateTopology(ModelPart &rModelPart, unsigned int echoLevel)
    {
        KRATOS_TRY;

        /* this->CalculateDisplacements(); */
        this->CalculateDisplacementsAndResetNodalVariables();
        BaseType::MoveMesh();
        BoundaryNormalsCalculationUtilities BoundaryComputation;
        BoundaryComputation.CalculateWeightedBoundaryNormals(rModelPart, echoLevel);

        KRATOS_CATCH("");
    }

    /// Computes PRESSURE_VELOCITY = dp/dt by backward finite difference of the
    /// nodal pressure over DELTA_TIME; on the first step both buffers are zeroed.
    void CalculatePressureVelocity()
    {
        ModelPart &rModelPart = BaseType::GetModelPart();
        ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
        const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
        unsigned int timeStep = rCurrentProcessInfo[STEP];

        for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i)
        {
            if (timeStep == 1)
            {
                (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0;
                (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0;
            }
            else
            {
                double &CurrentPressure = (i)->FastGetSolutionStepValue(PRESSURE, 0);
                double &PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1);
                double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
                CurrentPressureVelocity = (CurrentPressure - PreviousPressure) / timeInterval;
            }
        }
    }

    /// Computes PRESSURE_ACCELERATION = d(dp/dt)/dt by backward finite difference
    /// of PRESSURE_VELOCITY over DELTA_TIME; zeroed on the first step.
    void CalculatePressureAcceleration()
    {
        ModelPart &rModelPart = BaseType::GetModelPart();
        ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
        const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
        unsigned int timeStep = rCurrentProcessInfo[STEP];

        for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i)
        {
            if (timeStep == 1)
            {
                (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0;
                (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0;
            }
            else
            {
                double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
                double &PreviousPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1);
                double &CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0);
                CurrentPressureAcceleration = (CurrentPressureVelocity - PreviousPressureVelocity) / timeInterval;
            }
        }
    }

    /// Updates nodal ACCELERATION from the BDF scheme for all fluid nodes.
    /// RIGID nodes get zero acceleration; ISOLATED (or rigid non-solid) nodes are
    /// reset and, if a VOLUME_ACCELERATION (body force) is available, fall freely
    /// under it so they re-enter the domain with a physical velocity.
    void CalculateAccelerations()
    {
        ModelPart &rModelPart = BaseType::GetModelPart();
        ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
        Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];

        for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i)
        {
            array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
            array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);

            array_1d<double, 3> &CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0);
            array_1d<double, 3> &PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1);

            if ((i)->IsNot(ISOLATED) && ((i)->IsNot(RIGID) || (i)->Is(SOLID)))
            {
                UpdateAccelerations(CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity, BDFcoeffs);
            }
            else if ((i)->Is(RIGID))
            {
                array_1d<double, 3> Zeros(3, 0.0);
                (i)->FastGetSolutionStepValue(ACCELERATION, 0) = Zeros;
                (i)->FastGetSolutionStepValue(ACCELERATION, 1) = Zeros;
            }
            else
            {
                // Isolated node: wipe all assembled nodal quantities ...
                (i)->FastGetSolutionStepValue(NODAL_VOLUME) = 0.0;
                (i)->FastGetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = 0.0;
                (i)->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = 0;
                (i)->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0.0;
                (i)->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0.0;
                (i)->FastGetSolutionStepValue(PRESSURE, 0) = 0.0;
                (i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0;
                (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0.0;
                (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0.0;
                (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0.0;
                (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0.0;
                // ... and let it move ballistically under the body force.
                if ((i)->SolutionStepsDataHas(VOLUME_ACCELERATION))
                {
                    array_1d<double, 3> &VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION);
                    (i)->FastGetSolutionStepValue(ACCELERATION, 0) = VolumeAcceleration;
                    (i)->FastGetSolutionStepValue(VELOCITY, 0) += VolumeAcceleration * rCurrentProcessInfo[DELTA_TIME];
                }
            }
        }
    }

    /// BDF2 acceleration update: a(n+1) = -c1*(v(n+1)-v(n)) - a(n).
    inline void UpdateAccelerations(array_1d<double, 3> &CurrentAcceleration, const array_1d<double, 3> &CurrentVelocity, array_1d<double, 3> &PreviousAcceleration, const array_1d<double, 3> &PreviousVelocity, Vector &BDFcoeffs)
    {
        /* noalias(PreviousAcceleration)=CurrentAcceleration; */
        noalias(CurrentAcceleration) = -BDFcoeffs[1] * (CurrentVelocity - PreviousVelocity) - PreviousAcceleration;
        // For constant Dt: BDFcoeffs[0]=3/(2*Dt), BDFcoeffs[1]=-2/Dt, BDFcoeffs[2]=1/(2*Dt).
    }

    /// Trapezoidal-rule displacement update from the velocities of the current
    /// and previous steps (all three components, no fixity check).
    void CalculateDisplacements()
    {
        ModelPart &rModelPart = BaseType::GetModelPart();
        ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
        const double TimeStep = rCurrentProcessInfo[DELTA_TIME];

        for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i)
        {
            array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
            array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
            array_1d<double, 3> &CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0);
            array_1d<double, 3> &PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1);

            /* if( i->IsFixed(DISPLACEMENT_X) == false ) */
            CurrentDisplacement[0] = 0.5 * TimeStep * (CurrentVelocity[0] + PreviousVelocity[0]) + PreviousDisplacement[0];

            /* if( i->IsFixed(DISPLACEMENT_Y) == false ) */
            CurrentDisplacement[1] = 0.5 * TimeStep * (CurrentVelocity[1] + PreviousVelocity[1]) + PreviousDisplacement[1];

            /* if( i->IsFixed(DISPLACEMENT_Z) == false ) */
            CurrentDisplacement[2] = 0.5 * TimeStep * (CurrentVelocity[2] + PreviousVelocity[2]) + PreviousDisplacement[2];
        }
    }

    /// Same trapezoidal displacement update as CalculateDisplacements, and in the
    /// same pass zeroes all per-node assembled quantities (volume, mesh size,
    /// free-surface area, strain rates, SFD vectors, deformation gradients) so
    /// the next assembly starts clean after remeshing/mesh motion.
    void CalculateDisplacementsAndResetNodalVariables()
    {
        ModelPart &rModelPart = BaseType::GetModelPart();
        ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
        const double TimeStep = rCurrentProcessInfo[DELTA_TIME];
        const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
        // Voigt size: 3 components in 2D, 6 in 3D.
        unsigned int sizeStrains = 3 * (dimension - 1);

        // #pragma omp parallel
        // {
        ModelPart::NodeIterator NodesBegin;
        ModelPart::NodeIterator NodesEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
        for (ModelPart::NodeIterator i = NodesBegin; i != NodesEnd; ++i)
        {
            array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
            array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
            array_1d<double, 3> &CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0);
            array_1d<double, 3> &PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1);

            CurrentDisplacement[0] = 0.5 * TimeStep * (CurrentVelocity[0] + PreviousVelocity[0]) + PreviousDisplacement[0];
            CurrentDisplacement[1] = 0.5 * TimeStep * (CurrentVelocity[1] + PreviousVelocity[1]) + PreviousDisplacement[1];
            if (dimension == 3)
            {
                CurrentDisplacement[2] = 0.5 * TimeStep * (CurrentVelocity[2] + PreviousVelocity[2]) + PreviousDisplacement[2];
            }

            ///// reset Nodal variables //////
            Vector &rNodalSFDneighbours = i->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
            unsigned int sizeSDFNeigh = rNodalSFDneighbours.size();
            // unsigned int neighbourNodes=i->GetValue(NEIGHBOUR_NODES).size()+1;
            // unsigned int sizeSDFNeigh=neighbourNodes*dimension;

            i->FastGetSolutionStepValue(NODAL_VOLUME) = 0;
            i->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0;
            i->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0;
            i->FastGetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = 0;
            i->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = 0;

            noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);

            Vector &rSpatialDefRate = i->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
            noalias(rSpatialDefRate) = ZeroVector(sizeStrains);

            Matrix &rFgrad = i->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
            noalias(rFgrad) = ZeroMatrix(dimension, dimension);

            Matrix &rFgradVel = i->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
            noalias(rFgradVel) = ZeroMatrix(dimension, dimension);
        }
        // }
    }

    /// Convenience wrapper: accelerations, then pressure time derivatives.
    void UpdatePressureAccelerations()
    {
        this->CalculateAccelerations();
        this->CalculatePressureVelocity();
        this->CalculatePressureAcceleration();
    }

    /// Clears internal data of both linear-solver strategies.
    void Clear() override
    {
        mpMomentumStrategy->Clear();
        mpPressureStrategy->Clear();
    }

    ///@}
    ///@name Access
    ///@{

    /// Propagates the echo level (reduced by one) to the inner strategies.
    void SetEchoLevel(int Level) override
    {
        BaseType::SetEchoLevel(Level);
        int StrategyLevel = Level > 0 ? Level - 1 : 0;
        mpMomentumStrategy->SetEchoLevel(StrategyLevel);
        mpPressureStrategy->SetEchoLevel(StrategyLevel);
    }

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override
    {
        std::stringstream buffer;
        buffer << "NodalTwoStepVPStrategy";
        return buffer.str();
    }

    /// Print information about this object.
    void PrintInfo(std::ostream &rOStream) const override
    {
        rOStream << "NodalTwoStepVPStrategy";
    }

    /// Print object's data.
    void PrintData(std::ostream &rOStream) const override
    {
    }

    ///@}
    ///@name Friends
    ///@{

    ///@}

protected:
    ///@name Protected Life Cycle
    ///@{

    ///@}
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    /// Calculate the coefficients for time iteration.
    /**
     * @param rCurrentProcessInfo ProcessInfo instance from the fluid ModelPart. Must contain DELTA_TIME and BDF_COEFFICIENTS variables.
     */
    void SetTimeCoefficients(ProcessInfo &rCurrentProcessInfo)
    {
        KRATOS_TRY;

        if (mTimeOrder == 2)
        {
            //calculate the BDF coefficients
            // Variable-step BDF2: coefficients depend on the ratio of the last two steps.
            double Dt = rCurrentProcessInfo[DELTA_TIME];
            double OldDt = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME];

            double Rho = OldDt / Dt;
            double TimeCoeff = 1.0 / (Dt * Rho * Rho + Dt * Rho);

            Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
            BDFcoeffs.resize(3, false);

            BDFcoeffs[0] = TimeCoeff * (Rho * Rho + 2.0 * Rho);        //coefficient for step n+1 (3/2Dt if Dt is constant)
            BDFcoeffs[1] = -TimeCoeff * (Rho * Rho + 2.0 * Rho + 1.0); //coefficient for step n (-4/2Dt if Dt is constant)
            BDFcoeffs[2] = TimeCoeff;                                  //coefficient for step n-1 (1/2Dt if Dt is constant)
        }
        else if (mTimeOrder == 1)
        {
            // Backward Euler (BDF1).
            double Dt = rCurrentProcessInfo[DELTA_TIME];
            double TimeCoeff = 1.0 / Dt;

            Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
            BDFcoeffs.resize(2, false);

            BDFcoeffs[0] = TimeCoeff;  //coefficient for step n+1 (1/Dt)
            BDFcoeffs[1] = -TimeCoeff; //coefficient for step n (-1/Dt)
        }

        KRATOS_CATCH("");
    }

    /// Performs one momentum (velocity) solve of the fractional-step scheme.
    /// @param it           Current nonlinear iteration index (0-based).
    /// @param maxIt        Maximum number of nonlinear iterations.
    /// @param fixedTimeStep [out] set by the time-step control on the last/failing iterations.
    /// @param velocityNorm [in/out] reference velocity norm, computed at it==0.
    /// @return Convergence flag. NOTE: the convergence check is currently commented
    ///         out, so this always returns false; callers rely on the iteration cap.
    bool SolveMomentumIteration(unsigned int it, unsigned int maxIt, bool &fixedTimeStep, double &velocityNorm)
    {
        ModelPart &rModelPart = BaseType::GetModelPart();
        int Rank = rModelPart.GetCommunicator().MyPID();
        bool ConvergedMomentum = false;
        double NormDv = 0;
        fixedTimeStep = false;
        // build momentum system and solve for fractional step velocity increment
        rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 1);

        if (it == 0)
        {
            mpMomentumStrategy->InitializeSolutionStep();
            /* this->SetNeighboursVelocityId(); */
        }

        NormDv = mpMomentumStrategy->Solve();

        if (BaseType::GetEchoLevel() > 1 && Rank == 0)
            std::cout << "-------------- s o l v e d ! ------------------" << std::endl;

        if (it == 0)
        {
            velocityNorm = this->ComputeVelocityNorm();
        }

        // Relative error of the velocity increment.
        double DvErrorNorm = NormDv / velocityNorm;

        // double DvErrorNorm = 0;
        // ConvergedMomentum = this->CheckVelocityConvergence(NormDv, DvErrorNorm);
        unsigned int iterationForCheck = 3;
        KRATOS_INFO("TwoStepVPStrategy") << "iteration(" << it << ") Velocity error: " << DvErrorNorm << std::endl;

        // Check convergence
        if (it == maxIt - 1)
        {
            std::cout << " iteration(" << it << ") Final Velocity error: " << DvErrorNorm << std::endl;
            fixedTimeStep = this->FixTimeStepMomentum(DvErrorNorm);
        }
        else if (it > iterationForCheck)
        {
            fixedTimeStep = this->CheckMomentumConvergence(DvErrorNorm);
        }

        // (disabled) debug dump of the per-iteration velocity error to
        // velocityConvergenceAt{025,05,075,100}s.txt at t = 0.25/0.5/0.75/1.0 s.

        if (!ConvergedMomentum && BaseType::GetEchoLevel() > 0 && Rank == 0)
            std::cout << "Momentum equations did not reach the convergence tolerance." << std::endl;

        return ConvergedMomentum;
    }

    /// Performs one continuity (pressure) solve of the fractional-step scheme.
    /// @param it    Current nonlinear iteration index (0-based).
    /// @param maxIt Maximum number of nonlinear iterations.
    /// @param NormP [in/out] reference pressure norm, computed at it==0.
    /// @return Convergence flag; only set on the last iteration via the
    ///         time-step control (FixTimeStepContinuity), false otherwise.
    bool SolveContinuityIteration(unsigned int it, unsigned int maxIt, double &NormP)
    {
        ModelPart &rModelPart = BaseType::GetModelPart();
        int Rank = rModelPart.GetCommunicator().MyPID();
        bool ConvergedContinuity = false;
        double NormDp = 0;

        // 2. Pressure solution
        rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 5);

        if (it == 0)
        {
            mpPressureStrategy->InitializeSolutionStep();
        }

        NormDp = mpPressureStrategy->Solve();

        if (BaseType::GetEchoLevel() > 0 && Rank == 0)
            std::cout << "The norm of pressure is: " << NormDp << std::endl;

        if (it == 0)
        {
            NormP=this->ComputePressureNorm();
        }

        // Relative error of the pressure increment.
        double DpErrorNorm = NormDp / (NormP);

        // double DpErrorNorm = 0;
        // ConvergedContinuity = this->CheckPressureConvergence(NormDp, DpErrorNorm);

        // Check convergence
        if (it == maxIt - 1)
        {
            std::cout << " iteration(" << it << ") Final Pressure error: " << DpErrorNorm << std::endl;
            ConvergedContinuity = this->FixTimeStepContinuity(DpErrorNorm);
        }
        else
        {
            std::cout << " iteration(" << it << ") Pressure error: " << DpErrorNorm << std::endl;
        }

        // (disabled) debug dump of the per-iteration pressure error to
        // pressureConvergenceAt{025,05,075,100}s.txt at t = 0.25/0.5/0.75/1.0 s.

        if (!ConvergedContinuity && BaseType::GetEchoLevel() > 0 && Rank == 0)
            std::cout << "Continuity equation did not reach the convergence tolerance." << std::endl;

        return ConvergedContinuity;
    }

    /// Computes the relative velocity-increment error ||dv||/||v|| over all nodes
    /// (MPI-summed) and compares it against mVelocityTolerance.
    /// @param NormDv      Norm of the last velocity increment.
    /// @param errorNormDv [out] relative error (||v|| clamped to 1 when zero).
    /// @return true when the error is below mVelocityTolerance.
    bool CheckVelocityConvergence(const double NormDv, double &errorNormDv)
    {
        ModelPart &rModelPart = BaseType::GetModelPart();

        double NormV = 0.00;
        errorNormDv = 0;

#pragma omp parallel reduction(+ \
                               : NormV)
        {
            ModelPart::NodeIterator NodeBegin;
            ModelPart::NodeIterator NodeEnd;
            OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
            for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
            {
                const array_1d<double, 3> &Vel = itNode->FastGetSolutionStepValue(VELOCITY);

                double NormVelNode = 0;

                for (unsigned int d = 0; d < 3; ++d)
                {
                    NormVelNode += Vel[d] * Vel[d];
                    NormV += Vel[d] * Vel[d];
                }
            }
        }

        BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV);

        NormV = sqrt(NormV);

        if (NormV == 0.0)
            NormV = 1.00;

        errorNormDv = NormDv / NormV;

        if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
        {
            std::cout << "The norm of velocity increment is: " << NormDv << std::endl;
            std::cout << "The norm of velocity is: " << NormV << std::endl;
            std::cout << "Velocity error: " << errorNormDv << "mVelocityTolerance: " << mVelocityTolerance << std::endl;
        }
        /* else{ */
        /*   std::cout<<"Velocity error: "<< errorNormDv <<" velTol: " << mVelocityTolerance<< std::endl; */
        /* } */

        if (errorNormDv < mVelocityTolerance)
        {
            return true;
        }
        else
        {
            return false;
        }
    }

    /// Computes the global (MPI-summed) L2 norm of the nodal velocity field,
    /// clamped to 1 when zero so it can be used as a relative-error denominator.
    double ComputeVelocityNorm()
    {
        ModelPart &rModelPart = BaseType::GetModelPart();

        double NormV = 0.00;

#pragma omp parallel reduction(+ \
                               : NormV)
        {
            ModelPart::NodeIterator NodeBegin;
ModelPart::NodeIterator NodeEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd); for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode) { const array_1d<double, 3> &Vel = itNode->FastGetSolutionStepValue(VELOCITY); double NormVelNode = 0; for (unsigned int d = 0; d < 3; ++d) { NormVelNode += Vel[d] * Vel[d]; NormV += Vel[d] * Vel[d]; } } } BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV); NormV = sqrt(NormV); if (NormV == 0.0) NormV = 1.00; return NormV; } double ComputePressureNorm() { ModelPart &rModelPart = BaseType::GetModelPart(); double NormP = 0.00; #pragma omp parallel reduction(+ \ : NormP) { ModelPart::NodeIterator NodeBegin; ModelPart::NodeIterator NodeEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd); for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode) { const double Pr = itNode->FastGetSolutionStepValue(PRESSURE); NormP += Pr * Pr; } } BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormP); NormP = sqrt(NormP); if (NormP == 0.0) NormP = 1.00; return NormP; } void ComputeErrorL2NormCaseImposedG() { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); const double currentTime = rCurrentProcessInfo[TIME]; double sumErrorL2Velocity = 0; double sumErrorL2VelocityX = 0; double sumErrorL2VelocityY = 0; double sumErrorL2Pressure = 0; double sumErrorL2TauXX = 0; double sumErrorL2TauYY = 0; double sumErrorL2TauXY = 0; #pragma omp parallel { ModelPart::NodeIterator NodeBegin; ModelPart::NodeIterator NodeEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd); for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode) { const double posX = itNode->X(); const double posY = itNode->Y(); const double nodalArea = itNode->FastGetSolutionStepValue(NODAL_VOLUME); const double velX = 
itNode->FastGetSolutionStepValue(VELOCITY_X);
const double velY = itNode->FastGetSolutionStepValue(VELOCITY_Y);
const double pressure = itNode->FastGetSolutionStepValue(PRESSURE);
// Nodal deviatoric Cauchy stress is read component-wise: [0], [1], [2]
// (presumably xx, yy, xy in 2D Voigt order -- TODO confirm against the
// element that fills NODAL_DEVIATORIC_CAUCHY_STRESS).
const double tauXX = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0];
const double tauYY = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1];
const double tauXY = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[2];
// Analytical (manufactured) solution evaluated at the node position.
double expectedVelocityX = pow(posX, 2) * (1.0 - posX) * (1.0 - posX) * (2.0 * posY - 6.0 * pow(posY, 2) + 4.0 * pow(posY, 3));
double expectedVelocityY = -pow(posY, 2) * (1.0 - posY) * (1.0 - posY) * (2.0 * posX - 6.0 * pow(posX, 2) + 4.0 * pow(posX, 3));
double expectedPressure = -posX * (1.0 - posX);
double expectedTauXX = 2.0 * (-4.0 * (1 - posX) * posX * (-1.0 + 2.0 * posX) * posY * (1.0 - 3.0 * posY + 2.0 * pow(posY, 2)));
double expectedTauYY = 2.0 * (4.0 * posX * (1.0 - 3.0 * posX + 2.0 * pow(posX, 2)) * (1 - posY) * posY * (-1.0 + 2.0 * posY));
double expectedTauXY = (2.0 * (1.0 - 6.0 * posY + 6.0 * pow(posY, 2)) * (1 - posX) * (1 - posX) * pow(posX, 2) - 2.0 * (1.0 - 6.0 * posX + 6.0 * pow(posX, 2)) * (1 - posY) * (1 - posY) * pow(posY, 2));
// Pointwise errors at this node.
double nodalErrorVelocityX = velX - expectedVelocityX;
double nodalErrorVelocityY = velY - expectedVelocityY;
double nodalErrorPressure = pressure - expectedPressure;
double nodalErrorTauXX = tauXX - expectedTauXX;
double nodalErrorTauYY = tauYY - expectedTauYY;
double nodalErrorTauXY = tauXY - expectedTauXY;
// Squared errors weighted by the nodal area (lumped quadrature of the L2 norm).
sumErrorL2Velocity += (pow(nodalErrorVelocityX, 2) + pow(nodalErrorVelocityY, 2)) * nodalArea;
sumErrorL2VelocityX += pow(nodalErrorVelocityX, 2) * nodalArea;
sumErrorL2VelocityY += pow(nodalErrorVelocityY, 2) * nodalArea;
sumErrorL2Pressure += pow(nodalErrorPressure, 2) * nodalArea;
sumErrorL2TauXX += pow(nodalErrorTauXX, 2) * nodalArea;
sumErrorL2TauYY += pow(nodalErrorTauYY, 2) * nodalArea;
sumErrorL2TauXY += pow(nodalErrorTauXY, 2) * nodalArea;
// itNode->FastGetSolutionStepValue(NODAL_ERROR_XX)=nodalErrorTauXX;
}
}

// Final L2 norms: square roots of the area-weighted squared-error sums.
double errorL2Velocity = sqrt(sumErrorL2Velocity);
double errorL2VelocityX = sqrt(sumErrorL2VelocityX);
double errorL2VelocityY = sqrt(sumErrorL2VelocityY);
double errorL2Pressure = sqrt(sumErrorL2Pressure);
double errorL2TauXX = sqrt(sumErrorL2TauXX);
double errorL2TauYY = sqrt(sumErrorL2TauYY);
double errorL2TauXY = sqrt(sumErrorL2TauXY);

// Append each norm to its own history file (one line per call/time step).
std::ofstream myfileVelocity;
myfileVelocity.open("errorL2VelocityFile.txt", std::ios::app);
myfileVelocity << currentTime << "\t" << errorL2Velocity << "\n";
myfileVelocity.close();

std::ofstream myfileVelocityX;
myfileVelocityX.open("errorL2VelocityXFile.txt", std::ios::app);
myfileVelocityX << currentTime << "\t" << errorL2VelocityX << "\n";
myfileVelocityX.close();

std::ofstream myfileVelocityY;
myfileVelocityY.open("errorL2VelocityYFile.txt", std::ios::app);
myfileVelocityY << currentTime << "\t" << errorL2VelocityY << "\n";
myfileVelocityY.close();

std::ofstream myfilePressure;
myfilePressure.open("errorL2PressureFile.txt", std::ios::app);
myfilePressure << currentTime << "\t" << errorL2Pressure << "\n";
myfilePressure.close();

std::ofstream myfileTauXX;
myfileTauXX.open("errorL2TauXXFile.txt", std::ios::app);
myfileTauXX << currentTime << "\t" << errorL2TauXX << "\n";
myfileTauXX.close();

std::ofstream myfileTauYY;
myfileTauYY.open("errorL2TauYYFile.txt", std::ios::app);
myfileTauYY << currentTime << "\t" << errorL2TauYY << "\n";
myfileTauYY.close();

std::ofstream myfileTauXY;
myfileTauXY.open("errorL2TauXYFile.txt", std::ios::app);
myfileTauXY << currentTime << "\t" << errorL2TauXY << "\n";
myfileTauXY.close();
}

// Computes L2 error norms of the azimuthal velocity and shear stress and
// appends them to errorL2Poiseuille.txt.
// NOTE(review): despite the name, the reference solution below matches a
// rotating-cylinder (circular Couette) configuration -- confirm intent.
void ComputeErrorL2NormCasePoiseuille()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double currentTime = rCurrentProcessInfo[TIME];
double sumErrorL2VelocityTheta = 0;
double sumErrorL2TauTheta = 0;
// Hard-coded annulus geometry: inner radius, outer radius, radius ratio.
double r_in = 0.2;
double R_out = 0.5;
double kappa = r_in / R_out;
double omega = 0.5; double viscosity = 100.0; #pragma omp parallel { ModelPart::NodeIterator NodeBegin; ModelPart::NodeIterator NodeEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd); for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode) { const double posX = itNode->X(); const double posY = itNode->Y(); const double rPos = sqrt(pow(posX, 2) + pow(posY, 2)); const double cosalfa = posX / rPos; const double sinalfa = posY / rPos; const double sin2alfa = 2.0 * cosalfa * sinalfa; const double cos2alfa = 1.0 - 2.0 * pow(sinalfa, 2); const double nodalArea = itNode->FastGetSolutionStepValue(NODAL_VOLUME); const double velX = itNode->FastGetSolutionStepValue(VELOCITY_X); const double velY = itNode->FastGetSolutionStepValue(VELOCITY_Y); const double tauXX = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0]; const double tauYY = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1]; const double tauXY = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[2]; double expectedVelocityTheta = pow(kappa, 2) * omega * R_out / (1.0 - pow(kappa, 2)) * (R_out / rPos - rPos / R_out); double computedVelocityTheta = sqrt(pow(velX, 2) + pow(velY, 2)); double nodalErrorVelocityTheta = computedVelocityTheta - expectedVelocityTheta; double expectedTauTheta = (2.0 * viscosity * pow(kappa, 2) * omega * pow(R_out, 2)) / (1.0 - pow(kappa, 2)) / pow(rPos, 2); double computedTauTheta = +(tauXX - tauYY) * sin2alfa / 2.0 - tauXY * cos2alfa; double nodalErrorTauTheta = computedTauTheta - expectedTauTheta; itNode->FastGetSolutionStepValue(NODAL_ERROR_XX) = computedVelocityTheta; // if(posY>-0.01 && posY<0.01){ // std::cout<<"expectedTauTheta "<<expectedTauTheta<<" computedTauTheta "<<computedTauTheta <<std::endl; // std::cout<<"tauXX "<<tauXX<<" tauYY "<<tauYY<<" tauXY "<<tauXY <<std::endl; // std::cout<<"posX "<<posX <<" posY "<<posY <<std::endl; // std::cout<<"\n "; // } // if(posX>-0.01 && 
posX<0.01){ // std::cout<<"expectedTauTheta "<<expectedTauTheta<<" computedTauTheta "<<computedTauTheta <<std::endl; // std::cout<<"tauXX "<<tauXX<<" tauYY "<<tauYY<<" tauXY "<<tauXY <<std::endl; // std::cout<<"posX "<<posX <<" posY "<<posY <<std::endl; // std::cout<<"\n "; // } sumErrorL2VelocityTheta += pow(nodalErrorVelocityTheta, 2) * nodalArea; sumErrorL2TauTheta += pow(nodalErrorTauTheta, 2) * nodalArea; } } double errorL2VelocityTheta = sqrt(sumErrorL2VelocityTheta); double errorL2TauTheta = sqrt(sumErrorL2TauTheta); std::ofstream myfileVelocity; myfileVelocity.open("errorL2Poiseuille.txt", std::ios::app); myfileVelocity << currentTime << "\t" << errorL2VelocityTheta << "\t" << errorL2TauTheta << "\n"; myfileVelocity.close(); } bool CheckPressureConvergence(const double NormDp, double &errorNormDp) { ModelPart &rModelPart = BaseType::GetModelPart(); double NormP = 0.00; errorNormDp = 0; // #pragma omp parallel reduction(+:NormP) // { ModelPart::NodeIterator NodeBegin; ModelPart::NodeIterator NodeEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd); for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode) { const double Pr = itNode->FastGetSolutionStepValue(PRESSURE); NormP += Pr * Pr; } // } BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormP); NormP = sqrt(NormP); if (NormP == 0.0) NormP = 1.00; errorNormDp = NormDp / NormP; if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) { std::cout << " The norm of pressure increment is: " << NormDp << std::endl; std::cout << " The norm of pressure is: " << NormP << std::endl; std::cout << " Pressure error: " << errorNormDp << std::endl; } /* else{ */ /* std::cout<<" Pressure error: "<<errorNormDp <<" presTol: "<<mPressureTolerance << std::endl; */ /* } */ if (errorNormDp < mPressureTolerance) { return true; } else return false; } bool FixTimeStepMomentum(const double DvErrorNorm) { ModelPart &rModelPart = 
BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); double currentTime = rCurrentProcessInfo[TIME]; double timeInterval = rCurrentProcessInfo[DELTA_TIME]; double minTolerance = 0.005; bool fixedTimeStep = false; if (currentTime < 3 * timeInterval) { minTolerance = 10; } if ((DvErrorNorm > minTolerance || (DvErrorNorm < 0 && DvErrorNorm > 0) || (DvErrorNorm != DvErrorNorm)) && DvErrorNorm != 0 && (DvErrorNorm != 1 || currentTime > timeInterval)) { rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true); std::cout << "NOT GOOD CONVERGENCE!!! I'll reduce the next time interval" << DvErrorNorm << std::endl; minTolerance = 0.05; if (DvErrorNorm > minTolerance) { std::cout << "BAD CONVERGENCE!!! I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << DvErrorNorm << std::endl; fixedTimeStep = true; // #pragma omp parallel // { ModelPart::NodeIterator NodeBegin; ModelPart::NodeIterator NodeEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd); for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode) { itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1); itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1); itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1); } // } } } else { rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false); } return fixedTimeStep; } bool CheckMomentumConvergence(const double DvErrorNorm) { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); double currentTime = rCurrentProcessInfo[TIME]; double timeInterval = rCurrentProcessInfo[DELTA_TIME]; double minTolerance = 0.99999; bool fixedTimeStep = false; if ((DvErrorNorm > minTolerance || (DvErrorNorm < 0 && DvErrorNorm > 0) || (DvErrorNorm != DvErrorNorm)) && DvErrorNorm != 0 && (DvErrorNorm != 1 || currentTime 
> timeInterval)) { rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true); std::cout << " BAD CONVERGENCE DETECTED DURING THE ITERATIVE LOOP!!! error: " << DvErrorNorm << " higher than 0.9999" << std::endl; std::cout << " I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << std::endl; fixedTimeStep = true; #pragma omp parallel { ModelPart::NodeIterator NodeBegin; ModelPart::NodeIterator NodeEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd); for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode) { itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1); itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1); itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1); } } } else { rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false); } return fixedTimeStep; } bool FixTimeStepContinuity(const double DvErrorNorm) { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); double currentTime = rCurrentProcessInfo[TIME]; double timeInterval = rCurrentProcessInfo[DELTA_TIME]; double minTolerance = 0.01; bool fixedTimeStep = false; if (currentTime < 3 * timeInterval) { minTolerance = 10; } if ((DvErrorNorm > minTolerance || (DvErrorNorm < 0 && DvErrorNorm > 0) || (DvErrorNorm != DvErrorNorm)) && DvErrorNorm != 0 && (DvErrorNorm != 1 || currentTime > timeInterval)) { fixedTimeStep = true; rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, true); } else { rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false); } return fixedTimeStep; } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} // private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ double mVelocityTolerance; double mPressureTolerance; unsigned int 
mMaxPressureIter;   // maximum number of pressure (continuity) iterations

unsigned int mDomainSize;   // spatial dimension, taken from the solver settings

unsigned int mTimeOrder;    // time-integration order, taken from the solver settings

bool mReformDofSet;         // rebuild the DOF set on demand when true

// Fractional step index.
/* 1 : Momentum step (calculate fractional step velocity)
 * 2-3 : Unused (reserved for componentwise calculation of frac step velocity)
 * 4 : Pressure step
 * 5 : Computation of projections
 * 6 : End of step velocity
 */
// unsigned int mStepId;

/// Scheme for the solution of the momentum equation
StrategyPointerType mpMomentumStrategy;

/// Scheme for the solution of the mass equation
StrategyPointerType mpPressureStrategy;

///@}
///@name Private Operators
///@{

///@}
///@name Private Operations
///@{

// Reads tolerances, iteration limits and the per-step linear strategies out
// of rSolverConfig; throws when the velocity or pressure strategy is missing.
void InitializeStrategy(SolverSettingsType &rSolverConfig)
{
    KRATOS_TRY;

    mTimeOrder = rSolverConfig.GetTimeOrder();

    // Check that input parameters are reasonable and sufficient.
    this->Check();

    //ModelPart& rModelPart = this->GetModelPart();

    mDomainSize = rSolverConfig.GetDomainSize();

    mReformDofSet = rSolverConfig.GetReformDofSet();

    BaseType::SetEchoLevel(rSolverConfig.GetEchoLevel());

    // Initialize strategies for each step
    bool HaveVelStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Velocity, mpMomentumStrategy);

    if (HaveVelStrategy)
    {
        rSolverConfig.FindTolerance(SolverSettingsType::Velocity, mVelocityTolerance);
        /* rSolverConfig.FindMaxIter(SolverSettingsType::Velocity,mMaxVelocityIter); */
    }
    else
    {
        KRATOS_THROW_ERROR(std::runtime_error, "NodalTwoStepVPStrategy error: No Velocity strategy defined in FractionalStepSettings", "");
    }

    bool HavePressStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Pressure, mpPressureStrategy);

    if (HavePressStrategy)
    {
        rSolverConfig.FindTolerance(SolverSettingsType::Pressure, mPressureTolerance);
        rSolverConfig.FindMaxIter(SolverSettingsType::Pressure, mMaxPressureIter);
    }
    else
    {
        KRATOS_THROW_ERROR(std::runtime_error, "NodalTwoStepVPStrategy error: No Pressure strategy defined in FractionalStepSettings", "");
    }

    // Check input parameters
    // NOTE(review): Check() was already invoked above; this second call is
    // redundant but kept for behavioral parity.
    this->Check();

    KRATOS_CATCH("");
}

///@}
///@name Private Access
///@{
///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. NodalTwoStepVPStrategy &operator=(NodalTwoStepVPStrategy const &rOther) {} /// Copy constructor. NodalTwoStepVPStrategy(NodalTwoStepVPStrategy const &rOther) {} ///@} }; /// Class NodalTwoStepVPStrategy ///@} ///@name Type Definitions ///@{ ///@} ///@} // addtogroup } // namespace Kratos. #endif // KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_H
par_csr_matop.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#include "_hypre_utilities.h"
#include "hypre_hopscotch_hash.h"
#include "_hypre_parcsr_mv.h"
#include "_hypre_lapack.h"
#include "_hypre_blas.h"

/* The following function was formerly part of hypre_ParMatmul
   but was removed so it can also be used for multiplication of
   Boolean matrices
*/

/* Symbolic phase of the ParCSR matrix product C = A*B: computes the row
   pointer arrays C_diag_i / C_offd_i and the total nonzero counts
   C_diag_size / C_offd_size, without touching any numerical values. */
void hypre_ParMatmul_RowSizes( HYPRE_MemoryLocation memory_location,
                               HYPRE_Int ** C_diag_i,
                               HYPRE_Int ** C_offd_i,
                               /*HYPRE_Int ** B_marker,*/
                               HYPRE_Int * A_diag_i,
                               HYPRE_Int * A_diag_j,
                               HYPRE_Int * A_offd_i,
                               HYPRE_Int * A_offd_j,
                               HYPRE_Int * B_diag_i,
                               HYPRE_Int * B_diag_j,
                               HYPRE_Int * B_offd_i,
                               HYPRE_Int * B_offd_j,
                               HYPRE_Int * B_ext_diag_i,
                               HYPRE_Int * B_ext_diag_j,
                               HYPRE_Int * B_ext_offd_i,
                               HYPRE_Int * B_ext_offd_j,
                               HYPRE_Int * map_B_to_C,
                               HYPRE_Int *C_diag_size,
                               HYPRE_Int *C_offd_size,
                               HYPRE_Int num_rows_diag_A,
                               HYPRE_Int num_cols_offd_A,
                               HYPRE_Int allsquare,
                               HYPRE_Int num_cols_diag_B,
                               HYPRE_Int num_cols_offd_B,
                               HYPRE_Int num_cols_offd_C )
{
   HYPRE_Int i1, i2, i3, jj2, jj3;
   HYPRE_Int jj_count_diag, jj_count_offd, jj_row_begin_diag, jj_row_begin_offd;
   HYPRE_Int start_indexing = 0; /* start indexing for C_data at 0 */
   HYPRE_Int num_threads = hypre_NumThreads();
   HYPRE_Int *jj_count_diag_array;
   HYPRE_Int *jj_count_offd_array;
   HYPRE_Int ii, size, rest;

   /* First pass begins here.  Computes sizes of C rows.
      Arrays computed: C_diag_i, C_offd_i, B_marker
      Arrays needed: (11, all HYPRE_Int*)
        A_diag_i, A_diag_j, A_offd_i, A_offd_j,
        B_diag_i, B_diag_j, B_offd_i, B_offd_j,
        B_ext_i, B_ext_j, col_map_offd_B,
        col_map_offd_B, B_offd_i, B_offd_j,
        B_ext_i, B_ext_j,
      Scalars computed: C_diag_size, C_offd_size
      Scalars needed:
      num_rows_diag_A, num_rows_diag_A, num_cols_offd_A, allsquare,
      first_col_diag_B, n_cols_B, num_cols_offd_B, num_cols_diag_B
   */

   *C_diag_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A+1, memory_location);
   *C_offd_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A+1, memory_location);

   /* Per-thread nonzero counters, combined by a prefix sum after the
      counting loop. */
   jj_count_diag_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------------
    * Loop over rows of A
    *-----------------------------------------------------------------------*/
   /* Rows are split into num_threads chunks; the first `rest` chunks get
      one extra row each. */
   size = num_rows_diag_A/num_threads;
   rest = num_rows_diag_A - size*num_threads;

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(ii, i1, jj_row_begin_diag, jj_row_begin_offd, jj_count_diag, jj_count_offd, jj2, i2, jj3, i3)
#endif
   /*for (ii=0; ii < num_threads; ii++)*/
   {
      HYPRE_Int *B_marker = NULL;
      HYPRE_Int ns, ne;
      ii = hypre_GetThreadNum();
      if (ii < rest)
      {
         ns = ii*size+ii;
         ne = (ii+1)*size+ii+1;
      }
      else
      {
         ns = ii*size+rest;
         ne = (ii+1)*size+rest;
      }
      jj_count_diag = start_indexing;
      jj_count_offd = start_indexing;

      /* B_marker records, per candidate C column, the position at which it
         was last counted; entries [0, num_cols_diag_B) are diag columns,
         the rest offd columns. */
      if (num_cols_diag_B || num_cols_offd_C)
         B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B+num_cols_offd_C, HYPRE_MEMORY_HOST);

      for (i1 = 0; i1 < num_cols_diag_B+num_cols_offd_C; i1++)
         B_marker[i1] = -1;

      for (i1 = ns; i1 < ne; i1++)
      {
         /*--------------------------------------------------------------------
          * Set marker for diagonal entry, C_{i1,i1} (for square matrices).
          *--------------------------------------------------------------------*/
         jj_row_begin_diag = jj_count_diag;
         jj_row_begin_offd = jj_count_offd;
         if ( allsquare )
         {
            B_marker[i1] = jj_count_diag;
            jj_count_diag++;
         }

         /*-----------------------------------------------------------------
          * Loop over entries in row i1 of A_offd.
          *-----------------------------------------------------------------*/

         if (num_cols_offd_A)
         {
            for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++)
            {
               i2 = A_offd_j[jj2];

               /*-----------------------------------------------------------
                * Loop over entries in row i2 of B_ext.
                *-----------------------------------------------------------*/

               for (jj3 = B_ext_offd_i[i2]; jj3 < B_ext_offd_i[i2+1]; jj3++)
               {
                  i3 = num_cols_diag_B+B_ext_offd_j[jj3];

                  /*--------------------------------------------------------
                   * Check B_marker to see that C_{i1,i3} has not already
                   * been accounted for. If it has not, mark it and increment
                   * counter.
                   *--------------------------------------------------------*/

                  if (B_marker[i3] < jj_row_begin_offd)
                  {
                     B_marker[i3] = jj_count_offd;
                     jj_count_offd++;
                  }
               }

               for (jj3 = B_ext_diag_i[i2]; jj3 < B_ext_diag_i[i2+1]; jj3++)
               {
                  i3 = B_ext_diag_j[jj3];

                  if (B_marker[i3] < jj_row_begin_diag)
                  {
                     B_marker[i3] = jj_count_diag;
                     jj_count_diag++;
                  }
               }
            }
         }

         /*-----------------------------------------------------------------
          * Loop over entries in row i1 of A_diag.
          *-----------------------------------------------------------------*/

         for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++)
         {
            i2 = A_diag_j[jj2];

            /*-----------------------------------------------------------
             * Loop over entries in row i2 of B_diag.
             *-----------------------------------------------------------*/

            for (jj3 = B_diag_i[i2]; jj3 < B_diag_i[i2+1]; jj3++)
            {
               i3 = B_diag_j[jj3];

               /*--------------------------------------------------------
                * Check B_marker to see that C_{i1,i3} has not already
                * been accounted for. If it has not, mark it and increment
                * counter.
                *--------------------------------------------------------*/

               if (B_marker[i3] < jj_row_begin_diag)
               {
                  B_marker[i3] = jj_count_diag;
                  jj_count_diag++;
               }
            }

            /*-----------------------------------------------------------
             * Loop over entries in row i2 of B_offd.
             *-----------------------------------------------------------*/

            if (num_cols_offd_B)
            {
               for (jj3 = B_offd_i[i2]; jj3 < B_offd_i[i2+1]; jj3++)
               {
                  /* map_B_to_C translates B's offd column numbering into
                     C's offd column numbering. */
                  i3 = num_cols_diag_B+map_B_to_C[B_offd_j[jj3]];

                  /*--------------------------------------------------------
                   * Check B_marker to see that C_{i1,i3} has not already
                   * been accounted for. If it has not, mark it and increment
                   * counter.
                   *--------------------------------------------------------*/

                  if (B_marker[i3] < jj_row_begin_offd)
                  {
                     B_marker[i3] = jj_count_offd;
                     jj_count_offd++;
                  }
               }
            }
         }

         /*--------------------------------------------------------------------
          * Set C_diag_i and C_offd_i for this row.
          *--------------------------------------------------------------------*/

         (*C_diag_i)[i1] = jj_row_begin_diag;
         (*C_offd_i)[i1] = jj_row_begin_offd;
      }
      jj_count_diag_array[ii] = jj_count_diag;
      jj_count_offd_array[ii] = jj_count_offd;

      hypre_TFree(B_marker, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* Prefix sum: shift each thread's local row offsets by the totals of
         all lower-numbered threads; thread 0 fills in the grand totals. */
      if (ii)
      {
         jj_count_diag = jj_count_diag_array[0];
         jj_count_offd = jj_count_offd_array[0];
         for (i1 = 1; i1 < ii; i1++)
         {
            jj_count_diag += jj_count_diag_array[i1];
            jj_count_offd += jj_count_offd_array[i1];
         }

         for (i1 = ns; i1 < ne; i1++)
         {
            (*C_diag_i)[i1] += jj_count_diag;
            (*C_offd_i)[i1] += jj_count_offd;
         }
      }
      else
      {
         (*C_diag_i)[num_rows_diag_A] = 0;
         (*C_offd_i)[num_rows_diag_A] = 0;
         for (i1 = 0; i1 < num_threads; i1++)
         {
            (*C_diag_i)[num_rows_diag_A] += jj_count_diag_array[i1];
            (*C_offd_i)[num_rows_diag_A] += jj_count_offd_array[i1];
         }
      }
   } /* end parallel loop */

   /*-----------------------------------------------------------------------
    * Allocate C_diag_data and C_diag_j arrays.
    * Allocate C_offd_data and C_offd_j arrays.
    *-----------------------------------------------------------------------*/

   *C_diag_size = (*C_diag_i)[num_rows_diag_A];
   *C_offd_size = (*C_offd_i)[num_rows_diag_A];

   hypre_TFree(jj_count_diag_array, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd_array, HYPRE_MEMORY_HOST);

   /* End of First Pass */
}

/*--------------------------------------------------------------------------
 * hypre_ParMatmul : multiplies two ParCSRMatrices A and B and returns
 * the product in ParCSRMatrix C
 * Note that C does not own the partitionings since its row_starts
 * is owned by A and col_starts by B.
 *--------------------------------------------------------------------------*/

hypre_ParCSRMatrix *hypre_ParMatmul( hypre_ParCSRMatrix *A,
                                     hypre_ParCSRMatrix *B )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MATMUL] -= hypre_MPI_Wtime();
#endif

   MPI_Comm comm = hypre_ParCSRMatrixComm(A);

   /* Local views of A: the diag block (on-process columns) and the offd
      block (off-process columns). */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);

   HYPRE_BigInt *row_starts_A = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag);
   HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);

   hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
   HYPRE_Complex *B_diag_data = hypre_CSRMatrixData(B_diag);
   HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag);
   HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag);

   hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
   HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);
   HYPRE_Complex *B_offd_data = hypre_CSRMatrixData(B_offd);
   HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd);
   HYPRE_Int *B_offd_j =
hypre_CSRMatrixJ(B_offd); HYPRE_BigInt first_col_diag_B = hypre_ParCSRMatrixFirstColDiag(B); HYPRE_BigInt last_col_diag_B; HYPRE_BigInt *col_starts_B = hypre_ParCSRMatrixColStarts(B); HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag); HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag); HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); hypre_ParCSRMatrix *C; HYPRE_BigInt *col_map_offd_C; HYPRE_Int *map_B_to_C=NULL; hypre_CSRMatrix *C_diag; HYPRE_Complex *C_diag_data; HYPRE_Int *C_diag_i; HYPRE_Int *C_diag_j; hypre_CSRMatrix *C_offd; HYPRE_Complex *C_offd_data=NULL; HYPRE_Int *C_offd_i=NULL; HYPRE_Int *C_offd_j=NULL; HYPRE_Int C_diag_size; HYPRE_Int C_offd_size; HYPRE_Int num_cols_offd_C = 0; hypre_CSRMatrix *Bs_ext; HYPRE_Complex *Bs_ext_data; HYPRE_Int *Bs_ext_i; HYPRE_BigInt *Bs_ext_j; HYPRE_Complex *B_ext_diag_data; HYPRE_Int *B_ext_diag_i; HYPRE_Int *B_ext_diag_j; HYPRE_Int B_ext_diag_size; HYPRE_Complex *B_ext_offd_data; HYPRE_Int *B_ext_offd_i; HYPRE_Int *B_ext_offd_j; HYPRE_BigInt *B_big_offd_j = NULL; HYPRE_Int B_ext_offd_size; HYPRE_BigInt n_rows_A, n_cols_A; HYPRE_BigInt n_rows_B, n_cols_B; HYPRE_Int allsquare = 0; HYPRE_Int num_procs; HYPRE_Int *my_diag_array; HYPRE_Int *my_offd_array; HYPRE_Int max_num_threads; HYPRE_Complex zero = 0.0; HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A); HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B); /* RL: TODO cannot guarantee, maybe should never assert hypre_assert(memory_location_A == memory_location_B); */ /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D, * not sure if this is the right thing to do. 
* Also, need something like this in other places * TODO */ HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B); n_rows_A = hypre_ParCSRMatrixGlobalNumRows(A); n_cols_A = hypre_ParCSRMatrixGlobalNumCols(A); n_rows_B = hypre_ParCSRMatrixGlobalNumRows(B); n_cols_B = hypre_ParCSRMatrixGlobalNumCols(B); max_num_threads = hypre_NumThreads(); my_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST); my_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST); if (n_cols_A != n_rows_B || num_cols_diag_A != num_rows_diag_B) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! Incompatible matrix dimensions!\n"); return NULL; } /* if globally C=A*B is square and locally C_diag should also be square */ if ( num_rows_diag_A == num_cols_diag_B && n_rows_A == n_cols_B ) { allsquare = 1; } /*----------------------------------------------------------------------- * Extract B_ext, i.e. portion of B that is stored on neighbor procs * and needed locally for matrix matrix product *-----------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm, &num_procs); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime(); #endif if (num_procs > 1) { /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings within * hypre_ParCSRMatrixExtractBExt *--------------------------------------------------------------------*/ Bs_ext = hypre_ParCSRMatrixExtractBExt(B,A,1); Bs_ext_data = hypre_CSRMatrixData(Bs_ext); Bs_ext_i = hypre_CSRMatrixI(Bs_ext); Bs_ext_j = hypre_CSRMatrixBigJ(Bs_ext); } B_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A+1, HYPRE_MEMORY_HOST); B_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A+1, HYPRE_MEMORY_HOST); B_ext_diag_size = 0; B_ext_offd_size = 0; last_col_diag_B = first_col_diag_B + 
(HYPRE_BigInt)num_cols_diag_B -1; #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_UnorderedBigIntSet set; #pragma omp parallel { HYPRE_Int size, rest, ii; HYPRE_Int ns, ne; HYPRE_Int i1, i, j; HYPRE_Int my_offd_size, my_diag_size; HYPRE_Int cnt_offd, cnt_diag; HYPRE_Int num_threads = hypre_NumActiveThreads(); size = num_cols_offd_A/num_threads; rest = num_cols_offd_A - size*num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } my_diag_size = 0; my_offd_size = 0; for (i=ns; i < ne; i++) { B_ext_diag_i[i] = my_diag_size; B_ext_offd_i[i] = my_offd_size; for (j=Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++) if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) my_offd_size++; else my_diag_size++; } my_diag_array[ii] = my_diag_size; my_offd_array[ii] = my_offd_size; #pragma omp barrier if (ii) { my_diag_size = my_diag_array[0]; my_offd_size = my_offd_array[0]; for (i1 = 1; i1 < ii; i1++) { my_diag_size += my_diag_array[i1]; my_offd_size += my_offd_array[i1]; } for (i1 = ns; i1 < ne; i1++) { B_ext_diag_i[i1] += my_diag_size; B_ext_offd_i[i1] += my_offd_size; } } else { B_ext_diag_size = 0; B_ext_offd_size = 0; for (i1 = 0; i1 < num_threads; i1++) { B_ext_diag_size += my_diag_array[i1]; B_ext_offd_size += my_offd_array[i1]; } B_ext_diag_i[num_cols_offd_A] = B_ext_diag_size; B_ext_offd_i[num_cols_offd_A] = B_ext_offd_size; if (B_ext_diag_size) { B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST); B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST); } if (B_ext_offd_size) { B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST); B_big_offd_j = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size, HYPRE_MEMORY_HOST); B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size, HYPRE_MEMORY_HOST); } hypre_UnorderedBigIntSetCreate(&set, B_ext_offd_size + num_cols_offd_B, 16*hypre_NumThreads()); } #pragma omp barrier 
cnt_offd = B_ext_offd_i[ns]; cnt_diag = B_ext_diag_i[ns]; for (i=ns; i < ne; i++) { for (j=Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++) if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) { hypre_UnorderedBigIntSetPut(&set, Bs_ext_j[j]); B_big_offd_j[cnt_offd] = Bs_ext_j[j]; //Bs_ext_j[cnt_offd] = Bs_ext_j[j]; B_ext_offd_data[cnt_offd++] = Bs_ext_data[j]; } else { B_ext_diag_j[cnt_diag] = (HYPRE_Int)(Bs_ext_j[j] - first_col_diag_B); B_ext_diag_data[cnt_diag++] = Bs_ext_data[j]; } } HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_B); for (i = i_begin; i < i_end; i++) { hypre_UnorderedBigIntSetPut(&set, col_map_offd_B[i]); } } /* omp parallel */ col_map_offd_C = hypre_UnorderedBigIntSetCopyToArray(&set, &num_cols_offd_C); hypre_UnorderedBigIntSetDestroy(&set); hypre_UnorderedBigIntMap col_map_offd_C_inverse; hypre_big_sort_and_create_inverse_map(col_map_offd_C, num_cols_offd_C, &col_map_offd_C, &col_map_offd_C_inverse); HYPRE_Int i, j; #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE for (i = 0; i < num_cols_offd_A; i++) for (j=B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++) //B_ext_offd_j[j] = hypre_UnorderedIntMapGet(&col_map_offd_C_inverse, B_ext_offd_j[j]); B_ext_offd_j[j] = hypre_UnorderedBigIntMapGet(&col_map_offd_C_inverse, B_big_offd_j[j]); if (num_cols_offd_C) { hypre_UnorderedBigIntMapDestroy(&col_map_offd_C_inverse); } hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST); hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST); if (num_cols_offd_B) { HYPRE_Int i; map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST); #pragma omp parallel private(i) { HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_C); HYPRE_Int cnt; if (i_end > i_begin) { cnt = hypre_BigLowerBound(col_map_offd_B, col_map_offd_B + (HYPRE_BigInt)num_cols_offd_B, col_map_offd_C[i_begin]) - col_map_offd_B; } for (i = i_begin; i < i_end && cnt < num_cols_offd_B; i++) { if (col_map_offd_C[i] == 
col_map_offd_B[cnt]) { map_B_to_C[cnt++] = i; } } } } if (num_procs > 1) { hypre_CSRMatrixDestroy(Bs_ext); Bs_ext = NULL; } #else /* !HYPRE_CONCURRENT_HOPSCOTCH */ HYPRE_BigInt *temp; #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int size, rest, ii; HYPRE_Int ns, ne; HYPRE_Int i1, i, j; HYPRE_Int my_offd_size, my_diag_size; HYPRE_Int cnt_offd, cnt_diag; HYPRE_Int num_threads = hypre_NumActiveThreads(); size = num_cols_offd_A/num_threads; rest = num_cols_offd_A - size*num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } my_diag_size = 0; my_offd_size = 0; for (i=ns; i < ne; i++) { B_ext_diag_i[i] = my_diag_size; B_ext_offd_i[i] = my_offd_size; for (j=Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++) if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) my_offd_size++; else my_diag_size++; } my_diag_array[ii] = my_diag_size; my_offd_array[ii] = my_offd_size; #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (ii) { my_diag_size = my_diag_array[0]; my_offd_size = my_offd_array[0]; for (i1 = 1; i1 < ii; i1++) { my_diag_size += my_diag_array[i1]; my_offd_size += my_offd_array[i1]; } for (i1 = ns; i1 < ne; i1++) { B_ext_diag_i[i1] += my_diag_size; B_ext_offd_i[i1] += my_offd_size; } } else { B_ext_diag_size = 0; B_ext_offd_size = 0; for (i1 = 0; i1 < num_threads; i1++) { B_ext_diag_size += my_diag_array[i1]; B_ext_offd_size += my_offd_array[i1]; } B_ext_diag_i[num_cols_offd_A] = B_ext_diag_size; B_ext_offd_i[num_cols_offd_A] = B_ext_offd_size; if (B_ext_diag_size) { B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST); B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST); } if (B_ext_offd_size) { B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST); B_big_offd_j = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size, HYPRE_MEMORY_HOST); B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, 
B_ext_offd_size, HYPRE_MEMORY_HOST); } if (B_ext_offd_size || num_cols_offd_B) temp = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size+num_cols_offd_B, HYPRE_MEMORY_HOST); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif cnt_offd = B_ext_offd_i[ns]; cnt_diag = B_ext_diag_i[ns]; for (i=ns; i < ne; i++) { for (j=Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++) if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) { temp[cnt_offd] = Bs_ext_j[j]; B_big_offd_j[cnt_offd] = Bs_ext_j[j]; //Bs_ext_j[cnt_offd] = Bs_ext_j[j]; B_ext_offd_data[cnt_offd++] = Bs_ext_data[j]; } else { B_ext_diag_j[cnt_diag] = (HYPRE_Int)(Bs_ext_j[j] - first_col_diag_B); B_ext_diag_data[cnt_diag++] = Bs_ext_data[j]; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (ii == 0) { HYPRE_Int cnt; if (num_procs > 1) { hypre_CSRMatrixDestroy(Bs_ext); Bs_ext = NULL; } cnt = 0; if (B_ext_offd_size || num_cols_offd_B) { cnt = B_ext_offd_size; for (i=0; i < num_cols_offd_B; i++) temp[cnt++] = col_map_offd_B[i]; if (cnt) { HYPRE_BigInt value; hypre_BigQsort0(temp, 0, cnt-1); num_cols_offd_C = 1; value = temp[0]; for (i=1; i < cnt; i++) { if (temp[i] > value) { value = temp[i]; temp[num_cols_offd_C++] = value; } } } if (num_cols_offd_C) col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_offd_C; i++) col_map_offd_C[i] = temp[i]; hypre_TFree(temp, HYPRE_MEMORY_HOST); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (i=ns; i < ne; i++) for (j=B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++) B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, B_big_offd_j[j], //B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, Bs_ext_j[j], num_cols_offd_C); } /* end parallel region */ hypre_TFree(B_big_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST); hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST); if (num_cols_offd_B) { HYPRE_Int i, cnt; map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST); cnt = 0; 
for (i=0; i < num_cols_offd_C; i++) if (col_map_offd_C[i] == col_map_offd_B[cnt]) { map_B_to_C[cnt++] = i; if (cnt == num_cols_offd_B) break; } } #endif /* !HYPRE_CONCURRENT_HOPSCOTCH */ #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime(); #endif hypre_ParMatmul_RowSizes( /*&C_diag_i, &C_offd_i, &B_marker,*/ memory_location_C, &C_diag_i, &C_offd_i, A_diag_i, A_diag_j, A_offd_i, A_offd_j, B_diag_i, B_diag_j, B_offd_i, B_offd_j, B_ext_diag_i, B_ext_diag_j, B_ext_offd_i, B_ext_offd_j, map_B_to_C, &C_diag_size, &C_offd_size, num_rows_diag_A, num_cols_offd_A, allsquare, num_cols_diag_B, num_cols_offd_B, num_cols_offd_C ); /*----------------------------------------------------------------------- * Allocate C_diag_data and C_diag_j arrays. * Allocate C_offd_data and C_offd_j arrays. *-----------------------------------------------------------------------*/ last_col_diag_B = first_col_diag_B + (HYPRE_BigInt)num_cols_diag_B - 1; C_diag_data = hypre_CTAlloc(HYPRE_Complex, C_diag_size, memory_location_C); C_diag_j = hypre_CTAlloc(HYPRE_Int, C_diag_size, memory_location_C); if (C_offd_size) { C_offd_data = hypre_CTAlloc(HYPRE_Complex, C_offd_size, memory_location_C); C_offd_j = hypre_CTAlloc(HYPRE_Int, C_offd_size, memory_location_C); } /*----------------------------------------------------------------------- * Second Pass: Fill in C_diag_data and C_diag_j. * Second Pass: Fill in C_offd_data and C_offd_j. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Initialize some stuff. 
*-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int *B_marker = NULL; HYPRE_Int ns, ne, size, rest, ii; HYPRE_Int i1, i2, i3, jj2, jj3; HYPRE_Int jj_row_begin_diag, jj_count_diag; HYPRE_Int jj_row_begin_offd, jj_count_offd; HYPRE_Int num_threads; HYPRE_Complex a_entry; /*, a_b_product;*/ ii = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); size = num_rows_diag_A/num_threads; rest = num_rows_diag_A - size*num_threads; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } jj_count_diag = C_diag_i[ns]; jj_count_offd = C_offd_i[ns]; if (num_cols_diag_B || num_cols_offd_C) { B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B+num_cols_offd_C, HYPRE_MEMORY_HOST); } for (i1 = 0; i1 < num_cols_diag_B+num_cols_offd_C; i1++) { B_marker[i1] = -1; } /*----------------------------------------------------------------------- * Loop over interior c-points. *-----------------------------------------------------------------------*/ for (i1 = ns; i1 < ne; i1++) { /*-------------------------------------------------------------------- * Create diagonal entry, C_{i1,i1} *--------------------------------------------------------------------*/ jj_row_begin_diag = jj_count_diag; jj_row_begin_offd = jj_count_offd; if ( allsquare ) { B_marker[i1] = jj_count_diag; C_diag_data[jj_count_diag] = zero; C_diag_j[jj_count_diag] = i1; jj_count_diag++; } /*----------------------------------------------------------------- * Loop over entries in row i1 of A_offd. *-----------------------------------------------------------------*/ if (num_cols_offd_A) { for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++) { i2 = A_offd_j[jj2]; a_entry = A_offd_data[jj2]; /*----------------------------------------------------------- * Loop over entries in row i2 of B_ext. 
*-----------------------------------------------------------*/ for (jj3 = B_ext_offd_i[i2]; jj3 < B_ext_offd_i[i2+1]; jj3++) { i3 = num_cols_diag_B+B_ext_offd_j[jj3]; /*-------------------------------------------------------- * Check B_marker to see that C_{i1,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_offd) { B_marker[i3] = jj_count_offd; C_offd_data[jj_count_offd] = a_entry*B_ext_offd_data[jj3]; C_offd_j[jj_count_offd] = i3-num_cols_diag_B; jj_count_offd++; } else C_offd_data[B_marker[i3]] += a_entry*B_ext_offd_data[jj3]; } for (jj3 = B_ext_diag_i[i2]; jj3 < B_ext_diag_i[i2+1]; jj3++) { i3 = B_ext_diag_j[jj3]; if (B_marker[i3] < jj_row_begin_diag) { B_marker[i3] = jj_count_diag; C_diag_data[jj_count_diag] = a_entry*B_ext_diag_data[jj3]; C_diag_j[jj_count_diag] = i3; jj_count_diag++; } else C_diag_data[B_marker[i3]] += a_entry*B_ext_diag_data[jj3]; } } } /*----------------------------------------------------------------- * Loop over entries in row i1 of A_diag. *-----------------------------------------------------------------*/ for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++) { i2 = A_diag_j[jj2]; a_entry = A_diag_data[jj2]; /*----------------------------------------------------------- * Loop over entries in row i2 of B_diag. *-----------------------------------------------------------*/ for (jj3 = B_diag_i[i2]; jj3 < B_diag_i[i2+1]; jj3++) { i3 = B_diag_j[jj3]; /*-------------------------------------------------------- * Check B_marker to see that C_{i1,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. 
*--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_diag) { B_marker[i3] = jj_count_diag; C_diag_data[jj_count_diag] = a_entry*B_diag_data[jj3]; C_diag_j[jj_count_diag] = i3; jj_count_diag++; } else { C_diag_data[B_marker[i3]] += a_entry*B_diag_data[jj3]; } } if (num_cols_offd_B) { for (jj3 = B_offd_i[i2]; jj3 < B_offd_i[i2+1]; jj3++) { i3 = num_cols_diag_B+map_B_to_C[B_offd_j[jj3]]; /*-------------------------------------------------------- * Check B_marker to see that C_{i1,i3} has not already * been accounted for. If it has not, create a new entry. * If it has, add new contribution. *--------------------------------------------------------*/ if (B_marker[i3] < jj_row_begin_offd) { B_marker[i3] = jj_count_offd; C_offd_data[jj_count_offd] = a_entry*B_offd_data[jj3]; C_offd_j[jj_count_offd] = i3-num_cols_diag_B; jj_count_offd++; } else { C_offd_data[B_marker[i3]] += a_entry*B_offd_data[jj3]; } } } } } hypre_TFree(B_marker, HYPRE_MEMORY_HOST); } /*end parallel region */ C = hypre_ParCSRMatrixCreate(comm, n_rows_A, n_cols_B, row_starts_A, col_starts_B, num_cols_offd_C, C_diag_size, C_offd_size); /* Note that C does not own the partitionings */ hypre_ParCSRMatrixSetRowStartsOwner(C, 0); hypre_ParCSRMatrixSetColStartsOwner(C, 0); C_diag = hypre_ParCSRMatrixDiag(C); hypre_CSRMatrixData(C_diag) = C_diag_data; hypre_CSRMatrixI(C_diag) = C_diag_i; hypre_CSRMatrixJ(C_diag) = C_diag_j; C_offd = hypre_ParCSRMatrixOffd(C); hypre_CSRMatrixI(C_offd) = C_offd_i; hypre_ParCSRMatrixOffd(C) = C_offd; if (num_cols_offd_C) { hypre_CSRMatrixData(C_offd) = C_offd_data; hypre_CSRMatrixJ(C_offd) = C_offd_j; hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C; } hypre_CSRMatrixMemoryLocation(C_diag) = memory_location_C; hypre_CSRMatrixMemoryLocation(C_offd) = memory_location_C; /*----------------------------------------------------------------------- * Free various arrays *-----------------------------------------------------------------------*/ 
/* hypre_ParMatmul (tail; function begins above this chunk): release the
   B_ext scratch arrays and return the product matrix C. */
   hypre_TFree(B_ext_diag_i, HYPRE_MEMORY_HOST);
   if (B_ext_diag_size)
   {
      hypre_TFree(B_ext_diag_j, HYPRE_MEMORY_HOST);
      hypre_TFree(B_ext_diag_data, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(B_ext_offd_i, HYPRE_MEMORY_HOST);
   if (B_ext_offd_size)
   {
      hypre_TFree(B_ext_offd_j, HYPRE_MEMORY_HOST);
      hypre_TFree(B_ext_offd_data, HYPRE_MEMORY_HOST);
   }
   if (num_cols_offd_B) hypre_TFree(map_B_to_C, HYPRE_MEMORY_HOST);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MATMUL] += hypre_MPI_Wtime();
#endif

   return C;
}

/* The following function was formerly part of hypre_ParCSRMatrixExtractBExt
   but the code was removed so it can be used for a corresponding function
   for Boolean matrices.

   JSP: to allow communication overlapping, it returns comm_handle_idx and
   comm_handle_data. Before accessing B, they should be destroyed (including
   send_data contained in the comm_handle).

   Gathers, into flat CSR-style arrays (*pB_ext_i / *pB_ext_j / *pB_ext_data),
   the rows of the local B pieces (diag/offd) that other ranks need, exchanging
   them according to comm_pkg.  Column indices are returned as global
   (HYPRE_BigInt) indices.  The index and (optional) data exchanges are started
   but NOT completed here — they are handed back through comm_handle_idx and
   comm_handle_data so the caller can overlap them with other work. */
void hypre_ParCSRMatrixExtractBExt_Arrays_Overlap(
   HYPRE_Int ** pB_ext_i,
   HYPRE_BigInt ** pB_ext_j,
   HYPRE_Complex ** pB_ext_data,
   HYPRE_BigInt ** pB_ext_row_map,
   HYPRE_Int * num_nonzeros,
   HYPRE_Int data,
   HYPRE_Int find_row_map,
   MPI_Comm comm,
   hypre_ParCSRCommPkg * comm_pkg,
   HYPRE_Int num_cols_B,
   HYPRE_Int num_recvs,
   HYPRE_Int num_sends,
   HYPRE_BigInt first_col_diag,
   HYPRE_BigInt * row_starts,
   HYPRE_Int * recv_vec_starts,
   HYPRE_Int * send_map_starts,
   HYPRE_Int * send_map_elmts,
   HYPRE_Int * diag_i,
   HYPRE_Int * diag_j,
   HYPRE_Int * offd_i,
   HYPRE_Int * offd_j,
   HYPRE_BigInt * col_map_offd,
   HYPRE_Real * diag_data,
   HYPRE_Real * offd_data,
   hypre_ParCSRCommHandle **comm_handle_idx,
   hypre_ParCSRCommHandle **comm_handle_data,
   HYPRE_Int *CF_marker, HYPRE_Int *CF_marker_offd,
   HYPRE_Int skip_fine, /* 1 if only coarse points are needed */
   HYPRE_Int skip_same_sign /* 1 if only points that have the same sign are needed */
   // extended based long range interpolation: skip_fine = 1, skip_same_sign = 0 for S matrix, skip_fine = 1, skip_same_sign = 1 for A matrix
   // other interpolation: skip_fine = 0, skip_same_sign = 0
)
{
   hypre_ParCSRCommHandle *comm_handle, *row_map_comm_handle = NULL;
   hypre_ParCSRCommPkg *tmp_comm_pkg;
   HYPRE_Int *B_int_i;
   HYPRE_BigInt *B_int_j;
   HYPRE_Int *B_ext_i;
   HYPRE_BigInt * B_ext_j;
   HYPRE_Complex * B_ext_data;
   HYPRE_Complex * B_int_data;
   HYPRE_BigInt * B_int_row_map;
   HYPRE_BigInt * B_ext_row_map;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int *jdata_recv_vec_starts;
   HYPRE_Int *jdata_send_map_starts;

   HYPRE_Int i, j, k;
   HYPRE_Int start_index;
   /*HYPRE_Int jrow;*/
   HYPRE_Int num_rows_B_ext;
   HYPRE_Int *prefix_sum_workspace;

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);

   HYPRE_BigInt first_row_index = row_starts[0];

   num_rows_B_ext = recv_vec_starts[num_recvs];
   if ( num_rows_B_ext < 0 ) {  /* no B_ext, no communication */
      *pB_ext_i = NULL;
      *pB_ext_j = NULL;
      if ( data ) *pB_ext_data = NULL;
      if ( find_row_map ) *pB_ext_row_map = NULL;
      *num_nonzeros = 0;
      return;
   };
   B_int_i = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends]+1, HYPRE_MEMORY_HOST);
   B_ext_i = hypre_CTAlloc(HYPRE_Int, num_rows_B_ext+1, HYPRE_MEMORY_HOST);
   *pB_ext_i = B_ext_i;
   if ( find_row_map ) {
      B_int_row_map = hypre_CTAlloc( HYPRE_BigInt, send_map_starts[num_sends]+1 , HYPRE_MEMORY_HOST);
      B_ext_row_map = hypre_CTAlloc( HYPRE_BigInt, num_rows_B_ext+1 , HYPRE_MEMORY_HOST);
      *pB_ext_row_map = B_ext_row_map;
   };

   /*--------------------------------------------------------------------------
    * generate B_int_i through adding number of row-elements of offd and diag
    * for corresponding rows. B_int_i[j+1] contains the number of elements of
    * a row j (which is determined through send_map_elmts)
    *--------------------------------------------------------------------------*/

   jdata_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
   jdata_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);
   jdata_send_map_starts[0] = B_int_i[0] = 0;

   /*HYPRE_Int prefix_sum_workspace[(hypre_NumThreads() + 1)*num_sends];*/
   prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, (hypre_NumThreads() + 1)*num_sends, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,k)
#endif
   {
      /*HYPRE_Int counts[num_sends];*/
      HYPRE_Int *counts;
      counts = hypre_TAlloc(HYPRE_Int, num_sends, HYPRE_MEMORY_HOST);

      /* Pass 1: count, per sent row j, how many entries survive the
         skip_fine / skip_same_sign filters; store in B_int_i[j+1]. */
      for (i=0; i < num_sends; i++)
      {
         HYPRE_Int j_begin, j_end;
         hypre_GetSimpleThreadPartition(&j_begin, &j_end, send_map_starts[i + 1] - send_map_starts[i]);
         j_begin += send_map_starts[i];
         j_end += send_map_starts[i];

         HYPRE_Int count = 0;
         if (skip_fine && skip_same_sign)
         {
            for (j = j_begin; j < j_end; j++)
            {
               HYPRE_Int jrow = send_map_elmts[j];
               HYPRE_Int len = 0;

               /* The first stored entry of the diag row supplies the
                  reference sign; only strictly opposite-signed couplings
                  (to coarse points, CF_marker >= 0, for diag) are counted. */
               if (diag_data[diag_i[jrow]] >= 0)
               {
                  for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
                  {
                     if (diag_data[k] < 0 && CF_marker[diag_j[k]] >= 0) len++;
                  }
                  for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
                  {
                     if (offd_data[k] < 0) len++;
                  }
               }
               else
               {
                  for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
                  {
                     if (diag_data[k] > 0 && CF_marker[diag_j[k]] >= 0) len++;
                  }
                  for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
                  {
                     if (offd_data[k] > 0) len++;
                  }
               }

               B_int_i[j + 1] = len;
               count += len;
            }
         }
         else if (skip_fine)
         {
            /* Keep only connections to coarse points (CF_marker >= 0). */
            for (j = j_begin; j < j_end; j++)
            {
               HYPRE_Int jrow = send_map_elmts[j];
               HYPRE_Int len = 0;

               for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++)
               {
                  if (CF_marker[diag_j[k]] >= 0) len++;
               }
               for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
               {
                  if (CF_marker_offd[offd_j[k]] >= 0) len++;
               }

               B_int_i[j + 1] = len;
               count += len;
            }
         }
         else
         {
            /* No filtering: full row length = diag part + offd part. */
            for (j = j_begin; j < j_end; j++)
            {
               HYPRE_Int jrow = send_map_elmts[j];
               HYPRE_Int len = diag_i[jrow + 1] - diag_i[jrow];
               len += offd_i[jrow + 1] - offd_i[jrow];
               B_int_i[j + 1] = len;
               count += len;
            }
         }

         if (find_row_map)
         {
            /* Record the global row number of every sent row. */
            for (j = j_begin; j < j_end; j++)
            {
               HYPRE_Int jrow = send_map_elmts[j];
               B_int_row_map[j] = (HYPRE_BigInt)jrow + first_row_index;
            }
         }

         counts[i] = count;
      }

      /* Per-send-target prefix sum over all threads' counts. */
      hypre_prefix_sum_multiple(counts, jdata_send_map_starts + 1, num_sends, prefix_sum_workspace);

#ifdef HYPRE_USING_OPENMP
#pragma omp master
#endif
      {
         /* One thread finalizes the entry-offsets per send target and starts
            the row-length exchange so it overlaps the fill pass below. */
         for (i = 1; i < num_sends; i++)
         {
            jdata_send_map_starts[i + 1] += jdata_send_map_starts[i];
         }

         /*--------------------------------------------------------------------------
          * initialize communication: exchange row lengths B_int_i -> B_ext_i
          *--------------------------------------------------------------------------*/

         comm_handle = hypre_ParCSRCommHandleCreate(11,comm_pkg, &B_int_i[1],&(B_ext_i[1]) );
         if ( find_row_map )
         {
            /* scatter/gather B_int row numbers to form array of B_ext row numbers */
            row_map_comm_handle = hypre_ParCSRCommHandleCreate
               (21,comm_pkg, B_int_row_map, B_ext_row_map );
         }

         B_int_j = hypre_TAlloc(HYPRE_BigInt, jdata_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
         if (data) B_int_data = hypre_TAlloc(HYPRE_Complex, jdata_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* Pass 2: fill the send buffers B_int_j (global column indices) and,
         when data != 0, B_int_data, applying the same filters as pass 1. */
      for (i=0; i < num_sends; i++)
      {
         HYPRE_Int j_begin, j_end;
         hypre_GetSimpleThreadPartition(&j_begin, &j_end, send_map_starts[i + 1] - send_map_starts[i]);
         j_begin += send_map_starts[i];
         j_end += send_map_starts[i];

         HYPRE_Int count = counts[i] + jdata_send_map_starts[i];

         if (data)
         {
            if (skip_same_sign && skip_fine)
            {
               for (j = j_begin; j < j_end; j++)
               {
                  HYPRE_Int jrow = send_map_elmts[j];
                  /*HYPRE_Int count_begin = count;*/

                  if (diag_data[diag_i[jrow]] >= 0)
                  {
                     for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
                     {
                        if (diag_data[k] < 0 && CF_marker[diag_j[k]] >= 0)
                        {
                           B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag;
                           B_int_data[count] = diag_data[k];
                           count++;
                        }
                     }
                     for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
                     {
                        HYPRE_Int c = offd_j[k];
                        HYPRE_BigInt c_global = col_map_offd[c];
                        if (offd_data[k] < 0)
                        {
                           B_int_j[count] = c_global;
                           B_int_data[count] = offd_data[k];
                           count++;
                        }
                     }
                  }
                  else
                  {
                     for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
                     {
                        if (diag_data[k] > 0 && CF_marker[diag_j[k]] >= 0)
                        {
                           B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag;
                           B_int_data[count] = diag_data[k];
                           count++;
                        }
                     }
                     for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
                     {
                        HYPRE_Int c = offd_j[k];
                        HYPRE_BigInt c_global = col_map_offd[c];
                        if (offd_data[k] > 0)
                        {
                           B_int_j[count] = c_global;
                           B_int_data[count] = offd_data[k];
                           count++;
                        }
                     }
                  }
               }
            }
            else
            {
               for (j = j_begin; j < j_end; ++j)
               {
                  HYPRE_Int jrow = send_map_elmts[j];
                  for (k=diag_i[jrow]; k < diag_i[jrow+1]; k++)
                  {
                     B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag;
                     B_int_data[count] = diag_data[k];
                     count++;
                  }
                  for (k=offd_i[jrow]; k < offd_i[jrow+1]; k++)
                  {
                     B_int_j[count] = col_map_offd[offd_j[k]];
                     B_int_data[count] = offd_data[k];
                     count++;
                  }
               }
            }
         } // data
         else
         {
            if (skip_fine)
            {
               for (j = j_begin; j < j_end; j++)
               {
                  HYPRE_Int jrow = send_map_elmts[j];
                  for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++)
                  {
                     if (CF_marker[diag_j[k]] >= 0)
                     {
                        B_int_j[count] = (HYPRE_BigInt)diag_j[k] + first_col_diag;
                        count++;
                     }
                  }
                  for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
                  {
                     if (CF_marker_offd[offd_j[k]] >= 0)
                     {
                        B_int_j[count] = col_map_offd[offd_j[k]];
                        count++;
                     }
                  }
               }
            }
            else
            {
               for (j = j_begin; j < j_end; ++j)
               {
                  HYPRE_Int jrow = send_map_elmts[j];
                  for (k=diag_i[jrow]; k < diag_i[jrow+1]; k++)
                  {
                     B_int_j[count] = (HYPRE_BigInt)diag_j[k]+first_col_diag;
                     count++;
                  }
                  for (k=offd_i[jrow]; k < offd_i[jrow+1]; k++)
                  {
                     B_int_j[count] = col_map_offd[offd_j[k]];
                     count++;
                  }
               }
            }
         } // !data
      } /* for each send target */
      hypre_TFree(counts, HYPRE_MEMORY_HOST);
   } /* omp parallel. JSP: this takes most of time in this function */

   hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);

   /* Temporary comm pkg whose send/recv "starts" are measured in matrix
      entries (jdata_*) instead of rows, for the index/data exchanges. */
   tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm;
   hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
   hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
   hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgSendProcs(comm_pkg);
   hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
   hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = jdata_send_map_starts;

   /* Complete the row-length exchange before using B_ext_i. */
   hypre_ParCSRCommHandleDestroy(comm_handle);
   comm_handle = NULL;

   /*--------------------------------------------------------------------------
    * after communication exchange B_ext_i[j+1] contains the number of elements
    * of a row j !
    * evaluate B_ext_i and compute *num_nonzeros for B_ext
    *--------------------------------------------------------------------------*/

   for (i=0; i < num_recvs; i++)
      for (j = recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++)
         B_ext_i[j+1] += B_ext_i[j];

   *num_nonzeros = B_ext_i[num_rows_B_ext];

   *pB_ext_j = hypre_TAlloc(HYPRE_BigInt, *num_nonzeros, HYPRE_MEMORY_HOST);
   B_ext_j = *pB_ext_j;
   if (data)
   {
      *pB_ext_data = hypre_TAlloc(HYPRE_Complex, *num_nonzeros, HYPRE_MEMORY_HOST);
      B_ext_data = *pB_ext_data;
   };

   /* Entry-based receive offsets for the index/data exchanges. */
   for (i=0; i < num_recvs; i++)
   {
      start_index = B_ext_i[recv_vec_starts[i]];
      *num_nonzeros = B_ext_i[recv_vec_starts[i+1]]-start_index;
      jdata_recv_vec_starts[i+1] = B_ext_i[recv_vec_starts[i+1]];
   }

   hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = jdata_recv_vec_starts;

   /* Start (but do not finish) the column-index and data exchanges; the
      caller must destroy these handles (and free their send buffers,
      B_int_j / B_int_data) before reading B_ext_j / B_ext_data. */
   *comm_handle_idx = hypre_ParCSRCommHandleCreate(21,tmp_comm_pkg,B_int_j,B_ext_j);
   if (data)
   {
      *comm_handle_data = hypre_ParCSRCommHandleCreate(1,tmp_comm_pkg,B_int_data, B_ext_data);
   }

   if (row_map_comm_handle)
   {
      hypre_ParCSRCommHandleDestroy(row_map_comm_handle);
      row_map_comm_handle = NULL;
   }

   hypre_TFree(jdata_send_map_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(jdata_recv_vec_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST);
   hypre_TFree(B_int_i, HYPRE_MEMORY_HOST);
   if ( find_row_map ) hypre_TFree(B_int_row_map, HYPRE_MEMORY_HOST);

   /* end generic part */
}

/* Non-overlapping convenience wrapper: runs the Overlap version with the
   filters disabled, then immediately completes both exchanges and frees
   the send buffers. */
void hypre_ParCSRMatrixExtractBExt_Arrays(
   HYPRE_Int ** pB_ext_i,
   HYPRE_BigInt ** pB_ext_j,
   HYPRE_Complex ** pB_ext_data,
   HYPRE_BigInt ** pB_ext_row_map,
   HYPRE_Int * num_nonzeros,
   HYPRE_Int data,
   HYPRE_Int find_row_map,
   MPI_Comm comm,
   hypre_ParCSRCommPkg * comm_pkg,
   HYPRE_Int num_cols_B,
   HYPRE_Int num_recvs,
   HYPRE_Int num_sends,
   HYPRE_BigInt first_col_diag,
   HYPRE_BigInt * row_starts,
   HYPRE_Int * recv_vec_starts,
   HYPRE_Int * send_map_starts,
   HYPRE_Int * send_map_elmts,
   HYPRE_Int * diag_i,
   HYPRE_Int * diag_j,
   HYPRE_Int * offd_i,
   HYPRE_Int * offd_j,
   HYPRE_BigInt * col_map_offd,
   HYPRE_Real * diag_data,
   HYPRE_Real * offd_data
)
{
   hypre_ParCSRCommHandle *comm_handle_idx, *comm_handle_data;

   hypre_ParCSRMatrixExtractBExt_Arrays_Overlap(
      pB_ext_i, pB_ext_j, pB_ext_data, pB_ext_row_map, num_nonzeros,
      data, find_row_map, comm, comm_pkg, num_cols_B, num_recvs, num_sends,
      first_col_diag, row_starts, recv_vec_starts, send_map_starts,
      send_map_elmts, diag_i, diag_j, offd_i, offd_j, col_map_offd,
      diag_data, offd_data,
      &comm_handle_idx, &comm_handle_data,
      NULL, NULL,
      0, 0);

   /* Finish the index exchange and release its send buffer. */
   HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_idx->send_data;
   hypre_ParCSRCommHandleDestroy(comm_handle_idx);
   hypre_TFree(send_idx, HYPRE_MEMORY_HOST);

   if (data)
   {
      /* Likewise for the numerical values. */
      HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_data->send_data;
      hypre_ParCSRCommHandleDestroy(comm_handle_data);
      hypre_TFree(send_data, HYPRE_MEMORY_HOST);
   }
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixExtractBExt : extracts rows from B which are located on
 * other processors and needed for multiplication with A locally. The rows
 * are returned as CSRMatrix.
*--------------------------------------------------------------------------*/

/* Overlapping variant: returns the needed rows of B as a CSRMatrix whose
   column indices are global (stored in the Big-J array).  The index and
   (optional) data exchanges are still in flight in *comm_handle_idx /
   *comm_handle_data when this returns; the caller must destroy them (and
   free their send buffers) before reading the returned matrix. */
hypre_CSRMatrix *
hypre_ParCSRMatrixExtractBExt_Overlap( hypre_ParCSRMatrix *B,
                                       hypre_ParCSRMatrix *A,
                                       HYPRE_Int data,
                                       hypre_ParCSRCommHandle **comm_handle_idx,
                                       hypre_ParCSRCommHandle **comm_handle_data,
                                       HYPRE_Int *CF_marker, HYPRE_Int *CF_marker_offd,
                                       HYPRE_Int skip_fine, HYPRE_Int skip_same_sign )
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(B);
   HYPRE_BigInt first_col_diag = hypre_ParCSRMatrixFirstColDiag(B);
   /*HYPRE_Int first_row_index = hypre_ParCSRMatrixFirstRowIndex(B);*/
   HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(B);

   /* Note: the communication pattern comes from A's comm pkg (rows of B
      needed by A's off-diagonal columns), while the row data comes from B. */
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int num_recvs;
   HYPRE_Int *recv_vec_starts;
   HYPRE_Int num_sends;
   HYPRE_Int *send_map_starts;
   HYPRE_Int *send_map_elmts;

   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(B);

   HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
   HYPRE_Int *diag_j = hypre_CSRMatrixJ(diag);
   HYPRE_Real *diag_data = hypre_CSRMatrixData(diag);

   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(B);

   HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
   HYPRE_Int *offd_j = hypre_CSRMatrixJ(offd);
   HYPRE_Real *offd_data = hypre_CSRMatrixData(offd);

   HYPRE_Int num_cols_B, num_nonzeros;
   HYPRE_Int num_rows_B_ext;

   hypre_CSRMatrix *B_ext;

   HYPRE_Int *B_ext_i;
   HYPRE_BigInt *B_ext_j;
   HYPRE_Complex *B_ext_data;
   HYPRE_BigInt *idummy;

   /*---------------------------------------------------------------------
    * If there exists no CommPkg for A, a CommPkg is generated using
    * equally load balanced partitionings
    *--------------------------------------------------------------------*/

   if (!hypre_ParCSRMatrixCommPkg(A))
   {
      hypre_MatvecCommPkgCreate(A);
   }

   comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
   send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);

   num_cols_B = hypre_ParCSRMatrixGlobalNumCols(B);
   num_rows_B_ext = recv_vec_starts[num_recvs];

   hypre_ParCSRMatrixExtractBExt_Arrays_Overlap
      ( &B_ext_i, &B_ext_j, &B_ext_data, &idummy,
        &num_nonzeros,
        data, 0, comm, comm_pkg,
        num_cols_B, num_recvs, num_sends,
        first_col_diag, B->row_starts,
        recv_vec_starts, send_map_starts, send_map_elmts,
        diag_i, diag_j, offd_i, offd_j, col_map_offd,
        diag_data, offd_data,
        comm_handle_idx, comm_handle_data,
        CF_marker, CF_marker_offd,
        skip_fine, skip_same_sign
      );

   /* Wrap the flat arrays; column indices go into the BigJ slot because
      they are global indices. */
   B_ext = hypre_CSRMatrixCreate(num_rows_B_ext,num_cols_B,num_nonzeros);
   hypre_CSRMatrixMemoryLocation(B_ext) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixI(B_ext) = B_ext_i;
   hypre_CSRMatrixBigJ(B_ext) = B_ext_j;
   if (data) hypre_CSRMatrixData(B_ext) = B_ext_data;

   return B_ext;
}

/* Extracts the external rows of B needed by A's local multiply.
   want_data == 0 fetches only the pattern. */
hypre_CSRMatrix *
hypre_ParCSRMatrixExtractBExt( hypre_ParCSRMatrix *B,
                               hypre_ParCSRMatrix *A,
                               HYPRE_Int want_data )
{
#if 0
   /* Historical implementation, kept for reference only. */
   hypre_ParCSRCommHandle *comm_handle_idx, *comm_handle_data;

   hypre_CSRMatrix *B_ext = hypre_ParCSRMatrixExtractBExt_Overlap(B, A, want_data, &comm_handle_idx, &comm_handle_data, NULL, NULL, 0, 0);

   HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_idx->send_data;
   hypre_ParCSRCommHandleDestroy(comm_handle_idx);
   hypre_TFree(send_idx, HYPRE_MEMORY_HOST);

   if (want_data)
   {
      HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_data->send_data;
      hypre_ParCSRCommHandleDestroy(comm_handle_data);
      hypre_TFree(send_data, HYPRE_MEMORY_HOST);
   }
#else
   /* Live code path: fetch the external rows via the init/wait pair. */
   hypre_assert( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(B)) ==
                 hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(B)) );

   hypre_CSRMatrix *B_ext;
   void            *request;

   if (!hypre_ParCSRMatrixCommPkg(A))
   {
      hypre_MatvecCommPkgCreate(A);
   }

   hypre_ParcsrGetExternalRowsInit(B,
                                   hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)),
                                   hypre_ParCSRMatrixColMapOffd(A),
                                   hypre_ParCSRMatrixCommPkg(A),
                                   want_data,
                                   &request);

   B_ext = hypre_ParcsrGetExternalRowsWait(request);
#endif

   return B_ext;
}
/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixTranspose
 *
 * Computes AT = transpose(A) and returns it in *AT_ptr.
 * data gates all value (HYPRE_Complex) buffers and is forwarded to
 * hypre_CSRMatrixTranspose — presumably 0 means structure-only transpose
 * (confirm against hypre_CSRMatrixTranspose).
 * Returns 0 (ierr).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixTranspose( hypre_ParCSRMatrix *A,
                             hypre_ParCSRMatrix **AT_ptr,
                             HYPRE_Int data )
{
   hypre_ParCSRCommHandle *comm_handle;
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int num_cols = hypre_ParCSRMatrixNumCols(A);
   HYPRE_BigInt first_row_index = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A);

   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Int ierr = 0;
   HYPRE_Int num_sends, num_recvs, num_cols_offd_AT;
   HYPRE_Int i, j, k, index, counter, j_row;
   HYPRE_BigInt value;

   hypre_ParCSRMatrix *AT;
   hypre_CSRMatrix *AT_diag;
   hypre_CSRMatrix *AT_offd;
   hypre_CSRMatrix *AT_tmp;

   HYPRE_BigInt first_row_index_AT, first_col_diag_AT;
   HYPRE_Int local_num_rows_AT, local_num_cols_AT;

   HYPRE_Int *AT_tmp_i;
   HYPRE_Int *AT_tmp_j;
   HYPRE_BigInt *AT_big_j = NULL;
   HYPRE_Complex *AT_tmp_data;

   HYPRE_Int *AT_buf_i;
   HYPRE_BigInt *AT_buf_j;
   HYPRE_Complex *AT_buf_data;

   HYPRE_Int *AT_offd_i;
   HYPRE_Int *AT_offd_j;
   HYPRE_Complex *AT_offd_data;
   HYPRE_BigInt *col_map_offd_AT;
   HYPRE_BigInt *row_starts_AT;
   HYPRE_BigInt *col_starts_AT;

   HYPRE_Int num_procs, my_id;

   HYPRE_Int *recv_procs;
   HYPRE_Int *send_procs;
   HYPRE_Int *recv_vec_starts;
   HYPRE_Int *send_map_starts;
   HYPRE_Int *send_map_elmts;
   HYPRE_Int *tmp_recv_vec_starts;
   HYPRE_Int *tmp_send_map_starts;
   hypre_ParCSRCommPkg *tmp_comm_pkg;

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);

   num_cols_offd_AT = 0;
   counter = 0;
   AT_offd_j = NULL;
   AT_offd_data = NULL;
   col_map_offd_AT = NULL;

   HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A);

   /*---------------------------------------------------------------------
    * If there exists no CommPkg for A, a CommPkg is generated using
    * equally load balanced partitionings
    *--------------------------------------------------------------------*/

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   if (num_procs > 1)
   {
      /* Phase 1: transpose the offd part locally; its rows must then be
         shipped back to the ranks that own the corresponding columns. */
      hypre_CSRMatrixTranspose (A_offd, &AT_tmp, data);

      AT_tmp_i = hypre_CSRMatrixI(AT_tmp);
      AT_tmp_j = hypre_CSRMatrixJ(AT_tmp);
      if (data)
      {
         AT_tmp_data = hypre_CSRMatrixData(AT_tmp);
      }

      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
      recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
      send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
      recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
      send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
      send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);

      AT_buf_i = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends], HYPRE_MEMORY_HOST);

      if (AT_tmp_i[num_cols_offd])
      {
         AT_big_j = hypre_CTAlloc(HYPRE_BigInt, AT_tmp_i[num_cols_offd], HYPRE_MEMORY_HOST);
      }

      /* Translate local column indices of AT_tmp to global row indices. */
      for (i=0; i < AT_tmp_i[num_cols_offd]; i++)
      {
         //AT_tmp_j[i] += first_row_index;
         AT_big_j[i] = (HYPRE_BigInt)AT_tmp_j[i]+first_row_index;
      }

      /* Convert AT_tmp_i in place from row pointers to row lengths for the
         exchange below. */
      for (i=0; i < num_cols_offd; i++)
      {
         AT_tmp_i[i] = AT_tmp_i[i+1]-AT_tmp_i[i];
      }

      /* Phase 2: start exchanging the row lengths (reverse direction). */
      comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg, AT_tmp_i, AT_buf_i);
   }

   /* The diag part transposes purely locally, overlapping the exchange. */
   hypre_CSRMatrixTranspose(A_diag, &AT_diag, data);

   AT_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols+1, memory_location);

   if (num_procs > 1)
   {
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;

      /* Entry-based send/recv offsets for the index/data exchanges, and
         accumulation of received row lengths into AT_offd_i. */
      tmp_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
      tmp_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);

      tmp_send_map_starts[0] = send_map_starts[0];
      for (i=0; i < num_sends; i++)
      {
         tmp_send_map_starts[i+1] = tmp_send_map_starts[i];
         for (j=send_map_starts[i]; j < send_map_starts[i+1]; j++)
         {
            tmp_send_map_starts[i+1] += AT_buf_i[j];
            AT_offd_i[send_map_elmts[j]+1] += AT_buf_i[j];
         }
      }
      for (i=0; i < num_cols; i++)
      {
         AT_offd_i[i+1] += AT_offd_i[i];
      }

      tmp_recv_vec_starts[0] = recv_vec_starts[0];
      for (i=0; i < num_recvs; i++)
      {
         tmp_recv_vec_starts[i+1] = tmp_recv_vec_starts[i];
         for (j=recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++)
         {
            tmp_recv_vec_starts[i+1] += AT_tmp_i[j];
         }
      }

      tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
      hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm;
      hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
      hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
      hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = recv_procs;
      hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = send_procs;
      hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = tmp_recv_vec_starts;
      hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = tmp_send_map_starts;

      /* Phase 3: exchange the global column indices (and data). */
      AT_buf_j = hypre_CTAlloc(HYPRE_BigInt, tmp_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
      comm_handle = hypre_ParCSRCommHandleCreate(22, tmp_comm_pkg, AT_big_j, AT_buf_j);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
      hypre_TFree(AT_big_j, HYPRE_MEMORY_HOST);

      if (data)
      {
         AT_buf_data = hypre_CTAlloc(HYPRE_Complex, tmp_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
         comm_handle = hypre_ParCSRCommHandleCreate(2,tmp_comm_pkg,AT_tmp_data, AT_buf_data);
         hypre_ParCSRCommHandleDestroy(comm_handle);
         comm_handle = NULL;
      }

      hypre_TFree(tmp_recv_vec_starts, HYPRE_MEMORY_HOST);
      hypre_TFree(tmp_send_map_starts, HYPRE_MEMORY_HOST);
      hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST);
      hypre_CSRMatrixDestroy(AT_tmp);

      if (AT_offd_i[num_cols])
      {
         AT_offd_j = hypre_CTAlloc(HYPRE_Int, AT_offd_i[num_cols], memory_location);
         AT_big_j = hypre_CTAlloc(HYPRE_BigInt, AT_offd_i[num_cols], HYPRE_MEMORY_HOST);
         if (data)
         {
            AT_offd_data = hypre_CTAlloc(HYPRE_Complex, AT_offd_i[num_cols], memory_location);
         }
      }
      else
      {
         AT_offd_j = NULL;
         AT_offd_data = NULL;
      }

      /* Phase 4: scatter the received entries into AT_offd row slots;
         AT_offd_i[j_row] is advanced as an insertion cursor here ... */
      counter = 0;
      for (i=0; i < num_sends; i++)
      {
         for (j=send_map_starts[i]; j < send_map_starts[i+1]; j++)
         {
            j_row = send_map_elmts[j];
            index = AT_offd_i[j_row];
            for (k=0; k < AT_buf_i[j]; k++)
            {
               if (data)
               {
                  AT_offd_data[index] = AT_buf_data[counter];
               }
               AT_big_j[index++] = AT_buf_j[counter++];
            }
            AT_offd_i[j_row] = index;
         }
      }
      /* ... and shifted back afterwards to restore proper row pointers. */
      for (i=num_cols; i > 0; i--)
      {
         AT_offd_i[i] = AT_offd_i[i-1];
      }
      AT_offd_i[0] = 0;

      /* Phase 5: sort + dedupe the global indices to build col_map_offd_AT. */
      if (counter)
      {
         hypre_BigQsort0(AT_buf_j,0,counter-1);
         num_cols_offd_AT = 1;
         value = AT_buf_j[0];
         for (i=1; i < counter; i++)
         {
            if (value < AT_buf_j[i])
            {
               AT_buf_j[num_cols_offd_AT++] = AT_buf_j[i];
               value = AT_buf_j[i];
            }
         }
      }

      if (num_cols_offd_AT)
      {
         col_map_offd_AT = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_AT, HYPRE_MEMORY_HOST);
      }
      else
      {
         col_map_offd_AT = NULL;
      }

      for (i = 0; i < num_cols_offd_AT; i++)
      {
         col_map_offd_AT[i] = AT_buf_j[i];
      }
      hypre_TFree(AT_buf_i, HYPRE_MEMORY_HOST);
      hypre_TFree(AT_buf_j, HYPRE_MEMORY_HOST);
      if (data)
      {
         hypre_TFree(AT_buf_data, HYPRE_MEMORY_HOST);
      }

      /* Compress global indices to local offd indices via binary search. */
      for (i=0; i < counter; i++)
      {
         AT_offd_j[i] = hypre_BigBinarySearch(col_map_offd_AT,AT_big_j[i], num_cols_offd_AT);
      }
      hypre_TFree(AT_big_j, HYPRE_MEMORY_HOST);
   }

   AT_offd = hypre_CSRMatrixCreate(num_cols, num_cols_offd_AT, counter);
   hypre_CSRMatrixMemoryLocation(AT_offd) = memory_location;
   hypre_CSRMatrixI(AT_offd) = AT_offd_i;
   hypre_CSRMatrixJ(AT_offd) = AT_offd_j;
   hypre_CSRMatrixData(AT_offd) = AT_offd_data;

   /* The transpose swaps the row and column partitionings. */
   row_starts_AT = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
   for (i=0; i < 2; i++)
   {
      row_starts_AT[i] = col_starts[i];
   }

   if (row_starts != col_starts)
   {
      col_starts_AT = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
      for (i=0; i < 2; i++)
      {
         col_starts_AT[i] = row_starts[i];
      }
   }
   else
   {
      col_starts_AT = row_starts_AT;
   }

   first_row_index_AT = row_starts_AT[0];
   first_col_diag_AT = col_starts_AT[0];

   local_num_rows_AT = (HYPRE_Int)(row_starts_AT[1]-first_row_index_AT );
   local_num_cols_AT = (HYPRE_Int)(col_starts_AT[1]-first_col_diag_AT);

   /* Assemble the ParCSR wrapper for AT. */
   AT = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrixComm(AT) = comm;
   hypre_ParCSRMatrixDiag(AT) = AT_diag;
   hypre_ParCSRMatrixOffd(AT) = AT_offd;
   hypre_ParCSRMatrixGlobalNumRows(AT) = hypre_ParCSRMatrixGlobalNumCols(A);
   hypre_ParCSRMatrixGlobalNumCols(AT) = hypre_ParCSRMatrixGlobalNumRows(A);
   hypre_ParCSRMatrixRowStarts(AT) = row_starts_AT;
   hypre_ParCSRMatrixColStarts(AT) = col_starts_AT;
   hypre_ParCSRMatrixColMapOffd(AT) = col_map_offd_AT;

   hypre_ParCSRMatrixFirstRowIndex(AT) = first_row_index_AT;
   hypre_ParCSRMatrixFirstColDiag(AT) = first_col_diag_AT;

   hypre_ParCSRMatrixLastRowIndex(AT) = first_row_index_AT + local_num_rows_AT - 1;
   hypre_ParCSRMatrixLastColDiag(AT) = first_col_diag_AT + local_num_cols_AT - 1;

   hypre_ParCSRMatrixOwnsData(AT) = 1;
   hypre_ParCSRMatrixOwnsRowStarts(AT) = 1;
   hypre_ParCSRMatrixOwnsColStarts(AT) = 1;
   if (row_starts_AT == col_starts_AT)
   {
      /* Shared partitioning array: only one owner to avoid a double free. */
      hypre_ParCSRMatrixOwnsColStarts(AT) = 0;
   }
   hypre_ParCSRMatrixCommPkg(AT) = NULL;
   hypre_ParCSRMatrixCommPkgT(AT) = NULL;

   hypre_ParCSRMatrixRowindices(AT) = NULL;
   hypre_ParCSRMatrixRowvalues(AT) = NULL;
   hypre_ParCSRMatrixGetrowactive(AT) = 0;

   hypre_ParCSRMatrixOwnsAssumedPartition(AT) = 1;

   *AT_ptr = AT;

   return ierr;
}

/* -----------------------------------------------------------------------------
 * generate a parallel spanning tree (for Maxwell Equation)
 * G_csr is the node to edge connectivity matrix
 * ----------------------------------------------------------------------------- */

void hypre_ParCSRMatrixGenSpanningTree( hypre_ParCSRMatrix *G_csr,
                                        HYPRE_Int **indices,
                                        HYPRE_Int G_type )
{
   HYPRE_BigInt nrows_G, ncols_G;
   HYPRE_Int *G_diag_i, *G_diag_j, *GT_diag_mat, i, j, k, edge;
   HYPRE_Int *nodes_marked, *edges_marked, *queue, queue_tail, queue_head, node;
   HYPRE_Int mypid, nprocs, n_children, *children, nsends, *send_procs, *recv_cnts;
   HYPRE_Int nrecvs, *recv_procs, n_proc_array, *proc_array, *pgraph_i, *pgraph_j;
   HYPRE_Int parent, proc, proc2, node2, found, *t_indices, tree_size, *T_diag_i;
   HYPRE_Int *T_diag_j, *counts, offset;
   MPI_Comm comm;
   hypre_ParCSRCommPkg
*comm_pkg; hypre_CSRMatrix *G_diag; /* fetch G matrix (G_type = 0 ==> node to edge) */ if (G_type == 0) { nrows_G = hypre_ParCSRMatrixGlobalNumRows(G_csr); ncols_G = hypre_ParCSRMatrixGlobalNumCols(G_csr); G_diag = hypre_ParCSRMatrixDiag(G_csr); G_diag_i = hypre_CSRMatrixI(G_diag); G_diag_j = hypre_CSRMatrixJ(G_diag); } else { nrows_G = hypre_ParCSRMatrixGlobalNumCols(G_csr); ncols_G = hypre_ParCSRMatrixGlobalNumRows(G_csr); G_diag = hypre_ParCSRMatrixDiag(G_csr); T_diag_i = hypre_CSRMatrixI(G_diag); T_diag_j = hypre_CSRMatrixJ(G_diag); counts = hypre_TAlloc(HYPRE_Int, nrows_G , HYPRE_MEMORY_HOST); for (i = 0; i < nrows_G; i++) counts[i] = 0; for (i = 0; i < T_diag_i[ncols_G]; i++) counts[T_diag_j[i]]++; G_diag_i = hypre_TAlloc(HYPRE_Int, (nrows_G+1) , HYPRE_MEMORY_HOST); G_diag_j = hypre_TAlloc(HYPRE_Int, T_diag_i[ncols_G] , HYPRE_MEMORY_HOST); G_diag_i[0] = 0; for (i = 1; i <= nrows_G; i++) G_diag_i[i] = G_diag_i[i-1] + counts[i-1]; for (i = 0; i < ncols_G; i++) { for (j = T_diag_i[i]; j < T_diag_i[i+1]; j++) { k = T_diag_j[j]; offset = G_diag_i[k]++; G_diag_j[offset] = i; } } G_diag_i[0] = 0; for (i = 1; i <= nrows_G; i++) { G_diag_i[i] = G_diag_i[i-1] + counts[i-1]; } hypre_TFree(counts, HYPRE_MEMORY_HOST); } /* form G transpose in special form (2 nodes per edge max) */ GT_diag_mat = hypre_TAlloc(HYPRE_Int, 2 * ncols_G , HYPRE_MEMORY_HOST); for (i = 0; i < 2 * ncols_G; i++) GT_diag_mat[i] = -1; for (i = 0; i < nrows_G; i++) { for (j = G_diag_i[i]; j < G_diag_i[i+1]; j++) { edge = G_diag_j[j]; if (GT_diag_mat[edge*2] == -1) GT_diag_mat[edge*2] = i; else GT_diag_mat[edge*2+1] = i; } } /* BFS on the local matrix graph to find tree */ nodes_marked = hypre_TAlloc(HYPRE_Int, nrows_G , HYPRE_MEMORY_HOST); edges_marked = hypre_TAlloc(HYPRE_Int, ncols_G , HYPRE_MEMORY_HOST); for (i = 0; i < nrows_G; i++) nodes_marked[i] = 0; for (i = 0; i < ncols_G; i++) edges_marked[i] = 0; queue = hypre_TAlloc(HYPRE_Int, nrows_G , HYPRE_MEMORY_HOST); queue_head = 0; queue_tail = 1; 
queue[0] = 0; nodes_marked[0] = 1; while ((queue_tail-queue_head) > 0) { node = queue[queue_tail-1]; queue_tail--; for (i = G_diag_i[node]; i < G_diag_i[node+1]; i++) { edge = G_diag_j[i]; if (edges_marked[edge] == 0) { if (GT_diag_mat[2*edge+1] != -1) { node2 = GT_diag_mat[2*edge]; if (node2 == node) node2 = GT_diag_mat[2*edge+1]; if (nodes_marked[node2] == 0) { nodes_marked[node2] = 1; edges_marked[edge] = 1; queue[queue_tail] = node2; queue_tail++; } } } } } hypre_TFree(nodes_marked, HYPRE_MEMORY_HOST); hypre_TFree(queue, HYPRE_MEMORY_HOST); hypre_TFree(GT_diag_mat, HYPRE_MEMORY_HOST); /* fetch the communication information from */ comm = hypre_ParCSRMatrixComm(G_csr); hypre_MPI_Comm_rank(comm, &mypid); hypre_MPI_Comm_size(comm, &nprocs); comm_pkg = hypre_ParCSRMatrixCommPkg(G_csr); if (nprocs == 1 && comm_pkg == NULL) { hypre_MatvecCommPkgCreate((hypre_ParCSRMatrix *) G_csr); comm_pkg = hypre_ParCSRMatrixCommPkg(G_csr); } /* construct processor graph based on node-edge connection */ /* (local edges connected to neighbor processor nodes) */ n_children = 0; nrecvs = nsends = 0; if (nprocs > 1) { nsends = hypre_ParCSRCommPkgNumSends(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); nrecvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); proc_array = NULL; if ((nsends+nrecvs) > 0) { n_proc_array = 0; proc_array = hypre_TAlloc(HYPRE_Int, (nsends+nrecvs) , HYPRE_MEMORY_HOST); for (i = 0; i < nsends; i++) proc_array[i] = send_procs[i]; for (i = 0; i < nrecvs; i++) proc_array[nsends+i] = recv_procs[i]; hypre_qsort0(proc_array, 0, nsends+nrecvs-1); n_proc_array = 1; for (i = 1; i < nrecvs+nsends; i++) if (proc_array[i] != proc_array[n_proc_array]) proc_array[n_proc_array++] = proc_array[i]; } pgraph_i = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); recv_cnts = hypre_TAlloc(HYPRE_Int, nprocs , HYPRE_MEMORY_HOST); hypre_MPI_Allgather(&n_proc_array, 1, HYPRE_MPI_INT, recv_cnts, 1, HYPRE_MPI_INT, comm); 
pgraph_i[0] = 0; for (i = 1; i <= nprocs; i++) pgraph_i[i] = pgraph_i[i-1] + recv_cnts[i-1]; pgraph_j = hypre_TAlloc(HYPRE_Int, pgraph_i[nprocs] , HYPRE_MEMORY_HOST); hypre_MPI_Allgatherv(proc_array, n_proc_array, HYPRE_MPI_INT, pgraph_j, recv_cnts, pgraph_i, HYPRE_MPI_INT, comm); hypre_TFree(recv_cnts, HYPRE_MEMORY_HOST); /* BFS on the processor graph to determine parent and children */ nodes_marked = hypre_TAlloc(HYPRE_Int, nprocs , HYPRE_MEMORY_HOST); for (i = 0; i < nprocs; i++) nodes_marked[i] = -1; queue = hypre_TAlloc(HYPRE_Int, nprocs , HYPRE_MEMORY_HOST); queue_head = 0; queue_tail = 1; node = 0; queue[0] = node; while ((queue_tail-queue_head) > 0) { proc = queue[queue_tail-1]; queue_tail--; for (i = pgraph_i[proc]; i < pgraph_i[proc+1]; i++) { proc2 = pgraph_j[i]; if (nodes_marked[proc2] < 0) { nodes_marked[proc2] = proc; queue[queue_tail] = proc2; queue_tail++; } } } parent = nodes_marked[mypid]; n_children = 0; for (i = 0; i < nprocs; i++) if (nodes_marked[i] == mypid) n_children++; if (n_children == 0) {n_children = 0; children = NULL;} else { children = hypre_TAlloc(HYPRE_Int, n_children , HYPRE_MEMORY_HOST); n_children = 0; for (i = 0; i < nprocs; i++) if (nodes_marked[i] == mypid) children[n_children++] = i; } hypre_TFree(nodes_marked, HYPRE_MEMORY_HOST); hypre_TFree(queue, HYPRE_MEMORY_HOST); hypre_TFree(pgraph_i, HYPRE_MEMORY_HOST); hypre_TFree(pgraph_j, HYPRE_MEMORY_HOST); } /* first, connection with my parent : if the edge in my parent * * is incident to one of my nodes, then my parent will mark it */ found = 0; for (i = 0; i < nrecvs; i++) { proc = hypre_ParCSRCommPkgRecvProc(comm_pkg, i); if (proc == parent) { found = 1; break; } } /* but if all the edges connected to my parent are on my side, * * then I will just pick one of them as tree edge */ if (found == 0) { for (i = 0; i < nsends; i++) { proc = hypre_ParCSRCommPkgSendProc(comm_pkg, i); if (proc == parent) { k = hypre_ParCSRCommPkgSendMapStart(comm_pkg,i); edge = 
hypre_ParCSRCommPkgSendMapElmt(comm_pkg,k); edges_marked[edge] = 1; break; } } } /* next, if my processor has an edge incident on one node in my * * child, put this edge on the tree. But if there is no such * * edge, then I will assume my child will pick up an edge */ for (j = 0; j < n_children; j++) { proc = children[j]; for (i = 0; i < nsends; i++) { proc2 = hypre_ParCSRCommPkgSendProc(comm_pkg, i); if (proc == proc2) { k = hypre_ParCSRCommPkgSendMapStart(comm_pkg,i); edge = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,k); edges_marked[edge] = 1; break; } } } if (n_children > 0) { hypre_TFree(children, HYPRE_MEMORY_HOST); } /* count the size of the tree */ tree_size = 0; for (i = 0; i < ncols_G; i++) if (edges_marked[i] == 1) tree_size++; t_indices = hypre_TAlloc(HYPRE_Int, (tree_size+1) , HYPRE_MEMORY_HOST); t_indices[0] = tree_size; tree_size = 1; for (i = 0; i < ncols_G; i++) if (edges_marked[i] == 1) t_indices[tree_size++] = i; (*indices) = t_indices; hypre_TFree(edges_marked, HYPRE_MEMORY_HOST); if (G_type != 0) { hypre_TFree(G_diag_i, HYPRE_MEMORY_HOST); hypre_TFree(G_diag_j, HYPRE_MEMORY_HOST); } } /* ----------------------------------------------------------------------------- * extract submatrices based on given indices * ----------------------------------------------------------------------------- */ void hypre_ParCSRMatrixExtractSubmatrices( hypre_ParCSRMatrix *A_csr, HYPRE_Int *indices2, hypre_ParCSRMatrix ***submatrices ) { HYPRE_Int nrows_A, nindices, *indices, *A_diag_i, *A_diag_j, mypid, nprocs; HYPRE_Int i, j, k, *proc_offsets1, *proc_offsets2, *exp_indices; HYPRE_BigInt *itmp_array; HYPRE_Int nnz11, nnz12, nnz21, nnz22, col, ncols_offd, nnz_offd, nnz_diag; HYPRE_Int nrows, nnz; HYPRE_BigInt global_nrows, global_ncols, *row_starts, *col_starts; HYPRE_Int *diag_i, *diag_j, row, *offd_i; HYPRE_Complex *A_diag_a, *diag_a; hypre_ParCSRMatrix *A11_csr, *A12_csr, *A21_csr, *A22_csr; hypre_CSRMatrix *A_diag, *diag, *offd; MPI_Comm comm; /* 
-----------------------------------------------------
    * first make sure the incoming indices are in order
    * ----------------------------------------------------- */
   nindices = indices2[0];
   indices = &(indices2[1]);
   hypre_qsort0(indices, 0, nindices-1);

   /* -----------------------------------------------------
    * fetch matrix information
    * ----------------------------------------------------- */
   nrows_A = (HYPRE_Int) hypre_ParCSRMatrixGlobalNumRows(A_csr);
   A_diag = hypre_ParCSRMatrixDiag(A_csr);
   A_diag_i = hypre_CSRMatrixI(A_diag);
   A_diag_j = hypre_CSRMatrixJ(A_diag);
   A_diag_a = hypre_CSRMatrixData(A_diag);
   comm = hypre_ParCSRMatrixComm(A_csr);
   hypre_MPI_Comm_rank(comm, &mypid);
   hypre_MPI_Comm_size(comm, &nprocs);
   /* only the sequential (single-rank) case is implemented */
   if (nprocs > 1)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractSubmatrices: cannot handle nprocs > 1 yet.\n");
      exit(1);
   }

   /* -----------------------------------------------------
    * compute new matrix dimensions
    * ----------------------------------------------------- */
   proc_offsets1 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST);
   proc_offsets2 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST);
   hypre_MPI_Allgather(&nindices, 1, HYPRE_MPI_INT, proc_offsets1, 1,
                       HYPRE_MPI_INT, comm);
   /* turn the per-rank counts into an exclusive prefix sum */
   k = 0;
   for (i = 0; i < nprocs; i++)
   {
      j = proc_offsets1[i];
      proc_offsets1[i] = k;
      k += j;
   }
   proc_offsets1[nprocs] = k;
   itmp_array = hypre_ParCSRMatrixRowStarts(A_csr);
   /* offsets of the complementary (unselected) rows; NOTE(review): the
      HYPRE_BigInt difference is narrowed implicitly into HYPRE_Int here —
      presumably safe for the sequential sizes this path supports */
   for (i = 0; i <= nprocs; i++)
      proc_offsets2[i] = itmp_array[i] - proc_offsets1[i];

   /* -----------------------------------------------------
    * assign id's to row and col for later processing
    *   exp_indices[i] >= 0 : i selected, value is its new index
    *   exp_indices[i] <  0 : i unselected, new index is -exp_indices[i]-1
    * ----------------------------------------------------- */
   exp_indices = hypre_TAlloc(HYPRE_Int, nrows_A , HYPRE_MEMORY_HOST);
   for (i = 0; i < nrows_A; i++) exp_indices[i] = -1;
   for (i = 0; i < nindices; i++)
   {
      if (exp_indices[indices[i]] == -1) exp_indices[indices[i]] = i;
      else
      {
         /* duplicate index in the input list; NOTE(review): the format
            string has %d %d but no arguments are passed */
         hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractSubmatrices: wrong index %d %d\n");
         exit(1);
      }
   }
   k = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] < 0)
      {
         exp_indices[i] = - k - 1;
         k++;
      }
   }

   /* -----------------------------------------------------
    * compute number of nonzeros for each block
    * (11 = selected x selected, 12 = selected x unselected, etc.)
    * ----------------------------------------------------- */
   nnz11 = nnz12 = nnz21 = nnz22 = 0;
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] >= 0)
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            col = A_diag_j[j];
            if (exp_indices[col] >= 0) nnz11++;
            else nnz12++;
         }
      }
      else
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            col = A_diag_j[j];
            if (exp_indices[col] >= 0) nnz21++;
            else nnz22++;
         }
      }
   }

   /* -----------------------------------------------------
    * create A11 matrix (assume sequential for the moment)
    * ----------------------------------------------------- */
   ncols_offd = 0;
   nnz_offd = 0;
   nnz_diag = nnz11;
   /* This case is not yet implemented! */
   global_nrows = 0;
   global_ncols = 0;
   row_starts = NULL;
   col_starts = NULL;
   A11_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
                                      row_starts, col_starts, ncols_offd,
                                      nnz_diag, nnz_offd);
   nrows = nindices;
   diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
   diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST);
   diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST);
   nnz = 0;
   row = 0;
   diag_i[0] = 0;
   /* selected rows x selected columns, columns remapped to new indices */
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] >= 0)
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            col = A_diag_j[j];
            if (exp_indices[col] >= 0)
            {
               diag_j[nnz] = exp_indices[col];
               diag_a[nnz++] = A_diag_a[j];
            }
         }
         row++;
         diag_i[row] = nnz;
      }
   }
   diag = hypre_ParCSRMatrixDiag(A11_csr);
   hypre_CSRMatrixI(diag) = diag_i;
   hypre_CSRMatrixJ(diag) = diag_j;
   hypre_CSRMatrixData(diag) = diag_a;
   /* empty off-diagonal block (sequential case) */
   offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
   for (i = 0; i <= nrows; i++) offd_i[i] = 0;
   offd = hypre_ParCSRMatrixOffd(A11_csr);
   hypre_CSRMatrixI(offd) = offd_i;
   hypre_CSRMatrixJ(offd) = NULL;
   hypre_CSRMatrixData(offd) = NULL;

   /* -----------------------------------------------------
    * create A12 matrix (assume sequential for the moment)
    * ----------------------------------------------------- */
   ncols_offd = 0;
   nnz_offd = 0;
   nnz_diag = nnz12;
   global_nrows = (HYPRE_BigInt)proc_offsets1[nprocs];
   global_ncols = (HYPRE_BigInt)proc_offsets2[nprocs];
   row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
   col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
   for (i = 0; i <= nprocs; i++)
   {
      row_starts[i] = (HYPRE_BigInt)proc_offsets1[i];
      col_starts[i] = (HYPRE_BigInt)proc_offsets2[i];
   }
   A12_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
                                      row_starts, col_starts, ncols_offd,
                                      nnz_diag, nnz_offd);
   nrows = nindices;
   diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
   diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST);
   diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST);
   nnz = 0;
   row = 0;
   diag_i[0] = 0;
   /* selected rows x unselected columns; unselected column i maps to
      -exp_indices[i]-1 */
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] >= 0)
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            col = A_diag_j[j];
            if (exp_indices[col] < 0)
            {
               diag_j[nnz] = - exp_indices[col] - 1;
               diag_a[nnz++] = A_diag_a[j];
            }
         }
         row++;
         diag_i[row] = nnz;
      }
   }
   /* sanity check against the count computed above */
   if (nnz > nnz_diag)
   {
      hypre_assert(0);
      hypre_error(HYPRE_ERROR_GENERIC);
   }
   diag = hypre_ParCSRMatrixDiag(A12_csr);
   hypre_CSRMatrixI(diag) = diag_i;
   hypre_CSRMatrixJ(diag) = diag_j;
   hypre_CSRMatrixData(diag) = diag_a;
   offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
   for (i = 0; i <= nrows; i++) offd_i[i] = 0;
   offd = hypre_ParCSRMatrixOffd(A12_csr);
   hypre_CSRMatrixI(offd) = offd_i;
   hypre_CSRMatrixJ(offd) = NULL;
   hypre_CSRMatrixData(offd) = NULL;

   /* -----------------------------------------------------
    * create A21 matrix (assume sequential for the moment)
    * ----------------------------------------------------- */
   ncols_offd = 0;
   nnz_offd = 0;
   nnz_diag = nnz21;
   global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs];
   global_ncols = (HYPRE_BigInt)proc_offsets1[nprocs];
   row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
   col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
   for (i = 0; i <= nprocs; i++)
   {
      row_starts[i] = (HYPRE_BigInt)proc_offsets2[i];
      col_starts[i] = (HYPRE_BigInt)proc_offsets1[i];
   }
   A21_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
                                      row_starts, col_starts, ncols_offd,
                                      nnz_diag, nnz_offd);
   nrows = nrows_A - nindices;
   diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
   diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST);
   diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST);
   nnz = 0;
   row = 0;
   diag_i[0] = 0;
   /* unselected rows x selected columns */
   for (i = 0; i < nrows_A; i++)
   {
      if (exp_indices[i] < 0)
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            col = A_diag_j[j];
            if (exp_indices[col] >= 0)
            {
               diag_j[nnz] = exp_indices[col];
               diag_a[nnz++] = A_diag_a[j];
            }
         }
         row++;
         diag_i[row] = nnz;
      }
   }
   diag = hypre_ParCSRMatrixDiag(A21_csr);
   hypre_CSRMatrixI(diag) = diag_i;
   hypre_CSRMatrixJ(diag) = diag_j;
   hypre_CSRMatrixData(diag) = diag_a;
   offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST);
   for (i = 0; i <= nrows; i++) offd_i[i] = 0;
   offd = hypre_ParCSRMatrixOffd(A21_csr);
   hypre_CSRMatrixI(offd) = offd_i;
   hypre_CSRMatrixJ(offd) = NULL;
   hypre_CSRMatrixData(offd) = NULL;

   /* -----------------------------------------------------
    * create A22 matrix (assume sequential for the moment)
    * ----------------------------------------------------- */
   ncols_offd = 0;
   nnz_offd = 0;
   nnz_diag = nnz22;
   global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs];
   global_ncols = (HYPRE_BigInt)proc_offsets2[nprocs];
   row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
   col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST);
   for (i = 0; i <= nprocs; i++)
   {
      row_starts[i] = (HYPRE_BigInt)proc_offsets2[i];
      col_starts[i] = (HYPRE_BigInt)proc_offsets2[i];
   }
   A22_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
                                      row_starts, col_starts, ncols_offd,
                                      nnz_diag, nnz_offd);
   nrows = nrows_A - nindices;
   diag_i =
hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] < 0) { diag_j[nnz] = - exp_indices[col] - 1; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A22_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nrows; i++) offd_i[i] = 0; offd = hypre_ParCSRMatrixOffd(A22_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = NULL; hypre_CSRMatrixData(offd) = NULL; /* ----------------------------------------------------- * hand the matrices back to the caller and clean up * ----------------------------------------------------- */ (*submatrices)[0] = A11_csr; (*submatrices)[1] = A12_csr; (*submatrices)[2] = A21_csr; (*submatrices)[3] = A22_csr; hypre_TFree(proc_offsets1, HYPRE_MEMORY_HOST); hypre_TFree(proc_offsets2, HYPRE_MEMORY_HOST); hypre_TFree(exp_indices, HYPRE_MEMORY_HOST); } /* ----------------------------------------------------------------------------- * extract submatrices of a rectangular matrix * ----------------------------------------------------------------------------- */ void hypre_ParCSRMatrixExtractRowSubmatrices( hypre_ParCSRMatrix *A_csr, HYPRE_Int *indices2, hypre_ParCSRMatrix ***submatrices ) { HYPRE_Int nrows_A, nindices, *indices, *A_diag_i, *A_diag_j, mypid, nprocs; HYPRE_Int i, j, k, *proc_offsets1, *proc_offsets2, *exp_indices; HYPRE_Int nnz11, nnz21, col, ncols_offd, nnz_offd, nnz_diag; HYPRE_Int *A_offd_i, *A_offd_j; HYPRE_Int nrows, nnz; HYPRE_BigInt global_nrows, global_ncols, *row_starts, *col_starts, *itmp_array; HYPRE_Int *diag_i, *diag_j, row, 
*offd_i, *offd_j, nnz11_offd, nnz21_offd; HYPRE_Complex *A_diag_a, *diag_a, *offd_a; hypre_ParCSRMatrix *A11_csr, *A21_csr; hypre_CSRMatrix *A_diag, *diag, *A_offd, *offd; MPI_Comm comm; /* ----------------------------------------------------- * first make sure the incoming indices are in order * ----------------------------------------------------- */ nindices = indices2[0]; indices = &(indices2[1]); hypre_qsort0(indices, 0, nindices-1); /* ----------------------------------------------------- * fetch matrix information * ----------------------------------------------------- */ nrows_A = (HYPRE_Int)hypre_ParCSRMatrixGlobalNumRows(A_csr); A_diag = hypre_ParCSRMatrixDiag(A_csr); A_diag_i = hypre_CSRMatrixI(A_diag); A_diag_j = hypre_CSRMatrixJ(A_diag); A_diag_a = hypre_CSRMatrixData(A_diag); A_offd = hypre_ParCSRMatrixOffd(A_csr); A_offd_i = hypre_CSRMatrixI(A_offd); A_offd_j = hypre_CSRMatrixJ(A_offd); comm = hypre_ParCSRMatrixComm(A_csr); hypre_MPI_Comm_rank(comm, &mypid); hypre_MPI_Comm_size(comm, &nprocs); /* ----------------------------------------------------- * compute new matrix dimensions * ----------------------------------------------------- */ proc_offsets1 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); proc_offsets2 = hypre_TAlloc(HYPRE_Int, (nprocs+1) , HYPRE_MEMORY_HOST); hypre_MPI_Allgather(&nindices, 1, HYPRE_MPI_INT, proc_offsets1, 1, HYPRE_MPI_INT, comm); k = 0; for (i = 0; i < nprocs; i++) { j = proc_offsets1[i]; proc_offsets1[i] = k; k += j; } proc_offsets1[nprocs] = k; itmp_array = hypre_ParCSRMatrixRowStarts(A_csr); for (i = 0; i <= nprocs; i++) proc_offsets2[i] = (HYPRE_Int)(itmp_array[i] - proc_offsets1[i]); /* ----------------------------------------------------- * assign id's to row and col for later processing * ----------------------------------------------------- */ exp_indices = hypre_TAlloc(HYPRE_Int, nrows_A , HYPRE_MEMORY_HOST); for (i = 0; i < nrows_A; i++) exp_indices[i] = -1; for (i = 0; i < nindices; i++) { if 
(exp_indices[indices[i]] == -1) exp_indices[indices[i]] = i; else { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ExtractRowSubmatrices: wrong index %d %d\n"); exit(1); } } k = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { exp_indices[i] = - k - 1; k++; } } /* ----------------------------------------------------- * compute number of nonzeros for each block * ----------------------------------------------------- */ nnz11 = nnz21 = nnz11_offd = nnz21_offd = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] >= 0) nnz11++; } nnz11_offd += A_offd_i[i+1] - A_offd_i[i]; } else { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = A_diag_j[j]; if (exp_indices[col] < 0) nnz21++; } nnz21_offd += A_offd_i[i+1] - A_offd_i[i]; } } /* ----------------------------------------------------- * create A11 matrix (assume sequential for the moment) * ----------------------------------------------------- */ ncols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A_csr)); nnz_diag = nnz11; nnz_offd = nnz11_offd; global_nrows = (HYPRE_BigInt)proc_offsets1[nprocs]; itmp_array = hypre_ParCSRMatrixColStarts(A_csr); global_ncols = itmp_array[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets1[i]; col_starts[i] = itmp_array[i]; } A11_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { col = 
A_diag_j[j]; if (exp_indices[col] >= 0) { diag_j[nnz] = exp_indices[col]; diag_a[nnz++] = A_diag_a[j]; } } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A11_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd, HYPRE_MEMORY_HOST); offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd, HYPRE_MEMORY_HOST); nnz = 0; row = 0; offd_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] >= 0) { for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { offd_j[nnz] = A_offd_j[j]; offd_a[nnz++] = A_diag_a[j]; } row++; offd_i[row] = nnz; } } offd = hypre_ParCSRMatrixOffd(A11_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = offd_j; hypre_CSRMatrixData(offd) = offd_a; /* ----------------------------------------------------- * create A21 matrix * ----------------------------------------------------- */ ncols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A_csr)); nnz_offd = nnz21_offd; nnz_diag = nnz21; global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs]; itmp_array = hypre_ParCSRMatrixColStarts(A_csr); global_ncols = itmp_array[nprocs]; row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= nprocs; i++) { row_starts[i] = (HYPRE_BigInt)proc_offsets2[i]; col_starts[i] = itmp_array[i]; } A21_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols, row_starts, col_starts, ncols_offd, nnz_diag, nnz_offd); nrows = nrows_A - nindices; diag_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST); diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST); nnz = 0; row = 0; diag_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { diag_j[nnz] = A_diag_j[j]; 
diag_a[nnz++] = A_diag_a[j]; } row++; diag_i[row] = nnz; } } diag = hypre_ParCSRMatrixDiag(A21_csr); hypre_CSRMatrixI(diag) = diag_i; hypre_CSRMatrixJ(diag) = diag_j; hypre_CSRMatrixData(diag) = diag_a; offd_i = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd, HYPRE_MEMORY_HOST); offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd, HYPRE_MEMORY_HOST); nnz = 0; row = 0; offd_i[0] = 0; for (i = 0; i < nrows_A; i++) { if (exp_indices[i] < 0) { for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { offd_j[nnz] = A_offd_j[j]; offd_a[nnz++] = A_diag_a[j]; } row++; offd_i[row] = nnz; } } offd = hypre_ParCSRMatrixOffd(A21_csr); hypre_CSRMatrixI(offd) = offd_i; hypre_CSRMatrixJ(offd) = offd_j; hypre_CSRMatrixData(offd) = offd_a; /* ----------------------------------------------------- * hand the matrices back to the caller and clean up * ----------------------------------------------------- */ (*submatrices)[0] = A11_csr; (*submatrices)[1] = A21_csr; hypre_TFree(proc_offsets1, HYPRE_MEMORY_HOST); hypre_TFree(proc_offsets2, HYPRE_MEMORY_HOST); hypre_TFree(exp_indices, HYPRE_MEMORY_HOST); } /* ----------------------------------------------------------------------------- * return the sum of all local elements of the matrix * ----------------------------------------------------------------------------- */ HYPRE_Complex hypre_ParCSRMatrixLocalSumElts( hypre_ParCSRMatrix * A ) { hypre_CSRMatrix * A_diag = hypre_ParCSRMatrixDiag( A ); hypre_CSRMatrix * A_offd = hypre_ParCSRMatrixOffd( A ); return hypre_CSRMatrixSumElts(A_diag) + hypre_CSRMatrixSumElts(A_offd); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatAminvDB * computes C = (A - inv(D)B) where D is a diagonal matrix * Note: Data structure of A is expected to be a subset of data structure of B! 
*--------------------------------------------------------------------------*/

/* C = A - inv(D)*B, computed into a clone of B.
 * d holds the diagonal of D (one entry per local row); its reciprocals are
 * taken, so zero entries would divide by zero — assumes d[i] != 0 (TODO
 * confirm with callers).  Because A's sparsity pattern is a subset of B's
 * (see header note), C can reuse B's row pointers and column map. */
HYPRE_Int hypre_ParCSRMatrixAminvDB( hypre_ParCSRMatrix *A,
                                     hypre_ParCSRMatrix *B,
                                     HYPRE_Complex *d,
                                     hypre_ParCSRMatrix **C_ptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(B);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   hypre_ParCSRMatrix *C = NULL;
   HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);

   hypre_ParCSRCommPkg *comm_pkg_B = hypre_ParCSRMatrixCommPkg(B);
   hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
   hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
   HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd);
   HYPRE_Int num_sends_B, num_recvs_B;
   HYPRE_Int i, j, cnt;

   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);

   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(B_diag);
   HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag);
   HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag);
   HYPRE_Complex *B_diag_data = hypre_CSRMatrixData(B_diag);
   HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd);
   HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd);
   HYPRE_Complex *B_offd_data = hypre_CSRMatrixData(B_offd);
   HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);

   hypre_CSRMatrix *C_diag = NULL;
   hypre_CSRMatrix *C_offd = NULL;
   HYPRE_Int *C_diag_i = NULL;
   HYPRE_Int *C_diag_j = NULL;
   HYPRE_Complex *C_diag_data = NULL;
   HYPRE_Int *C_offd_i = NULL;
   HYPRE_Int *C_offd_j = NULL;
   HYPRE_Complex *C_offd_data = NULL;

   HYPRE_Int num_procs, my_id;

   HYPRE_Int *recv_procs_B;
   HYPRE_Int *send_procs_B;
   HYPRE_Int *recv_vec_starts_B;
   HYPRE_Int *send_map_starts_B;
   HYPRE_Int *send_map_elmts_B;
   hypre_ParCSRCommPkg *comm_pkg_C;
   HYPRE_Int *recv_procs_C;
   HYPRE_Int *send_procs_C;
   HYPRE_Int *recv_vec_starts_C;
   HYPRE_Int *send_map_starts_C;
   HYPRE_Int *send_map_elmts_C;
   HYPRE_Int *map_to_B;

   /*HYPRE_Int *C_diag_array;
   HYPRE_Int *C_offd_array;*/
   HYPRE_Complex *D_tmp;
   HYPRE_Int size, rest, num_threads, ii;

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();

   /*C_diag_array = hypre_CTAlloc(HYPRE_Int, num_threads);
   C_offd_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);*/

   /*---------------------------------------------------------------------
    * If there exists no CommPkg for B, a CommPkg is generated
    *--------------------------------------------------------------------*/
   if (!comm_pkg_B)
   {
      hypre_MatvecCommPkgCreate(B);
      comm_pkg_B = hypre_ParCSRMatrixCommPkg(B);
   }

   /* C inherits B's structure (row pointers, column indices, col map);
      only the values are recomputed below */
   C = hypre_ParCSRMatrixClone(B, 0);
   /*hypre_ParCSRMatrixInitialize(C);*/

   C_diag = hypre_ParCSRMatrixDiag(C);
   C_diag_i = hypre_CSRMatrixI(C_diag);
   C_diag_j = hypre_CSRMatrixJ(C_diag);
   C_diag_data = hypre_CSRMatrixData(C_diag);
   C_offd = hypre_ParCSRMatrixOffd(C);
   C_offd_i = hypre_CSRMatrixI(C_offd);
   C_offd_j = hypre_CSRMatrixJ(C_offd);
   C_offd_data = hypre_CSRMatrixData(C_offd);

   /* static row partition of the work over the threads: the first `rest`
      threads get (size+1) rows, the remainder get `size` rows */
   size = num_rows/num_threads;
   rest = num_rows - size*num_threads;

   D_tmp = hypre_CTAlloc(HYPRE_Complex, num_rows, HYPRE_MEMORY_HOST);

   /* map A's offd columns into B's offd column numbering; valid because
      col_map_offd_A is assumed to be a (sorted) subset of col_map_offd_B */
   if (num_cols_offd_A)
   {
      map_to_B = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A, HYPRE_MEMORY_HOST);
      cnt = 0;
      for (i=0; i < num_cols_offd_A; i++)
      {
         while (col_map_offd_B[cnt] < col_map_offd_A[i])
         {
            cnt++;
         }
         map_to_B[i] = cnt;
         cnt++;
      }
   }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii, i, j)
#endif
   for (ii=0; ii < num_threads; ii++)
   {
      HYPRE_Int *A_marker = NULL;
      HYPRE_Int ns, ne, A_col, num_cols, nmax;
      /* [ns, ne) is this thread's contiguous row range */
      if (ii < rest)
      {
         ns = ii*size+ii;
         ne = (ii+1)*size+ii+1;
      }
      else
      {
         ns = ii*size+rest;
         ne = (ii+1)*size+rest;
      }
      /* one marker array serves both the diag pass (indexed by local column,
         < num_rows) and the offd pass (indexed by offd column,
         < num_cols_offd_B) */
      nmax = hypre_max(num_rows, num_cols_offd_B);
      A_marker = hypre_CTAlloc(HYPRE_Int, nmax, HYPRE_MEMORY_HOST);

      for (i=0; i < num_rows; i++) A_marker[i] = -1;

      /* reciprocals of D's diagonal for this thread's rows */
      for (i=ns; i < ne; i++) D_tmp[i] = 1.0/d[i];

      /* diag part: scatter A's entries, then subtract D^{-1}*B; a marker
         value >= C_diag_i[i] means the column was already written in row i */
      num_cols = C_diag_i[ns];
      for (i=ns; i < ne; i++)
      {
         for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
         {
            A_col = A_diag_j[j];
            if (A_marker[A_col] < C_diag_i[i])
            {
               A_marker[A_col] = num_cols;
               C_diag_j[num_cols] = A_col;
               C_diag_data[num_cols] = A_diag_data[j];
               num_cols++;
            }
            else
            {
               C_diag_data[A_marker[A_col]] += A_diag_data[j];
            }
         }
         for (j = B_diag_i[i]; j < B_diag_i[i+1]; j++)
         {
            A_col = B_diag_j[j];
            if (A_marker[A_col] < C_diag_i[i])
            {
               A_marker[A_col] = num_cols;
               C_diag_j[num_cols] = A_col;
               C_diag_data[num_cols] = -D_tmp[i]*B_diag_data[j];
               num_cols++;
            }
            else
            {
               C_diag_data[A_marker[A_col]] -= D_tmp[i]*B_diag_data[j];
            }
         }
      }

      /* reset the marker range used by the offd pass */
      for (i=0; i < num_cols_offd_B; i++) A_marker[i] = -1;

      /* offd part: same merge; comparisons use B_offd_i, which equals
         C_offd_i since C was cloned from B */
      num_cols = C_offd_i[ns];
      for (i=ns; i < ne; i++)
      {
         for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
         {
            A_col = map_to_B[A_offd_j[j]];
            if (A_marker[A_col] < B_offd_i[i])
            {
               A_marker[A_col] = num_cols;
               C_offd_j[num_cols] = A_col;
               C_offd_data[num_cols] = A_offd_data[j];
               num_cols++;
            }
            else
            {
               C_offd_data[A_marker[A_col]] += A_offd_data[j];
            }
         }
         for (j = B_offd_i[i]; j < B_offd_i[i+1]; j++)
         {
            A_col = B_offd_j[j];
            if (A_marker[A_col] < B_offd_i[i])
            {
               A_marker[A_col] = num_cols;
               C_offd_j[num_cols] = A_col;
               C_offd_data[num_cols] = -D_tmp[i]*B_offd_data[j];
               num_cols++;
            }
            else
            {
               C_offd_data[A_marker[A_col]] -= D_tmp[i]*B_offd_data[j];
            }
         }
      }
      hypre_TFree(A_marker, HYPRE_MEMORY_HOST);
   } /* end parallel region */

   /*for (i=0; i < num_cols_offd_B; i++)
      col_map_offd_C[i] = col_map_offd_B[i]; */

   /* give C its own deep copy of B's communication package */
   num_sends_B = hypre_ParCSRCommPkgNumSends(comm_pkg_B);
   num_recvs_B = hypre_ParCSRCommPkgNumRecvs(comm_pkg_B);
   recv_procs_B = hypre_ParCSRCommPkgRecvProcs(comm_pkg_B);
   recv_vec_starts_B = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_B);
   send_procs_B = hypre_ParCSRCommPkgSendProcs(comm_pkg_B);
   send_map_starts_B = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_B);
   send_map_elmts_B = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_B);

   recv_procs_C = hypre_CTAlloc(HYPRE_Int, num_recvs_B, HYPRE_MEMORY_HOST);
   recv_vec_starts_C = hypre_CTAlloc(HYPRE_Int, num_recvs_B+1, HYPRE_MEMORY_HOST);
   send_procs_C = hypre_CTAlloc(HYPRE_Int, num_sends_B, HYPRE_MEMORY_HOST);
   send_map_starts_C = hypre_CTAlloc(HYPRE_Int, num_sends_B+1, HYPRE_MEMORY_HOST);
   send_map_elmts_C = hypre_CTAlloc(HYPRE_Int, send_map_starts_B[num_sends_B], HYPRE_MEMORY_HOST);

   for (i=0; i < num_recvs_B; i++) recv_procs_C[i] = recv_procs_B[i];
   for (i=0; i < num_recvs_B+1; i++) recv_vec_starts_C[i] = recv_vec_starts_B[i];
   for (i=0; i < num_sends_B; i++) send_procs_C[i] = send_procs_B[i];
   for (i=0; i < num_sends_B+1; i++) send_map_starts_C[i] = send_map_starts_B[i];
   for (i=0; i < send_map_starts_B[num_sends_B]; i++) send_map_elmts_C[i] = send_map_elmts_B[i];

   comm_pkg_C = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm(comm_pkg_C) = comm;
   hypre_ParCSRCommPkgNumRecvs(comm_pkg_C) = num_recvs_B;
   hypre_ParCSRCommPkgRecvProcs(comm_pkg_C) = recv_procs_C;
   hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_C) = recv_vec_starts_C;
   hypre_ParCSRCommPkgNumSends(comm_pkg_C) = num_sends_B;
   hypre_ParCSRCommPkgSendProcs(comm_pkg_C) = send_procs_C;
   hypre_ParCSRCommPkgSendMapStarts(comm_pkg_C) = send_map_starts_C;
   hypre_ParCSRCommPkgSendMapElmts(comm_pkg_C) = send_map_elmts_C;
   hypre_ParCSRMatrixCommPkg(C) = comm_pkg_C;

   hypre_TFree(D_tmp, HYPRE_MEMORY_HOST);
   if (num_cols_offd_A) hypre_TFree(map_to_B, HYPRE_MEMORY_HOST);

   *C_ptr = C;

   return (hypre_error_flag);
}

/*--------------------------------------------------------------------------
 * hypre_ParTMatmul : multiplies two ParCSRMatrices transpose(A) and B and returns
 * the product in ParCSRMatrix C
 * Note that C does not own the partitionings since its row_starts
 * is owned by A and col_starts by B.
*--------------------------------------------------------------------------*/

hypre_ParCSRMatrix *hypre_ParTMatmul( hypre_ParCSRMatrix  *A,
                                      hypre_ParCSRMatrix  *B)
{
   /* Computes C = transpose(A)*B (see header comment above).  The local
    * pieces are formed from explicit transposes of A's diag/offd blocks;
    * contributions that belong to other processors' rows are shipped to
    * their owners (C_ext) and merged in a two-pass symbolic/numeric fill. */
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg_A = hypre_ParCSRMatrixCommPkg(A);

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *AT_diag = NULL;

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   hypre_CSRMatrix *AT_offd = NULL;

   HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag);

   hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
   hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
   HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);
   HYPRE_BigInt first_col_diag_B = hypre_ParCSRMatrixFirstColDiag(B);
   HYPRE_BigInt *col_starts_A = hypre_ParCSRMatrixColStarts(A);
   HYPRE_BigInt *col_starts_B = hypre_ParCSRMatrixColStarts(B);
   HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag);
   HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag);
   HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd);

   hypre_ParCSRMatrix *C;
   HYPRE_BigInt *col_map_offd_C = NULL;
   HYPRE_Int *map_B_to_C;

   hypre_CSRMatrix *C_diag = NULL;
   hypre_CSRMatrix *C_tmp_diag = NULL;

   HYPRE_Complex *C_diag_data = NULL;
   HYPRE_Int *C_diag_i = NULL;
   HYPRE_Int *C_diag_j = NULL;
   HYPRE_BigInt first_col_diag_C;
   HYPRE_BigInt last_col_diag_C;

   hypre_CSRMatrix *C_offd = NULL;
   hypre_CSRMatrix *C_tmp_offd = NULL;
   hypre_CSRMatrix *C_int = NULL;
   hypre_CSRMatrix *C_ext = NULL;
   HYPRE_Int *C_ext_i;
   HYPRE_BigInt *C_ext_j;
   HYPRE_Complex *C_ext_data;
   HYPRE_Int *C_ext_diag_i;
   HYPRE_Int *C_ext_diag_j;
   HYPRE_Complex *C_ext_diag_data;
   HYPRE_Int *C_ext_offd_i;
   HYPRE_Int *C_ext_offd_j;
   HYPRE_Complex *C_ext_offd_data;
   HYPRE_Int C_ext_size = 0;
   HYPRE_Int C_ext_diag_size = 0;
   HYPRE_Int C_ext_offd_size = 0;

   HYPRE_Int *C_tmp_diag_i;
   HYPRE_Int *C_tmp_diag_j;
   HYPRE_Complex *C_tmp_diag_data;
   HYPRE_Int *C_tmp_offd_i;
   HYPRE_Int *C_tmp_offd_j;
   HYPRE_Complex *C_tmp_offd_data;

   HYPRE_Complex *C_offd_data=NULL;
   HYPRE_Int *C_offd_i=NULL;
   HYPRE_Int *C_offd_j=NULL;

   HYPRE_BigInt *temp;
   HYPRE_Int *send_map_starts_A;
   HYPRE_Int *send_map_elmts_A;
   HYPRE_Int num_sends_A;
   HYPRE_Int num_cols_offd_C = 0;
   HYPRE_Int *P_marker;

   HYPRE_Int i, j;
   HYPRE_Int i1, j_indx;

   HYPRE_BigInt n_rows_A, n_cols_A;
   HYPRE_BigInt n_rows_B, n_cols_B;
   /*HYPRE_Int allsquare = 0;*/
   HYPRE_Int cnt, cnt_offd, cnt_diag;
   HYPRE_BigInt value;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int max_num_threads;
   HYPRE_Int *C_diag_array = NULL;
   HYPRE_Int *C_offd_array = NULL;

   HYPRE_BigInt first_row_index, first_col_diag;
   HYPRE_Int local_num_rows, local_num_cols;

   n_rows_A = hypre_ParCSRMatrixGlobalNumRows(A);
   n_cols_A = hypre_ParCSRMatrixGlobalNumCols(A);
   n_rows_B = hypre_ParCSRMatrixGlobalNumRows(B);
   n_cols_B = hypre_ParCSRMatrixGlobalNumCols(B);

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   max_num_threads = hypre_NumThreads();

   /* A^T*B requires A and B to have the same row distribution */
   if (n_rows_A != n_rows_B || num_rows_diag_A != num_rows_diag_B)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! Incompatible matrix dimensions!\n");
      return NULL;
   }

   HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A);
   HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B);

   /* RL: TODO cannot guarantee, maybe should never assert
   hypre_assert(memory_location_A == memory_location_B);
   */

   /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D,
    * not sure if this is the right thing to do.
    * Also, need something like this in other places
    * TODO */
   HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B);

   /*if (num_cols_diag_A == num_cols_diag_B) allsquare = 1;*/

   /* Explicitly transpose the local blocks of A (third arg 1: copy data too) */
   hypre_CSRMatrixTranspose(A_diag, &AT_diag, 1);
   hypre_CSRMatrixTranspose(A_offd, &AT_offd, 1);

   C_tmp_diag = hypre_CSRMatrixMultiply(AT_diag, B_diag);
   C_ext_size = 0;
   if (num_procs > 1)
   {
      hypre_CSRMatrix *C_int_diag;
      hypre_CSRMatrix *C_int_offd;
      void *request;
      C_tmp_offd = hypre_CSRMatrixMultiply(AT_diag, B_offd);
      /* Rows of AT_offd correspond to columns owned by other procs: compute
       * their contributions and ship them to the owners.  B's diag/offd slots
       * are temporarily swapped out so hypre_MergeDiagAndOffd can be reused
       * on the C_int pieces, then restored. */
      C_int_diag = hypre_CSRMatrixMultiply(AT_offd, B_diag);
      C_int_offd = hypre_CSRMatrixMultiply(AT_offd, B_offd);
      hypre_ParCSRMatrixDiag(B) = C_int_diag;
      hypre_ParCSRMatrixOffd(B) = C_int_offd;
      C_int = hypre_MergeDiagAndOffd(B);
      hypre_ParCSRMatrixDiag(B) = B_diag;
      hypre_ParCSRMatrixOffd(B) = B_offd;
      hypre_ExchangeExternalRowsInit(C_int, comm_pkg_A, &request);
      C_ext = hypre_ExchangeExternalRowsWait(request);
      C_ext_i = hypre_CSRMatrixI(C_ext);
      C_ext_j = hypre_CSRMatrixBigJ(C_ext);
      C_ext_data = hypre_CSRMatrixData(C_ext);
      C_ext_size = C_ext_i[hypre_CSRMatrixNumRows(C_ext)];

      hypre_CSRMatrixDestroy(C_int);
      hypre_CSRMatrixDestroy(C_int_diag);
      hypre_CSRMatrixDestroy(C_int_offd);
   }
   else
   {
      /* serial run: no off-processor columns, empty offd part */
      C_tmp_offd = hypre_CSRMatrixCreate(num_cols_diag_A, 0, 0);
      hypre_CSRMatrixInitialize(C_tmp_offd);
   }
   hypre_CSRMatrixDestroy(AT_diag);
   hypre_CSRMatrixDestroy(AT_offd);

   /*-----------------------------------------------------------------------
    * Add contents of C_ext to C_tmp_diag and C_tmp_offd
    * to obtain C_diag and C_offd
    *-----------------------------------------------------------------------*/

   /* check for new nonzero columns in C_offd generated through C_ext */
   first_col_diag_C = first_col_diag_B;
   last_col_diag_C = first_col_diag_B + (HYPRE_BigInt)num_cols_diag_B - 1;

   C_tmp_diag_i = hypre_CSRMatrixI(C_tmp_diag);
   if (C_ext_size || num_cols_offd_B)
   {
      HYPRE_Int C_ext_num_rows;
      num_sends_A = hypre_ParCSRCommPkgNumSends(comm_pkg_A);
      send_map_starts_A = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A);
      send_map_elmts_A = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_A);
      C_ext_num_rows = send_map_starts_A[num_sends_A];

      C_ext_diag_i = hypre_CTAlloc(HYPRE_Int, C_ext_num_rows+1, HYPRE_MEMORY_HOST);
      C_ext_offd_i = hypre_CTAlloc(HYPRE_Int, C_ext_num_rows+1, HYPRE_MEMORY_HOST);
      temp = hypre_CTAlloc(HYPRE_BigInt, C_ext_size+num_cols_offd_B, HYPRE_MEMORY_HOST);
      C_ext_diag_size = 0;
      C_ext_offd_size = 0;
      /* First pass over C_ext: count diag vs offd entries per row and
       * collect every candidate offd global column id into temp */
      for (i=0; i < C_ext_num_rows; i++)
      {
         for (j=C_ext_i[i]; j < C_ext_i[i+1]; j++)
            if (C_ext_j[j] < first_col_diag_C || C_ext_j[j] > last_col_diag_C)
               temp[C_ext_offd_size++] = C_ext_j[j];
            else
               C_ext_diag_size++;
         C_ext_diag_i[i+1] = C_ext_diag_size;
         C_ext_offd_i[i+1] = C_ext_offd_size;
      }
      cnt = C_ext_offd_size;
      for (i=0; i < num_cols_offd_B; i++)
         temp[cnt++] = col_map_offd_B[i];

      /* Sort and de-duplicate the candidates to build col_map_offd_C */
      if (cnt)
      {
         hypre_BigQsort0(temp,0,cnt-1);
         value = temp[0];
         num_cols_offd_C = 1;
         for (i=1; i < cnt; i++)
         {
            if (temp[i] > value)
            {
               value = temp[i];
               temp[num_cols_offd_C++] = value;
            }
         }
      }

      if (num_cols_offd_C)
         col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST);
      for (i=0; i < num_cols_offd_C; i++)
         col_map_offd_C[i] = temp[i];

      hypre_TFree(temp, HYPRE_MEMORY_HOST);

      if (C_ext_diag_size)
      {
         C_ext_diag_j = hypre_CTAlloc(HYPRE_Int, C_ext_diag_size, HYPRE_MEMORY_HOST);
         C_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, C_ext_diag_size, HYPRE_MEMORY_HOST);
      }
      if (C_ext_offd_size)
      {
         C_ext_offd_j = hypre_CTAlloc(HYPRE_Int, C_ext_offd_size, HYPRE_MEMORY_HOST);
         C_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, C_ext_offd_size, HYPRE_MEMORY_HOST);
      }

      C_tmp_diag_j = hypre_CSRMatrixJ(C_tmp_diag);
      C_tmp_diag_data = hypre_CSRMatrixData(C_tmp_diag);

      C_tmp_offd_i = hypre_CSRMatrixI(C_tmp_offd);
      C_tmp_offd_j = hypre_CSRMatrixJ(C_tmp_offd);
      C_tmp_offd_data = hypre_CSRMatrixData(C_tmp_offd);

      /* Second pass: split C_ext into C_ext_diag (local column indices)
       * and C_ext_offd (indices into col_map_offd_C) */
      cnt_offd = 0;
      cnt_diag = 0;
      for (i=0; i < C_ext_num_rows; i++)
      {
         for (j=C_ext_i[i]; j < C_ext_i[i+1]; j++)
            if (C_ext_j[j] < first_col_diag_C || C_ext_j[j] > last_col_diag_C)
            {
               C_ext_offd_j[cnt_offd] = hypre_BigBinarySearch(col_map_offd_C, C_ext_j[j], num_cols_offd_C);
               C_ext_offd_data[cnt_offd++] = C_ext_data[j];
            }
            else
            {
               C_ext_diag_j[cnt_diag] = (HYPRE_Int)(C_ext_j[j] - first_col_diag_C);
               C_ext_diag_data[cnt_diag++] = C_ext_data[j];
            }
      }
   }
   if (C_ext)
   {
      hypre_CSRMatrixDestroy(C_ext);
      C_ext = NULL;
   }

   if (num_cols_offd_B)
   {
      /* Re-map C_tmp_offd's column indices from B's offd numbering into
       * C's (col_map_offd_B is a subset of col_map_offd_C by construction) */
      map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST);

      cnt = 0;
      for (i=0; i < num_cols_offd_C; i++)
         if (col_map_offd_C[i] == col_map_offd_B[cnt])
         {
            map_B_to_C[cnt++] = i;
            if (cnt == num_cols_offd_B) break;
         }
      for (i=0; i < hypre_CSRMatrixI(C_tmp_offd)[hypre_CSRMatrixNumRows(C_tmp_offd)]; i++)
      {
         j_indx = C_tmp_offd_j[i];
         C_tmp_offd_j[i] = map_B_to_C[j_indx];
      }
   }

   /*-----------------------------------------------------------------------
    * Need to compute C_diag = C_tmp_diag + C_ext_diag
    * and C_offd = C_tmp_offd + C_ext_offd   !!!!
    * First generate structure
    *-----------------------------------------------------------------------*/

   if (C_ext_size || num_cols_offd_B)
   {
      C_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_diag_A+1, memory_location_C);
      C_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_diag_A+1, memory_location_C);

      /* per-thread nnz counts, later turned into a prefix sum */
      C_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
      C_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
      {
         HYPRE_Int *B_marker = NULL;
         HYPRE_Int *B_marker_offd = NULL;
         HYPRE_Int ik, jk, j1, j2, jcol;
         HYPRE_Int ns, ne, ii, nnz_d, nnz_o;
         HYPRE_Int rest, size;
         HYPRE_Int num_threads = hypre_NumActiveThreads();

         /* split the rows of C evenly; the first `rest' threads get one extra */
         size = num_cols_diag_A/num_threads;
         rest = num_cols_diag_A - size*num_threads;
         ii = hypre_GetThreadNum();
         if (ii < rest)
         {
            ns = ii*size+ii;
            ne = (ii+1)*size+ii+1;
         }
         else
         {
            ns = ii*size+rest;
            ne = (ii+1)*size+rest;
         }

         /* markers record the last row ik that touched a column, so each
          * column is counted at most once per row */
         B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B, HYPRE_MEMORY_HOST);
         B_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_C, HYPRE_MEMORY_HOST);

         for (ik = 0; ik < num_cols_diag_B; ik++)
            B_marker[ik] = -1;

         for (ik = 0; ik < num_cols_offd_C; ik++)
            B_marker_offd[ik] = -1;

         /* Symbolic pass: count nnz of this thread's rows [ns,ne) */
         nnz_d = 0;
         nnz_o = 0;
         for (ik = ns; ik < ne; ik++)
         {
            for (jk = C_tmp_diag_i[ik]; jk < C_tmp_diag_i[ik+1]; jk++)
            {
               jcol = C_tmp_diag_j[jk];
               B_marker[jcol] = ik;
               nnz_d++;
            }

            for (jk = C_tmp_offd_i[ik]; jk < C_tmp_offd_i[ik+1]; jk++)
            {
               jcol = C_tmp_offd_j[jk];
               B_marker_offd[jcol] = ik;
               nnz_o++;
            }

            /* external contributions: row ik of C receives row j1 of C_ext
             * whenever send_map_elmts_A maps j1 to ik */
            for (jk = 0; jk < num_sends_A; jk++)
               for (j1 = send_map_starts_A[jk]; j1 < send_map_starts_A[jk+1]; j1++)
                  if (send_map_elmts_A[j1] == ik)
                  {
                     for (j2 = C_ext_diag_i[j1]; j2 < C_ext_diag_i[j1+1]; j2++)
                     {
                        jcol = C_ext_diag_j[j2];
                        if (B_marker[jcol] < ik)
                        {
                           B_marker[jcol] = ik;
                           nnz_d++;
                        }
                     }
                     for (j2 = C_ext_offd_i[j1]; j2 < C_ext_offd_i[j1+1]; j2++)
                     {
                        jcol = C_ext_offd_j[j2];
                        if (B_marker_offd[jcol] < ik)
                        {
                           B_marker_offd[jcol] = ik;
                           nnz_o++;
                        }
                     }
                     break;
                  }
            C_diag_array[ii] = nnz_d;
            C_offd_array[ii] = nnz_o;
         }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

         /* Thread 0 turns the per-thread counts into a running (prefix) sum
          * and allocates C_diag/C_offd with the total nnz */
         if (ii == 0)
         {
            nnz_d = 0;
            nnz_o = 0;
            for (ik = 0; ik < num_threads-1; ik++)
            {
               C_diag_array[ik+1] += C_diag_array[ik];
               C_offd_array[ik+1] += C_offd_array[ik];
            }
            nnz_d = C_diag_array[num_threads-1];
            nnz_o = C_offd_array[num_threads-1];
            C_diag_i[num_cols_diag_A] = nnz_d;
            C_offd_i[num_cols_diag_A] = nnz_o;

            C_diag = hypre_CSRMatrixCreate(num_cols_diag_A, num_cols_diag_A, nnz_d);
            C_offd = hypre_CSRMatrixCreate(num_cols_diag_A, num_cols_offd_C, nnz_o);
            hypre_CSRMatrixI(C_diag) = C_diag_i;
            hypre_CSRMatrixInitialize_v2(C_diag, 0, memory_location_C);
            C_diag_j = hypre_CSRMatrixJ(C_diag);
            C_diag_data = hypre_CSRMatrixData(C_diag);
            hypre_CSRMatrixI(C_offd) = C_offd_i;
            hypre_CSRMatrixInitialize_v2(C_offd, 0, memory_location_C);
            C_offd_j = hypre_CSRMatrixJ(C_offd);
            C_offd_data = hypre_CSRMatrixData(C_offd);
         }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

         /*-----------------------------------------------------------------------
          * Need to compute C_diag = C_tmp_diag + C_ext_diag
          * and C_offd = C_tmp_offd + C_ext_offd   !!!!
          * Now fill in values
          *-----------------------------------------------------------------------*/

         /* reset markers; in the numeric pass they hold the position of a
          * column's entry so duplicates can be accumulated into it */
         for (ik = 0; ik < num_cols_diag_B; ik++)
            B_marker[ik] = -1;

         for (ik = 0; ik < num_cols_offd_C; ik++)
            B_marker_offd[ik] = -1;

         /*-----------------------------------------------------------------------
          * Populate matrices
          *-----------------------------------------------------------------------*/

         nnz_d = 0;
         nnz_o = 0;
         nnz_o = 0; /* NOTE(review): duplicated assignment; harmless */
         if (ii)
         {
            /* start writing at the end of the previous threads' entries */
            nnz_d = C_diag_array[ii-1];
            nnz_o = C_offd_array[ii-1];
         }
         for (ik = ns; ik < ne; ik++)
         {
            C_diag_i[ik] = nnz_d;
            C_offd_i[ik] = nnz_o;
            for (jk = C_tmp_diag_i[ik]; jk < C_tmp_diag_i[ik+1]; jk++)
            {
               jcol = C_tmp_diag_j[jk];
               C_diag_j[nnz_d] = jcol;
               C_diag_data[nnz_d] = C_tmp_diag_data[jk];
               B_marker[jcol] = nnz_d;
               nnz_d++;
            }

            for (jk = C_tmp_offd_i[ik]; jk < C_tmp_offd_i[ik+1]; jk++)
            {
               jcol = C_tmp_offd_j[jk];
               C_offd_j[nnz_o] = jcol;
               C_offd_data[nnz_o] = C_tmp_offd_data[jk];
               B_marker_offd[jcol] = nnz_o;
               nnz_o++;
            }

            for (jk = 0; jk < num_sends_A; jk++)
               for (j1 = send_map_starts_A[jk]; j1 < send_map_starts_A[jk+1]; j1++)
                  if (send_map_elmts_A[j1] == ik)
                  {
                     for (j2 = C_ext_diag_i[j1]; j2 < C_ext_diag_i[j1+1]; j2++)
                     {
                        jcol = C_ext_diag_j[j2];
                        /* marker >= row start => column already present: add into it */
                        if (B_marker[jcol] < C_diag_i[ik])
                        {
                           C_diag_j[nnz_d] = jcol;
                           C_diag_data[nnz_d] = C_ext_diag_data[j2];
                           B_marker[jcol] = nnz_d;
                           nnz_d++;
                        }
                        else
                           C_diag_data[B_marker[jcol]] += C_ext_diag_data[j2];
                     }
                     for (j2 = C_ext_offd_i[j1]; j2 < C_ext_offd_i[j1+1]; j2++)
                     {
                        jcol = C_ext_offd_j[j2];
                        if (B_marker_offd[jcol] < C_offd_i[ik])
                        {
                           C_offd_j[nnz_o] = jcol;
                           C_offd_data[nnz_o] = C_ext_offd_data[j2];
                           B_marker_offd[jcol] = nnz_o;
                           nnz_o++;
                        }
                        else
                           C_offd_data[B_marker_offd[jcol]] += C_ext_offd_data[j2];
                     }
                     break;
                  }
         }
         hypre_TFree(B_marker, HYPRE_MEMORY_HOST);
         hypre_TFree(B_marker_offd, HYPRE_MEMORY_HOST);
      } /*end parallel region */
      hypre_TFree(C_diag_array, HYPRE_MEMORY_HOST);
      hypre_TFree(C_offd_array, HYPRE_MEMORY_HOST);
   }

   /*C = hypre_ParCSRMatrixCreate(comm, n_cols_A, n_cols_B, col_starts_A,
        col_starts_B, num_cols_offd_C, nnz_diag, nnz_offd);
   hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(C));
   hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(C)); */

   /* row_starts[0] is start of local rows.  row_starts[1] is start of next
      processor's rows */
   first_row_index = col_starts_A[0];
   local_num_rows = (HYPRE_Int)(col_starts_A[1]-first_row_index );
   first_col_diag = col_starts_B[0];
   local_num_cols = (HYPRE_Int)(col_starts_B[1]-first_col_diag);

   /* Assemble the ParCSR wrapper by hand: C inherits A's column partition
    * as its row partition and B's column partition as its column partition */
   C = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrixComm(C) = comm;
   hypre_ParCSRMatrixGlobalNumRows(C) = n_cols_A;
   hypre_ParCSRMatrixGlobalNumCols(C) = n_cols_B;
   hypre_ParCSRMatrixFirstRowIndex(C) = first_row_index;
   hypre_ParCSRMatrixFirstColDiag(C) = first_col_diag;
   hypre_ParCSRMatrixLastRowIndex(C) = first_row_index + (HYPRE_BigInt)local_num_rows - 1;
   hypre_ParCSRMatrixLastColDiag(C) = first_col_diag + (HYPRE_BigInt)local_num_cols - 1;

   hypre_ParCSRMatrixColMapOffd(C) = NULL;
   hypre_ParCSRMatrixAssumedPartition(C) = NULL;

   hypre_ParCSRMatrixRowStarts(C) = col_starts_A;
   hypre_ParCSRMatrixColStarts(C) = col_starts_B;

   hypre_ParCSRMatrixCommPkg(C) = NULL;
   hypre_ParCSRMatrixCommPkgT(C) = NULL;

   /* set defaults */
   hypre_ParCSRMatrixOwnsData(C) = 1;
   hypre_ParCSRMatrixRowindices(C) = NULL;
   hypre_ParCSRMatrixRowvalues(C) = NULL;
   hypre_ParCSRMatrixGetrowactive(C) = 0;

   /* Note that C does not own the partitionings */
   hypre_ParCSRMatrixSetRowStartsOwner(C,0);
   hypre_ParCSRMatrixSetColStartsOwner(C,0);

   /* when no external contributions existed, C_tmp_* already are the result */
   if (C_diag)
   {
      hypre_ParCSRMatrixDiag(C) = C_diag;
   }
   else
   {
      hypre_ParCSRMatrixDiag(C) = C_tmp_diag;
   }

   if (C_offd)
   {
      hypre_ParCSRMatrixOffd(C) = C_offd;
   }
   else
   {
      hypre_ParCSRMatrixOffd(C) = C_tmp_offd;
   }

   hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(C)) = memory_location_C;
   hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(C)) = memory_location_C;

   if (num_cols_offd_C)
   {
      HYPRE_Int jj_count_offd, nnz_offd;
      HYPRE_BigInt *new_col_map_offd_C = NULL;

      /* Compress col_map_offd_C: drop columns that received no entries.
       * P_marker[i] stays -1 for unused columns; used ones are first set
       * to 0 (counting pass), then to their new compressed index. */
      P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd_C, HYPRE_MEMORY_HOST);
      for (i=0; i < num_cols_offd_C; i++)
      {
         P_marker[i] = -1;
      }

      jj_count_offd = 0;
      nnz_offd = C_offd_i[num_cols_diag_A];
      for (i=0; i < nnz_offd; i++)
      {
         i1 = C_offd_j[i];
         if (P_marker[i1])
         {
            P_marker[i1] = 0;
            jj_count_offd++;
         }
      }

      if (jj_count_offd < num_cols_offd_C)
      {
         new_col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, jj_count_offd, HYPRE_MEMORY_HOST);
         jj_count_offd = 0;
         for (i=0; i < num_cols_offd_C; i++)
         {
            if (!P_marker[i])
            {
               P_marker[i] = jj_count_offd;
               new_col_map_offd_C[jj_count_offd++] = col_map_offd_C[i];
            }
         }

         for (i=0; i < nnz_offd; i++)
         {
            i1 = C_offd_j[i];
            C_offd_j[i] = P_marker[i1];
         }

         num_cols_offd_C = jj_count_offd;
         hypre_TFree(col_map_offd_C, HYPRE_MEMORY_HOST);
         col_map_offd_C = new_col_map_offd_C;
         hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(C)) = num_cols_offd_C;
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }
   hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C;

   /*-----------------------------------------------------------------------
    * Free various arrays
    *-----------------------------------------------------------------------*/

   if (C_ext_size || num_cols_offd_B)
   {
      hypre_TFree(C_ext_diag_i, HYPRE_MEMORY_HOST);
      hypre_TFree(C_ext_offd_i, HYPRE_MEMORY_HOST);
   }
   if (C_ext_diag_size)
   {
      hypre_TFree(C_ext_diag_j, HYPRE_MEMORY_HOST);
      hypre_TFree(C_ext_diag_data, HYPRE_MEMORY_HOST);
   }
   if (C_ext_offd_size)
   {
      hypre_TFree(C_ext_offd_j, HYPRE_MEMORY_HOST);
      hypre_TFree(C_ext_offd_data, HYPRE_MEMORY_HOST);
   }
   if (num_cols_offd_B)
   {
      hypre_TFree(map_B_to_C, HYPRE_MEMORY_HOST);
   }

   /* the temporaries were only consumed when merged into C_diag/C_offd */
   if (C_diag)
   {
      hypre_CSRMatrixDestroy(C_tmp_diag);
   }
   if (C_offd)
   {
      hypre_CSRMatrixDestroy(C_tmp_offd);
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if ( hypre_GetExecPolicy2(memory_location_A, memory_location_B) == HYPRE_EXEC_DEVICE )
   {
      hypre_CSRMatrixMoveDiagFirstDevice(hypre_ParCSRMatrixDiag(C));
      hypre_SyncCudaComputeStream(hypre_handle());
   }
#endif

   return C;
}

HYPRE_Int
hypre_ParvecBdiagInvScal( hypre_ParVector *b,
                          HYPRE_Int blockSize,
                          hypre_ParVector **bs,
                          hypre_ParCSRMatrix *A)
{
   /* NOTE(review): matrix accessor applied to a ParVector; presumably works
    * because the macro is plain member access and both structs have a comm
    * field -- confirm this is intentional. */
   MPI_Comm comm = hypre_ParCSRMatrixComm(b);
   /* (continuation of hypre_ParvecBdiagInvScal: computes *bs = D^{-1}*b,
    * where D^{-1} is the precomputed dense block-diagonal inverse stored
    * in A->bdiaginv and communicated via A->bdiaginv_comm_pkg) */
   HYPRE_Int num_procs, my_id;
   hypre_MPI_Comm_rank(comm, &my_id);
   hypre_MPI_Comm_size(comm, &num_procs);

   HYPRE_Int i, j, s, block_start, block_end;
   HYPRE_BigInt nrow_global = hypre_ParVectorGlobalSize(b);
   HYPRE_BigInt first_row = hypre_ParVectorFirstIndex(b);
   HYPRE_BigInt last_row = hypre_ParVectorLastIndex(b);
   HYPRE_BigInt end_row = last_row + 1; /* one past-the-last */
   /* the blocks this proc's rows intersect: rounded down/up to block bounds */
   HYPRE_BigInt first_row_block = first_row / (HYPRE_BigInt)(blockSize) * (HYPRE_BigInt)blockSize;
   HYPRE_BigInt end_row_block = hypre_min( (last_row / (HYPRE_BigInt)blockSize + 1) * (HYPRE_BigInt)blockSize, nrow_global );

   hypre_assert(blockSize == A->bdiag_size);
   HYPRE_Complex *bdiaginv = A->bdiaginv;
   hypre_ParCSRCommPkg *comm_pkg = A->bdiaginv_comm_pkg;
   /* walking pointer over the dense blockSize x blockSize inverse blocks */
   HYPRE_Complex *dense = bdiaginv;

   //for (i=first_row_block; i < end_row; i+=blockSize) ;
   //printf("===[%d %d), [ %d %d ) %d === \n", first_row, end_row, first_row_block, end_row_block, i);

   /* local vector of b */
   hypre_Vector *b_local = hypre_ParVectorLocalVector(b);
   HYPRE_Complex *b_local_data = hypre_VectorData(b_local);
   /* number of sends (#procs) */
   HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   /* number of rows to send */
   HYPRE_Int num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
   /* number of recvs (#procs) */
   HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   /* number of rows to recv */
   HYPRE_Int num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs);
   hypre_ParCSRCommHandle *comm_handle;

   /* output vector shares b's 2-entry partitioning (copied) */
   j = 2;
   HYPRE_BigInt *part = hypre_TAlloc(HYPRE_BigInt, j, HYPRE_MEMORY_HOST);
   memcpy(part, hypre_ParVectorPartitioning(b), j*sizeof(HYPRE_BigInt));
   hypre_ParVector *bnew = hypre_ParVectorCreate( hypre_ParVectorComm(b), hypre_ParVectorGlobalSize(b), part );
   hypre_ParVectorInitialize(bnew);
   hypre_Vector *bnew_local = hypre_ParVectorLocalVector(bnew);
   HYPRE_Complex *bnew_local_data = hypre_VectorData(bnew_local);

   /* send and recv b */
   HYPRE_Complex *send_b = hypre_TAlloc(HYPRE_Complex, num_rows_send, HYPRE_MEMORY_HOST);
   HYPRE_Complex *recv_b = hypre_TAlloc(HYPRE_Complex, num_rows_recv, HYPRE_MEMORY_HOST);

   for (i = 0; i < num_rows_send; i++)
   {
      j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i);
      send_b[i] = b_local_data[j];
   }
   comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, send_b, recv_b);
   /* ... */
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /* For each diagonal block, multiply its dense inverse against the
    * corresponding entries of b, writing only the locally owned rows */
   for (block_start = first_row_block; block_start < end_row_block; block_start += blockSize)
   {
      HYPRE_BigInt big_i;
      block_end = hypre_min(block_start + (HYPRE_BigInt)blockSize, nrow_global);
      s = (HYPRE_Int)(block_end - block_start); /* actual block size (last block may be short) */
      for (big_i = block_start; big_i < block_end; big_i++)
      {
         /* skip rows this proc does not own */
         if (big_i < first_row || big_i >= end_row)
         {
            continue;
         }

         HYPRE_Int local_i = (HYPRE_Int)(big_i - first_row);
         HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start);

         bnew_local_data[local_i] = 0.0;
         for (j = 0; j < s; j++)
         {
            HYPRE_BigInt global_rid = block_start + (HYPRE_BigInt)j;
            /* dense blocks are stored column-major: entry (block_i, j) */
            HYPRE_Complex val = dense[block_i + j*blockSize];
            if (val == 0.0)
            {
               continue;
            }

            if (global_rid >= first_row && global_rid < end_row)
            {
               /* source entry is local */
               HYPRE_Int rid = (HYPRE_Int)(global_rid - first_row);
               bnew_local_data[local_i] += val * b_local_data[rid];
            }
            else
            {
               /* source entry came from a neighbor; recv_b is laid out as
                * [rows below first_row, rows at/above end_row] */
               HYPRE_Int rid;
               if (global_rid < first_row)
               {
                  rid = (HYPRE_Int)(global_rid - first_row_block);
               }
               else
               {
                  rid = (HYPRE_Int)(first_row - first_row_block + global_rid - end_row);
               }
               bnew_local_data[local_i] += val * recv_b[rid];
            }
         }
      }
      dense += blockSize * blockSize; /* advance to the next inverse block */
   }

   hypre_TFree(send_b, HYPRE_MEMORY_HOST);
   hypre_TFree(recv_b, HYPRE_MEMORY_HOST);

   *bs = bnew;

   return hypre_error_flag;
}

/**
 * @brief Compute As = B^{-1}*A, where B is the block diagonal of A
 * @param[in] A : input matrix
 * @param[in] blockSize: block size
 * @param[out] As : output matrix As = B^{-1}*A
 * @return
 * @warning
 */
HYPRE_Int
hypre_ParcsrBdiagInvScal( hypre_ParCSRMatrix *A,
                          HYPRE_Int blockSize,
                          hypre_ParCSRMatrix **As)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_Int num_procs, my_id;
   hypre_MPI_Comm_rank(comm, &my_id);
   hypre_MPI_Comm_size(comm, &num_procs);

   HYPRE_Int i, j, k, s;
   HYPRE_BigInt
block_start, block_end; /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int nrow_local = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_BigInt last_row = hypre_ParCSRMatrixLastRowIndex(A); HYPRE_BigInt end_row = first_row + (HYPRE_BigInt)nrow_local; /* one past-the-last */ HYPRE_Int ncol_local = hypre_CSRMatrixNumCols(A_diag); HYPRE_BigInt first_col = hypre_ParCSRMatrixFirstColDiag(A); /* HYPRE_Int last_col = hypre_ParCSRMatrixLastColDiag(A); */ HYPRE_BigInt end_col = first_col + (HYPRE_BigInt)ncol_local; HYPRE_BigInt nrow_global = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt ncol_global = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); void *request; /* if square globally and locally */ HYPRE_Int square2 = (nrow_global == ncol_global) && (nrow_local == ncol_local) && (first_row == first_col); if (nrow_global != ncol_global) { hypre_printf("hypre_ParcsrBdiagInvScal: only support N_ROW == N_COL\n"); return hypre_error_flag; } /* in block diagonals, row range of the blocks this proc span */ HYPRE_BigInt first_row_block = first_row / (HYPRE_BigInt)blockSize * (HYPRE_BigInt)blockSize; HYPRE_BigInt end_row_block = hypre_min( (last_row / (HYPRE_BigInt)blockSize + 1) * (HYPRE_BigInt)blockSize, nrow_global ); HYPRE_Int num_blocks = (HYPRE_Int)(last_row / (HYPRE_BigInt)blockSize + 1 - first_row / (HYPRE_BigInt)blockSize); //for (i=first_row_block; i < 
end_row; i+=blockSize) ; //printf("===[%d %d), [ %d %d ) %d === \n", first_row, end_row, first_row_block, end_row_block, i); //return 0; /* number of external rows */ HYPRE_Int num_ext_rows = (HYPRE_Int)(end_row_block - first_row_block - (end_row - first_row)); HYPRE_BigInt *ext_indices; HYPRE_Int A_ext_nnz; hypre_CSRMatrix *A_ext = NULL; HYPRE_Complex *A_ext_a = NULL; HYPRE_Int *A_ext_i = NULL; HYPRE_BigInt *A_ext_j = NULL; HYPRE_Real *dense_all = hypre_CTAlloc(HYPRE_Complex, num_blocks*blockSize*blockSize, HYPRE_MEMORY_HOST); HYPRE_Real *dense = dense_all; HYPRE_Int *IPIV = hypre_TAlloc(HYPRE_Int, blockSize, HYPRE_MEMORY_HOST); HYPRE_Complex *dgetri_work = NULL; HYPRE_Int dgetri_lwork = -1, lapack_info; HYPRE_Int num_cols_A_offd_new; HYPRE_BigInt *col_map_offd_A_new; HYPRE_BigInt big_i; HYPRE_Int *offd2new = NULL; HYPRE_Int *marker_diag, *marker_newoffd; HYPRE_Int nnz_diag = A_diag_i[nrow_local]; HYPRE_Int nnz_offd = A_offd_i[nrow_local]; HYPRE_Int nnz_diag_new = 0, nnz_offd_new = 0; HYPRE_Int *A_diag_i_new, *A_diag_j_new, *A_offd_i_new, *A_offd_j_new; HYPRE_Complex *A_diag_a_new, *A_offd_a_new; /* heuristic */ HYPRE_Int nnz_diag_alloc = 2 * nnz_diag; HYPRE_Int nnz_offd_alloc = 2 * nnz_offd; A_diag_i_new = hypre_CTAlloc(HYPRE_Int, nrow_local + 1, HYPRE_MEMORY_HOST); A_diag_j_new = hypre_CTAlloc(HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST); A_diag_a_new = hypre_CTAlloc(HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST); A_offd_i_new = hypre_CTAlloc(HYPRE_Int, nrow_local + 1, HYPRE_MEMORY_HOST); A_offd_j_new = hypre_CTAlloc(HYPRE_Int, nnz_offd_alloc, HYPRE_MEMORY_HOST); A_offd_a_new = hypre_CTAlloc(HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST); hypre_ParCSRMatrix *Anew; hypre_CSRMatrix *Anew_diag; hypre_CSRMatrix *Anew_offd; HYPRE_BigInt *row_starts_new, *col_starts_new; HYPRE_Real eps = 2.2e-16; /* Start with extracting the external rows */ HYPRE_BigInt *ext_offd; ext_indices = hypre_CTAlloc(HYPRE_BigInt, num_ext_rows, HYPRE_MEMORY_HOST); j = 0; for (big_i = 
first_row_block; big_i < first_row; big_i++) { ext_indices[j++] = big_i; } for (big_i = end_row; big_i < end_row_block; big_i++) { ext_indices[j++] = big_i; } hypre_assert(j == num_ext_rows); /* create CommPkg for external rows */ hypre_ParCSRFindExtendCommPkg(comm, nrow_global, first_row, nrow_local, row_starts, hypre_ParCSRMatrixAssumedPartition(A), num_ext_rows, ext_indices, &A->bdiaginv_comm_pkg); hypre_ParcsrGetExternalRowsInit(A, num_ext_rows, ext_indices, A->bdiaginv_comm_pkg, 1, &request); A_ext = hypre_ParcsrGetExternalRowsWait(request); hypre_TFree(ext_indices, HYPRE_MEMORY_HOST); A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixBigJ(A_ext); A_ext_a = hypre_CSRMatrixData(A_ext); A_ext_nnz = A_ext_i[num_ext_rows]; ext_offd = hypre_CTAlloc(HYPRE_BigInt, A_ext_nnz, HYPRE_MEMORY_HOST); /* fint the offd incides in A_ext */ for (i = 0, j = 0; i < A_ext_nnz; i++) { /* global index */ HYPRE_BigInt cid = A_ext_j[i]; /* keep the offd indices */ if (cid < first_col || cid >= end_col) { ext_offd[j++] = cid; } } /* remove duplicates after sorting (TODO better ways?) 
*/ hypre_BigQsort0(ext_offd, 0, j-1); for (i = 0, k = 0; i < j; i++) { if (i == 0 || ext_offd[i] != ext_offd[i-1]) { ext_offd[k++] = ext_offd[i]; } } /* uniion these `k' new indices into col_map_offd_A */ col_map_offd_A_new = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd + k, HYPRE_MEMORY_HOST); if (k) { /* map offd to offd_new */ offd2new = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); } hypre_union2(num_cols_A_offd, col_map_offd_A, k, ext_offd, &num_cols_A_offd_new, col_map_offd_A_new, offd2new, NULL); hypre_TFree(ext_offd, HYPRE_MEMORY_HOST); /* * adjust column indices in A_ext */ for (i = 0; i < A_ext_nnz; i++) { HYPRE_BigInt cid = A_ext_j[i]; if (cid < first_col || cid >= end_col) { j = hypre_BigBinarySearch(col_map_offd_A_new, cid, num_cols_A_offd_new); /* searching must succeed */ hypre_assert(j >= 0 && j < num_cols_A_offd_new); /* trick: save ncol_local + j back */ A_ext_j[i] = ncol_local + j; } else { /* save local index: [0, ncol_local-1] */ A_ext_j[i] = cid - first_col; } } /* marker for diag */ marker_diag = hypre_TAlloc(HYPRE_Int, ncol_local, HYPRE_MEMORY_HOST); for (i = 0; i < ncol_local; i++) { marker_diag[i] = -1; } /* marker for newoffd */ marker_newoffd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd_new, HYPRE_MEMORY_HOST); for (i = 0; i < num_cols_A_offd_new; i++) { marker_newoffd[i] = -1; } /* outer most loop for blocks */ for (block_start = first_row_block; block_start < end_row_block; block_start += (HYPRE_BigInt)blockSize) { HYPRE_BigInt big_i; block_end = hypre_min(block_start + (HYPRE_BigInt)blockSize, nrow_global); s = (HYPRE_Int)(block_end - block_start); /* 1. 
fill the dense block diag matrix */ for (big_i = block_start; big_i < block_end; big_i++) { /* row index in this block */ HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start); /* row index i: it can be local or external */ if (big_i >= first_row && big_i < end_row) { /* is a local row */ j = (HYPRE_Int)(big_i - first_row); for (k = A_diag_i[j]; k < A_diag_i[j+1]; k++) { HYPRE_BigInt cid = (HYPRE_BigInt)A_diag_j[k] + first_col; if (cid >= block_start && cid < block_end) { dense[block_i + (HYPRE_Int)(cid-block_start)*blockSize] = A_diag_a[k]; } } if (num_cols_A_offd) { for (k = A_offd_i[j]; k < A_offd_i[j+1]; k++) { HYPRE_BigInt cid = col_map_offd_A[A_offd_j[k]]; if (cid >= block_start && cid < block_end) { dense[block_i + (HYPRE_Int)(cid-block_start)*blockSize] = A_offd_a[k]; } } } } else { /* is an external row */ if (big_i < first_row) { j = (HYPRE_Int)(big_i - first_row_block); } else { j = (HYPRE_Int)(first_row - first_row_block + big_i - end_row); } for (k = A_ext_i[j]; k < A_ext_i[j+1]; k++) { HYPRE_BigInt cid = A_ext_j[k]; /* recover the global index */ cid = cid < (HYPRE_BigInt)ncol_local ? cid + first_col : col_map_offd_A_new[cid-ncol_local]; if (cid >= block_start && cid < block_end) { dense[block_i + (HYPRE_Int)(cid-block_start)*blockSize] = A_ext_a[k]; } } } } /* 2. 
invert the dense matrix */ hypre_dgetrf(&s, &s, dense, &blockSize, IPIV, &lapack_info); hypre_assert(lapack_info == 0); if (lapack_info == 0) { HYPRE_Int query = -1; HYPRE_Real lwork_opt; /* query the optimal size of work */ hypre_dgetri(&s, dense, &blockSize, IPIV, &lwork_opt, &query, &lapack_info); hypre_assert(lapack_info == 0); if (lwork_opt > dgetri_lwork) { dgetri_lwork = lwork_opt; dgetri_work = hypre_TReAlloc(dgetri_work, HYPRE_Complex, dgetri_lwork, HYPRE_MEMORY_HOST); } hypre_dgetri(&s, dense, &blockSize, IPIV, dgetri_work, &dgetri_lwork, &lapack_info); hypre_assert(lapack_info == 0); } /* filter out *zeros* */ HYPRE_Real Fnorm = 0.0; for (i = 0; i < s; i++) { for (j = 0; j < s; j++) { HYPRE_Complex t = dense[j+i*blockSize]; Fnorm += t * t; } } Fnorm = sqrt(Fnorm); for (i = 0; i < s; i++) { for (j = 0; j < s; j++) { if ( hypre_abs(dense[j+i*blockSize]) < eps * Fnorm ) { dense[j+i*blockSize] = 0.0; } } } /* 3. premultiplication: one-pass dynamic allocation */ for (big_i = block_start; big_i < block_end; big_i++) { /* starting points of this row in j */ HYPRE_Int diag_i_start = nnz_diag_new; HYPRE_Int offd_i_start = nnz_offd_new; /* compute a new row with global index 'i' and local index 'local_i' */ HYPRE_Int local_i = (HYPRE_Int)(big_i - first_row); /* row index in this block */ HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start); if (big_i < first_row || big_i >= end_row) { continue; } /* if square^2: reserve the first space in diag part to the diag entry */ if (square2) { marker_diag[local_i] = nnz_diag_new; if (nnz_diag_new == nnz_diag_alloc) { nnz_diag_alloc = nnz_diag_alloc * 2 + 1; A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST); A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST); } A_diag_j_new[nnz_diag_new] = local_i; A_diag_a_new[nnz_diag_new] = 0.0; nnz_diag_new ++; } /* combine s rows */ for (j = 0; j < s; j++) { /* row to combine: global row id */ 
HYPRE_BigInt global_rid = block_start + (HYPRE_BigInt)j; /* the multipiler */ HYPRE_Complex val = dense[block_i + j*blockSize]; if (val == 0.0) { continue; } if (global_rid >= first_row && global_rid < end_row) { /* this row is local */ HYPRE_Int rid = (HYPRE_Int)(global_rid - first_row); HYPRE_Int ii; for (ii = A_diag_i[rid]; ii < A_diag_i[rid+1]; ii++) { HYPRE_Int col = A_diag_j[ii]; HYPRE_Complex vv = A_diag_a[ii]; if (marker_diag[col] < diag_i_start) { /* this col has not been seen before, create new entry */ marker_diag[col] = nnz_diag_new; if (nnz_diag_new == nnz_diag_alloc) { nnz_diag_alloc = nnz_diag_alloc * 2 + 1; A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST); A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST); } A_diag_j_new[nnz_diag_new] = col; A_diag_a_new[nnz_diag_new] = val * vv; nnz_diag_new ++; } else { /* existing entry, update */ HYPRE_Int p = marker_diag[col]; hypre_assert(A_diag_j_new[p] == col); A_diag_a_new[p] += val * vv; } } for (ii = A_offd_i[rid]; ii < A_offd_i[rid+1]; ii++) { HYPRE_Int col = A_offd_j[ii]; /* use the mapper to map to new offd */ HYPRE_Int col_new = offd2new ? 
offd2new[col] : col; HYPRE_Complex vv = A_offd_a[ii]; if (marker_newoffd[col_new] < offd_i_start) { /* this col has not been seen before, create new entry */ marker_newoffd[col_new] = nnz_offd_new; if (nnz_offd_new == nnz_offd_alloc) { nnz_offd_alloc = nnz_offd_alloc * 2 + 1; A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int, nnz_offd_alloc, HYPRE_MEMORY_HOST); A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST); } A_offd_j_new[nnz_offd_new] = col_new; A_offd_a_new[nnz_offd_new] = val * vv; nnz_offd_new ++; } else { /* existing entry, update */ HYPRE_Int p = marker_newoffd[col_new]; hypre_assert(A_offd_j_new[p] == col_new); A_offd_a_new[p] += val * vv; } } } else { /* this is an external row: go to A_ext */ HYPRE_Int rid, ii; if (global_rid < first_row) { rid = (HYPRE_Int)(global_rid - first_row_block); } else { rid = (HYPRE_Int)(first_row - first_row_block + global_rid - end_row); } for (ii = A_ext_i[rid]; ii < A_ext_i[rid+1]; ii++) { HYPRE_Int col = (HYPRE_Int)A_ext_j[ii]; HYPRE_Complex vv = A_ext_a[ii]; if (col < ncol_local) { /* in diag part */ if (marker_diag[col] < diag_i_start) { /* this col has not been seen before, create new entry */ marker_diag[col] = nnz_diag_new; if (nnz_diag_new == nnz_diag_alloc) { nnz_diag_alloc = nnz_diag_alloc * 2 + 1; A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST); A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST); } A_diag_j_new[nnz_diag_new] = col; A_diag_a_new[nnz_diag_new] = val * vv; nnz_diag_new ++; } else { /* existing entry, update */ HYPRE_Int p = marker_diag[col]; hypre_assert(A_diag_j_new[p] == col); A_diag_a_new[p] += val * vv; } } else { /* in offd part */ col -= ncol_local; if (marker_newoffd[col] < offd_i_start) { /* this col has not been seen before, create new entry */ marker_newoffd[col] = nnz_offd_new; if (nnz_offd_new == nnz_offd_alloc) { nnz_offd_alloc = nnz_offd_alloc * 2 + 1; 
A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int, nnz_offd_alloc, HYPRE_MEMORY_HOST); A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST); } A_offd_j_new[nnz_offd_new] = col; A_offd_a_new[nnz_offd_new] = val * vv; nnz_offd_new ++; } else { /* existing entry, update */ HYPRE_Int p = marker_newoffd[col]; hypre_assert(A_offd_j_new[p] == col); A_offd_a_new[p] += val * vv; } } } } } /* done for row local_i */ A_diag_i_new[local_i + 1] = nnz_diag_new; A_offd_i_new[local_i + 1] = nnz_offd_new; } /* for i, each row */ dense += blockSize * blockSize; } /* for each block */ /* done with all rows */ /* resize properly */ A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_new, HYPRE_MEMORY_HOST); A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_new, HYPRE_MEMORY_HOST); A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int, nnz_offd_new, HYPRE_MEMORY_HOST); A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_new, HYPRE_MEMORY_HOST); /* readjust col_map_offd_new */ for (i = 0; i < num_cols_A_offd_new; i++) { marker_newoffd[i] = -1; } for (i = 0; i < nnz_offd_new; i++) { j = A_offd_j_new[i]; if (marker_newoffd[j] == -1) { marker_newoffd[j] = 1; } } for (i = 0, j = 0; i < num_cols_A_offd_new; i++) { if (marker_newoffd[i] == 1) { col_map_offd_A_new[j] = col_map_offd_A_new[i]; marker_newoffd[i] = j++; } } num_cols_A_offd_new = j; for (i = 0; i < nnz_offd_new; i++) { j = marker_newoffd[A_offd_j_new[i]]; hypre_assert(j >= 0 && j < num_cols_A_offd_new); A_offd_j_new[i] = j; } j = 2; row_starts_new = hypre_CTAlloc(HYPRE_BigInt, j, HYPRE_MEMORY_HOST); col_starts_new = hypre_CTAlloc(HYPRE_BigInt, j, HYPRE_MEMORY_HOST); memcpy(row_starts_new, hypre_ParCSRMatrixRowStarts(A), j*sizeof(HYPRE_BigInt)); memcpy(col_starts_new, hypre_ParCSRMatrixColStarts(A), j*sizeof(HYPRE_BigInt)); /* Now, we should have everything of Parcsr matrix As */ Anew = hypre_ParCSRMatrixCreate(comm, nrow_global, ncol_global, 
row_starts_new, col_starts_new, num_cols_A_offd_new, nnz_diag_new, nnz_offd_new); Anew_diag = hypre_ParCSRMatrixDiag(Anew); hypre_CSRMatrixData(Anew_diag) = A_diag_a_new; hypre_CSRMatrixI(Anew_diag) = A_diag_i_new; hypre_CSRMatrixJ(Anew_diag) = A_diag_j_new; Anew_offd = hypre_ParCSRMatrixOffd(Anew); hypre_CSRMatrixData(Anew_offd) = A_offd_a_new; hypre_CSRMatrixI(Anew_offd) = A_offd_i_new; hypre_CSRMatrixJ(Anew_offd) = A_offd_j_new; hypre_ParCSRMatrixColMapOffd(Anew) = col_map_offd_A_new; hypre_ParCSRMatrixSetNumNonzeros(Anew); hypre_ParCSRMatrixDNumNonzeros(Anew) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(Anew); //printf("nnz_diag %d --> %d, nnz_offd %d --> %d\n", nnz_diag, nnz_diag_new, nnz_offd, nnz_offd_new); /* create CommPkg of Anew */ hypre_MatvecCommPkgCreate(Anew); *As = Anew; /* if (bdiaginv) { *bdiaginv = dense_all; } else { hypre_TFree(dense_all, HYPRE_MEMORY_HOST); } */ /* save diagonal blocks in A */ A->bdiag_size = blockSize; A->bdiaginv = dense_all; /* free workspace */ hypre_TFree(IPIV, HYPRE_MEMORY_HOST); hypre_TFree(dgetri_work, HYPRE_MEMORY_HOST); hypre_TFree(marker_diag, HYPRE_MEMORY_HOST); hypre_TFree(marker_newoffd, HYPRE_MEMORY_HOST); hypre_TFree(offd2new, HYPRE_MEMORY_HOST); hypre_CSRMatrixDestroy(A_ext); return hypre_error_flag; } HYPRE_Int hypre_ParcsrGetExternalRowsInit( hypre_ParCSRMatrix *A, HYPRE_Int indices_len, HYPRE_BigInt *indices, hypre_ParCSRCommPkg *comm_pkg, HYPRE_Int want_data, void **request_ptr) { HYPRE_Int i, j, k; HYPRE_Int num_sends, num_rows_send, num_nnz_send, *send_i, num_recvs, num_rows_recv, num_nnz_recv, *recv_i, *send_jstarts, *recv_jstarts, *send_i_offset; HYPRE_BigInt *send_j, *recv_j; HYPRE_Complex *send_a = NULL, *recv_a = NULL; hypre_ParCSRCommPkg *comm_pkg_j; hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a; /* HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); */ /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_a = 
hypre_CSRMatrixData(A_diag); /* completes `HYPRE_Real *A_diag_a = ...' begun above */
   HYPRE_Int  *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int  *A_diag_j = hypre_CSRMatrixJ(A_diag);
   /* HYPRE_Int local_num_rows = hypre_CSRMatrixNumRows(A_diag); */

   /* This routine starts a nonblocking gather of the off-processor rows of A
    * listed in `indices' (one per recv element of comm_pkg): row lengths are
    * exchanged first, then column indices (and values if want_data). */

   /* off-diag part of A */
   hypre_CSRMatrix *A_offd   = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);
   /* HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); */
   /* HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A); */
   HYPRE_BigInt     first_col       = hypre_ParCSRMatrixFirstColDiag(A);
   HYPRE_BigInt    *col_map_offd_A  = hypre_ParCSRMatrixColMapOffd(A);

   MPI_Comm         comm = hypre_ParCSRMatrixComm(A);
   HYPRE_Int        num_procs;
   HYPRE_Int        my_id;
   void           **vrequest;
   hypre_CSRMatrix *A_ext;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* number of sends (#procs) */
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   /* number of rows to send */
   num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
   /* number of recvs (#procs) */
   num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   /* number of rows to recv */
   num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs);

   /* must be true if indices contains proper offd indices */
   hypre_assert(indices_len == num_rows_recv);

   /* send_i/recv_i:
    * the arrays to send and recv: we first send and recv the row lengths */
   send_i = hypre_TAlloc(HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST);
   recv_i = hypre_CTAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_HOST);

   /* fill the send array with row lengths (diag nnz + offd nnz of each row) */
   for (i = 0, num_nnz_send = 0; i < num_rows_send; i++)
   {
      /* j: row index to send */
      j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i);
      send_i[i] = A_diag_i[j+1] - A_diag_i[j] + A_offd_i[j+1] - A_offd_i[j];
      num_nnz_send += send_i[i];
   }

   /* send this array out: note the shift in recv_i by one (async),
    * so recv_i can later be turned into row pointers in place */
   comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_i, recv_i+1);

   /* prepare data to send out. overlap with the above communication */
   send_j = hypre_TAlloc(HYPRE_BigInt, num_nnz_send, HYPRE_MEMORY_HOST);
   if (want_data)
   {
      send_a = hypre_TAlloc(HYPRE_Complex, num_nnz_send, HYPRE_MEMORY_HOST);
   }

   /* send_i_offset: prefix sum of row lengths = per-row offsets into send_j */
   send_i_offset = hypre_TAlloc(HYPRE_Int, num_rows_send + 1, HYPRE_MEMORY_HOST);
   send_i_offset[0] = 0;
   hypre_TMemcpy(send_i_offset + 1, send_i, HYPRE_Int, num_rows_send,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
   /* prefix sum. TODO: OMP parallelization */
   for (i = 1; i <= num_rows_send; i++)
   {
      send_i_offset[i] += send_i_offset[i-1];
   }
   hypre_assert(send_i_offset[num_rows_send] == num_nnz_send);

   /* pointers to each proc in send_j */
   send_jstarts = hypre_TAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i <= num_sends; i++)
   {
      send_jstarts[i] = send_i_offset[hypre_ParCSRCommPkgSendMapStart(comm_pkg, i)];
   }
   hypre_assert(send_jstarts[num_sends] == num_nnz_send);

   /* fill the CSR matrix: j and a; column indices are sent as global ids */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE private(i,j,k)
#endif
   for (i = 0; i < num_rows_send; i++)
   {
      HYPRE_Int i1 = send_i_offset[i];
      j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i);
      /* open row j and fill ja and a to send */
      for (k = A_diag_i[j]; k < A_diag_i[j+1]; k++)
      {
         send_j[i1] = first_col + A_diag_j[k];
         if (want_data)
         {
            send_a[i1] = A_diag_a[k];
         }
         i1++;
      }
      if (num_procs > 1)
      {
         for (k = A_offd_i[j]; k < A_offd_i[j+1]; k++)
         {
            send_j[i1] = col_map_offd_A[A_offd_j[k]];
            if (want_data)
            {
               send_a[i1] = A_offd_a[k];
            }
            i1++;
         }
      }
      hypre_assert(send_i_offset[i+1] == i1);
   }

   /* finish the above communication: send_i/recv_i */
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /* adjust recv_i to ptrs (in-place prefix sum over the received lengths) */
   for (i = 1; i <= num_rows_recv; i++)
   {
      recv_i[i] += recv_i[i-1];
   }
   num_nnz_recv = recv_i[num_rows_recv];
   recv_j = hypre_CTAlloc(HYPRE_BigInt, num_nnz_recv, HYPRE_MEMORY_HOST);
   if (want_data)
   {
      recv_a = hypre_CTAlloc(HYPRE_Complex, num_nnz_recv, HYPRE_MEMORY_HOST);
   }
recv_jstarts = hypre_CTAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST);
   /* per-process offsets into recv_j, derived from the row pointers */
   for (i = 1; i <= num_recvs; i++)
   {
      j = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i);
      recv_jstarts[i] = recv_i[j];
   }

   /* ready to send and recv: create a communication package for data */
   comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm         (comm_pkg_j) = comm;
   hypre_ParCSRCommPkgNumSends     (comm_pkg_j) = num_sends;
   hypre_ParCSRCommPkgSendProcs    (comm_pkg_j) = hypre_ParCSRCommPkgSendProcs(comm_pkg);
   hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = send_jstarts;
   hypre_ParCSRCommPkgNumRecvs     (comm_pkg_j) = num_recvs;
   hypre_ParCSRCommPkgRecvProcs    (comm_pkg_j) = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
   hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = recv_jstarts;

   /* init communication */
   /* ja */
   comm_handle_j = hypre_ParCSRCommHandleCreate(21, comm_pkg_j, send_j, recv_j);
   if (want_data)
   {
      /* a */
      comm_handle_a = hypre_ParCSRCommHandleCreate(1, comm_pkg_j, send_a, recv_a);
   }
   else
   {
      comm_handle_a = NULL;
   }

   /* create A_ext: takes ownership of recv_i/recv_j/recv_a */
   A_ext = hypre_CSRMatrixCreate(num_rows_recv, hypre_ParCSRMatrixGlobalNumCols(A), num_nnz_recv);
   hypre_CSRMatrixMemoryLocation(A_ext) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixI   (A_ext) = recv_i;
   hypre_CSRMatrixBigJ(A_ext) = recv_j;
   hypre_CSRMatrixData(A_ext) = recv_a;

   /* output: the request bundle consumed by hypre_ParcsrGetExternalRowsWait */
   vrequest = hypre_TAlloc(void *, 4, HYPRE_MEMORY_HOST);
   vrequest[0] = (void *) comm_handle_j;
   vrequest[1] = (void *) comm_handle_a;
   vrequest[2] = (void *) A_ext;
   vrequest[3] = (void *) comm_pkg_j;

   *request_ptr = (void *) vrequest;

   /* free */
   hypre_TFree(send_i, HYPRE_MEMORY_HOST);
   hypre_TFree(send_i_offset, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/* Finish the nonblocking exchange started by hypre_ParcsrGetExternalRowsInit:
 * wait on the j (and optional a) communication, free the send-side buffers and
 * the temporary comm package, and return the received external rows (A_ext). */
hypre_CSRMatrix*
hypre_ParcsrGetExternalRowsWait(void *vrequest)
{
   void **request = (void **) vrequest;

   hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0];
   hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1];
   hypre_CSRMatrix        *A_ext         = (hypre_CSRMatrix *)        request[2];
   hypre_ParCSRCommPkg    *comm_pkg_j    = (hypre_ParCSRCommPkg *)    request[3];
   HYPRE_BigInt           *send_j        = (HYPRE_BigInt *) hypre_ParCSRCommHandleSendData(comm_handle_j);

   /* comm_handle_a is NULL when the Init call was made with want_data == 0 */
   if (comm_handle_a)
   {
      HYPRE_Complex *send_a = (HYPRE_Complex *) hypre_ParCSRCommHandleSendData(comm_handle_a);
      hypre_ParCSRCommHandleDestroy(comm_handle_a);
      hypre_TFree(send_a, HYPRE_MEMORY_HOST);
   }

   hypre_ParCSRCommHandleDestroy(comm_handle_j);
   hypre_TFree(send_j, HYPRE_MEMORY_HOST);

   hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST);
   hypre_TFree(request, HYPRE_MEMORY_HOST);

   return A_ext;
}

/* C = alpha * A + beta * B
 * A and B are assumed to have the same row and column partitionings */
HYPRE_Int
hypre_ParcsrAdd( HYPRE_Complex alpha, hypre_ParCSRMatrix *A,
                 HYPRE_Complex beta, hypre_ParCSRMatrix *B,
                 hypre_ParCSRMatrix **Cout )
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_Int num_procs, my_id;

   hypre_MPI_Comm_rank(comm, &my_id);
   hypre_MPI_Comm_size(comm, &num_procs);

   HYPRE_Int i, j;

   /* diag part of A */
   hypre_CSRMatrix *A_diag   = hypre_ParCSRMatrixDiag(A);
   HYPRE_Complex   *A_diag_a = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);
   /* off-diag part of A */
   hypre_CSRMatrix *A_offd   = hypre_ParCSRMatrixOffd(A);
   HYPRE_Complex   *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int        num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt    *col_map_offd_A  = hypre_ParCSRMatrixColMapOffd(A);
   /* A2C_offd: map from A's offd columns into C's (unioned) offd columns */
   HYPRE_Int       *A2C_offd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

   HYPRE_BigInt nrow_global = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_BigInt ncol_global = hypre_ParCSRMatrixGlobalNumCols(A);
   HYPRE_Int    nrow_local  = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int    ncol_local  = hypre_CSRMatrixNumCols(A_diag);
HYPRE_Int nnz_diag_A = A_diag_i[nrow_local];
   HYPRE_Int nnz_offd_A = A_offd_i[nrow_local];

   /* diag part of B */
   hypre_CSRMatrix *B_diag   = hypre_ParCSRMatrixDiag(B);
   HYPRE_Complex   *B_diag_a = hypre_CSRMatrixData(B_diag);
   HYPRE_Int       *B_diag_i = hypre_CSRMatrixI(B_diag);
   HYPRE_Int       *B_diag_j = hypre_CSRMatrixJ(B_diag);
   /* off-diag part of B */
   hypre_CSRMatrix *B_offd   = hypre_ParCSRMatrixOffd(B);
   HYPRE_Complex   *B_offd_a = hypre_CSRMatrixData(B_offd);
   HYPRE_Int       *B_offd_i = hypre_CSRMatrixI(B_offd);
   HYPRE_Int       *B_offd_j = hypre_CSRMatrixJ(B_offd);
   HYPRE_Int        num_cols_B_offd = hypre_CSRMatrixNumCols(B_offd);
   HYPRE_BigInt    *col_map_offd_B  = hypre_ParCSRMatrixColMapOffd(B);
   /* B2C_offd: map from B's offd columns into C's (unioned) offd columns */
   HYPRE_Int       *B2C_offd = hypre_TAlloc(HYPRE_Int, num_cols_B_offd, HYPRE_MEMORY_HOST);

   /* A and B must share both the row and the column partitioning */
   hypre_assert(nrow_global == hypre_ParCSRMatrixGlobalNumRows(B));
   hypre_assert(ncol_global == hypre_ParCSRMatrixGlobalNumCols(B));
   hypre_assert(nrow_local == hypre_CSRMatrixNumRows(B_diag));
   hypre_assert(ncol_local == hypre_CSRMatrixNumCols(B_diag));

   HYPRE_Int nnz_diag_B = B_diag_i[nrow_local];
   HYPRE_Int nnz_offd_B = B_offd_i[nrow_local];

   HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A);
   HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B);

   /* RL: TODO cannot guarantee, maybe should never assert
   hypre_assert(memory_location_A == memory_location_B);
   */

   /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D,
    * not sure if this is the right thing to do.
    * Also, need something like this in other places
    * TODO */
   HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B);

   /* C */
   hypre_ParCSRMatrix *C;
   HYPRE_BigInt       *row_starts_C, *col_starts_C;
   hypre_CSRMatrix    *C_diag;
   hypre_CSRMatrix    *C_offd;

   /* upper bound: C's offd columns before the union removes duplicates */
   HYPRE_Int     num_cols_C_offd = num_cols_A_offd + num_cols_B_offd;
   HYPRE_BigInt *col_map_offd_C  = hypre_TAlloc(HYPRE_BigInt, num_cols_C_offd, HYPRE_MEMORY_HOST);

   /* worst-case nnz: no overlap between A's and B's sparsity patterns */
   HYPRE_Int nnz_diag_C_alloc = nnz_diag_A + nnz_diag_B;
   HYPRE_Int nnz_offd_C_alloc = nnz_offd_A + nnz_offd_B;
   HYPRE_Int nnz_diag_C = 0, nnz_offd_C = 0;

   HYPRE_Int     *C_diag_i = hypre_CTAlloc(HYPRE_Int,     nrow_local + 1,   memory_location_C);
   HYPRE_Int     *C_diag_j = hypre_CTAlloc(HYPRE_Int,     nnz_diag_C_alloc, memory_location_C);
   HYPRE_Complex *C_diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag_C_alloc, memory_location_C);
   HYPRE_Int     *C_offd_i = hypre_CTAlloc(HYPRE_Int,     nrow_local + 1,   memory_location_C);
   HYPRE_Int     *C_offd_j = hypre_CTAlloc(HYPRE_Int,     nnz_offd_C_alloc, memory_location_C);
   HYPRE_Complex *C_offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd_C_alloc, memory_location_C);

   /* union the two offd col maps into col_map_offd_C; num_cols_C_offd shrinks
    * to the union size, and A2C_offd/B2C_offd receive the index translations */
   hypre_union2( num_cols_A_offd, col_map_offd_A, num_cols_B_offd, col_map_offd_B,
                 &num_cols_C_offd, col_map_offd_C, A2C_offd, B2C_offd );

   /* markers: position in C_*_j where a column was last stored; -1 = never.
    * An entry belongs to the current row iff marker >= that row's start. */
   HYPRE_Int *marker_diag = hypre_TAlloc(HYPRE_Int, ncol_local, HYPRE_MEMORY_HOST);
   HYPRE_Int *marker_offd = hypre_TAlloc(HYPRE_Int, num_cols_C_offd, HYPRE_MEMORY_HOST);
   for (i = 0; i < ncol_local; i++)
   {
      marker_diag[i] = -1;
   }
   for (i = 0; i < num_cols_C_offd; i++)
   {
      marker_offd[i] = -1;
   }

   /* main loop for each row i */
   for (i = 0; i < nrow_local; i++)
   {
      HYPRE_Int diag_i_start = nnz_diag_C;
      HYPRE_Int offd_i_start = nnz_offd_C;

      /* alpha * A, diag part; a repeated column within one row of A triggers
       * the warning below, i.e. A is expected to have no duplicates */
      for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++)
      {
         HYPRE_Int     col = A_diag_j[j];
         HYPRE_Complex val = A_diag_a[j];
         if (marker_diag[col] < diag_i_start)
         {
            /* this col has not been seen before, create new entry */
            marker_diag[col] = nnz_diag_C;
            C_diag_j[nnz_diag_C] = col;
            C_diag_a[nnz_diag_C] = alpha * val;
            nnz_diag_C ++;
         }
         else
         {
            /* this should not happen */
            hypre_printf("hypre warning: invalid ParCSR matrix %s %s %d\n",
                         __FILE__, __func__, __LINE__);
         }
      }

      /* beta * B, diag part: merge into the entries created from A */
      for (j = B_diag_i[i]; j < B_diag_i[i+1]; j++)
      {
         HYPRE_Int     col = B_diag_j[j];
         HYPRE_Complex val = B_diag_a[j];
         if (marker_diag[col] < diag_i_start /*&& hypre_abs(val) > 0.0*/)
         {
            /* this col has not been seen before, create new entry */
            marker_diag[col] = nnz_diag_C;
            C_diag_j[nnz_diag_C] = col;
            C_diag_a[nnz_diag_C] = beta * val;
            nnz_diag_C ++;
         }
         else
         {
            /* existing entry, update */
            HYPRE_Int p = marker_diag[col];
            hypre_assert(C_diag_j[p] == col);
            C_diag_a[p] += beta * val;
         }
      }

      C_diag_i[i+1] = nnz_diag_C;

      /* sequential run: offd parts are empty; C_offd_i stays all zero
       * (it was CTAlloc'ed), which is a valid empty row-pointer array */
      if (num_procs <= 1)
      {
         continue;
      }

      /* alpha * A, offd part (columns translated through A2C_offd) */
      for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++)
      {
         HYPRE_Int     colA = A_offd_j[j];
         HYPRE_Int     colC = A2C_offd[colA];
         HYPRE_Complex val  = A_offd_a[j];
         if (marker_offd[colC] < offd_i_start)
         {
            /* this col has not been seen before, create new entry */
            marker_offd[colC] = nnz_offd_C;
            C_offd_j[nnz_offd_C] = colC;
            C_offd_a[nnz_offd_C] = alpha * val;
            nnz_offd_C ++;
         }
         else
         {
            /* this should not happen */
            hypre_printf("hypre warning: invalid ParCSR matrix %s %s %d\n",
                         __FILE__, __func__, __LINE__);
         }
      }

      /* beta * B, offd part (columns translated through B2C_offd) */
      for (j = B_offd_i[i]; j < B_offd_i[i+1]; j++)
      {
         HYPRE_Int     colB = B_offd_j[j];
         HYPRE_Int     colC = B2C_offd[colB];
         HYPRE_Complex val  = B_offd_a[j];
         if (marker_offd[colC] < offd_i_start /*&& hypre_abs(val) > 0.0*/)
         {
            /* this col has not been seen before, create new entry */
            marker_offd[colC] = nnz_offd_C;
            C_offd_j[nnz_offd_C] = colC;
            C_offd_a[nnz_offd_C] = beta * val;
            nnz_offd_C ++;
         }
         else
         {
            /* existing entry, update */
            HYPRE_Int p = marker_offd[colC];
            hypre_assert(C_offd_j[p] == colC);
            C_offd_a[p] += beta * val;
         }
      }

      C_offd_i[i+1] = nnz_offd_C;
   }

   /* C inherits A's (2-entry) row/col partitioning arrays */
   j = 2;
   row_starts_C = hypre_TAlloc(HYPRE_BigInt, j, HYPRE_MEMORY_HOST);
   col_starts_C = hypre_TAlloc(HYPRE_BigInt, j, HYPRE_MEMORY_HOST);
   memcpy(row_starts_C, hypre_ParCSRMatrixRowStarts(A), j*sizeof(HYPRE_BigInt));
   memcpy(col_starts_C, hypre_ParCSRMatrixColStarts(A), j*sizeof(HYPRE_BigInt));
/* Now, we should have everything of Parcsr matrix C */
   C = hypre_ParCSRMatrixCreate(comm,
                                nrow_global,
                                ncol_global,
                                row_starts_C,
                                col_starts_C,
                                num_cols_C_offd,
                                nnz_diag_C,
                                nnz_offd_C);

   /* hand the assembled arrays over to C (C now owns them) */
   C_diag = hypre_ParCSRMatrixDiag(C);
   hypre_CSRMatrixData(C_diag) = C_diag_a;
   hypre_CSRMatrixI(C_diag) = C_diag_i;
   hypre_CSRMatrixJ(C_diag) = C_diag_j;
   hypre_CSRMatrixMemoryLocation(C_diag) = memory_location_C;

   C_offd = hypre_ParCSRMatrixOffd(C);
   hypre_CSRMatrixData(C_offd) = C_offd_a;
   hypre_CSRMatrixI(C_offd) = C_offd_i;
   hypre_CSRMatrixJ(C_offd) = C_offd_j;
   hypre_CSRMatrixMemoryLocation(C_offd) = memory_location_C;

   hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C;

   hypre_ParCSRMatrixSetNumNonzeros(C);
   hypre_ParCSRMatrixDNumNonzeros(C) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(C);

   /* create CommPkg of C */
   hypre_MatvecCommPkgCreate(C);

   *Cout = C;

   /* done */
   hypre_TFree(A2C_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(B2C_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(marker_diag, HYPRE_MEMORY_HOST);
   hypre_TFree(marker_offd, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/* Frobenius norm of a ParCSR matrix: square root of the global (all-reduced)
 * sum of squares of the local diag and offd Frobenius norms. */
HYPRE_Real
hypre_ParCSRMatrixFnorm( hypre_ParCSRMatrix *A )
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_Real f_diag, f_offd, local_result, result;

   f_diag = hypre_CSRMatrixFnorm(hypre_ParCSRMatrixDiag(A));
   f_offd = hypre_CSRMatrixFnorm(hypre_ParCSRMatrixOffd(A));
   /* local sum of squares of the two pieces */
   local_result = f_diag * f_diag + f_offd * f_offd;

   hypre_MPI_Allreduce(&local_result, &result, 1, HYPRE_MPI_REAL, hypre_MPI_SUM, comm);

   return sqrt(result);
}

HYPRE_Int
hypre_ExchangeExternalRowsInit( hypre_CSRMatrix *B_ext, hypre_ParCSRCommPkg *comm_pkg_A, void **request_ptr)
{
   MPI_Comm   comm            = hypre_ParCSRCommPkgComm(comm_pkg_A);
   HYPRE_Int  num_recvs       = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A);
   HYPRE_Int *recv_procs      = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A);
   HYPRE_Int *recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A);
   HYPRE_Int  num_sends       = hypre_ParCSRCommPkgNumSends(comm_pkg_A);
   HYPRE_Int *send_procs      = hypre_ParCSRCommPkgSendProcs(comm_pkg_A);
   HYPRE_Int
*send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A); /* completes the HYPRE_Int declaration above */
   HYPRE_Int  num_elmts_send = send_map_starts[num_sends];
   HYPRE_Int  num_elmts_recv = recv_vec_starts[num_recvs];

   /* B_ext may be NULL; field accesses are guarded here */
   HYPRE_Int     *B_ext_i     = B_ext ? hypre_CSRMatrixI(B_ext) : NULL;
   HYPRE_BigInt  *B_ext_j     = B_ext ? hypre_CSRMatrixBigJ(B_ext) : NULL;
   HYPRE_Complex *B_ext_data  = B_ext ? hypre_CSRMatrixData(B_ext) : NULL;
   HYPRE_Int      B_ext_ncols = B_ext ? hypre_CSRMatrixNumCols(B_ext) : 0;
   HYPRE_Int      B_ext_nrows = B_ext ? hypre_CSRMatrixNumRows(B_ext) : 0;
   HYPRE_Int     *B_ext_rownnz = hypre_CTAlloc(HYPRE_Int, B_ext_nrows, HYPRE_MEMORY_HOST);

   hypre_assert(num_elmts_recv == B_ext_nrows);

   /* output matrix */
   hypre_CSRMatrix *B_int;
   HYPRE_Int        B_int_nrows = num_elmts_send;
   HYPRE_Int        B_int_ncols = B_ext_ncols;
   HYPRE_Int       *B_int_i     = hypre_TAlloc(HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_HOST);
   HYPRE_BigInt    *B_int_j     = NULL;
   HYPRE_Complex   *B_int_data  = NULL;
   HYPRE_Int        B_int_nnz;

   hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a;
   hypre_ParCSRCommPkg    *comm_pkg_j;
   HYPRE_Int *jdata_recv_vec_starts;
   HYPRE_Int *jdata_send_map_starts;
   HYPRE_Int  i;
   HYPRE_Int  num_procs;
   void     **vrequest;

   hypre_MPI_Comm_size(comm, &num_procs);

   jdata_send_map_starts = hypre_TAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);

   /*--------------------------------------------------------------------------
    * B_ext_rownnz contains the number of elements of row j
    * (to be determined through send_map_elmnts on the receiving end)
    *--------------------------------------------------------------------------*/
   for (i = 0; i < B_ext_nrows; i++)
   {
      B_ext_rownnz[i] = B_ext_i[i+1] - B_ext_i[i];
   }

   /*--------------------------------------------------------------------------
    * initialize communication: send/recv the row nnz
    * (note the use of comm_pkg_A, mode 12, as in transpose matvec
    *--------------------------------------------------------------------------*/
   comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg_A, B_ext_rownnz, B_int_i + 1);

   jdata_recv_vec_starts = hypre_TAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST);
   jdata_recv_vec_starts[0] = 0;
   /* NOTE(review): B_ext_i is dereferenced here even when B_ext is NULL;
    * presumably callers pass a non-NULL B_ext whenever num_recvs > 0 -- verify */
   for (i = 1; i <= num_recvs; i++)
   {
      jdata_recv_vec_starts[i] = B_ext_i[recv_vec_starts[i]];
   }

   /* comm_pkg_j reverses the roles of send and recv relative to comm_pkg_A */
   comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm(comm_pkg_j)      = comm;
   hypre_ParCSRCommPkgNumSends(comm_pkg_j)  = num_recvs;
   hypre_ParCSRCommPkgNumRecvs(comm_pkg_j)  = num_sends;
   hypre_ParCSRCommPkgSendProcs(comm_pkg_j) = recv_procs;
   hypre_ParCSRCommPkgRecvProcs(comm_pkg_j) = send_procs;

   /* finish the row-nnz communication before turning counts into pointers */
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /*--------------------------------------------------------------------------
    * compute B_int: row nnz to row ptrs
    *--------------------------------------------------------------------------*/
   B_int_i[0] = 0;
   for (i = 1; i <= B_int_nrows; i++)
   {
      B_int_i[i] += B_int_i[i-1];
   }

   B_int_nnz = B_int_i[B_int_nrows];

   B_int_j    = hypre_TAlloc(HYPRE_BigInt, B_int_nnz, HYPRE_MEMORY_HOST);
   B_int_data = hypre_TAlloc(HYPRE_Complex, B_int_nnz, HYPRE_MEMORY_HOST);

   for (i = 0; i <= num_sends; i++)
   {
      jdata_send_map_starts[i] = B_int_i[send_map_starts[i]];
   }

   /* note the order of send/recv is reversed */
   hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = jdata_send_map_starts;
   hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = jdata_recv_vec_starts;

   /* send/recv CSR rows */
   comm_handle_a = hypre_ParCSRCommHandleCreate( 1, comm_pkg_j, B_ext_data, B_int_data);
   comm_handle_j = hypre_ParCSRCommHandleCreate(21, comm_pkg_j, B_ext_j, B_int_j);

   /* create CSR: B_int takes ownership of B_int_i/B_int_j/B_int_data */
   B_int = hypre_CSRMatrixCreate(B_int_nrows, B_int_ncols, B_int_nnz);
   hypre_CSRMatrixMemoryLocation(B_int) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixI(B_int)    = B_int_i;
   hypre_CSRMatrixBigJ(B_int) = B_int_j;
   hypre_CSRMatrixData(B_int) = B_int_data;

   /* output: request bundle consumed by hypre_ExchangeExternalRowsWait */
   vrequest = hypre_TAlloc(void *, 4, HYPRE_MEMORY_HOST);
   vrequest[0] = (void *) comm_handle_j;
   vrequest[1] = (void *) comm_handle_a;
   vrequest[2] = (void *) B_int;
   vrequest[3] = (void *) comm_pkg_j;

   *request_ptr = (void *) vrequest;
hypre_TFree(B_ext_rownnz, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/* Finish the nonblocking exchange started by hypre_ExchangeExternalRowsInit:
 * wait on the j/a communications, free the temporary comm package, and
 * return the received rows as a CSR matrix (B_int). */
hypre_CSRMatrix*
hypre_ExchangeExternalRowsWait(void *vrequest)
{
   void **request = (void **) vrequest;

   hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0];
   hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1];
   hypre_CSRMatrix        *B_int         = (hypre_CSRMatrix *)        request[2];
   hypre_ParCSRCommPkg    *comm_pkg_j    = (hypre_ParCSRCommPkg *)    request[3];

   /* communication done */
   hypre_ParCSRCommHandleDestroy(comm_handle_a);
   hypre_ParCSRCommHandleDestroy(comm_handle_j);

   hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST);
   hypre_TFree(request, HYPRE_MEMORY_HOST);

   return B_int;
}

/* -----------------------------------------------------------------------------
 * extract submatrix A_{FF}, A_{FC}, A_{CF} or A_{CC}
 * char job[2] = "FF", "FC", "CF" or "CC"
 * ----------------------------------------------------------------------------- */
HYPRE_Int
hypre_ParCSRMatrixExtractSubmatrixFC( hypre_ParCSRMatrix  *A,
                                      HYPRE_Int           *CF_marker,
                                      HYPRE_BigInt        *cpts_starts_in,
                                      const char          *job,
                                      hypre_ParCSRMatrix **B_ptr,
                                      HYPRE_Real           strength_thresh)
{
   MPI_Comm                comm     = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg    *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;

   /* diag part of A */
   hypre_CSRMatrix *A_diag   = hypre_ParCSRMatrixDiag(A);
   HYPRE_Complex   *A_diag_a = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);
   /* off-diag part of A */
   hypre_CSRMatrix *A_offd   = hypre_ParCSRMatrixOffd(A);
   HYPRE_Complex   *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int        num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   //HYPRE_Int *col_map_offd_A =
hypre_ParCSRMatrixColMapOffd(A); hypre_ParCSRMatrix *B; hypre_CSRMatrix *B_diag, *B_offd; HYPRE_Real *B_maxel_row; HYPRE_Int *B_diag_i, *B_diag_j, *B_offd_i, *B_offd_j; HYPRE_Complex *B_diag_a, *B_offd_a; HYPRE_Int num_cols_B_offd; HYPRE_BigInt *col_map_offd_B; HYPRE_Int i, j, k, k1, k2; HYPRE_BigInt B_nrow_global, B_ncol_global; HYPRE_Int A_nlocal, B_nrow_local, B_ncol_local, B_nnz_diag, B_nnz_offd; HYPRE_BigInt total_global_fpts, total_global_cpts, *fpts_starts, *cpts_starts; HYPRE_Int nf_local, nc_local; HYPRE_Int row_set, col_set; HYPRE_BigInt *B_row_starts, *B_col_starts, B_first_col; HYPRE_Int my_id, num_procs, *sub_idx_diag, *sub_idx_offd; HYPRE_Int num_sends, *send_buf_data; /* MPI size and rank*/ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); row_set = job[0] == 'F' ? -1 : 1; col_set = job[1] == 'F' ? -1 : 1; A_nlocal = hypre_CSRMatrixNumRows(A_diag); /*-------------- global number of C points and local C points * assuming cpts_starts is given */ if (row_set == 1 || col_set == 1) { /* copy cpts_starts first */ HYPRE_Int len; len = 2; cpts_starts = hypre_TAlloc(HYPRE_BigInt, len, HYPRE_MEMORY_HOST); memcpy(cpts_starts, cpts_starts_in, len*sizeof(HYPRE_BigInt)); if (my_id == (num_procs -1)) { total_global_cpts = cpts_starts[1]; } hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm); nc_local = (HYPRE_Int)(cpts_starts[1] - cpts_starts[0]); } /*-------------- global number of F points, local F points, and F starts */ if (row_set == -1 || col_set == -1) { nf_local = 0; for (i = 0; i < A_nlocal; i++) { if (CF_marker[i] < 0) { nf_local++; } } fpts_starts = hypre_TAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); hypre_MPI_Scan(&nf_local, fpts_starts+1, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); fpts_starts[0] = fpts_starts[1] - nf_local; if (my_id == num_procs - 1) { total_global_fpts = fpts_starts[1]; } hypre_MPI_Bcast(&total_global_fpts, 1, HYPRE_MPI_INT, num_procs-1, comm); } if (row_set == -1 && col_set == -1) { /* FF 
*/ B_nrow_local = nf_local; B_ncol_local = nf_local; B_nrow_global = total_global_fpts; B_ncol_global = total_global_fpts; B_row_starts = B_col_starts = fpts_starts; } else if (row_set == -1 && col_set == 1) { /* FC */ B_nrow_local = nf_local; B_ncol_local = nc_local; B_nrow_global = total_global_fpts; B_ncol_global = total_global_cpts; B_row_starts = fpts_starts; B_col_starts = cpts_starts; } else if (row_set == 1 && col_set == -1) { /* CF */ B_nrow_local = nc_local; B_ncol_local = nf_local; B_nrow_global = total_global_cpts; B_ncol_global = total_global_fpts; B_row_starts = cpts_starts; B_col_starts = fpts_starts; } else { /* CC */ B_nrow_local = nc_local; B_ncol_local = nc_local; B_nrow_global = total_global_cpts; B_ncol_global = total_global_cpts; B_row_starts = B_col_starts = cpts_starts; } /* global index of my first col */ B_first_col = B_col_starts[0]; /* sub_idx_diag: [local] mapping from F+C to F/C, if not selected, be -1 */ sub_idx_diag = hypre_TAlloc(HYPRE_Int, A_nlocal, HYPRE_MEMORY_HOST); for (i = 0, k = 0; i < A_nlocal; i++) { HYPRE_Int CF_i = CF_marker[i] > 0 ? 
1 : -1; if (CF_i == col_set) { sub_idx_diag[i] = k++; } else { sub_idx_diag[i] = -1; } } hypre_assert(k == B_ncol_local); num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); send_buf_data = hypre_TAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); k = 0; for (i = 0; i < num_sends; i++) { /* start pos of elements sent to send_proc[i] */ HYPRE_Int si = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); HYPRE_Int ei = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); /* loop through all elems to send_proc[i] */ for (j = si; j < ei; j++) { /* j1: local idx */ HYPRE_Int j1 = sub_idx_diag[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; if (j1 != -1) { /* adjust j1 to B global idx */ j1 += B_first_col; } send_buf_data[k++] = j1; } } hypre_assert(k == hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends)); /* recv buffer */ sub_idx_offd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); /* create a handle to start communication. 11: for integer */ comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_buf_data, sub_idx_offd); /* destroy the handle to finish communication */ hypre_ParCSRCommHandleDestroy(comm_handle); for (i = 0, num_cols_B_offd = 0; i < num_cols_A_offd; i++) { if (sub_idx_offd[i] != -1) { num_cols_B_offd ++; } } col_map_offd_B = hypre_TAlloc(HYPRE_BigInt, num_cols_B_offd, HYPRE_MEMORY_HOST); for (i = 0, k = 0; i < num_cols_A_offd; i++) { if (sub_idx_offd[i] != -1) { col_map_offd_B[k] = sub_idx_offd[i]; sub_idx_offd[i] = k++; } } hypre_assert(k == num_cols_B_offd); /* count nnz and set ia */ B_nnz_diag = B_nnz_offd = 0; B_maxel_row = hypre_TAlloc(HYPRE_Real, B_nrow_local, HYPRE_MEMORY_HOST); B_diag_i = hypre_TAlloc(HYPRE_Int, B_nrow_local+1, HYPRE_MEMORY_HOST); B_offd_i = hypre_TAlloc(HYPRE_Int, B_nrow_local+1, HYPRE_MEMORY_HOST); B_diag_i[0] = B_offd_i[0] = 0; for (i = 0, k = 0; i < A_nlocal; i++) { HYPRE_Int CF_i = CF_marker[i] > 0 ? 
1 : -1; if (CF_i != row_set) { continue; } k++; // Get max abs-value element of this row HYPRE_Real temp_max = 0; if (strength_thresh > 0) { for (j = A_diag_i[i]+1; j < A_diag_i[i+1]; j++) { if (hypre_cabs(A_diag_a[j]) > temp_max) { temp_max = hypre_cabs(A_diag_a[j]); } } for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { if (hypre_cabs(A_offd_a[j]) > temp_max) { temp_max = hypre_cabs(A_offd_a[j]); } } } B_maxel_row[k-1] = temp_max; // add one for diagonal element j = A_diag_i[i]; if (sub_idx_diag[A_diag_j[j]] != -1) { B_nnz_diag++; } // Count nnzs larger than tolerance times max row element for (j = A_diag_i[i]+1; j < A_diag_i[i+1]; j++) { if ( (sub_idx_diag[A_diag_j[j]] != -1) && (hypre_cabs(A_diag_a[j]) > (strength_thresh*temp_max)) ) { B_nnz_diag++; } } for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { if ( (sub_idx_offd[A_offd_j[j]] != -1) && (hypre_cabs(A_offd_a[j]) > (strength_thresh*temp_max)) ) { B_nnz_offd++; } } B_diag_i[k] = B_nnz_diag; B_offd_i[k] = B_nnz_offd; } hypre_assert(k == B_nrow_local); B_diag_j = hypre_TAlloc(HYPRE_Int, B_nnz_diag, HYPRE_MEMORY_HOST); B_diag_a = hypre_TAlloc(HYPRE_Complex, B_nnz_diag, HYPRE_MEMORY_HOST); B_offd_j = hypre_TAlloc(HYPRE_Int, B_nnz_offd, HYPRE_MEMORY_HOST); B_offd_a = hypre_TAlloc(HYPRE_Complex, B_nnz_offd, HYPRE_MEMORY_HOST); for (i = 0, k=0, k1 = 0, k2 = 0; i < A_nlocal; i++) { HYPRE_Int CF_i = CF_marker[i] > 0 ? 
1 : -1; if (CF_i != row_set) { continue; } HYPRE_Real maxel = B_maxel_row[k]; k++; for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { HYPRE_Int j1 = sub_idx_diag[A_diag_j[j]]; if ( (j1 != -1) && ( (hypre_cabs(A_diag_a[j]) > (strength_thresh*maxel)) || j==A_diag_i[i] ) ) { B_diag_j[k1] = j1; B_diag_a[k1] = A_diag_a[j]; k1++; } } for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { HYPRE_Int j1 = sub_idx_offd[A_offd_j[j]]; if ((j1 != -1) && (hypre_cabs(A_offd_a[j]) > (strength_thresh*maxel))) { hypre_assert(j1 >= 0 && j1 < num_cols_B_offd); B_offd_j[k2] = j1; B_offd_a[k2] = A_offd_a[j]; k2++; } } } hypre_assert(k1 == B_nnz_diag && k2 == B_nnz_offd); /* ready to create B = A(rowset, colset) */ B = hypre_ParCSRMatrixCreate(comm, B_nrow_global, B_ncol_global, B_row_starts, B_col_starts, num_cols_B_offd, B_nnz_diag, B_nnz_offd); B_diag = hypre_ParCSRMatrixDiag(B); hypre_CSRMatrixMemoryLocation(B_diag) = HYPRE_MEMORY_HOST; hypre_CSRMatrixData(B_diag) = B_diag_a; hypre_CSRMatrixI(B_diag) = B_diag_i; hypre_CSRMatrixJ(B_diag) = B_diag_j; B_offd = hypre_ParCSRMatrixOffd(B); hypre_CSRMatrixMemoryLocation(B_offd) = HYPRE_MEMORY_HOST; hypre_CSRMatrixData(B_offd) = B_offd_a; hypre_CSRMatrixI(B_offd) = B_offd_i; hypre_CSRMatrixJ(B_offd) = B_offd_j; hypre_ParCSRMatrixColMapOffd(B) = col_map_offd_B; hypre_ParCSRMatrixSetNumNonzeros(B); hypre_ParCSRMatrixDNumNonzeros(B) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(B); hypre_MatvecCommPkgCreate(B); *B_ptr = B; hypre_TFree(B_maxel_row, HYPRE_MEMORY_HOST); hypre_TFree(send_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(sub_idx_diag, HYPRE_MEMORY_HOST); hypre_TFree(sub_idx_offd, HYPRE_MEMORY_HOST); return hypre_error_flag; }
Threads1xxx.c
#include <stdio.h>
#include <omp.h>

/* Test: offload a teams/distribute region and check every element of Res
 * is written by the device; also record team/thread counts and CU ids. */

int thdLim =1024;
//__attribute__((amdgpu_flat_work_group_size(1024, 1024)))

int main() {
    int numTeams = 128;
    int N = 12;      /* number of teams whose limits/thread counts we sample */
    int NN = 1024;   /* elements (and threads) per team */
    int CUs[numTeams*NN];
    int lims[N], threads[N], Res[numTeams*NN];
    int i;

    for (i = 0; i < N; i++)
        lims[i] = threads[i] = -1;

    /* BUGFIX: initialize the WHOLE result array (numTeams*NN entries), not
     * just the first N*NN.  The verification loop below scans all
     * numTeams*NN entries, so any element the target region failed to
     * write must hold the sentinel -1, not uninitialized stack garbage. */
    for (i = 0; i < numTeams*NN; i++)
        Res[i] = -1;

    #pragma omp target teams num_teams(numTeams) thread_limit(1024)
    #pragma omp distribute
    for (int j = 0; j < numTeams; j++) {
        /* Sample runtime-reported team/thread counts for the first N teams. */
        if (j < N) {
            lims[j] = omp_get_num_teams();
            threads[j] = omp_get_num_threads();
        }
        #pragma omp parallel for
        for (i = j*NN; i < (j+1)*NN; i++) {
            Res[i] = i;
            CUs[i] = omp_ext_get_smid();  /* AMD extension: compute-unit id */
        }
    }

    for (i = 0; i < N; i++) {
        printf("i=%d lims[%d] threads[%d]\n", i, lims[i], threads[i]);
    }

    /* Every element must have been written with its own index. */
    for (i = 0; i < numTeams*NN; i++)
        if (Res[i] != i) {
            printf("Failed %d %d\n", i, Res[i]);
            return 1;
        }

    //for (i=0; i <numTeams*NN; i++)
    //  printf("CUs %d\n",CUs[i]);

    return 0;
}
ompbarrier.c
#include <stdio.h>
#include <stdlib.h>

/* OpenMP barrier smoke test: every thread in the parallel region walks
 * the same fixed number of iterations.  A barrier before the print keeps
 * the team in lock-step, and a second barrier after the flush guarantees
 * all threads have reported iteration k before any thread starts k+1. */
int main(int argc, char* argv[])
{
    const int total_steps = 16;
    int step;

    #pragma omp parallel private(step)
    {
        for (step = 0; step < total_steps; ++step) {
            #pragma omp barrier
            printf("Performing iteration %d\n", step);
            fflush(stdout);
            #pragma omp barrier
        }
    }

    return 0;
}
displacement_lagrangemultiplier_frictional_contact_criteria.h
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: StructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_FRICTIONAL_CONTACT_CRITERIA_H) #define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_FRICTIONAL_CONTACT_CRITERIA_H /* System includes */ /* External includes */ /* Project includes */ #include "utilities/table_stream_utility.h" #include "utilities/color_utilities.h" #include "solving_strategies/convergencecriterias/convergence_criteria.h" #include "custom_utilities/active_set_utilities.h" namespace Kratos { ///@addtogroup ContactStructuralMechanicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@name Kratos Classes ///@{ /** * @class DisplacementLagrangeMultiplierFrictionalContactCriteria * @ingroup ContactStructuralMechanicsApplication * @brief Convergence criteria for contact problems * @details This class implements a convergence control based on nodal displacement and * lagrange multiplier values. The error is evaluated separately for each of them, and * relative and absolute tolerances for both must be specified. 
* @author Vicente Mataix Ferrandiz */ template< class TSparseSpace, class TDenseSpace > class DisplacementLagrangeMultiplierFrictionalContactCriteria : public ConvergenceCriteria< TSparseSpace, TDenseSpace > { public: ///@name Type Definitions ///@{ /// Pointer definition of DisplacementLagrangeMultiplierFrictionalContactCriteria KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierFrictionalContactCriteria ); /// Local Flags KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT ); KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT ); KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED ); KRATOS_DEFINE_LOCAL_FLAG( PURE_SLIP ); /// The base class definition (and it subclasses) typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; /// The sparse space used typedef TSparseSpace SparseSpaceType; /// The r_table stream definition TODO: Replace by logger typedef TableStreamUtility::Pointer TablePrinterPointerType; /// The index type definition typedef std::size_t IndexType; /// The key type definition typedef std::size_t KeyType; ///@} ///@name Life Cycle ///@{ /// Constructor. 
/** * @param DispRatioTolerance Relative tolerance for displacement error * @param DispAbsTolerance Absolute tolerance for displacement error * @param LMRatioTolerance Relative tolerance for lagrange multiplier error * @param LMAbsTolerance Absolute tolerance for lagrange multiplier error * @param NormalTangentRatio Ratio between the normal and tangent that will accepted as converged * @param EnsureContact To check if the contact is lost * @param pTable The pointer to the output r_table * @param PrintingOutput If the output is going to be printed in a txt file */ explicit DisplacementLagrangeMultiplierFrictionalContactCriteria( const TDataType DispRatioTolerance, const TDataType DispAbsTolerance, const TDataType LMNormalRatioTolerance, const TDataType LMNormalAbsTolerance, const TDataType LMTangentRatioTolerance, const TDataType LMTangentAbsTolerance, const TDataType NormalTangentRatio, const bool EnsureContact = false, const bool PureSlip = false, const bool PrintingOutput = false ) : BaseType() { // Set local flags mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::ENSURE_CONTACT, EnsureContact); mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT, PrintingOutput); mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP, PureSlip); mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::TABLE_IS_INITIALIZED, false); // The displacement solution mDispRatioTolerance = DispRatioTolerance; mDispAbsTolerance = DispAbsTolerance; // The normal contact solution mLMNormalRatioTolerance = LMNormalRatioTolerance; mLMNormalAbsTolerance = LMNormalAbsTolerance; // The tangent contact solution mLMTangentRatioTolerance = LMTangentRatioTolerance; mLMTangentAbsTolerance = LMTangentAbsTolerance; // We get the ratio between the normal and tangent that will accepted as converged mNormalTangentRatio = NormalTangentRatio; } /** * @brief Default constructor (parameters) * @param ThisParameters The 
configuration parameters */ explicit DisplacementLagrangeMultiplierFrictionalContactCriteria( Parameters ThisParameters = Parameters(R"({})")) : BaseType() { // The default parameters Parameters default_parameters = Parameters(R"( { "ensure_contact" : false, "pure_slip" : false, "print_convergence_criterion" : false, "displacement_relative_tolerance" : 1.0e-4, "displacement_absolute_tolerance" : 1.0e-9, "contact_displacement_relative_tolerance" : 1.0e-4, "contact_displacement_absolute_tolerance" : 1.0e-9, "frictional_contact_displacement_relative_tolerance" : 1.0e-4, "frictional_contact_displacement_absolute_tolerance" : 1.0e-9, "ratio_normal_tangent_threshold" : 1.0e-4 })" ); ThisParameters.ValidateAndAssignDefaults(default_parameters); // The displacement solution mDispRatioTolerance = ThisParameters["displacement_relative_tolerance"].GetDouble(); mDispAbsTolerance = ThisParameters["displacement_absolute_tolerance"].GetDouble(); // The normal contact solution mLMNormalRatioTolerance = ThisParameters["contact_displacement_relative_tolerance"].GetDouble(); mLMNormalAbsTolerance = ThisParameters["contact_displacement_absolute_tolerance"].GetDouble(); // The tangent contact solution mLMTangentRatioTolerance = ThisParameters["frictional_contact_displacement_relative_tolerance"].GetDouble(); mLMTangentAbsTolerance = ThisParameters["frictional_contact_displacement_absolute_tolerance"].GetDouble(); // We get the ratio between the normal and tangent that will accepted as converged mNormalTangentRatio = ThisParameters["ratio_normal_tangent_threshold"].GetDouble(); // Set local flags mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT, ThisParameters["print_convergence_criterion"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::TABLE_IS_INITIALIZED, false); 
mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP, ThisParameters["pure_slip"].GetBool()); } //* Copy constructor. DisplacementLagrangeMultiplierFrictionalContactCriteria( DisplacementLagrangeMultiplierFrictionalContactCriteria const& rOther ) :BaseType(rOther) ,mOptions(rOther.mOptions) ,mDispRatioTolerance(rOther.mDispRatioTolerance) ,mDispAbsTolerance(rOther.mDispAbsTolerance) ,mLMNormalRatioTolerance(rOther.mLMNormalRatioTolerance) ,mLMNormalAbsTolerance(rOther.mLMNormalAbsTolerance) ,mLMTangentRatioTolerance(rOther.mLMTangentRatioTolerance) ,mLMTangentAbsTolerance(rOther.mLMTangentAbsTolerance) ,mNormalTangentRatio(rOther.mNormalTangentRatio) { } /// Destructor. ~DisplacementLagrangeMultiplierFrictionalContactCriteria() override = default; ///@} ///@name Operators ///@{ /** * @brief Compute relative and absolute error. * @param rModelPart Reference to the ModelPart containing the contact problem. * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual) * @return true if convergence is achieved, false otherwise */ bool PostCriteria( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { if (SparseSpaceType::Size(rDx) != 0) { //if we are solving for something // Getting process info ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); // Compute the active set if (!r_process_info[ACTIVE_SET_COMPUTED]) { const array_1d<std::size_t, 2> is_converged = ActiveSetUtilities::ComputeALMFrictionalActiveSet(rModelPart, mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP), this->GetEchoLevel()); // We save to the process info if the active set has converged r_process_info[ACTIVE_SET_CONVERGED] = is_converged[0] == 0 ? 
true : false; r_process_info[SLIP_SET_CONVERGED] = is_converged[1] == 0 ? true : false; r_process_info[ACTIVE_SET_COMPUTED] = true; } // Initialize TDataType disp_solution_norm = 0.0, normal_lm_solution_norm = 0.0, tangent_lm_stick_solution_norm = 0.0, tangent_lm_slip_solution_norm = 0.0, disp_increase_norm = 0.0, normal_lm_increase_norm = 0.0, tangent_lm_stick_increase_norm = 0.0, tangent_lm_slip_increase_norm = 0.0; IndexType disp_dof_num(0), lm_dof_num(0), lm_stick_dof_num(0), lm_slip_dof_num(0); // First iterator const auto it_dof_begin = rDofSet.begin(); // The nodes array auto& r_nodes_array = rModelPart.Nodes(); // Auxiliar values std::size_t dof_id = 0; TDataType dof_value = 0.0, dof_incr = 0.0; // Loop over Dofs #pragma omp parallel for reduction(+:disp_solution_norm, normal_lm_solution_norm, tangent_lm_slip_solution_norm, tangent_lm_stick_solution_norm, disp_increase_norm, normal_lm_increase_norm, tangent_lm_slip_increase_norm, tangent_lm_stick_increase_norm, disp_dof_num, lm_dof_num, lm_stick_dof_num, lm_slip_dof_num, dof_id, dof_value, dof_incr) for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) { auto it_dof = it_dof_begin + i; if (it_dof->IsFree()) { dof_id = it_dof->EquationId(); dof_value = it_dof->GetSolutionStepValue(0); dof_incr = rDx[dof_id]; const auto curr_var = it_dof->GetVariable(); if (curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) { // The normal of the node (TODO: how to solve this without accesing all the time to the database?) 
const auto it_node = r_nodes_array.find(it_dof->Id()); const double normal_x = it_node->FastGetSolutionStepValue(NORMAL_X); const TDataType normal_dof_value = dof_value * normal_x; const TDataType normal_dof_incr = dof_incr * normal_x; normal_lm_solution_norm += std::pow(normal_dof_value, 2); normal_lm_increase_norm += std::pow(normal_dof_incr, 2); if (it_node->Is(SLIP) || mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP)) { tangent_lm_slip_solution_norm += std::pow(dof_value - normal_dof_value, 2); tangent_lm_slip_increase_norm += std::pow(dof_incr - normal_dof_incr, 2); ++lm_slip_dof_num; } else { tangent_lm_stick_solution_norm += std::pow(dof_value - normal_dof_value, 2); tangent_lm_stick_increase_norm += std::pow(dof_incr - normal_dof_incr, 2); ++lm_stick_dof_num; } lm_dof_num++; } else if (curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) { // The normal of the node (TODO: how to solve this without accesing all the time to the database?) const auto it_node = r_nodes_array.find(it_dof->Id()); const double normal_y = it_node->FastGetSolutionStepValue(NORMAL_Y); const TDataType normal_dof_value = dof_value * normal_y; const TDataType normal_dof_incr = dof_incr * normal_y; normal_lm_solution_norm += std::pow(normal_dof_value, 2); normal_lm_increase_norm += std::pow(normal_dof_incr, 2); if (it_node->Is(SLIP) || mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP)) { tangent_lm_slip_solution_norm += std::pow(dof_value - normal_dof_value, 2); tangent_lm_slip_increase_norm += std::pow(dof_incr - normal_dof_incr, 2); ++lm_slip_dof_num; } else { tangent_lm_stick_solution_norm += std::pow(dof_value - normal_dof_value, 2); tangent_lm_stick_increase_norm += std::pow(dof_incr - normal_dof_incr, 2); ++lm_stick_dof_num; } lm_dof_num++; } else if (curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) { // The normal of the node (TODO: how to solve this without accesing all the time to the database?) 
const auto it_node = r_nodes_array.find(it_dof->Id()); const double normal_z = it_node->FastGetSolutionStepValue(NORMAL_Z); const TDataType normal_dof_value = dof_value * normal_z; const TDataType normal_dof_incr = dof_incr * normal_z; normal_lm_solution_norm += std::pow(normal_dof_value, 2); normal_lm_increase_norm += std::pow(normal_dof_incr, 2); if (it_node->Is(SLIP) || mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP)) { tangent_lm_slip_solution_norm += std::pow(dof_value - normal_dof_value, 2); tangent_lm_slip_increase_norm += std::pow(dof_incr - normal_dof_incr, 2); ++lm_slip_dof_num; } else { tangent_lm_stick_solution_norm += std::pow(dof_value - normal_dof_value, 2); tangent_lm_stick_increase_norm += std::pow(dof_incr - normal_dof_incr, 2); ++lm_stick_dof_num; } lm_dof_num++; } else { disp_solution_norm += dof_value * dof_value; disp_increase_norm += dof_incr * dof_incr; disp_dof_num++; } } } if(disp_increase_norm == 0.0) disp_increase_norm = 1.0; if(normal_lm_increase_norm == 0.0) normal_lm_increase_norm = 1.0; if(tangent_lm_stick_increase_norm == 0.0) tangent_lm_stick_increase_norm = 1.0; if(tangent_lm_slip_increase_norm == 0.0) tangent_lm_slip_increase_norm = 1.0; if(disp_solution_norm == 0.0) disp_solution_norm = 1.0; KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierFrictionalContactCriteria::ENSURE_CONTACT) && normal_lm_solution_norm == 0.0) << "WARNING::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" 
<< std::endl; const TDataType disp_ratio = std::sqrt(disp_increase_norm/disp_solution_norm); const TDataType normal_lm_ratio = std::sqrt(normal_lm_increase_norm/normal_lm_solution_norm); const TDataType tangent_lm_stick_ratio = std::sqrt(tangent_lm_stick_increase_norm/tangent_lm_stick_solution_norm); const TDataType tangent_lm_slip_ratio = std::sqrt(tangent_lm_slip_increase_norm/tangent_lm_slip_solution_norm); const TDataType disp_abs = std::sqrt(disp_increase_norm)/ static_cast<TDataType>(disp_dof_num); const TDataType normal_lm_abs = std::sqrt(normal_lm_increase_norm)/ static_cast<TDataType>(lm_dof_num); const TDataType tangent_lm_stick_abs = lm_stick_dof_num > 0 ? std::sqrt(tangent_lm_stick_increase_norm)/ static_cast<TDataType>(lm_stick_dof_num) : 0.0; const TDataType tangent_lm_slip_abs = lm_slip_dof_num > 0 ? std::sqrt(tangent_lm_slip_increase_norm)/ static_cast<TDataType>(lm_slip_dof_num) : 0.0; const TDataType normal_tangent_stick_ratio = tangent_lm_stick_abs/normal_lm_abs; const TDataType normal_tangent_slip_ratio = tangent_lm_slip_abs/normal_lm_abs; // We print the results // TODO: Replace for the new log if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { std::cout.precision(4); TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& Table = p_table->GetTable(); Table << disp_ratio << mDispRatioTolerance << disp_abs << mDispAbsTolerance << normal_lm_ratio << mLMNormalRatioTolerance << normal_lm_abs << mLMNormalAbsTolerance << tangent_lm_stick_ratio << mLMTangentRatioTolerance << tangent_lm_stick_abs << mLMTangentAbsTolerance << tangent_lm_slip_ratio << mLMTangentRatioTolerance << tangent_lm_slip_abs << mLMTangentAbsTolerance; } else { std::cout.precision(4); if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT)) { KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT("DoF ONVERGENCE CHECK") << "\tSTEP: " << 
r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT(" NORMAL LAGRANGE MUL:\tRATIO = ") << normal_lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMNormalRatioTolerance << BOLDFONT(" ABS = ") << normal_lm_abs << BOLDFONT(" EXP.ABS = ") << mLMNormalAbsTolerance << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT(" STICK LAGRANGE MUL:\tRATIO = ") << tangent_lm_stick_ratio << BOLDFONT(" EXP.RATIO = ") << mLMTangentRatioTolerance << BOLDFONT(" ABS = ") << tangent_lm_stick_abs << BOLDFONT(" EXP.ABS = ") << mLMTangentAbsTolerance << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT(" SLIP LAGRANGE MUL:\tRATIO = ") << tangent_lm_slip_ratio << BOLDFONT(" EXP.RATIO = ") << mLMTangentRatioTolerance << BOLDFONT(" ABS = ") << tangent_lm_slip_abs << BOLDFONT(" EXP.ABS = ") << mLMTangentAbsTolerance << std::endl; } else { KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << "DoF ONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << "\tDISPLACEMENT: RATIO = " << disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << " NORMAL LAGRANGE MUL:\tRATIO = " << normal_lm_ratio << " EXP.RATIO = " << mLMNormalRatioTolerance << " ABS = " << normal_lm_abs << " EXP.ABS = " << mLMNormalAbsTolerance << 
std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << " STICK LAGRANGE MUL:\tRATIO = " << tangent_lm_stick_ratio << " EXP.RATIO = " << mLMTangentRatioTolerance << " ABS = " << tangent_lm_stick_abs << " EXP.ABS = " << mLMTangentAbsTolerance << std::endl; KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << " SLIP LAGRANGE MUL:\tRATIO = " << tangent_lm_slip_ratio << " EXP.RATIO = " << mLMTangentRatioTolerance << " ABS = " << tangent_lm_slip_abs << " EXP.ABS = " << mLMTangentAbsTolerance << std::endl; } } } // We check if converged const bool disp_converged = (disp_ratio <= mDispRatioTolerance || disp_abs <= mDispAbsTolerance); const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::ENSURE_CONTACT) && normal_lm_solution_norm == 0.0) ? true : (normal_lm_ratio <= mLMNormalRatioTolerance || normal_lm_abs <= mLMNormalAbsTolerance) && (tangent_lm_stick_ratio <= mLMTangentRatioTolerance || tangent_lm_stick_abs <= mLMTangentAbsTolerance || normal_tangent_stick_ratio <= mNormalTangentRatio) && (tangent_lm_slip_ratio <= mLMTangentRatioTolerance || tangent_lm_slip_abs <= mLMTangentAbsTolerance || normal_tangent_slip_ratio <= mNormalTangentRatio); if (disp_converged && lm_converged) { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT)) r_table << BOLDFONT(FGRN(" Achieved")); else r_table << "Achieved"; } else { if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT)) KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl; else 
KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << "\tDoF convergence is achieved" << std::endl; } } return true; } else { if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) { if (r_process_info.Has(TABLE_UTILITY)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT)) r_table << BOLDFONT(FRED(" Not achieved")); else r_table << "Not achieved"; } else { if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PRINTING_OUTPUT)) KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << BOLDFONT("\tDoF") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl; else KRATOS_INFO("DisplacementLagrangeMultiplierFrictionalContactCriteria") << "\tDoF convergence is not achieved" << std::endl; } } return false; } } else // In this case all the displacements are imposed! return true; } /** * This function initialize the convergence criteria * @param rModelPart Reference to the ModelPart containing the contact problem. (unused) */ void Initialize( ModelPart& rModelPart ) override { BaseType::mConvergenceCriteriaIsInitialized = true; ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::TABLE_IS_INITIALIZED)) { TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY]; auto& r_table = p_table->GetTable(); r_table.AddColumn("DP RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); r_table.AddColumn("N.LM RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); if (mOptions.IsNot(DisplacementLagrangeMultiplierFrictionalContactCriteria::PURE_SLIP)) { r_table.AddColumn("STI. RATIO", 10); r_table.AddColumn("EXP. 
RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); } r_table.AddColumn("SLIP RATIO", 10); r_table.AddColumn("EXP. RAT", 10); r_table.AddColumn("ABS", 10); r_table.AddColumn("EXP. ABS", 10); r_table.AddColumn("CONVERGENCE", 15); mOptions.Set(DisplacementLagrangeMultiplierFrictionalContactCriteria::TABLE_IS_INITIALIZED, true); } } /** * @brief This function finalizes the non-linear iteration * @param rModelPart Reference to the ModelPart containing the problem. * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual + reactions) */ void FinalizeNonLinearIteration( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { // Calling base criteria BaseType::FinalizeNonLinearIteration(rModelPart, rDofSet, rA, rDx, rb); // The current process info ProcessInfo& r_process_info = rModelPart.GetProcessInfo(); r_process_info.SetValue(ACTIVE_SET_COMPUTED, false); } ///@} ///@name Operations ///@{ ///@} ///@name Acces ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ Flags mOptions; /// Local flags TDataType mDispRatioTolerance; /// The ratio threshold for the norm of the displacement TDataType mDispAbsTolerance; /// The absolute value threshold for the norm of the displacement TDataType mLMNormalRatioTolerance; /// The ratio threshold for the norm of the LM (normal) 
TDataType mLMNormalAbsTolerance; /// The absolute value threshold for the norm of the LM (normal) TDataType mLMTangentRatioTolerance; /// The ratio threshold for the norm of the LM (tangent) TDataType mLMTangentAbsTolerance; /// The absolute value threshold for the norm of the LM (tangent) TDataType mNormalTangentRatio; /// The ratio to accept a non converged tangent component in case ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@} ///@name Serialization ///@{ ///@name Private Inquiry ///@{ ///@} ///@name Unaccessible methods ///@{ ///@} }; // Kratos DisplacementLagrangeMultiplierFrictionalContactCriteria ///@name Local flags creation ///@{ /// Local Flags template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::NOT_ENSURE_CONTACT(Kratos::Flags::Create(0, false)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::NOT_PRINTING_OUTPUT(Kratos::Flags::Create(1, false)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::NOT_TABLE_IS_INITIALIZED(Kratos::Flags::Create(2, false)); template<class TSparseSpace, class 
TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::PURE_SLIP(Kratos::Flags::Create(3)); template<class TSparseSpace, class TDenseSpace> const Kratos::Flags DisplacementLagrangeMultiplierFrictionalContactCriteria<TSparseSpace, TDenseSpace>::NOT_PURE_SLIP(Kratos::Flags::Create(3, false)); } #endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_FRICTIONAL_CONTACT_CRITERIA_H */
convolution_pack8to4_int8.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Naive (im2col-free) int8 convolution using MIPS MSA intrinsics.
// Layout contract (from the pointer arithmetic below): bottom_blob holds 8
// interleaved int8 input channels per spatial element (pack8), top_blob holds
// 4 interleaved int32 output channels per spatial element (pack4), and
// weight_data_int8.channel(p) stores, for output-channel group p, maxk * channels
// blocks of 32 int8 weights (4 output lanes x 8 input lanes) consumed 32 bytes
// per kernel tap ("kptr += 32").
// NOTE(review): padding/stride validity (i * stride_h staying inside m) is the
// caller's responsibility — nothing here bounds-checks.
static void convolution_pack8to4_int8_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_int8, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets
    // space_ofs[k] is the flattened spatial offset (in elements, not bytes) of
    // kernel tap k relative to the window's top-left element, accounting for
    // dilation; "gap" jumps to the next kernel row within the input image.
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        int* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // One int32 accumulator vector per output lane; each holds 4
                // partial sums that are reduced by the transpose+add below.
                v4i32 _sum0 = __msa_fill_w(0);
                v4i32 _sum1 = __msa_fill_w(0);
                v4i32 _sum2 = __msa_fill_w(0);
                v4i32 _sum3 = __msa_fill_w(0);

                const signed char* kptr = weight_data_int8.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    // 8 int8 values (pack8) per spatial element, hence "* 8".
                    const signed char* sptr = m.row<signed char>(i * stride_h) + j * stride_w * 8;

                    for (int k = 0; k < maxk; k++)
                    {
                        v16i8 _val = __msa_ld_b(sptr + space_ofs[k] * 8, 0);
                        // __msa_clti_s_b(x, 0) yields all-ones for negative
                        // bytes; interleaving it above the low bytes
                        // sign-extends the 8 input values to int16.
                        v8i16 _val16 = (v8i16)__msa_ilvr_b(__msa_clti_s_b(_val, 0), _val);

                        // 32 weights = 4 output lanes x 8 input lanes,
                        // sign-extended to int16 the same way.
                        v16i8 _w01 = __msa_ld_b(kptr, 0);
                        v16i8 _w23 = __msa_ld_b(kptr + 16, 0);
                        v16i8 _extw01 = __msa_clti_s_b(_w01, 0);
                        v16i8 _extw23 = __msa_clti_s_b(_w23, 0);
                        v8i16 _w0 = (v8i16)__msa_ilvr_b(_extw01, _w01);
                        v8i16 _w1 = (v8i16)__msa_ilvl_b(_extw01, _w01);
                        v8i16 _w2 = (v8i16)__msa_ilvr_b(_extw23, _w23);
                        v8i16 _w3 = (v8i16)__msa_ilvl_b(_extw23, _w23);

                        // int16 x int16 products, then pairwise horizontal add
                        // widens to int32 and accumulates.
                        v8i16 _s0 = __msa_mulv_h(_val16, _w0);
                        v8i16 _s1 = __msa_mulv_h(_val16, _w1);
                        v8i16 _s2 = __msa_mulv_h(_val16, _w2);
                        v8i16 _s3 = __msa_mulv_h(_val16, _w3);

                        _sum0 = __msa_addv_w(_sum0, __msa_hadd_s_w(_s0, _s0));
                        _sum1 = __msa_addv_w(_sum1, __msa_hadd_s_w(_s1, _s1));
                        _sum2 = __msa_addv_w(_sum2, __msa_hadd_s_w(_s2, _s2));
                        _sum3 = __msa_addv_w(_sum3, __msa_hadd_s_w(_s3, _s3));

                        kptr += 32;
                    }
                }

                // transpose 4x4
                // After the transpose, adding the four vectors completes the
                // horizontal reduction, leaving one int32 per output lane.
                {
                    v4i32 _tmp0, _tmp1, _tmp2, _tmp3;
                    _tmp0 = __msa_ilvr_w(_sum1, _sum0);
                    _tmp1 = __msa_ilvr_w(_sum3, _sum2);
                    _tmp2 = __msa_ilvl_w(_sum1, _sum0);
                    _tmp3 = __msa_ilvl_w(_sum3, _sum2);
                    _sum0 = (v4i32)__msa_ilvr_d((v2i64)_tmp1, (v2i64)_tmp0);
                    _sum1 = (v4i32)__msa_ilvl_d((v2i64)_tmp1, (v2i64)_tmp0);
                    _sum2 = (v4i32)__msa_ilvr_d((v2i64)_tmp3, (v2i64)_tmp2);
                    _sum3 = (v4i32)__msa_ilvl_d((v2i64)_tmp3, (v2i64)_tmp2);
                }

                _sum0 = __msa_addv_w(_sum0, _sum1);
                _sum2 = __msa_addv_w(_sum2, _sum3);
                _sum0 = __msa_addv_w(_sum0, _sum2);

                // 4 int32 outputs (pack4) per spatial element, hence "j * 4".
                __msa_st_w(_sum0, outptr + j * 4, 0);
            }

            outptr += outw * 4;
        }
    }
}
brisched.h
#pragma omp parallel for for (long j = GZ / TILEJ; j < (N + GZ) / TILEJ; ++j) for (long i = GZ / TILEI; i < (N + GZ) / TILEI; ++i)
Psatd.h
#pragma once
#include "Constants.h"
#include "FieldSolver.h"
#include "Grid.h"
#include "Vectors.h"
#include "PmlPsatd.h"
//#include <chrono>
#include <omp.h>

namespace pfc {

    // PSATD (Pseudo-Spectral Analytical Time-Domain) Maxwell solvers operating
    // on the Fourier image of the grid fields. Two variants are defined here:
    //   * PSATDTimeStraggeredT — B is advanced in two half-steps around the E
    //     update (time-staggered scheme; "Straggered" spelling is kept as it is
    //     part of the public identifiers).
    //   * PSATDT               — E and B are advanced together (updateEB).
    // The template flag ifPoisson selects a specialization that projects out
    // the longitudinal part of E so that k . E = 0 holds exactly.
    template <bool ifPoisson>
    class PSATDTimeStraggeredT : public SpectralFieldSolver<PSATDTimeStraggeredGridType>
    {
    public:
        PSATDTimeStraggeredT(PSATDTimeStraggeredGrid * grid, FP dt);

        void updateFields();

        void updateHalfB();
        void updateE();

        void setPML(int sizePMLx, int sizePMLy, int sizePMLz);

        void setTimeStep(FP dt);

        void convertFieldsPoissonEquation();

        // Copy of the spectral current from the previous step (see saveJ);
        // updateHalfB uses J - prevJ.
        ScalarField<complexFP> tmpJx, tmpJy, tmpJz;

        // PSATD is unconditionally stable, so any dt is accepted.
        bool ifCourantConditionSatisfied(FP dt) {
            return true;
        }

    protected:

        PmlSpectral<GridTypes::PSATDTimeStraggeredGridType>* getPml() {
            return (PmlSpectral<GridTypes::PSATDTimeStraggeredGridType>*)pml.get();
        }

        void saveJ();
        void assignJ(ScalarField<complexFP>& J, ScalarField<complexFP>& tmpJ);

    };

    // Base is constructed with time shifts (E, B, J) = (0, dt/2, dt/2):
    // B and J are staggered half a step ahead of E.
    template <bool ifPoisson>
    inline PSATDTimeStraggeredT<ifPoisson>::PSATDTimeStraggeredT(PSATDTimeStraggeredGrid* _grid, FP dt) :
        SpectralFieldSolver<GridTypes::PSATDTimeStraggeredGridType>(_grid, dt, 0.0, 0.5*dt, 0.5*dt),
        tmpJx(complexGrid->sizeStorage), tmpJy(complexGrid->sizeStorage), tmpJz(complexGrid->sizeStorage)
    {
        updateDims();
        updateInternalDims();
    }

    template <bool ifPoisson>
    inline void PSATDTimeStraggeredT<ifPoisson>::setPML(int sizePMLx, int sizePMLy, int sizePMLz)
    {
        pml.reset(new PmlPsatdTimeStraggered(this, Int3(sizePMLx, sizePMLy, sizePMLz)));
        updateInternalDims();
    }

    // Changing dt re-creates the PML (its coefficients depend on dt) while
    // keeping the previously configured PML size.
    template <bool ifPoisson>
    inline void PSATDTimeStraggeredT<ifPoisson>::setTimeStep(FP dt)
    {
        this->dt = dt;
        this->timeShiftB = 0.5*dt;
        this->timeShiftJ = 0.5*dt;
        if (pml.get()) pml.reset(new PmlPsatdTimeStraggered(this, pml->sizePML));
    }

    // Element-wise copy of one spectral current component into its snapshot.
    template <bool ifPoisson>
    inline void PSATDTimeStraggeredT<ifPoisson>::assignJ(ScalarField<complexFP>& J, ScalarField<complexFP>& tmpJ)
    {
        const complexFP * const ptrJ = J.getData();
        complexFP * const ptrTmpJ = tmpJ.getData();
        const int n = J.getSize().volume();
        OMP_FOR()
        for (int i = 0; i < n; i++)
            ptrTmpJ[i] = ptrJ[i];
    }

    template <bool ifPoisson>
    inline void PSATDTimeStraggeredT<ifPoisson>::saveJ()
    {
        assignJ(complexGrid->Jx, tmpJx);
        assignJ(complexGrid->Jy, tmpJy);
        assignJ(complexGrid->Jz, tmpJz);
    }

    // One full step: FFT to k-space, half-B / E / half-B leapfrog (with PML
    // split-field updates interleaved when a PML is present), snapshot J,
    // FFT back, then the PML's real-space second stage.
    template <bool ifPoisson>
    inline void PSATDTimeStraggeredT<ifPoisson>::updateFields()
    {
        doFourierTransform(fourier_transform::Direction::RtoC);

        if (pml.get()) getPml()->updateBSplit();
        updateHalfB();

        if (pml.get()) getPml()->updateESplit();
        updateE();

        if (pml.get()) getPml()->updateBSplit();
        updateHalfB();

        saveJ();
        doFourierTransform(fourier_transform::Direction::CtoR);

        if (pml.get()) getPml()->doSecondStep();

        globalTime += dt;
    }

    // Removes the longitudinal (curl-free) part of E in k-space:
    // E <- E - k (k . E) with k normalized; the k = 0 mode is left unchanged.
    template <bool ifPoisson>
    inline void PSATDTimeStraggeredT<ifPoisson>::convertFieldsPoissonEquation() {
        doFourierTransform(fourier_transform::Direction::RtoC);
        const Int3 begin = updateComplexBAreaBegin;
        const Int3 end = updateComplexBAreaEnd;
        // NOTE(review): this local dt (shadowing the member) is computed but
        // never used in the loop below — confirm it is leftover.
        double dt = this->dt * 0.5;
        OMP_FOR_COLLAPSE()
        for (int i = begin.x; i < end.x; i++)
            for (int j = begin.y; j < end.y; j++)
            {
                //#pragma omp simd
                for (int k = begin.z; k < end.z; k++)
                {
                    FP3 K = getWaveVector(Int3(i, j, k));
                    FP normK = K.norm();
                    if (normK == 0) {
                        continue;
                    }
                    K = K / normK;

                    ComplexFP3 E(complexGrid->Ex(i, j, k), complexGrid->Ey(i, j, k), complexGrid->Ez(i, j, k));

                    ComplexFP3 El = (ComplexFP3)K * dot((ComplexFP3)K, E);

                    complexGrid->Ex(i, j, k) -= El.x;
                    complexGrid->Ey(i, j, k) -= El.y;
                    complexGrid->Ez(i, j, k) -= El.z;
                }
            }
        doFourierTransform(fourier_transform::Direction::CtoR);
    }

    // Half-step update of B in k-space:
    // B += -2i sin(|k|c dt/2) (k x E) + i (1 - cos(|k|c dt/2)) / (|k|c) (k x (J - Jprev)).
    // NOTE(review): the local dt is already this->dt/2, and sin/cos take
    // another factor 0.5 — i.e. a quarter of the full step; confirm against
    // the staggered-scheme derivation.
    template <bool ifPoisson>
    inline void PSATDTimeStraggeredT<ifPoisson>::updateHalfB()
    {
        const Int3 begin = updateComplexBAreaBegin;
        const Int3 end = updateComplexBAreaEnd;
        double dt = 0.5 * this->dt;
        OMP_FOR_COLLAPSE()
        for (int i = begin.x; i < end.x; i++)
            for (int j = begin.y; j < end.y; j++)
            {
                //#pragma omp simd
                for (int k = begin.z; k < end.z; k++)
                {
                    FP3 K = getWaveVector(Int3(i, j, k));
                    FP normK = K.norm();
                    if (normK == 0) {
                        continue;
                    }
                    K = K / normK;

                    ComplexFP3 E(complexGrid->Ex(i, j, k), complexGrid->Ey(i, j, k), complexGrid->Ez(i, j, k));
                    ComplexFP3 J(complexGrid->Jx(i, j, k), complexGrid->Jy(i, j, k), complexGrid->Jz(i, j, k)),
                        prevJ(tmpJx(i, j, k), tmpJy(i, j, k), tmpJz(i, j, k));

                    ComplexFP3 crossKE = cross((ComplexFP3)K, E);
                    ComplexFP3 crossKJ = cross((ComplexFP3)K, J - prevJ);

                    FP S = sin(normK*constants::c*dt*0.5), C = cos(normK*constants::c*dt*0.5);

                    complexFP coeff1 = 2 * complexFP::i()*S, coeff2 = complexFP::i() * ((1 - C) / (normK*constants::c));

                    complexGrid->Bx(i, j, k) += -coeff1 * crossKE.x + coeff2 * crossKJ.x;
                    complexGrid->By(i, j, k) += -coeff1 * crossKE.y + coeff2 * crossKJ.y;
                    complexGrid->Bz(i, j, k) += -coeff1 * crossKE.z + coeff2 * crossKJ.z;
                }
            }
    }

    // Full-step update of E in k-space; the k = 0 mode reduces to E += dt J.
    template <bool ifPoisson>
    inline void PSATDTimeStraggeredT<ifPoisson>::updateE()
    {
        const Int3 begin = updateComplexEAreaBegin;
        const Int3 end = updateComplexEAreaEnd;
        OMP_FOR_COLLAPSE()
        for (int i = begin.x; i < end.x; i++)
            for (int j = begin.y; j < end.y; j++)
            {
                //#pragma omp simd
                for (int k = begin.z; k < end.z; k++)
                {
                    FP3 K = getWaveVector(Int3(i, j, k));
                    FP normK = K.norm();
                    if (normK == 0) {
                        complexGrid->Ex(i, j, k) += dt * complexGrid->Jx(i, j, k);
                        complexGrid->Ey(i, j, k) += dt * complexGrid->Jy(i, j, k);
                        complexGrid->Ez(i, j, k) += dt * complexGrid->Jz(i, j, k);
                        continue;
                    }
                    K = K / normK;

                    ComplexFP3 B(complexGrid->Bx(i, j, k), complexGrid->By(i, j, k), complexGrid->Bz(i, j, k));
                    ComplexFP3 J(complexGrid->Jx(i, j, k), complexGrid->Jy(i, j, k), complexGrid->Jz(i, j, k));

                    ComplexFP3 crossKB = cross((ComplexFP3)K, B);
                    ComplexFP3 Jl = (ComplexFP3)K * dot((ComplexFP3)K, J);

                    FP S = sin(normK*constants::c*dt*0.5);

                    complexFP coeff1 = 2 * complexFP::i()*S, coeff2 = 2 * S / (normK*constants::c),
                        coeff3 = coeff2 - dt;

                    complexGrid->Ex(i, j, k) += coeff1 * crossKB.x - coeff2 * J.x + coeff3 * Jl.x;
                    complexGrid->Ey(i, j, k) += coeff1 * crossKB.y - coeff2 * J.y + coeff3 * Jl.y;
                    complexGrid->Ez(i, j, k) += coeff1 * crossKB.z - coeff2 * J.z + coeff3 * Jl.z;
                }
            }
    }

    // provides k \cdot E = 0 always (k \cdot J = 0 too)
    // Poisson specialization: additionally subtracts the longitudinal parts
    // El and Jl so the updated E stays transverse.
    template <>
    inline void PSATDTimeStraggeredT<true>::updateE()
    {
        const Int3 begin = updateComplexEAreaBegin;
        const Int3 end = updateComplexEAreaEnd;
        OMP_FOR_COLLAPSE()
        for (int i = begin.x; i < end.x; i++)
            for (int j = begin.y; j < end.y; j++)
            {
                //#pragma omp simd
                for (int k = begin.z; k < end.z; k++)
                {
                    FP3 K = getWaveVector(Int3(i, j, k));
                    FP normK = K.norm();
                    if (normK == 0) {
                        complexGrid->Ex(i, j, k) += dt * complexGrid->Jx(i, j, k);
                        complexGrid->Ey(i, j, k) += dt * complexGrid->Jy(i, j, k);
                        complexGrid->Ez(i, j, k) += dt * complexGrid->Jz(i, j, k);
                        continue;
                    }
                    K = K / normK;

                    ComplexFP3 E(complexGrid->Ex(i, j, k), complexGrid->Ey(i, j, k), complexGrid->Ez(i, j, k));
                    ComplexFP3 B(complexGrid->Bx(i, j, k), complexGrid->By(i, j, k), complexGrid->Bz(i, j, k));
                    ComplexFP3 J(complexGrid->Jx(i, j, k), complexGrid->Jy(i, j, k), complexGrid->Jz(i, j, k));

                    ComplexFP3 crossKB = cross((ComplexFP3)K, B);
                    ComplexFP3 El = (ComplexFP3)K * dot((ComplexFP3)K, E);
                    ComplexFP3 Jl = (ComplexFP3)K * dot((ComplexFP3)K, J);

                    FP S = sin(normK*constants::c*dt*0.5);

                    complexFP coeff1 = 2 * complexFP::i()*S, coeff2 = 2 * S / (normK*constants::c),
                        coeff3 = coeff2 - dt;

                    complexGrid->Ex(i, j, k) += -El.x + coeff1 * crossKB.x - coeff2 * (J.x - Jl.x);
                    complexGrid->Ey(i, j, k) += -El.y + coeff1 * crossKB.y - coeff2 * (J.y - Jl.y);
                    complexGrid->Ez(i, j, k) += -El.z + coeff1 * crossKB.z - coeff2 * (J.z - Jl.z);
                }
            }
    }

    // Non-staggered PSATD: E and B are advanced together by updateEB, called
    // twice per step (half-step each) around the PML split updates.
    template <bool ifPoisson>
    class PSATDT : public SpectralFieldSolver<PSATDGridType>
    {

    public:
        PSATDT(PSATDGrid* grid, FP dt);

        void updateFields();

        virtual void updateEB();

        void setPML(int sizePMLx, int sizePMLy, int sizePMLz);

        void setTimeStep(FP dt);

        void convertFieldsPoissonEquation();

        // PSATD is unconditionally stable, so any dt is accepted.
        bool ifCourantConditionSatisfied(FP dt) {
            return true;
        }

    private:

        PmlSpectralTimeStraggered<GridTypes::PSATDGridType>* getPml() {
            return (PmlSpectralTimeStraggered<GridTypes::PSATDGridType>*)pml.get();
        }

    };

    // Base is constructed with time shifts (E, B, J) = (0, 0, dt/2):
    // only J is staggered half a step.
    template <bool ifPoisson>
    inline PSATDT<ifPoisson>::PSATDT(PSATDGrid* _grid, FP dt) :
        SpectralFieldSolver<GridTypes::PSATDGridType>(_grid, dt, 0.0, 0.0, 0.5*dt)
    {
        updateDims();
        updateInternalDims();
    }

    template <bool ifPoisson>
    inline void PSATDT<ifPoisson>::setPML(int sizePMLx, int sizePMLy, int sizePMLz)
    {
        pml.reset(new PmlPsatd(this, Int3(sizePMLx, sizePMLy, sizePMLz)));
        updateInternalDims();
    }

    // Changing dt re-creates the PML (its coefficients depend on dt) while
    // keeping the previously configured PML size.
    template <bool ifPoisson>
    inline void PSATDT<ifPoisson>::setTimeStep(FP dt)
    {
        this->dt = dt;
        this->timeShiftJ = 0.5*dt;
        if (pml.get()) pml.reset(new PmlPsatd(this, pml->sizePML));
    }

    template <bool ifPoisson>
    inline void PSATDT<ifPoisson>::updateFields()
    {
        // commented-out chrono instrumentation kept for ad-hoc profiling
        // std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now();
        doFourierTransform(fourier_transform::Direction::RtoC);
        //std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now();
        //std::chrono::milliseconds timeRtoC = std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1);

        //std::chrono::steady_clock::time_point t3 = std::chrono::steady_clock::now();
        if (pml.get()) getPml()->updateBSplit();
        updateEB();
        if (pml.get()) getPml()->updateESplit();
        updateEB();
        if (pml.get()) getPml()->updateBSplit();
        //std::chrono::steady_clock::time_point t4 = std::chrono::steady_clock::now();
        //std::chrono::milliseconds timeSolver = std::chrono::duration_cast<std::chrono::milliseconds>(t4 - t3);

        //std::chrono::steady_clock::time_point t5 = std::chrono::steady_clock::now();
        doFourierTransform(fourier_transform::Direction::CtoR);
        //std::chrono::steady_clock::time_point t6 = std::chrono::steady_clock::now();
        //std::chrono::milliseconds timeCtoR = std::chrono::duration_cast<std::chrono::milliseconds>(t6 - t5);

        if (pml.get()) getPml()->doSecondStep();

        globalTime += dt;

        //std::string strRtoC = "Time RtoC: " + std::to_string(timeRtoC.count()) + "\n";
        //std::string strSolver = "Time PSATDT: " + std::to_string(timeSolver.count()) + "\n";
        //std::string strCtoR = "Time CtoR: " + std::to_string(timeCtoR.count()) + "\n";
        //std::cout << strRtoC << strSolver << strCtoR << std::endl;
    }

    // Removes the longitudinal (curl-free) part of E in k-space:
    // E <- E - k (k . E) with k normalized; the k = 0 mode is left unchanged.
    template <bool ifPoisson>
    inline void PSATDT<ifPoisson>::convertFieldsPoissonEquation() {
        doFourierTransform(fourier_transform::Direction::RtoC);
        const Int3 begin = updateComplexBAreaBegin;
        const Int3 end = updateComplexBAreaEnd;
        // NOTE(review): this local dt (shadowing the member) is computed but
        // never used in the loop below — confirm it is leftover.
        double dt = this->dt *0.5;
        OMP_FOR_COLLAPSE()
        for (int i = begin.x; i < end.x; i++)
            for (int j = begin.y; j < end.y; j++)
            {
                //#pragma omp simd
                for (int k = begin.z; k < end.z; k++)
                {
                    FP3 K = getWaveVector(Int3(i, j, k));
                    FP normK = K.norm();
                    if (normK == 0) {
                        continue;
                    }
                    K = K / normK;

                    ComplexFP3 E(complexGrid->Ex(i, j, k), complexGrid->Ey(i, j, k), complexGrid->Ez(i, j, k));

                    ComplexFP3 El = (ComplexFP3)K * dot((ComplexFP3)K, E);

                    complexGrid->Ex(i, j, k) -= El.x;
                    complexGrid->Ey(i, j, k) -= El.y;
                    complexGrid->Ez(i, j, k) -= El.z;
                }
            }
        doFourierTransform(fourier_transform::Direction::CtoR);
    }

    // Half-step combined rotation of (E, B) in k-space with source J
    // (scaled by 4*pi, Gaussian units); the k = 0 mode reduces to E -= J.
    template <bool ifPoisson>
    inline void PSATDT<ifPoisson>::updateEB()
    {
        const Int3 begin = updateComplexBAreaBegin;
        const Int3 end = updateComplexBAreaEnd;
        double dt = 0.5 * this->dt;
        OMP_FOR_COLLAPSE()
        for (int i = begin.x; i < end.x; i++)
            for (int j = begin.y; j < end.y; j++)
            {
                //#pragma omp simd
                for (int k = begin.z; k < end.z; k++)
                {
                    FP3 K = getWaveVector(Int3(i, j, k));
                    FP normK = K.norm();

                    ComplexFP3 E(complexGrid->Ex(i, j, k), complexGrid->Ey(i, j, k), complexGrid->Ez(i, j, k));
                    ComplexFP3 B(complexGrid->Bx(i, j, k), complexGrid->By(i, j, k), complexGrid->Bz(i, j, k));
                    ComplexFP3 J(complexGrid->Jx(i, j, k), complexGrid->Jy(i, j, k), complexGrid->Jz(i, j, k));
                    J = complexFP(4 * constants::pi) * J;

                    if (normK == 0) {
                        complexGrid->Ex(i, j, k) += -J.x;
                        complexGrid->Ey(i, j, k) += -J.y;
                        complexGrid->Ez(i, j, k) += -J.z;
                        continue;
                    }
                    K = K / normK;

                    ComplexFP3 kEcross = cross((ComplexFP3)K, E), kBcross = cross((ComplexFP3)K, B),
                        kJcross = cross((ComplexFP3)K, J);
                    ComplexFP3 Jl = (ComplexFP3)K * dot((ComplexFP3)K, J), El = (ComplexFP3)K * dot((ComplexFP3)K, E);

                    FP S = sin(normK*constants::c*dt), C = cos(normK*constants::c*dt);

                    complexFP coef1E = S * complexFP::i(), coef2E = -S / (normK*constants::c),
                        coef3E = S / (normK*constants::c) - dt;

                    complexGrid->Ex(i, j, k) = C * E.x + coef1E * kBcross.x + (1 - C) * El.x + coef2E * J.x + coef3E * Jl.x;
                    complexGrid->Ey(i, j, k) = C * E.y + coef1E * kBcross.y + (1 - C) * El.y + coef2E * J.y + coef3E * Jl.y;
                    complexGrid->Ez(i, j, k) = C * E.z + coef1E * kBcross.z + (1 - C) * El.z + coef2E * J.z + coef3E * Jl.z;

                    complexFP coef1B = -S * complexFP::i(), coef2B = ((1 - C) / (normK*constants::c))*complexFP::i();

                    complexGrid->Bx(i, j, k) = C * B.x + coef1B * kEcross.x + coef2B * kJcross.x;
                    complexGrid->By(i, j, k) = C * B.y + coef1B * kEcross.y + coef2B * kJcross.y;
                    complexGrid->Bz(i, j, k) = C * B.z + coef1B * kEcross.z + coef2B * kJcross.z;
                }
            }
    }

    // provides k \cdot E = 0 always (k \cdot J = 0 too)
    // Poisson specialization: E is rotated with its longitudinal part (and
    // the longitudinal part of J) removed, keeping E transverse.
    template <>
    inline void PSATDT<true>::updateEB()
    {
        const Int3 begin = updateComplexBAreaBegin;
        const Int3 end = updateComplexBAreaEnd;
        double dt = 0.5 * this->dt;
        OMP_FOR_COLLAPSE()
        for (int i = begin.x; i < end.x; i++)
            for (int j = begin.y; j < end.y; j++)
            {
                //#pragma omp simd
                for (int k = begin.z; k < end.z; k++)
                {
                    FP3 K = getWaveVector(Int3(i, j, k));
                    FP normK = K.norm();

                    ComplexFP3 E(complexGrid->Ex(i, j, k), complexGrid->Ey(i, j, k), complexGrid->Ez(i, j, k));
                    ComplexFP3 B(complexGrid->Bx(i, j, k), complexGrid->By(i, j, k), complexGrid->Bz(i, j, k));
                    ComplexFP3 J(complexGrid->Jx(i, j, k), complexGrid->Jy(i, j, k), complexGrid->Jz(i, j, k));
                    J = complexFP(4 * constants::pi) * J;

                    if (normK == 0) {
                        complexGrid->Ex(i, j, k) += -J.x;
                        complexGrid->Ey(i, j, k) += -J.y;
                        complexGrid->Ez(i, j, k) += -J.z;
                        continue;
                    }
                    K = K / normK;

                    ComplexFP3 kEcross = cross((ComplexFP3)K, E), kBcross = cross((ComplexFP3)K, B),
                        kJcross = cross((ComplexFP3)K, J);
                    ComplexFP3 Jl = (ComplexFP3)K * dot((ComplexFP3)K, J), El = (ComplexFP3)K * dot((ComplexFP3)K, E);

                    FP S = sin(normK*constants::c*dt), C = cos(normK*constants::c*dt);

                    complexFP coef1E = S * complexFP::i(), coef2E = -S / (normK*constants::c),
                        coef3E = S / (normK*constants::c) - dt;

                    complexGrid->Ex(i, j, k) = C * (E.x - El.x) + coef1E * kBcross.x + coef2E * (J.x - Jl.x);
                    complexGrid->Ey(i, j, k) = C * (E.y - El.y) + coef1E * kBcross.y + coef2E * (J.y - Jl.y);
                    complexGrid->Ez(i, j, k) = C * (E.z - El.z) + coef1E * kBcross.z + coef2E * (J.z - Jl.z);

                    complexFP coef1B = -S * complexFP::i(), coef2B = ((1 - C) / (normK*constants::c))*complexFP::i();

                    complexGrid->Bx(i, j, k) = C * B.x + coef1B * kEcross.x + coef2B * kJcross.x;
                    complexGrid->By(i, j, k) = C * B.y + coef1B * kEcross.y + coef2B * kJcross.y;
                    complexGrid->Bz(i, j, k) = C * B.z + coef1B * kEcross.z + coef2B * kJcross.z;
                }
            }
    }

    // Convenience aliases selecting the Poisson-projected / plain variants.
    typedef PSATDT<true> PSATDPoisson;
    typedef PSATDT<false> PSATD;
    typedef PSATDTimeStraggeredT<true> PSATDTimeStraggeredPoisson;
    typedef PSATDTimeStraggeredT<false> PSATDTimeStraggered;

}
GB_unaryop__abs_fp32_int16.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_fp32_int16
// op(A') function:  GB_tran__abs_fp32_int16

// C type:   float
// A type:   int16_t
// cast:     float cij = (float) aij
// unaryop:  cij = fabsf (aij)

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = fabsf (x) ;

// casting
#define GB_CASTING(z, x) \
    float z = (float) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_FP32 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies Cx [p] = fabsf ((float) Ax [p]) for all anz entries, in parallel.
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE, so the
// caller falls back to the generic (non-specialized) apply.
GrB_Info GB_unop__abs_fp32_int16
(
    float *restrict Cx,
    const int16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Phase 2 of the sliced transpose: the actual work is in the shared template
// GB_unaryop_transpose.c, specialized here via the macros above.
GrB_Info GB_tran__abs_fp32_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__first_int64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__first_int64)
// A.*B function (eWiseMult):       GB (_AemultB_08__first_int64)
// A.*B function (eWiseMult):       GB (_AemultB_02__first_int64)
// A.*B function (eWiseMult):       GB (_AemultB_04__first_int64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__first_int64)
// A*D function (colscale):         GB (_AxD__first_int64)
// D*A function (rowscale):         GB (_DxB__first_int64)
// C+=B function (dense accum):     GB (_Cdense_accumB__first_int64)
// C+=b function (dense accum):     GB (_Cdense_accumb__first_int64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__first_int64)
// C=scalar+B                       GB ((none))
// C=scalar+B'                      GB ((none))
// C=A+scalar                       GB ((none))
// C=A'+scalar                      GB ((none))

// C type:   int64_t
// A type:   int64_t
// A pattern? 0
// B type:   int64_t
// B pattern? 1

// BinaryOp: cij = aij
// FIRST ignores its second operand entirely, so B is "pattern only" and all
// the scalar-binding and flipped variants are compiled out as GB ((none)).

#define GB_ATYPE \
    int64_t

#define GB_BTYPE \
    int64_t

#define GB_CTYPE \
    int64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    ;

// true if values of B are not used
#define GB_B_IS_PATTERN \
    1 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = x ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_FIRST || GxB_NO_INT64 || GxB_NO_FIRST_INT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// FIRST is not in that list, so this variant is compiled out.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__first_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// Body compiled out (#if 0): C += B with accum == FIRST leaves C unchanged,
// so this is a no-op that just returns success.
GrB_Info GB (_Cdense_accumB__first_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// Body compiled out (#if 0): same reasoning as above for a scalar b.
GrB_Info GB (_Cdense_accumb__first_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__first_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__first_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__first_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces declared here are released by GB_FREE_WORKSPACE below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int64_t alpha_scalar ;
    int64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__first_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__first_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__first_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__first_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Compiled out: FIRST(x,bij) = x for every entry, so bind1st is pointless.

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t   x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        ;
        ;
        Cx [p] = x ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Compiled out: FIRST(aij,y) = aij, a plain copy, handled generically.

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t   y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int64_t aij = GBX (Ax, p, false) ;
        Cx [p] = aij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{                          \
    ;                      \
    ;                      \
    Cx [pC] = x ;          \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = aij ;                             \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
omp_parallel_copyin.c
<ompts:test>
<ompts:testdescription>Test which checks the omp parallel copyin directive.</ompts:testdescription>
<ompts:ompversion>3.0</ompts:ompversion>
<ompts:directive>omp parallel copyin</ompts:directive>
<ompts:dependences>omp critical,omp threadprivate</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include <stdlib.h>
#include "omp_testsuite.h"

/* Thread-private accumulator.  copyin(sum1) must broadcast the master
   thread's value (set to 7 below) into every thread's private copy on
   entry to the parallel region; 789 is a deliberately "wrong" initial
   value that would be seen if copyin were not honored. */
static int sum1 = 789;
#pragma omp threadprivate(sum1)

int <ompts:testcode:functionname>omp_parallel_copyin</ompts:testcode:functionname>(FILE * logFile)
{
	<ompts:orphan:vars>
	/* sum: shared reduction target; num_threads: threads that contributed */
	int sum, num_threads;
	</ompts:orphan:vars>
	int known_sum;

	sum = 0;
	sum1 = 7;	/* master's value, to be propagated by copyin */
	num_threads = 0;

#pragma omp parallel <ompts:check>copyin(sum1)</ompts:check>
	{
		/*printf("sum1=%d\n",sum1);*/
		<ompts:orphan>
		int i;
		/* Each thread adds its share of 1..999 onto its private sum1,
		   which starts at 7 iff copyin worked. */
#pragma omp for
		for (i = 1; i < 1000; i++)
		{
			sum1 = sum1 + i;
		} /*end of for*/
		/* Serialize merging the per-thread partial sums and counting
		   the participating threads. */
#pragma omp critical
		{
			sum = sum + sum1;
			num_threads++;
		} /*end of critical*/
		</ompts:orphan>
	} /* end of parallel*/

	/* Expected: sum of 1..999 once in total (omp for splits the range),
	   plus the copied-in 7 contributed once per thread. */
	known_sum = (999 * 1000) / 2 + 7 * num_threads;
	return (known_sum == sum);
}
</ompts:testcode>
</ompts:test>
zcposv.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @precisions mixed zc -> ds * **/ #include "plasma.h" #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_tuning.h" #include "plasma_types.h" #include "core_lapack.h" #include <math.h> #include <omp.h> #include <stdbool.h> /***************************************************************************//** * * @ingroup plasma_posv * * Computes the solution to a system of linear equations A * X = B, where A is * an n-by-n Hermitian positive definite matrix and X and B are n-by-nrhs matrices. * * plasma_zcposv first factorizes the matrix using plasma_cpotrf and uses * this factorization within an iterative refinement procedure to produce a * solution with COMPLEX*16 normwise backward error quality (see below). If * the approach fails the method falls back to a COMPLEX*16 factorization and * solve. * * The iterative refinement is not going to be a winning strategy if * the ratio COMPLEX performance over COMPLEX*16 performance is too * small. A reasonable strategy should take the number of right-hand * sides and the size of the matrix into account. This might be done * with a call to ILAENV in the future. Up to now, we always try * iterative refinement. * * The iterative refinement process is stopped if iter > itermax or * for all the RHS we have: Rnorm < sqrt(n)*Xnorm*Anorm*eps, where: * * - iter is the number of the current iteration in the iterative refinement * process * - Rnorm is the Infinity-norm of the residual * - Xnorm is the Infinity-norm of the solution * - Anorm is the Infinity-operator-norm of the matrix A * - eps is the machine epsilon returned by DLAMCH('Epsilon'). * The values itermax is fixed to 30. 
* ******************************************************************************* * * @param[in] uplo * Specifies whether the matrix A is upper or lower triangular: * - PlasmaUpper: Upper triangle of A is stored; * - PlasmaLower: Lower triangle of A is stored. * * @param[in] n * The number of linear equations, i.e., the order of the matrix A. * n >= 0. * * @param[in] nrhs * The number of right hand sides, i.e., the number of columns of the * matrix B. nrhs >= 0. * * @param[in,out] pA * The n-by-n Hermitian positive definite coefficient matrix A. * If uplo = PlasmaUpper, the leading n-by-n upper triangular part of * A contains the upper triangular part of the matrix A, and the * strictly lower triangular part of A is not referenced. * If uplo = PlasmaLower, the leading n-by-n lower triangular part of * A contains the lower triangular part of the matrix A, and the * strictly upper triangular part of A is not referenced. * On exit, contains the lower Cholesky factor matrix L, * if uplo == PlasmaLower and upper Cholesky factor conj(L^T), * otherwise. * * @param[in] lda * The leading dimension of the array A. lda >= max(1,n). * * @param[in] pB * The n-by-nrhs matrix of right hand side matrix B. * This matrix remains unchanged. * * @param[in] ldb * The leading dimension of the array B. ldb >= max(1,n). * * @param[out] pX * If return value = 0, the n-by-nrhs solution matrix X. * * @param[in] ldx * The leading dimension of the array X. ldx >= max(1,n). * * @param[out] iter * The number of the iterations in the iterative refinement * process, needed for the convergence. If failed, it is set * to be -(1+itermax), where itermax = 30. 
* ******************************************************************************* * * @retval PlasmaSuccess successful exit * ******************************************************************************* * * @sa plasma_omp_zcposv * @sa plasma_dsposv * @sa plasma_zposv * ******************************************************************************/ int plasma_zcposv(plasma_enum_t uplo, int n, int nrhs, plasma_complex64_t *pA, int lda, plasma_complex64_t *pB, int ldb, plasma_complex64_t *pX, int ldx, int *iter) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); return PlasmaErrorNotInitialized; } // Check input arguments. if (uplo != PlasmaUpper && uplo != PlasmaLower) { plasma_error("illegal value of uplo"); return -1; } if (n < 0) { plasma_error("illegal value of n"); return -2; } if (nrhs < 0) { plasma_error("illegal value of nrhs"); return -3; } if (lda < imax(1, n)) { plasma_error("illegal value of lda"); return -5; } if (ldb < imax(1, n)) { plasma_error("illegal value of ldb"); return -7; } if (ldx < imax(1, n)) { plasma_error("illegal value of ldx"); return -9; } // quick return *iter = 0; if (imin(n, nrhs) == 0) return PlasmaSuccess; // Tune parameters. if (plasma->tuning) plasma_tune_potrf(plasma, PlasmaComplexFloat, n); // Set tiling parameters. int nb = plasma->nb; // Create tile matrices. 
plasma_desc_t A; plasma_desc_t B; plasma_desc_t X; int retval; retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, n, n, 0, 0, n, n, &A); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); return retval; } retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, n, nrhs, 0, 0, n, nrhs, &B); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&A); return retval; } retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, n, nrhs, 0, 0, n, nrhs, &X); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&A); plasma_desc_destroy(&B); return retval; } // Create additional tile matrices. plasma_desc_t R, As, Xs; retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, B.m, B.n, 0, 0, B.m, B.n, &R); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&A); plasma_desc_destroy(&B); plasma_desc_destroy(&X); return retval; } retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb, A.m, A.n, 0, 0, A.m, A.n, &As); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&A); plasma_desc_destroy(&B); plasma_desc_destroy(&X); plasma_desc_destroy(&R); return retval; } retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb, X.m, X.n, 0, 0, X.m, X.n, &Xs); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&A); plasma_desc_destroy(&B); plasma_desc_destroy(&X); plasma_desc_destroy(&R); plasma_desc_destroy(&As); return retval; } // Allocate tiled workspace for Infinity norm calculations. 
// Allocate workspace for the infinity-norm computations: large enough for
// both the operator norm of A and the columnwise amax of X and R.
// NOTE(review): these mallocs (and the sequence/request init retvals below)
// are not checked for failure — confirm against the other plasma_* drivers.
size_t lwork = imax((size_t)A.nt*A.n+A.n, (size_t)X.mt*X.n+(size_t)R.mt*R.n);
double *work = (double*)malloc(((size_t)lwork)*sizeof(double));
double *Rnorm = (double*)malloc(((size_t)R.n)*sizeof(double));
double *Xnorm = (double*)malloc(((size_t)X.n)*sizeof(double));

// Initialize sequence.
plasma_sequence_t sequence;
retval = plasma_sequence_init(&sequence);

// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);

// asynchronous block: all tile operations are submitted as tasks from the
// master thread and execute under the enclosing parallel region.
#pragma omp parallel
#pragma omp master
{
    // Translate matrices to tile layout.
    plasma_omp_zge2desc(pA, lda, A, &sequence, &request);
    plasma_omp_zge2desc(pB, ldb, B, &sequence, &request);

    // Call tile async function.
    plasma_omp_zcposv(uplo, A, B, X, As, Xs, R,
                      work, Rnorm, Xnorm, iter, &sequence, &request);

    // Translate back to LAPACK layout.
    plasma_omp_zdesc2ge(X, pX, ldx, &sequence, &request);
}
// implicit synchronization

// Free matrices in tile layout.
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
plasma_desc_destroy(&X);
plasma_desc_destroy(&R);
plasma_desc_destroy(&As);
plasma_desc_destroy(&Xs);

free(work);
free(Rnorm);
free(Xnorm);

// Return status recorded by the task pipeline (first failure wins).
int status = sequence.status;
return status;
}

// Convergence test: true iff every column i satisfies
// Rnorm[i] <= Xnorm[i] * cte (cte = Anorm * eps * sqrt(n), set by caller).
static bool conv(double *Rnorm, double *Xnorm, int n, double cte)
{
    bool value = true;
    for (int i = 0; i < n; i++) {
        if (Rnorm[i] > Xnorm[i] * cte) {
            value = false;
            break;
        }
    }
    return value;
}

/***************************************************************************//**
 *
 * @ingroup plasma_posv
 *
 * Solves a Hermitian positive definite system using iterative refinement
 * with the Cholesky factor computed using plasma_cpotrf.
 * Non-blocking tile version of plasma_zcposv().
 * Operates on matrices stored by tiles.
 * All matrices are passed through descriptors.
 * All dimensions are taken from the descriptors.
 * Allows for pipelining of operations at runtime.
* ******************************************************************************* * * @param[in] uplo * Specifies whether the matrix A is upper or lower triangular: * - PlasmaUpper: Upper triangle of A is stored; * - PlasmaLower: Lower triangle of A is stored. * * @param[in] A * Descriptor of matrix A. * * @param[in] B * Descriptor of matrix B. * * @param[in,out] X * Descriptor of matrix X. * * @param[out] As * Descriptor of auxiliary matrix A in single complex precision. * * @param[out] Xs * Descriptor of auxiliary matrix X in single complex precision. * * @param[out] R * Descriptor of auxiliary remainder matrix R. * * @param[out] work * Workspace needed to compute infinity norm of the matrix A. * * @param[out] Rnorm * Workspace needed to store the max value in each of resudual vectors. * * @param[out] Xnorm * Workspace needed to store the max value in each of currenct solution * vectors. * * @param[out] iter * The number of the iterations in the iterative refinement * process, needed for the convergence. If failed, it is set * to be -(1+itermax), where itermax = 30. * * @param[in] sequence * Identifies the sequence of function calls that this call belongs to * (for completion checks and exception handling purposes). * * @param[out] request * Identifies this function call (for exception handling purposes). * * @retval void * Errors are returned by setting sequence->status and * request->status to error values. The sequence->status and * request->status should never be set to PLASMA_SUCCESS (the * initial values) since another async call may be setting a * failure value at the same time. 
* ******************************************************************************* * * @sa plasma_zcposv * @sa plasma_omp_dsposv * @sa plasma_omp_zposv * ******************************************************************************/ void plasma_omp_zcposv(plasma_enum_t uplo, plasma_desc_t A, plasma_desc_t B, plasma_desc_t X, plasma_desc_t As, plasma_desc_t Xs, plasma_desc_t R, double *work, double *Rnorm, double *Xnorm, int *iter, plasma_sequence_t *sequence, plasma_request_t *request) { const int itermax = 30; const plasma_complex64_t zmone = -1.0; const plasma_complex64_t zone = 1.0; *iter = 0; // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // Check input arguments. if (uplo != PlasmaUpper && uplo != PlasmaLower) { plasma_error("illegal value of uplo"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(A) != PlasmaSuccess) { plasma_error("invalid A"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(B) != PlasmaSuccess) { plasma_error("invalid B"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(X) != PlasmaSuccess) { plasma_error("invalid X"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(As) != PlasmaSuccess) { plasma_error("invalid As"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(Xs) != PlasmaSuccess) { plasma_error("invalid Xs"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(R) != PlasmaSuccess) { plasma_error("invalid R"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (sequence == NULL) { plasma_error("NULL sequence"); plasma_request_fail(sequence, request, 
PlasmaErrorIllegalValue); return; } if (request == NULL) { plasma_error("NULL request"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // quick return if (A.n == 0 || B.n == 0) return; // workspace for dzamax double *workX = work; double *workR = &work[X.mt*X.n]; // Compute some constants. double cte; double eps = LAPACKE_dlamch_work('E'); double Anorm; plasma_pzlanhe(PlasmaInfNorm, uplo, A, work, &Anorm, sequence, request); // Convert B from double to single precision, store result in Xs. plasma_pzlag2c(B, Xs, sequence, request); // Convert A from double to single precision, store result in As. // TODO: need zlat2c plasma_pzlag2c(A, As, sequence, request); // Compute the Cholesky factorization of As. plasma_pcpotrf(uplo, As, sequence, request); // Solve the system As * Xs = Bs. plasma_pctrsm(PlasmaLeft, uplo, uplo == PlasmaUpper ? PlasmaConjTrans : PlasmaNoTrans, PlasmaNonUnit, 1.0, As, Xs, sequence, request); plasma_pctrsm(PlasmaLeft, uplo, uplo == PlasmaUpper ? PlasmaNoTrans : PlasmaConjTrans, PlasmaNonUnit, 1.0, As, Xs, sequence, request); // Convert Xs to double precision. plasma_pclag2z(Xs, X, sequence, request); // Compute R = B - A * X. plasma_pzlacpy(PlasmaGeneral, PlasmaNoTrans, B, R, sequence, request); plasma_pzhemm(PlasmaLeft, uplo, zmone, A, X, zone, R, sequence, request); // Check whether the nrhs normwise backward error satisfies the // stopping criterion. If yes, set iter=0 and return. plasma_pdzamax(PlasmaColumnwise, X, workX, Xnorm, sequence, request); plasma_pdzamax(PlasmaColumnwise, R, workR, Rnorm, sequence, request); #pragma omp taskwait { cte = Anorm * eps * sqrt((double)A.n); if (conv(Rnorm, Xnorm, R.n, cte)) { *iter = 0; return; } } // iterative refinement for (int iiter = 0; iiter < itermax; iiter++) { // Convert R from double to single precision, store result in Xs. plasma_pzlag2c(R, Xs, sequence, request); // Solve the system As * Xs = Rs. plasma_pctrsm(PlasmaLeft, uplo, uplo == PlasmaUpper ? 
PlasmaConjTrans : PlasmaNoTrans, PlasmaNonUnit, 1.0, As, Xs, sequence, request); plasma_pctrsm(PlasmaLeft, uplo, uplo == PlasmaUpper ? PlasmaNoTrans : PlasmaConjTrans, PlasmaNonUnit, 1.0, As, Xs, sequence, request); // Convert Xs back to double precision and update the current iterate. plasma_pclag2z(Xs, R, sequence, request); plasma_pzgeadd(PlasmaNoTrans, zone, R, zone, X, sequence, request); // Compute R = B - A * X. plasma_pzlacpy(PlasmaGeneral, PlasmaNoTrans, B, R, sequence, request); plasma_pzhemm(PlasmaLeft, uplo, zmone, A, X, zone, R, sequence, request); // Check whether nrhs normwise backward error satisfies the // stopping criterion. If yes, set iter = iiter > 0 and return. plasma_pdzamax(PlasmaColumnwise, X, workX, Xnorm, sequence, request); plasma_pdzamax(PlasmaColumnwise, R, workR, Rnorm, sequence, request); #pragma omp taskwait { if (conv(Rnorm, Xnorm, R.n, cte)) { *iter = iiter+1; return; } } } // If we are at this place of the code, this is because we have performed // iter = itermax iterations and never satisfied the stopping criterion, // set up the iter flag accordingly and follow up with double precision // routine. *iter = -itermax - 1; // Compute Cholesky factorization of A. plasma_pzpotrf(uplo, A, sequence, request); // Solve the system A * X = B. plasma_pzlacpy(PlasmaGeneral, PlasmaNoTrans, B, X, sequence, request); plasma_pztrsm(PlasmaLeft, uplo, uplo == PlasmaUpper ? PlasmaConjTrans : PlasmaNoTrans, PlasmaNonUnit, 1.0, A, X, sequence, request); plasma_pztrsm(PlasmaLeft, uplo, uplo == PlasmaUpper ? PlasmaNoTrans : PlasmaConjTrans, PlasmaNonUnit, 1.0, A, X, sequence, request); }
8898.c
/*
 * 27-point stencil benchmark (host reference vs. accelerated version).
 * Compile using the command:
 * `cc 27Stencil.c -o oa -fopenmp -lm`
 */
#include <math.h>
#include <omp.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENACC
#include <openacc.h>
#endif

#define DEFAULT_DATASIZE 1048576 /* Default datasize. */
#define DEFAULT_REPS 10          /* Default repetitions. */
#define CONF95 1.96              /* 95% confidence-interval multiplier. */
#define ITERATIONS 10            /* Stencil sweeps per timed run. */
#define FAC (1./26)              /* Averaging factor: 1 / 26 neighbours. */
#define TOLERANCE 1.0e-15        /* Max allowed host/device deviation. */

extern int reps;                 /* Repetitions. */
extern double *times;            /* Array to store results in. */
extern int flag;                 /* Flag to set CPU or GPU invocation. */
extern unsigned int datasize;    /* Datasize passed to benchmark functions. */

/* -1 wraps to UINT_MAX and acts as the "unset" sentinel; init() tests it
   with the matching (unsigned int)-1 comparison. */
unsigned int datasize = -1; /* Datasize for tests in bytes. */
int reps = -1; /* Repetitions. */
double *times; /* Array of doubles storing the benchmark times in microseconds. */
double testtime; /* The average test time in microseconds for reps runs. */
double testsd; /* The standard deviation in the test time in microseconds for reps runs. */
int flag = 0; /* 0 indicates CPU. */

/*
 * Function prototypes for common functions.
 */
void init(int argc, char **argv);
void finalisetest(char *);
void finalise(void);
void benchmark(char *, double (*test)(void));
void print_results(char *, double, double);

/* Forward Declarations of utility functions*/
double max_diff(double *, double *, int);
void wul();

/* Print command-line usage and the built-in defaults, then return. */
void usage(char *argv[])
{
    printf("Usage: %s \n"
           "\t--reps <repetitions> (default %d)\n"
           "\t--datasize <datasize> (default %d bytes)\n",
           argv[0], DEFAULT_REPS, DEFAULT_DATASIZE);
}

/*
 * This function parses the parameters from the command line.
*/ void parse_args(int argc, char *argv[]) { int arg; for (arg = 1; arg < argc; arg++) { if (strcmp(argv[arg], "--reps") == 0) { reps = atoi(argv[++arg]); if (reps == 0) { printf("Invalid integer:--reps: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } else if (strcmp(argv[arg], "--datasize") == 0) { datasize = atoi(argv[++arg]); if (datasize == 0) { printf("Invalid integer:--datasize: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } else if (strcmp(argv[arg], "-h") == 0) { usage(argv); exit(EXIT_SUCCESS); } else { printf("Invalid parameters: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } } void stats(double *mtp, double *sdp) { double meantime, totaltime, sumsq, mintime, maxtime, sd; int i, good_reps; mintime = 1.0e10; maxtime = 0.; totaltime = 0.; good_reps = 0; for (i = 0; i < reps; i++) { /* Skip entries where times is 0, this indicates an error occured */ if (times[i] != 0){ mintime = (mintime < times[i]) ? mintime : times[i]; maxtime = (maxtime > times[i]) ? maxtime : times[i]; totaltime += times[i]; good_reps++; } } meantime = totaltime / good_reps; sumsq = 0; for (i = 0; i < reps; i++) { if (times[i] != 0){ sumsq += (times[i] - meantime) * (times[i] - meantime); } } sd = sqrt(sumsq / good_reps); *mtp = meantime; *sdp = sd; } /* * This function prints the results of the tests. * If you use a compiler which sets a different preprocessor flag * you may wish to add it here. */ void print_results(char *name, double testtime, double testsd) { char compiler[20]; /* Set default compiler idetifier. */ sprintf(compiler, "COMPILER"); /* Set compiler identifier based on known preprocessor flags. */ #ifdef __PGI sprintf(compiler, "PGI"); #endif #ifdef __HMPP sprintf(compiler, "CAPS"); #endif //printf("%s %s %d %f %f\n", compiler, name, datasize, testtime*1e6, CONF95*testsd*1e6); printf("%f\n", testtime*1e6); } /* * This function initialises the storage for the test results and set the defaults. 
*/ void init(int argc, char **argv) { parse_args(argc, argv); if (reps == -1) { reps = DEFAULT_REPS; } if (datasize == (unsigned int)-1) { datasize = DEFAULT_DATASIZE; } times = (double *)malloc((reps) * sizeof(double)); /* #ifdef __PGI acc_init(acc_device_nvidia); // printf("PGI INIT\n"); #endif #ifdef __HMPP int a[5] = {1,2,3,4,5}; #pragma acc data copyin(a[0:5]) {} #endif #ifdef _CRAYC int a[5] = {1,2,3,4,5}; #pragma acc data copyin(a[0:5]) {} #endif */ } void finalise(void) { free(times); } /* * This function runs the benchmark specified. */ void benchmark(char *name, double (*test)(void)) { int i = 0; double tmp = 0; for (i=0; i<reps; i++) { tmp = test(); if (tmp == -10000){ printf("Memory allocation failure in %s\n", name); times[i] = 0; } else if (tmp == -11000){ printf("CPU/GPU mismatch in %s\n", name); times[i] = 0; } else{ times[i] = tmp; } } stats(&testtime, &testsd); //printf("in benchmark\n"); print_results(name, testtime, testsd); //printf("printed result\n"); } double stencil() { extern unsigned int datasize; int sz = cbrt((datasize/sizeof(double))/2); int i, j, k, iter; int n = sz-2; double fac = FAC; double t1, t2; double md; //printf("size = %d\n", sz); /* Work buffers, with halos */ double *a0 = (double*)malloc(sizeof(double)*sz*sz*sz); double *device_result = (double*)malloc(sizeof(double)*sz*sz*sz); double *a1 = (double*)malloc(sizeof(double)*sz*sz*sz); double *host_result = (double*)malloc(sizeof(double)*sz*sz*sz); double *a0_init = (double*)malloc(sizeof(double)*sz*sz*sz); if(a0==NULL||device_result==NULL||a1==NULL||host_result==NULL||a0_init==NULL){ /* Something went wrong in the memory allocation here, fail gracefully */ return(-10000); } /* initialize input array a0 */ /* zero all of array (including halos) */ //printf("size = %d\n", sz); for (i = 0; i < sz; i++) { for (j = 0; j < sz; j++) { for (k = 0; k < sz; k++) { a0[i*sz*sz+j*sz+k] = 0.0; //printf("%d\t", (i*sz*sz+j*sz+k)); } } } //printf("\n"); //int size_of_a0 = sizeof(a0) / 
sizeof(*a0); //printf("size of a0 = %d\n", size_of_a0); /* use random numbers to fill interior */ for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { a0[i*sz*sz+j*sz+k] = (double) rand()/ (double)(1.0 + RAND_MAX); } } } /* memcpy(&a0_init[0], &a0[0], sizeof(double)*sz*sz*sz); */ /* save initial input array for later GPU run */ for (i = 0; i < sz; i++) { for (j = 0; j < sz; j++) { for (k = 0; k < sz; k++) { a0_init[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k]; } } } //printf("Host computation\n"); /* run main computation on host */ for (iter = 0; iter < ITERATIONS; iter++) { for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { a1[i*sz*sz+j*sz+k] = ( a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] + a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] + a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] + a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] + a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] + a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] + a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] + a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] + a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] + a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] + a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)] ) * fac; } } } for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k]; } } } } /* end iteration loop */ /* save result */ /* memcpy(&host_result[0], &a0[0], sizeof(double)*sz*sz*sz); */ for (i = 0; i < sz; i++) { for (j = 0; j < sz; j++) { for (k = 0; k < sz; k++) { host_result[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k]; // printf("%lf\t", a0[i*sz*sz+j*sz+k]); } } } //int size = sizeof(host_result)/sizeof(host_result[0]); //for(i = 0; i < size; i++) { // 
printf("%lf\t", host_result[i]); //} //printf("\n"); /* copy initial array back to a0 */ /* memcpy(&a0[0], &a0_init[0], sizeof(double)*sz*sz*sz); */ for (i = 0; i < sz; i++) { for (j = 0; j < sz; j++) { for (k = 0; k < sz; k++) { a0[i*sz*sz+j*sz+k] = a0_init[i*sz*sz+j*sz+k]; } } } //printf("Starting acc pragma code\n"); t1 = omp_get_wtime(); #pragma acc data copy(a0[0:sz*sz*sz]), create(a1[0:sz*sz*sz], i,j,k,iter), copyin(sz,fac,n) { for (iter = 0; iter < ITERATIONS; iter++) { #pragma omp parallel for for (i = 1; i < n+1; i++) { #pragma omp parallel for num_threads(2) for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { a1[i*sz*sz+j*sz+k] = ( a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] + a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] + a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] + a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] + a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] + a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] + a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] + a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] + a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] + a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] + a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)] ) * fac; } } } #pragma acc parallel loop for (i = 1; i < n+1; i++) { #pragma acc loop for (j = 1; j < n+1; j++) { #pragma acc loop for (k = 1; k < n+1; k++) { a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k]; } } } } /* end iteration loop */ } /* end data region */ #pragma acc wait t2 = omp_get_wtime(); memcpy(&device_result[0], &a0[0], sizeof(double)*sz*sz*sz); md = max_diff(&host_result[0],&device_result[0], sz); /* Free malloc'd memory to prevent leaks */ free(a0); free(a0_init); free(a1); free(host_result); free(device_result); //printf("md: %lf \t tolerance: %lf", md, TOLERANCE); if 
(md < TOLERANCE ){ //printf ("GPU matches host to within tolerance of %1.1e\n\n", TOLERANCE); return(t2 - t1); } else{ // printf ("WARNING: GPU does not match to within tolerance of %1.1e\nIt is %lf\n", TOLERANCE, md); return(-11000); } } /* Utility Functions */ double max_diff(double *array1,double *array2, int sz) { double tmpdiff, diff; int i,j,k; int n = sz-2; diff=0.0; for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { tmpdiff = fabs(array1[i*sz*sz+j*sz+k] - array2[i*sz*sz+j*sz+k]); //printf("diff: %lf", tmpdiff); if (tmpdiff > diff) diff = tmpdiff; } } } return diff; } /* * This function ensures the device is awake. * It is more portable than acc_init(). */ void wul(){ int data = 8192; double *arr_a = (double *)malloc(sizeof(double) * data); double *arr_b = (double *)malloc(sizeof(double) * data); int i = 0; if (arr_a==NULL||arr_b==NULL) { printf("Unable to allocate memory in wul.\n"); } for (i=0;i<data;i++){ arr_a[i] = (double) (rand()/(1.0+RAND_MAX)); } #pragma acc data copy(arr_b[0:data]), copyin(arr_a[0:data]) { #pragma acc parallel loop for (i=0;i<data;i++){ arr_b[i] = arr_a[i] * 2; } } if (arr_a[0] < 0){ printf("Error in WUL\n"); /* * This should never be called as rands should be in the range (0,1]. * This stops clever optimizers. */ } free(arr_a); free(arr_b); } int main(int argc, char **argv) { char testName[32]; //printf("compiler name datasize testtime*1e6 CONF95*testsd*1e6\n"); /* Initialise storage for test results & parse input arguements. */ init(argc, argv); /* Ensure device is awake. */ wul(); sprintf(testName, "27S"); benchmark(testName, &stencil); /* Print results & free results storage */ finalise(); return EXIT_SUCCESS; }
c-tree.h
/* Definitions for C parsing and type checking.
   Copyright (C) 1987, 1993, 1994, 1995, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2007 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#ifndef GCC_C_TREE_H
#define GCC_C_TREE_H

#include "c-common.h"
#include "toplev.h"
#include "diagnostic.h"

/* struct lang_identifier is private to c-decl.c, but langhooks.c needs
   to know how big it is.  This is sanity-checked in c-decl.c.  */
#define C_SIZEOF_STRUCT_LANG_IDENTIFIER \
  (sizeof (struct c_common_identifier) + 3 * sizeof (void *))

/* Language-specific declaration information.  */

struct lang_decl GTY(())
{
  char dummy;
};

/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only.  */
#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE)

/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile.  */
#define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE)

/* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE
   nonzero if the definition of the type has already started.  */
#define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE)

/* In an incomplete RECORD_TYPE or UNION_TYPE, a list of variable
   declarations whose type would be completed by completing that type.  */
#define C_TYPE_INCOMPLETE_VARS(TYPE) TYPE_VFIELD (TYPE)

/* In an IDENTIFIER_NODE, nonzero if this identifier is actually a
   keyword.  C_RID_CODE (node) is then the RID_* value of the keyword,
   and C_RID_YYCODE is the token number wanted by Yacc.  */
#define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID)

/* Language-specific type information hung off RECORD_TYPE, UNION_TYPE
   and ENUMERAL_TYPE nodes by the C front end.  */
struct lang_type GTY(())
{
  /* In a RECORD_TYPE, a sorted array of the fields of the type.  */
  struct sorted_fields_type * GTY ((reorder ("resort_sorted_fields"))) s;
  /* In an ENUMERAL_TYPE, the min and max values.  */
  tree enum_min;
  tree enum_max;
  /* In a RECORD_TYPE, information specific to Objective-C, such
     as a list of adopted protocols or a pointer to a corresponding
     @interface.  See objc/objc-act.h for details.  */
  tree objc_info;
};

/* Record whether a type or decl was written with nonconstant size.
   Note that TYPE_SIZE may have simplified to a constant.  */
#define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE)
#define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE)

/* Record whether a typedef for type `int' was actually `signed int'.  */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)

/* For a FUNCTION_DECL, nonzero if it was defined without an explicit
   return type.  */
#define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP)

/* For a FUNCTION_DECL, nonzero if it was an implicit declaration.  */
#define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP)

/* For FUNCTION_DECLs, evaluates true if the decl is built-in but has
   been declared.  */
#define C_DECL_DECLARED_BUILTIN(EXP) \
  DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP))

/* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a
   built-in prototype and does not have a non-built-in prototype.  */
#define C_DECL_BUILTIN_PROTOTYPE(EXP) \
  DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP))

/* Record whether a decl was declared register.  This is strictly a
   front-end flag, whereas DECL_REGISTER is used for code generation;
   they may differ for structures with volatile fields.  */
#define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP)

/* Record whether a decl was used in an expression anywhere except an
   unevaluated operand of sizeof / typeof / alignof.  This is only
   used for functions declared static but not defined, though outside
   sizeof and typeof it is set for other function decls as well.  */
#define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP))

/* Record whether a label was defined in a statement expression which
   has finished and so can no longer be jumped to.  */
#define C_DECL_UNJUMPABLE_STMT_EXPR(EXP) \
  DECL_LANG_FLAG_6 (LABEL_DECL_CHECK (EXP))

/* Record whether a label was the subject of a goto from outside the
   current level of statement expression nesting and so cannot be
   defined right now.  */
#define C_DECL_UNDEFINABLE_STMT_EXPR(EXP) \
  DECL_LANG_FLAG_7 (LABEL_DECL_CHECK (EXP))

/* Record whether a label was defined in the scope of an identifier
   with variably modified type which has finished and so can no longer
   be jumped to.  */
#define C_DECL_UNJUMPABLE_VM(EXP) \
  DECL_LANG_FLAG_3 (LABEL_DECL_CHECK (EXP))

/* Record whether a label was the subject of a goto from outside the
   current level of scopes of identifiers with variably modified type
   and so cannot be defined right now.  */
#define C_DECL_UNDEFINABLE_VM(EXP) \
  DECL_LANG_FLAG_5 (LABEL_DECL_CHECK (EXP))

/* Record whether a variable has been declared threadprivate by
   #pragma omp threadprivate.  */
#define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL))

/* Nonzero for a decl which either doesn't exist or isn't a prototype.
   N.B. Could be simplified if all built-in decls had complete prototypes
   (but this is presently difficult because some of them need FILE*).  */
#define C_DECL_ISNT_PROTOTYPE(EXP) \
  (EXP == 0 \
   || (TYPE_ARG_TYPES (TREE_TYPE (EXP)) == 0 \
       && !DECL_BUILT_IN (EXP)))

/* For FUNCTION_TYPE, a hidden list of types of arguments.  The same as
   TYPE_ARG_TYPES for functions with prototypes, but created for functions
   without prototypes.  */
#define TYPE_ACTUAL_ARG_TYPES(NODE) TYPE_LANG_SLOT_1 (NODE)

/* Record parser information about an expression that is irrelevant
   for code generation alongside a tree representing its value.  */
struct c_expr
{
  /* The value of the expression.  */
  tree value;
  /* Record the original binary operator of an expression, which may
     have been changed by fold, STRING_CST for unparenthesized string
     constants, or ERROR_MARK for other expressions (including
     parenthesized expressions).  */
  enum tree_code original_code;
};

/* A kind of type specifier.  Note that this information is currently
   only used to distinguish tag definitions, tag references and typeof
   uses.  */
enum c_typespec_kind {
  /* A reserved keyword type specifier.  */
  ctsk_resword,
  /* A reference to a tag, previously declared, such as "struct foo".
     This includes where the previous declaration was as a different
     kind of tag, in which case this is only valid if shadowing that
     tag in an inner scope.  */
  ctsk_tagref,
  /* A reference to a tag, not previously declared in a visible
     scope.  */
  ctsk_tagfirstref,
  /* A definition of a tag such as "struct foo { int a; }".  */
  ctsk_tagdef,
  /* A typedef name.  */
  ctsk_typedef,
  /* An ObjC-specific kind of type specifier.  */
  ctsk_objc,
  /* A typeof specifier.  */
  ctsk_typeof
};

/* A type specifier: this structure is created in the parser and
   passed to declspecs_add_type only.  */
struct c_typespec {
  /* What kind of type specifier this is.  */
  enum c_typespec_kind kind;
  /* The specifier itself.  */
  tree spec;
};

/* A storage class specifier.  */
enum c_storage_class {
  csc_none,
  csc_auto,
  csc_extern,
  csc_register,
  csc_static,
  csc_typedef
};

/* A type specifier keyword "void", "_Bool", "char", "int", "float",
   "double", "_Decimal32", "_Decimal64", "_Decimal128", "_Fract", "_Accum",
   or none of these.  */
enum c_typespec_keyword {
  cts_none,
  cts_void,
  cts_bool,
  cts_char,
  cts_int,
  cts_float,
  cts_double,
  cts_dfloat32,
  cts_dfloat64,
  cts_dfloat128,
  cts_fract,
  cts_accum
};

/* A sequence of declaration specifiers in C.  */
struct c_declspecs {
  /* The type specified, if a single type specifier such as a struct,
     union or enum specifier, typedef name or typeof specifies the
     whole type, or NULL_TREE if none or a keyword such as "void" or
     "char" is used.  Does not include qualifiers.  */
  tree type;
  /* The attributes from a typedef decl.  */
  tree decl_attr;
  /* When parsing, the attributes.  Outside the parser, this will be
     NULL; attributes (possibly from multiple lists) will be passed
     separately.  */
  tree attrs;
  /* Any type specifier keyword used such as "int", not reflecting
     modifiers such as "short", or cts_none if none.  */
  enum c_typespec_keyword typespec_word;
  /* The storage class specifier, or csc_none if none.  */
  enum c_storage_class storage_class;
  /* Whether any declaration specifiers have been seen at all.  */
  BOOL_BITFIELD declspecs_seen_p : 1;
  /* Whether a type specifier has been seen.  */
  BOOL_BITFIELD type_seen_p : 1;
  /* Whether something other than a storage class specifier or
     attribute has been seen.  This is used to warn for the
     obsolescent usage of storage class specifiers other than at the
     start of the list.  (Doing this properly would require function
     specifiers to be handled separately from storage class
     specifiers.)  */
  BOOL_BITFIELD non_sc_seen_p : 1;
  /* Whether the type is specified by a typedef or typeof name.  */
  BOOL_BITFIELD typedef_p : 1;
  /* Whether a struct, union or enum type either had its content
     defined by a type specifier in the list or was the first visible
     declaration of its tag.  */
  BOOL_BITFIELD tag_defined_p : 1;
  /* Whether the type is explicitly "signed" or specified by a typedef
     whose type is explicitly "signed".  */
  BOOL_BITFIELD explicit_signed_p : 1;
  /* Whether the specifiers include a deprecated typedef.  */
  BOOL_BITFIELD deprecated_p : 1;
  /* Whether the type defaulted to "int" because there were no type
     specifiers.  (NOTE: declared without an explicit ": 1" width,
     unlike the surrounding flags.)  */
  BOOL_BITFIELD default_int_p;
  /* Whether "long" was specified.  */
  BOOL_BITFIELD long_p : 1;
  /* Whether "long" was specified more than once.  */
  BOOL_BITFIELD long_long_p : 1;
  /* Whether "short" was specified.  */
  BOOL_BITFIELD short_p : 1;
  /* Whether "signed" was specified.  */
  BOOL_BITFIELD signed_p : 1;
  /* Whether "unsigned" was specified.  */
  BOOL_BITFIELD unsigned_p : 1;
  /* Whether "complex" was specified.  */
  BOOL_BITFIELD complex_p : 1;
  /* Whether "inline" was specified.  */
  BOOL_BITFIELD inline_p : 1;
  /* Whether "__thread" was specified.  */
  BOOL_BITFIELD thread_p : 1;
  /* Whether "const" was specified.  */
  BOOL_BITFIELD const_p : 1;
  /* Whether "volatile" was specified.  */
  BOOL_BITFIELD volatile_p : 1;
  /* Whether "restrict" was specified.  */
  BOOL_BITFIELD restrict_p : 1;
  /* Whether "_Sat" was specified.  */
  BOOL_BITFIELD saturating_p : 1;
};

/* The various kinds of declarators in C.  */
enum c_declarator_kind {
  /* An identifier.  */
  cdk_id,
  /* A function.  */
  cdk_function,
  /* An array.  */
  cdk_array,
  /* A pointer.  */
  cdk_pointer,
  /* Parenthesized declarator with nested attributes.  */
  cdk_attrs
};

/* Information about the parameters in a function declarator.  */
struct c_arg_info {
  /* A list of parameter decls.  */
  tree parms;
  /* A list of structure, union and enum tags defined.  */
  tree tags;
  /* A list of argument types to go in the FUNCTION_TYPE.  */
  tree types;
  /* A list of non-parameter decls (notably enumeration constants)
     defined with the parameters.  */
  tree others;
  /* A list of VLA sizes from the parameters.  In a function
     definition, these are used to ensure that side-effects in sizes
     of arrays converted to pointers (such as a parameter int i[n++])
     take place; otherwise, they are ignored.  */
  tree pending_sizes;
  /* True when these arguments had [*].  */
  BOOL_BITFIELD had_vla_unspec : 1;
};

/* A declarator.  */
struct c_declarator {
  /* The kind of declarator.  */
  enum c_declarator_kind kind;
  /* Except for cdk_id, the contained declarator.  For cdk_id, NULL.  */
  struct c_declarator *declarator;
  location_t id_loc; /* Currently only set for cdk_id. */
  union {
    /* For identifiers, an IDENTIFIER_NODE or NULL_TREE if an abstract
       declarator.  */
    tree id;
    /* For functions.  */
    struct c_arg_info *arg_info;
    /* For arrays.  */
    struct {
      /* The array dimension, or NULL for [] and [*].  */
      tree dimen;
      /* The qualifiers inside [].  */
      int quals;
      /* The attributes (currently ignored) inside [].  */
      tree attrs;
      /* Whether [static] was used.  */
      BOOL_BITFIELD static_p : 1;
      /* Whether [*] was used.  */
      BOOL_BITFIELD vla_unspec_p : 1;
    } array;
    /* For pointers, the qualifiers on the pointer type.  */
    int pointer_quals;
    /* For attributes.  */
    tree attrs;
  } u;
};

/* A type name.  */
struct c_type_name {
  /* The declaration specifiers.  */
  struct c_declspecs *specs;
  /* The declarator.  */
  struct c_declarator *declarator;
};

/* A parameter.  */
struct c_parm {
  /* The declaration specifiers, minus any prefix attributes.  */
  struct c_declspecs *specs;
  /* The attributes.  */
  tree attrs;
  /* The declarator.  */
  struct c_declarator *declarator;
};

/* Save and restore the variables in this file and elsewhere
   that keep track of the progress of compilation of the current function.
   Used for nested functions.  */
struct language_function GTY(())
{
  struct c_language_function base;
  tree x_break_label;
  tree x_cont_label;
  struct c_switch * GTY((skip)) x_switch_stack;
  struct c_arg_info * GTY((skip)) arg_info;
  int returns_value;
  int returns_null;
  int returns_abnormally;
  int warn_about_return_type;
};

/* Save lists of labels used or defined in particular contexts.
   Allocated on the parser obstack.  */
struct c_label_list
{
  /* The label at the head of the list.  */
  tree label;
  /* The rest of the list.  */
  struct c_label_list *next;
};

/* Statement expression context.  */
struct c_label_context_se
{
  /* The labels defined at this level of nesting.  */
  struct c_label_list *labels_def;
  /* The labels used at this level of nesting.  */
  struct c_label_list *labels_used;
  /* The next outermost context.  */
  struct c_label_context_se *next;
};

/* Context of variably modified declarations.  */
struct c_label_context_vm
{
  /* The labels defined at this level of nesting.  */
  struct c_label_list *labels_def;
  /* The labels used at this level of nesting.  */
  struct c_label_list *labels_used;
  /* The scope of this context.  Multiple contexts may be at the same
     numbered scope, since each variably modified declaration starts a
     new context.  */
  unsigned scope;
  /* The next outermost context.  */
  struct c_label_context_vm *next;
};

/* Used when parsing an enum.  Initialized by start_enum.  */
struct c_enum_contents
{
  /* While defining an enum type, this is 1 plus the last enumerator
     constant value.  */
  tree enum_next_value;
  /* Nonzero means that there was overflow computing enum_next_value.  */
  int enum_overflow;
};

/* in c-parser.c */
extern void c_parse_init (void);

/* in c-aux-info.c */
extern void gen_aux_info_record (tree, int, int, int);

/* in c-decl.c */
extern struct obstack parser_obstack;
extern tree c_break_label;
extern tree c_cont_label;

extern int global_bindings_p (void);
extern void push_scope (void);
extern tree pop_scope (void);
extern void insert_block (tree);
extern void c_init_decl_processing (void);
extern void c_dup_lang_specific_decl (tree);
extern void c_print_identifier (FILE *, tree, int);
extern int quals_from_declspecs (const struct c_declspecs *);
extern struct c_declarator *build_array_declarator (tree, struct c_declspecs *,
						    bool, bool);
extern tree build_enumerator (struct c_enum_contents *, tree, tree);
extern tree check_for_loop_decls (void);
extern void mark_forward_parm_decls (void);
extern void declare_parm_level (void);
extern void undeclared_variable (tree, location_t);
extern tree declare_label (tree);
extern tree define_label (location_t, tree);
extern void c_maybe_initialize_eh (void);
extern void finish_decl (tree, tree, tree);
extern tree finish_enum (tree, tree, tree);
extern void finish_function (void);
extern tree finish_struct (tree, tree, tree);
extern struct c_arg_info *get_parm_info (bool);
extern tree grokfield (struct c_declarator *, struct c_declspecs *, tree,
		       tree *);
extern tree groktypename (struct c_type_name *);
extern tree grokparm (const struct c_parm *);
extern tree implicitly_declare (tree);
extern void keep_next_level (void);
extern void pending_xref_error (void);
extern void c_push_function_context (struct function *);
extern void c_pop_function_context (struct function *);
extern void push_parm_decl (const struct c_parm *);
extern struct c_declarator *set_array_declarator_inner (struct c_declarator *,
							struct c_declarator *);
extern tree c_builtin_function (tree);
extern void shadow_tag (const struct c_declspecs *);
extern void shadow_tag_warned (const struct c_declspecs *, int);
extern tree start_enum (struct c_enum_contents *, tree);
extern int start_function (struct c_declspecs *, struct c_declarator *, tree);
extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool,
			tree);
extern tree start_struct (enum tree_code, tree);
extern void store_parm_decls (void);
extern void store_parm_decls_from (struct c_arg_info *);
extern tree xref_tag (enum tree_code, tree);
extern struct c_typespec parser_xref_tag (enum tree_code, tree);
extern int c_expand_decl (tree);
extern struct c_parm *build_c_parm (struct c_declspecs *, tree,
				    struct c_declarator *);
extern struct c_declarator *build_attrs_declarator (tree,
						    struct c_declarator *);
extern struct c_declarator *build_function_declarator (struct c_arg_info *,
						       struct c_declarator *);
extern struct c_declarator *build_id_declarator (tree);
extern struct c_declarator *make_pointer_declarator (struct c_declspecs *,
						     struct c_declarator *);
extern struct c_declspecs *build_null_declspecs (void);
extern struct c_declspecs *declspecs_add_qual (struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_type (struct c_declspecs *,
					       struct c_typespec);
extern struct c_declspecs *declspecs_add_scspec (struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_attrs (struct c_declspecs *, tree);
extern struct c_declspecs *finish_declspecs (struct c_declspecs *);

/* in c-objc-common.c */
extern bool c_objc_common_init (void);
extern bool c_missing_noreturn_ok_p (tree);
extern tree c_objc_common_truthvalue_conversion (tree expr);
extern bool c_warn_unused_global_decl (const_tree);
extern void c_initialize_diagnostics (diagnostic_context *);
extern bool c_vla_unspec_p (tree x, tree fn);

#define c_build_type_variant(TYPE, CONST_P, VOLATILE_P)		  \
  c_build_qualified_type ((TYPE),				  \
			  ((CONST_P) ? TYPE_QUAL_CONST : 0) |	  \
			  ((VOLATILE_P) ? TYPE_QUAL_VOLATILE : 0))

/* in c-typeck.c */
extern int in_alignof;
extern int in_sizeof;
extern int in_typeof;

extern struct c_switch *c_switch_stack;
extern struct c_label_context_se *label_context_stack_se;
extern struct c_label_context_vm *label_context_stack_vm;

extern tree require_complete_type (tree);
extern int same_translation_unit_p (const_tree, const_tree);
extern int comptypes (tree, tree);
extern bool c_vla_type_p (const_tree);
extern bool c_mark_addressable (tree);
extern void c_incomplete_type_error (const_tree, const_tree);
extern tree c_type_promotes_to (tree);
extern struct c_expr default_function_array_conversion (struct c_expr);
extern tree composite_type (tree, tree);
extern tree build_component_ref (tree, tree);
extern tree build_array_ref (tree, tree);
extern tree build_external_ref (tree, int, location_t);
extern void pop_maybe_used (bool);
extern struct c_expr c_expr_sizeof_expr (struct c_expr);
extern struct c_expr c_expr_sizeof_type (struct c_type_name *);
extern struct c_expr parser_build_unary_op (enum tree_code, struct c_expr);
extern struct c_expr parser_build_binary_op (enum tree_code, struct c_expr,
					     struct c_expr);
extern tree build_conditional_expr (tree, tree, tree);
extern tree build_compound_expr (tree, tree);
extern tree c_cast_expr (struct c_type_name *, tree);
extern tree build_c_cast (tree, tree);
extern void store_init_value (tree, tree);
extern void error_init (const char *);
extern void pedwarn_init (const char *);
extern void maybe_warn_string_init (tree, struct c_expr);
extern void start_init (tree, tree, int);
extern void finish_init (void);
extern void really_start_incremental_init (tree);
extern void push_init_level (int);
extern struct c_expr pop_init_level (int);
extern void set_init_index (tree, tree);
extern void set_init_label (tree);
extern void process_init_element (struct c_expr);
extern tree build_compound_literal (tree, tree);
extern tree c_start_case (tree);
extern void c_finish_case (tree);
extern tree build_asm_expr (tree, tree, tree, tree, bool);
extern tree build_asm_stmt (tree, tree);
extern int c_types_compatible_p (tree, tree);
extern tree c_begin_compound_stmt (bool);
extern tree c_end_compound_stmt (tree, bool);
extern void c_finish_if_stmt (location_t, tree, tree, tree, bool);
extern void c_finish_loop (location_t, tree, tree, tree, tree, tree, bool);
extern tree c_begin_stmt_expr (void);
extern tree c_finish_stmt_expr (tree);
extern tree c_process_expr_stmt (tree);
extern tree c_finish_expr_stmt (tree);
extern tree c_finish_return (tree);
extern tree c_finish_bc_stmt (tree *, bool);
extern tree c_finish_goto_label (tree);
extern tree c_finish_goto_ptr (tree);
extern void c_begin_vm_scope (unsigned int);
extern void c_end_vm_scope (unsigned int);
extern tree c_expr_to_decl (tree, bool *, bool *, bool *);
extern tree c_begin_omp_parallel (void);
extern tree c_finish_omp_parallel (tree, tree);
extern tree c_finish_omp_clauses (tree);

/* Set to 0 at beginning of a function definition, set to 1 if
   a return statement that specifies a return value is seen.  */
extern int current_function_returns_value;

/* Set to 0 at beginning of a function definition, set to 1 if
   a return statement with no argument is seen.  */
extern int current_function_returns_null;

/* Set to 0 at beginning of a function definition, set to 1 if
   a call to a noreturn function is seen.  */
extern int current_function_returns_abnormally;

/* Nonzero means we are reading code that came from a system header file.  */
extern int system_header_p;

/* True means global_bindings_p should return false even if the scope
   stack says we are in file scope.  */
extern bool c_override_global_bindings_to_false;

/* True means we've initialized exception handling.  */
extern bool c_eh_initialized_p;

/* In c-decl.c */
extern void c_finish_incomplete_decl (tree);
extern void c_write_global_declarations (void);

/* In order for the format checking to accept the C frontend
   diagnostic framework extensions, you must include this file before
   toplev.h, not after.  */
#if GCC_VERSION >= 4001
#define ATTRIBUTE_GCC_CDIAG(m, n) __attribute__ ((__format__ (GCC_DIAG_STYLE, m ,n))) ATTRIBUTE_NONNULL(m)
#else
#define ATTRIBUTE_GCC_CDIAG(m, n) ATTRIBUTE_NONNULL(m)
#endif

extern void pedwarn_c90 (const char *, ...) ATTRIBUTE_GCC_CDIAG(1,2);
extern void pedwarn_c99 (const char *, ...) ATTRIBUTE_GCC_CDIAG(1,2);

#endif /* ! GCC_C_TREE_H */
wendy.c
/* wendy.c: One time-step of a one-dimensional N-body code */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <wendy.h>
#include <bst.h>
#include <parallel_sort.h>
#ifdef _OPENMP
#include <omp.h>
#else
/* Without OpenMP, emulate omp_get_wtime with the C clock() counter
   (CPU seconds, not wall time). */
#define omp_get_wtime() (clock()*1./CLOCKS_PER_SEC)
#endif
// functionality to perform argsort in approximate solver:
// sort.h instantiates argsort_quick_sort / argsort_merge_sort /
// argsort_tim_sort over struct array_w_index, ordered by the .val member
#define SORT_NAME argsort
#define SORT_TYPE struct array_w_index
#define SORT_CMP(x,y) ( ( ((x).val) < ((y).val) ) ? -1: ( ( ((x).val) > ((y).val) ) ? 1: 0 ) )
#include <sort.h>
// For parallel_sort, need standard (qsort-like) comparison function
int argsort_compare_function(const void *a,const void *b) {
  struct array_w_index *x = (struct array_w_index *) a;
  struct array_w_index *y = (struct array_w_index *) b;
  if (x->val < y->val) return -1;
  else if (x->val > y->val) return 1;
  else return 0;
}
/* Solve for the next collision time under quadratic motion
   x(t) = c2 t^2 + c1 t + c0, i.e. the root of the quadratic with the
   '+' branch of the quadratic formula (mba = -c1/c2 is the sum of the
   two roots). */
double _solve_coll_quad(double c0, double c1, double c2){
  // Solves for collisions under quadratic motion: a t^2/2 + vt + x
  double mba;
  mba= -c1/c2;
  return 0.5 * ( mba + sqrt ( mba * mba - 4. * c0/c2) );
}
/* Solve for the next collision time under harmonic motion:
   A cos(omega t + phi) + a/omega^2.  A and B are the cosine/sine
   amplitudes of the relative displacement; the branch is chosen so the
   returned phase (and hence time) is non-negative.
   NOTE(review): c2 enters both the amplitude A and the asin argument --
   verify against the wendy derivation before editing. */
double _solve_coll_harm(double c0, double c1, double c2, double omega){
  // Solves for collisions under harmonic motion: A cos(omegaxt+phi)+a/omega^2
  double A, B, out;
  A= c0-c2;
  B= c1/omega;
  out= -asin(c2/sqrt(B*B+A*A))-atan2(A,B);
  if ( out < 0 ) {
    out= M_PI+asin(c2/sqrt(B*B+A*A))-atan2(A,B);
  }
  //printf("Solves equation22? %g,%g,%g,%g\n",A*cos(out),B*sin(out),C,
  //	 A * cos(out) + B * sin(out) - C);
  //fflush(stdout);
  return out/omega;
}
/* Advance the exact 1D N-body solution by one step dt, resolving sheet
   crossings ("collisions") one at a time in chronological order.
   N            : number of sheets
   x,v,a,m      : positions, velocities, accelerations, masses (original order)
   sindx        : sorted-position -> original-index map, updated in place
   cindx        : in/out, sorted index of the pair colliding next
   next_tcoll   : in/out, time until that collision (relative to step start;
                  shifted by -dt on return)
   tcoll        : per-adjacent-pair collision times (N-1 entries), mirrored
                  in a binary search tree for O(log N) min extraction
   dt           : length of the time-step
   maxcoll      : abort threshold on the number of collisions
   err          : set to -2 iff maxcoll collisions were processed
   ncoll        : output, number of collisions processed
   time_elapsed : output, CPU seconds spent in the collision loop
   Particles are updated lazily: t[i] records the time to which sheet i
   has already been advanced. */
void _wendy_nbody_onestep(int N, double * x, double * v, double * a,
			  double * m, int * sindx, int * cindx,
			  double * next_tcoll, double * tcoll, double dt,
			  int maxcoll, int * err, int * ncoll,
			  double * time_elapsed){
  int cnt_coll,ii, tmpi;
  int c_in_x_indx, c_in_x_next_indx;
  double dv,tdt, dm, tmpd;
  struct node* minNode;
  double * t= (double *) malloc ( N * sizeof(double) );
  for (ii=0; ii < N; ii++) *(t+ii)= 0.;
  cnt_coll= 0;
  // Build binary search tree for keeping track of collision times
  int * idx= (int *) malloc ( (N-1) * sizeof(int) );
  for (ii=0; ii < N-1; ii++) *(idx+ii)= ii;
  struct node* bst_tcoll= bst_build(N-1,idx,tcoll);
  free(idx);
  //bst_inorder(bst_tcoll);
  // Time how long the loop takes
  clock_t time_begin= clock();
  while ( *next_tcoll < dt && cnt_coll < maxcoll ){
    //printf("Colliding in %f\n",*next_tcoll);
    //fflush(stdout);
    cnt_coll+= 1;
    // collide, update collided particles: bring both sheets of the pair
    // from their individual times t[i] up to the collision time
    c_in_x_indx= *(sindx+ *cindx);
    c_in_x_next_indx= *(sindx+ *cindx+1);
    tdt= ( *next_tcoll - *(t + c_in_x_indx) );
    dv= *(a + c_in_x_indx) * tdt;
    *(x + c_in_x_indx)+= dv * tdt / 2. + *(v + c_in_x_indx) * tdt;
    *(v + c_in_x_indx)+= dv;
    *(t + c_in_x_indx)= *next_tcoll;
    tdt= ( *next_tcoll - *(t + c_in_x_next_indx) );
    dv= *(a + c_in_x_next_indx) * tdt;
    *(x + c_in_x_next_indx)+= dv * tdt / 2. + *(v + c_in_x_next_indx) * tdt;
    *(v + c_in_x_next_indx)+= dv;
    *(t + c_in_x_next_indx)= *next_tcoll;
    // swap: the two sheets exchange rank in the sorted order
    tmpi= *(sindx+ *cindx);
    *(sindx+ *cindx)= *(sindx+ *cindx+1);
    *(sindx+ *cindx+1)= tmpi;
    // track mass and update accelerations: crossing a sheet changes each
    // acceleration by the mass difference, then the pair's accelerations
    // are exchanged along with their ordering
    dm= *(m + c_in_x_next_indx) - *(m + c_in_x_indx);
    *(a + c_in_x_indx)-= dm;
    *(a + c_in_x_next_indx)-= dm;
    tmpd= *(a + c_in_x_indx);
    *(a + c_in_x_indx)= *(a + c_in_x_next_indx);
    *(a + c_in_x_next_indx)= tmpd;
    // Update collision times, solution for delta x = 0
    //printf("Collide in %f\n",2.* (*(v + c_in_x_indx) - *(v + c_in_x_next_indx)) /
    //       (*(a + c_in_x_next_indx) - *(a + c_in_x_indx)));
    //fflush(stdout);
    tmpd= 2.* (*(v + c_in_x_indx) - *(v + c_in_x_next_indx)) /
      (*(a + c_in_x_next_indx) - *(a + c_in_x_indx));
    // Re-key this pair in the BST (delete + insert with the new time)
    bst_tcoll= bst_deleteNode(bst_tcoll,tcoll+*cindx);
    *(tcoll + *cindx)= *next_tcoll+tmpd;
    bst_tcoll= bst_forceInsert(bst_tcoll,*cindx,tcoll+*cindx);
    // Abuse the c_in_x_indx and c_in_x_next_indx arrays to recompute the
    // collision times of the neighboring pairs; tdt is negative here
    // (extrapolates the neighbor's state back/forward to *next_tcoll)
    if ( *cindx > 0 ){
      c_in_x_indx= *(sindx+ *cindx-1);
      c_in_x_next_indx= *(sindx+ *cindx);
      tdt= *(t + c_in_x_indx) - *next_tcoll;
      bst_tcoll= bst_deleteNode(bst_tcoll,tcoll+*cindx-1);
      *(tcoll + *cindx -1)= *next_tcoll +
	_solve_coll_quad(*(x + c_in_x_indx) + *(a+c_in_x_indx) * tdt*tdt / 2.
			 - *(v+c_in_x_indx) * tdt - *(x + c_in_x_next_indx),
			 *(v + c_in_x_indx) - *(a + c_in_x_indx) * tdt
			 - *(v + c_in_x_next_indx),
			 0.5*(*(a + c_in_x_indx) - *(a + c_in_x_next_indx)));
      bst_tcoll= bst_forceInsert(bst_tcoll,*cindx-1,tcoll+*cindx-1);
    }
    if ( *cindx < N-2 ){
      c_in_x_indx= *(sindx+ *cindx+2);
      c_in_x_next_indx= *(sindx+ *cindx+1);
      tdt= *(t + c_in_x_indx) - *next_tcoll;
      bst_tcoll= bst_deleteNode(bst_tcoll,tcoll+*cindx+1);
      *(tcoll + *cindx+1)= *next_tcoll +
	_solve_coll_quad(*(x + c_in_x_indx) + *(a+c_in_x_indx) * tdt*tdt / 2.
			 - *(v+c_in_x_indx) * tdt - *(x + c_in_x_next_indx),
			 *(v + c_in_x_indx) - *(a + c_in_x_indx) * tdt
			 - *(v + c_in_x_next_indx),
			 0.5*(*(a + c_in_x_indx) - *(a + c_in_x_next_indx)));
      bst_tcoll= bst_forceInsert(bst_tcoll,*cindx+1,tcoll+*cindx+1);
    }
    // Find minimum: the next collision is the smallest entry in the BST
    minNode= bst_minValueNode(bst_tcoll);
    *cindx= minNode->idx;
    *next_tcoll= *minNode->val;
    //printf("Next one %f\n",*next_tcoll);
    //fflush(stdout);
  }
  clock_t time_end= clock();
  *time_elapsed= (double) (time_end-time_begin) / CLOCKS_PER_SEC;
  //printf("Next %f\n",*next_tcoll-dt);
  //fflush(stdout);
  // Update all to next snapshot: advance every sheet from its own t[ii]
  // to the end of the step
  for (ii=0; ii < N; ii++) {
    tdt= dt - *(t+ii);
    dv= *(a+ii) * tdt;
    *(x+ii)+= dv * tdt / 2. + *(v+ii) * tdt;
    *(v+ii)+= dv;
  }
  // Shift all stored collision times so they are relative to the new step
  for (ii=0; ii < N-1; ii++) {
    *(tcoll+ii)-= dt;
  }
  *next_tcoll-= dt;
  free(t);
  bst_destroy(bst_tcoll);
  *ncoll= cnt_coll;
  if ( cnt_coll == maxcoll ) *err= -2;
}
/* Same exact one-step driver as _wendy_nbody_onestep, but for motion in
   an additional harmonic background with frequency omega: between
   collisions each sheet follows x(t) = (x0-a) cos(omega t)
   + (v0/omega) sin(omega t) + a, where a[] plays the role of the
   equilibrium offset (NOTE(review): presumably acceleration/omega^2 --
   confirm against the caller).  Parameters are as in
   _wendy_nbody_onestep, plus omega.
   cosot is recovered as sqrt(1-sinot^2), which assumes
   |omega*tdt| <= pi/2 (cos >= 0). */
void _wendy_nbody_harm_onestep(int N, double * x, double * v, double * a,
			       double * m, int * sindx, int * cindx,
			       double * next_tcoll, double * tcoll,double dt,
			       int maxcoll, int * err, int * ncoll,
			       double * time_elapsed, double omega){
  int cnt_coll,ii, tmpi;
  int c_in_x_indx, c_in_x_next_indx;
  double tdt, dm, tmpd, cosot, sinot;
  struct node* minNode;
  double * t= (double *) malloc ( N * sizeof(double) );
  for (ii=0; ii < N; ii++) *(t+ii)= 0.;
  cnt_coll= 0;
  // Build binary search tree for keeping track of collision times
  int * idx= (int *) malloc ( (N-1) * sizeof(int) );
  for (ii=0; ii < N-1; ii++) *(idx+ii)= ii;
  struct node* bst_tcoll= bst_build(N-1,idx,tcoll);
  free(idx);
  //bst_inorder(bst_tcoll);
  // Time how long the loop takes
  clock_t time_begin= clock();
  while ( *next_tcoll < dt && cnt_coll < maxcoll ){
    //printf("Colliding in %f\n",*next_tcoll);
    //fflush(stdout);
    cnt_coll+= 1;
    // collide, update collided particles (harmonic propagation to the
    // collision time)
    c_in_x_indx= *(sindx+ *cindx);
    c_in_x_next_indx= *(sindx+ *cindx+1);
    tdt= ( *next_tcoll - *(t + c_in_x_indx) );
    sinot = sin( omega * tdt );
    cosot= sqrt(1.-sinot*sinot);
    tmpd= *(x + c_in_x_indx) - *(a + c_in_x_indx);
    *(x + c_in_x_indx)= tmpd * cosot \
      + *(v + c_in_x_indx) / omega * sinot \
      + *(a + c_in_x_indx);
    *(v + c_in_x_indx)= -tmpd * omega * sinot \
      + *(v + c_in_x_indx) * cosot;
    *(t + c_in_x_indx)= *next_tcoll;
    tdt= ( *next_tcoll - *(t + c_in_x_next_indx) );
    sinot = sin( omega * tdt );
    cosot= sqrt(1.-sinot*sinot);
    tmpd= *(x + c_in_x_next_indx) - *(a + c_in_x_next_indx);
    *(x + c_in_x_next_indx)= tmpd * cosot \
      + *(v + c_in_x_next_indx) / omega * sinot \
      + *(a + c_in_x_next_indx) ;
    *(v + c_in_x_next_indx)= -tmpd * omega * sinot \
      + *(v + c_in_x_next_indx) * cosot;
    *(t + c_in_x_next_indx)= *next_tcoll;
    //printf("Collide? %g, %g, %g\n",*(x + c_in_x_indx),*(x + c_in_x_next_indx),
    //       *(x + c_in_x_indx)-*(x + c_in_x_next_indx));
    //fflush(stdout);
    // swap: the two sheets exchange rank in the sorted order
    tmpi= *(sindx+ *cindx);
    *(sindx+ *cindx)= *(sindx+ *cindx+1);
    *(sindx+ *cindx+1)= tmpi;
    // track mass and update accelerations (same exchange as the
    // constant-acceleration case)
    dm= *(m + c_in_x_next_indx) - *(m + c_in_x_indx);
    *(a + c_in_x_indx)-= dm;
    *(a + c_in_x_next_indx)-= dm;
    tmpd= *(a + c_in_x_indx);
    *(a + c_in_x_indx)= *(a + c_in_x_next_indx);
    *(a + c_in_x_next_indx)= tmpd;
    // Update collision times, solution for delta x = 0 (closed form via
    // the velocity/acceleration ratio of the freshly collided pair)
    tmpd= (*(v + c_in_x_indx) - *(v + c_in_x_next_indx)) / \
      (*(a + c_in_x_next_indx) - *(a + c_in_x_indx)) / omega;
    tmpd*= tmpd;
    tmpd= (1.-tmpd)/(1.+tmpd);
    tmpd= acos(tmpd)/omega;
    bst_tcoll= bst_deleteNode(bst_tcoll,tcoll+*cindx);
    *(tcoll + *cindx)= *next_tcoll+tmpd;
    bst_tcoll= bst_forceInsert(bst_tcoll,*cindx,tcoll+*cindx);
    //printf("Collide in %f\n",*(tcoll + *cindx)-*next_tcoll);
    //fflush(stdout);
    // Abuse the c_in_x_indx and c_in_x_next_indx arrays
    if ( *cindx > 0 ){
      c_in_x_indx= *(sindx+ *cindx-1);
      c_in_x_next_indx= *(sindx+ *cindx);
      // Also forward the previous sheet to the collision time for convenience
      tdt= *next_tcoll - *(t + c_in_x_indx);
      sinot = sin( omega * tdt );
      cosot= sqrt(1.-sinot*sinot);
      tmpd= *(x + c_in_x_indx) - *(a + c_in_x_indx);
      *(x + c_in_x_indx)= tmpd * cosot \
	+ *(v + c_in_x_indx) / omega * sinot \
	+ *(a + c_in_x_indx);
      *(v + c_in_x_indx)= -tmpd * omega * sinot \
	+ *(v + c_in_x_indx) * cosot;
      *(t + c_in_x_indx)= *next_tcoll;
      bst_tcoll= bst_deleteNode(bst_tcoll,tcoll+*cindx-1);
      *(tcoll + *cindx -1)= *next_tcoll +
	_solve_coll_harm(*(x + c_in_x_indx) - *(x + c_in_x_next_indx),
			 *(v + c_in_x_indx) - *(v + c_in_x_next_indx),
			 *(a + c_in_x_indx) - *(a + c_in_x_next_indx),
			 omega);
      bst_tcoll= bst_forceInsert(bst_tcoll,*cindx-1,tcoll+*cindx-1);
    }
    if ( *cindx < N-2 ){
      c_in_x_indx= *(sindx+ *cindx+2);
      c_in_x_next_indx= *(sindx+ *cindx+1);
      // Also forward the next sheet to the collision time for convenience
      tdt= *next_tcoll - *(t + c_in_x_indx);
      sinot = sin( omega * tdt );
      cosot= sqrt(1.-sinot*sinot);
      tmpd= *(x + c_in_x_indx) - *(a + c_in_x_indx);
      *(x + c_in_x_indx)= tmpd * cosot \
	+ *(v + c_in_x_indx) / omega * sinot \
	+ *(a + c_in_x_indx);
      *(v + c_in_x_indx)= -tmpd * omega * sinot \
	+ *(v + c_in_x_indx) * cosot;
      *(t + c_in_x_indx)= *next_tcoll;
      bst_tcoll= bst_deleteNode(bst_tcoll,tcoll+*cindx+1);
      *(tcoll + *cindx+1)= *next_tcoll +
	_solve_coll_harm(*(x + c_in_x_indx) - *(x + c_in_x_next_indx),
			 *(v + c_in_x_indx) - *(v + c_in_x_next_indx),
			 *(a + c_in_x_indx) - *(a + c_in_x_next_indx),
			 omega);
      bst_tcoll= bst_forceInsert(bst_tcoll,*cindx+1,tcoll+*cindx+1);
    }
    // Find minimum
    minNode= bst_minValueNode(bst_tcoll);
    *cindx= minNode->idx;
    *next_tcoll= *minNode->val;
    //printf("Next one %f\n",*next_tcoll);
    //fflush(stdout);
  }
  clock_t time_end= clock();
  *time_elapsed= (double) (time_end-time_begin) / CLOCKS_PER_SEC;
  //printf("Next %f\n",*next_tcoll-dt);
  //fflush(stdout);
  // Update all to next snapshot (harmonic propagation to the end of step)
  for (ii=0; ii < N; ii++) {
    tdt= dt - *(t+ii);
    sinot = sin( omega * tdt );
    cosot= sqrt(1.-sinot*sinot);
    tmpd= *(x+ii) - *(a+ii);
    *(x+ii)= tmpd * cosot + *(v+ii) / omega * sinot + *(a+ii);
    *(v+ii)= -tmpd * omega * sinot + *(v+ii) * cosot;
  }
  // Shift all stored collision times so they are relative to the new step
  for (ii=0; ii < N-1; ii++) {
    *(tcoll+ii)-= dt;
  }
  *next_tcoll-= dt;
  free(t);
  bst_destroy(bst_tcoll);
  *ncoll= cnt_coll;
  if ( cnt_coll == maxcoll ) *err= -2;
}
// Approximate solution using leapfrog integration w/ exact forces void leapfrog_leapq(int N, struct array_w_index *xi,double *v,double dt){ int ii; #pragma omp parallel for schedule(static,CHUNK_PARALLEL_LEAPFROG) private(ii) for (ii=0; ii < N; ii++) (xi+ii)->val+= dt * *(v + (xi+ii)->idx); } void leapfrog_leappq(int N, struct array_w_index * xi, double *v,double dt_kick,double dt_drift, double *a){ int ii; #pragma omp parallel for schedule(static,CHUNK_PARALLEL_LEAPFROG) private(ii) for (ii=0; ii< N; ii++) { *(v + (xi+ii)->idx)+= dt_kick * *(a + (xi+ii)->idx); (xi+ii)->val+= dt_drift * *(v + (xi+ii)->idx); } } void _nbody_force(int N, int sort_type, struct array_w_index * xi, double * x,double * m, double * a, double t, double totmass, double omega2, double (*ext_force)(int N,double *x, double t, double *a), double * cumulmass){ int ii; // argsort switch ( sort_type ) { case 0: argsort_quick_sort(xi,N); break; case 1: argsort_merge_sort(xi,N); break; case 2: argsort_tim_sort(xi,N); break; case 3: qsort(xi,N,sizeof(struct array_w_index),argsort_compare_function); break; case 4: parallel_sort(xi,N,sizeof(struct array_w_index),argsort_compare_function); break; } // Compute cumulative mass and acceleration for (ii=0; ii< N-1; ii++) *(cumulmass+ii+1)= *(cumulmass+ii) + *(m+(xi+ii)->idx); // Evaluate external force if ( ext_force && N > EXTERNAL_SWITCH ) { // Need to de-sort to pass x to ext_force's array calculation #pragma omp parallel for schedule(static,CHUNK_PARALLEL_LEAPFROG) private(ii) for (ii=0; ii< N; ii++) *(x+(xi+ii)->idx)= (xi+ii)->val; ext_force(N,x,t,a); } else if ( ext_force ) // for < EXTERNAL_SWITCH, use sequential evaluation for (ii=0; ii< N; ii++) *(a + (xi+ii)->idx)= ext_force(1,&(xi+ii)->val,t,NULL); else #pragma omp parallel for schedule(static,CHUNK_PARALLEL_LEAPFROG) private(ii) for (ii=0; ii< N; ii++) *(a+ii)= 0.; if ( omega2 < 0 ) #pragma omp parallel for schedule(static,CHUNK_PARALLEL_LEAPFROG) private(ii) for (ii=0; ii< N; ii++) *(a + 
(xi+ii)->idx)+= totmass - 2 * *(cumulmass+ii) - *(m+(xi+ii)->idx); else #pragma omp parallel for schedule(static,CHUNK_PARALLEL_LEAPFROG) private(ii) for (ii=0; ii< N; ii++) *(a + (xi+ii)->idx)+= totmass - 2 * *(cumulmass+ii) \ - *(m+(xi+ii)->idx) - omega2 * (xi+ii)->val; } void _wendy_nbody_approx_onestep(int N, struct array_w_index * xi, double * x, double * v, double * m, double * a, double totmass, double dt, int nleap, double * t0, double omega2, double (*ext_force)(int,double *x, double t,double *a), int sort_type, int * err,double * time_elapsed, double * cumulmass){ int ii; double time_begin, time_end; time_begin= omp_get_wtime(); //drift half leapfrog_leapq(N,xi,v,dt/2.); //now drift full for a while for (ii=0; ii < (nleap-1); ii++){ //kick+drift _nbody_force(N,sort_type,xi,x,m,a,*t0,totmass,omega2,*ext_force,cumulmass); if ( ext_force ) *t0+= dt; leapfrog_leappq(N,xi,v,dt,dt,a); } //end with one last kick and drift _nbody_force(N,sort_type,xi,x,m,a,*t0,totmass,omega2,*ext_force,cumulmass); if ( ext_force ) *t0+= dt; leapfrog_leappq(N,xi,v,dt,dt/2.,a); //de-sort #pragma omp parallel for schedule(static,CHUNK_PARALLEL_LEAPFROG) private(ii) for (ii=0; ii< N; ii++) *(x+(xi+ii)->idx)= (xi+ii)->val; time_end= omp_get_wtime(); *time_elapsed= time_end-time_begin; }
calculate_octree_signed_distance_to_3d_skin_process.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Pooyan Dadvand // #if !defined(KRATOS_CALCULATE_DISTANCE_PROCESS_H_INCLUDED ) #define KRATOS_CALCULATE_DISTANCE_PROCESS_H_INCLUDED // System includes #include <string> #include <iostream> // External includes // Project includes #include "includes/define.h" #include "processes/process.h" #include "includes/model_part.h" #include "spatial_containers/octree_binary.h" #include "utilities/spatial_containers_configure.h" #include "utilities/timer.h" #include "utilities/math_utils.h" #include "utilities/geometry_utilities.h" namespace Kratos { class DistanceSpatialContainersConfigure { public: class CellNodeData { double mDistance; double mCoordinates[3]; std::size_t mId; public: double& Distance(){return mDistance;} double& X() {return mCoordinates[0];} double& Y() {return mCoordinates[1];} double& Z() {return mCoordinates[2];} double& operator[](int i) {return mCoordinates[i];} std::size_t& Id(){return mId;} }; ///@name Type Definitions ///@{ enum { Dimension = 3, DIMENSION = 3, MAX_LEVEL = 12, MIN_LEVEL = 2 }; typedef Point PointType; /// always the point 3D typedef std::vector<double>::iterator DistanceIteratorType; typedef ModelPart::ElementsContainerType::ContainerType ContainerType; typedef ContainerType::value_type PointerType; typedef ContainerType::iterator IteratorType; typedef ModelPart::ElementsContainerType::ContainerType ResultContainerType; typedef ResultContainerType::value_type ResultPointerType; typedef ResultContainerType::iterator ResultIteratorType; typedef Element::Pointer pointer_type; typedef std::vector<CellNodeData*> data_type; typedef std::vector<PointerType>::iterator PointerTypeIterator; /// Pointer definition of DistanceSpatialContainersConfigure KRATOS_CLASS_POINTER_DEFINITION(DistanceSpatialContainersConfigure); ///@} ///@name Life Cycle 
///@{ /// Default constructor. DistanceSpatialContainersConfigure() {} /// Destructor. virtual ~DistanceSpatialContainersConfigure() {} ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ static data_type* AllocateData() { return new data_type(27, NULL); } static void CopyData(data_type* source, data_type* destination) { destination = source; } static void DeleteData(data_type* data) { delete data; } static inline void CalculateBoundingBox(const PointerType& rObject, PointType& rLowPoint, PointType& rHighPoint) { rHighPoint = rObject->GetGeometry().GetPoint(0); rLowPoint = rObject->GetGeometry().GetPoint(0); for (unsigned int point = 0; point<rObject->GetGeometry().PointsNumber(); point++) { for(std::size_t i = 0; i<3; i++) { rLowPoint[i] = (rLowPoint[i] > rObject->GetGeometry().GetPoint(point)[i] ) ? rObject->GetGeometry().GetPoint(point)[i] : rLowPoint[i]; rHighPoint[i] = (rHighPoint[i] < rObject->GetGeometry().GetPoint(point)[i] ) ? rObject->GetGeometry().GetPoint(point)[i] : rHighPoint[i]; } } } static inline void GetBoundingBox(const PointerType rObject, double* rLowPoint, double* rHighPoint) { for(std::size_t i = 0; i<3; i++) { rLowPoint[i] = rObject->GetGeometry().GetPoint(0)[i]; rHighPoint[i] = rObject->GetGeometry().GetPoint(0)[i]; } for (unsigned int point = 0; point<rObject->GetGeometry().PointsNumber(); point++) { for(std::size_t i = 0; i<3; i++) { rLowPoint[i] = (rLowPoint[i] > rObject->GetGeometry().GetPoint(point)[i] ) ? rObject->GetGeometry().GetPoint(point)[i] : rLowPoint[i]; rHighPoint[i] = (rHighPoint[i] < rObject->GetGeometry().GetPoint(point)[i] ) ? 
rObject->GetGeometry().GetPoint(point)[i] : rHighPoint[i]; } } } static inline bool Intersection(const PointerType& rObj_1, const PointerType& rObj_2) { Element::GeometryType& geom_1 = rObj_1->GetGeometry(); Element::GeometryType& geom_2 = rObj_2->GetGeometry(); return geom_1.HasIntersection(geom_2); } static inline bool IntersectionBox(const PointerType& rObject, const PointType& rLowPoint, const PointType& rHighPoint) { return rObject->GetGeometry().HasIntersection(rLowPoint, rHighPoint); } static inline bool IsIntersected(const PointerType& rObject, const double& tolerance, const double rLowPoint[], const double rHighPoint[]) { Kratos::Element::GeometryType& geom_1 = rObject->GetGeometry(); Kratos::Point rLowPointTolerance; Kratos::Point rHighPointTolerance; for(std::size_t i = 0; i<3; i++) { rLowPointTolerance[i] = rLowPoint[i] * 1+tolerance; rHighPointTolerance[i] = rHighPoint[i] * 1+tolerance; } return geom_1.HasIntersection(rLowPointTolerance,rHighPointTolerance); } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. virtual std::string Info() const { return " Spatial Containers Configure"; } /// Print information about this object. virtual void PrintInfo(std::ostream& rOStream) const {} /// Print object's data. virtual void PrintData(std::ostream& rOStream) const {} ///@} protected: private: /// Assignment operator. DistanceSpatialContainersConfigure& operator=(DistanceSpatialContainersConfigure const& rOther); /// Copy constructor. DistanceSpatialContainersConfigure(DistanceSpatialContainersConfigure const& rOther); }; // Class DistanceSpatialContainersConfigure ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /// Short class definition. /** Detail class definition. 
*/

/// Process that computes a signed distance field from a 3D "skin" (surface
/// mesh) onto the nodes of a body model part, using an octree built over the
/// skin elements. Distances are normalized to (0,1)^3 space; 1.0 means "far".
class CalculateSignedDistanceTo3DSkinProcess : public Process
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of CalculateSignedDistanceTo3DSkinProcess
    KRATOS_CLASS_POINTER_DEFINITION(CalculateSignedDistanceTo3DSkinProcess);

    typedef DistanceSpatialContainersConfigure ConfigurationType;
    typedef OctreeBinaryCell<ConfigurationType> CellType;
    typedef OctreeBinary<CellType> OctreeType;

    ///@}
    ///@name Life Cycle
    ///@{

    /// Constructor.
    // NOTE(review): both member references are bound to the SAME model part,
    // i.e. the skin (distance source) and the body (distance target) are
    // assumed to coincide here -- confirm before reusing this process.
    CalculateSignedDistanceTo3DSkinProcess(ModelPart& rThisModelPart)
        : mrSkinModelPart(rThisModelPart), mrBodyModelPart(rThisModelPart)
    {
    }

    /// Destructor.
    virtual ~CalculateSignedDistanceTo3DSkinProcess()
    {
    }

    ///@}
    ///@name Operators
    ///@{

    /// Function-call operator; forwards to Execute().
    void operator()()
    {
        Execute();
    }

    ///@}
    ///@name Operations
    ///@{

    /// Runs the full pipeline: build the octree, create the per-cell nodal
    /// data, run both distance passes, and write GiD debug output files
    /// ("octree1.post.msh"/"octree1.post.res") into the working directory.
    virtual void Execute()
    {
        KRATOS_TRY

        //std::cout << "Generating the Octree..." << std::endl;
        GenerateOctree();
        //std::cout << "Generating the Octree finished" << std::endl;
        GenerateCellNodalData();
        CalculateDistance();
        CalculateDistance2();

        // Debug output of the octree mesh and nodal distances in GiD format.
        std::ofstream mesh_file1("octree1.post.msh");
        std::ofstream res_file("octree1.post.res");
        Timer::Start("Writing Gid conform Mesh");
        PrintGiDMesh(mesh_file1);
        PrintGiDResults(res_file);
        // mOctree.PrintGiDMeshNew(mesh_file1);
        Timer::Stop("Writing Gid conform Mesh");
        KRATOS_WATCH(mrBodyModelPart);   // debug dump of the whole model part

        KRATOS_CATCH("");
    }

    /// Builds the octree: inserts every skin node (to drive refinement),
    /// enforces the 2:1 level constraint between neighbour cells, then
    /// inserts the skin elements as the octree objects.
    void GenerateOctree()
    {
        Timer::Start("Generating Octree");
        for(ModelPart::NodeIterator i_node = mrSkinModelPart.NodesBegin() ; i_node != mrSkinModelPart.NodesEnd() ; i_node++)
        {
            double temp_point[3];
            const Node<3>& r_node = *i_node;
            temp_point[0] = r_node[0];
            temp_point[1] = r_node[1];
            temp_point[2] = r_node[2];
            mOctree.Insert(temp_point);
        }
        mOctree.Constrain2To1();
        for(ModelPart::ElementIterator i_element = mrSkinModelPart.ElementsBegin() ; i_element != mrSkinModelPart.ElementsEnd() ; i_element++)
        {
            mOctree.Insert(*(i_element).base());
        }
        Timer::Stop("Generating Octree");
        // octree.Insert(*(mrSkinModelPart.ElementsBegin().base()));
        KRATOS_WATCH(mOctree);   // debug dump of the octree
    }

    /// Allocates the data array of every leaf cell (in parallel), then
    /// sequentially creates the shared corner nodes of all leaves, numbering
    /// them after the existing body-model-part nodes.
    void GenerateCellNodalData()
    {
        Timer::Start("Generating Cell Nodal Data");
        std::vector<OctreeType::cell_type*> all_leaves;
        mOctree.GetAllLeavesVector(all_leaves);

#pragma omp parallel for
        for (std::size_t i = 0; i < all_leaves.size(); i++)
        {
            *(all_leaves[i]->pGetDataPointer()) = ConfigurationType::AllocateData();
        }

        // Node creation must stay sequential: ids are handed out from a
        // single counter and nodes are shared between neighbour cells.
        std::size_t last_id = mrBodyModelPart.NumberOfNodes() + 1;
        for (std::size_t i = 0; i < all_leaves.size(); i++)
        {
            CellType* cell = all_leaves[i];
            GenerateCellNode(cell, last_id);
        }
        Timer::Stop("Generating Cell Nodal Data");
    }

    /// Ensures all 8 corner positions of a cell carry a CellNodeData, creating
    /// missing ones (fresh id + coordinates) and registering them in the
    /// neighbour cells that share the same corner.
    // NOTE(review): the CellNodeData objects created here are stored in
    // mOctreeNodes but never deleted -- looks like a leak; confirm ownership.
    void GenerateCellNode(CellType* pCell, std::size_t& LastId)
    {
        for (int i_pos=0; i_pos < 8; i_pos++) // position 8 is for center
        {
            ConfigurationType::CellNodeData* p_node = (*(pCell->pGetData()))[i_pos];
            if(p_node == 0)
            {
                CellType::key_type keys[3];
                pCell->GetKey(i_pos,keys);
                double new_point[3];   // NOTE(review): unused local
                (*(pCell->pGetData()))[i_pos] = new ConfigurationType::CellNodeData;
                (*(pCell->pGetData()))[i_pos]->Id() = LastId++;
                (*(pCell->pGetData()))[i_pos]->X() = pCell->GetCoordinate(keys[0]);
                (*(pCell->pGetData()))[i_pos]->Y() = pCell->GetCoordinate(keys[1]);
                (*(pCell->pGetData()))[i_pos]->Z() = pCell->GetCoordinate(keys[2]);
                mOctreeNodes.push_back((*(pCell->pGetData()))[i_pos]);
                SetNodeInNeighbours(pCell,i_pos,(*(pCell->pGetData()))[i_pos]);
            }
        }
    }

    // Earlier variant that created real ModelPart nodes instead of
    // CellNodeData objects; kept for reference.
    //      void GenerateCellNode(CellType* pCell, std::size_t& LastId)
    //      {
    //        for (int i_pos=0; i_pos < 8; i_pos++) // position 8 is for center
    //        {
    //            Node<3>* p_node = (*(pCell->pGetData()))[i_pos];
    //            if(p_node == 0)
    //            {
    //                CellType::key_type keys[3];
    //                pCell->GetKey(i_pos,keys);
    //
    //                double new_point[3];
    //
    //                new_point[0] = pCell->GetCoordinate(keys[0]);
    //                new_point[1] = pCell->GetCoordinate(keys[1]);
    //                new_point[2] = pCell->GetCoordinate(keys[2]);
    //
    //
    //                (*(pCell->pGetData()))[i_pos] = (mrBodyModelPart.CreateNewNode(++LastId, new_point[0], new_point[1], new_point[2])).get();
    //
    //                SetNodeInNeighbours(pCell,i_pos,(*(pCell->pGetData()))[i_pos]);
    //            }
    //
    //        }
    //      }

    /// Shares a newly created corner node with every neighbour cell that
    /// touches the same corner (up to 8 directions), skipping neighbours
    /// that already have a node at that local position.
    void SetNodeInNeighbours(CellType* pCell, int Position, ConfigurationType::CellNodeData* pNode)
    {
        CellType::key_type point_key[3];
        pCell->GetKey(Position, point_key);

        for (std::size_t i_direction = 0; i_direction < 8; i_direction++)
        {
            CellType::key_type neighbour_key[3];
            if (pCell->GetNeighbourKey(Position, i_direction, neighbour_key))
            {
                CellType* neighbour_cell = mOctree.pGetCell(neighbour_key);
                if (!neighbour_cell || (neighbour_cell == pCell))
                    continue;

                std::size_t position = neighbour_cell->GetLocalPosition(point_key);
                if((*neighbour_cell->pGetData())[position])
                {
                    //std::cout << "ERROR!! Bad Position calculated!!!!!!!!!!! position :" << position << std::endl;
                    continue;
                }

                (*neighbour_cell->pGetData())[position] = pNode;
            }
        }
    }

    /// First distance pass: reset nodal DISTANCE to the maximum (1.0),
    /// seed the octree-corner distances from the non-empty leaves, then
    /// compute each body node's distance by ray casting.
    void CalculateDistance()
    {
        Timer::Start("Calculate Distances");
        ModelPart::NodesContainerType::ContainerType& nodes = mrBodyModelPart.NodesArray();
        int nodes_size = nodes.size();
        // first of all we reset the node distance to 1.00 which is the maximum distnace in our normalized space.
#pragma omp parallel for firstprivate(nodes_size)
        for(int i = 0 ; i < nodes_size ; i++)
            nodes[i]->GetSolutionStepValue(DISTANCE) = 1.00;

        std::vector<CellType*> leaves;
        mOctree.GetAllLeavesVector(leaves);
        int leaves_size = leaves.size();

        for(int i = 0 ; i < leaves_size ; i++)
            CalculateNotEmptyLeavesDistance(leaves[i]);

#pragma omp parallel for firstprivate(nodes_size)
        for(int i = 0 ; i < nodes_size ; i++)
        {
            CalculateNodeDistance(*(nodes[i]));
        }
        Timer::Stop("Calculate Distances");
    }

    /// Second distance pass (experimental).
    // NOTE(review): the direction loop runs only for i_direction == 0, and
    // the per-node CalculateDistance(Node, int) overload it calls has a fully
    // commented-out body -- so beyond re-resetting DISTANCE to 1.0 and
    // re-seeding leaf distances, this pass currently has no effect.
    void CalculateDistance2()
    {
        Timer::Start("Calculate Distances 2");
        ModelPart::NodesContainerType::ContainerType& nodes = mrBodyModelPart.NodesArray();
        int nodes_size = nodes.size();
        // first of all we reste the node distance to 1.00 which is the maximum distnace in our normalized space.
#pragma omp parallel for firstprivate(nodes_size)
        for(int i = 0 ; i < nodes_size ; i++)
            nodes[i]->GetSolutionStepValue(DISTANCE) = 1.00;

        std::vector<CellType*> leaves;
        mOctree.GetAllLeavesVector(leaves);
        int leaves_size = leaves.size();

        for(int i = 0 ; i < leaves_size ; i++)
            CalculateNotEmptyLeavesDistance(leaves[i]);

        for(int i_direction = 0 ; i_direction < 1 ; i_direction++)
        {
            //#pragma omp parallel for firstprivate(nodes_size)
            for(int i = 0 ; i < nodes_size ; i++)
            {
                if(nodes[i]->X() < 1.00 && nodes[i]->Y() < 1.00 && nodes[i]->Z() < 1.00)
                    // if((*nodes[i])[i_direction] == 0.00)
                    CalculateDistance(*(nodes[i]), i_direction);
            }
        }
        Timer::Stop("Calculate Distances 2");
    }

    /// Per-node directional distance via ray casting.
    // NOTE(review): the entire body is commented out, so this overload is
    // currently a no-op; the commented code is kept below for reference.
    void CalculateDistance(Node<3>& rNode, int i_direction)
    {
        //        double coords[3] = {rNode.X(), rNode.Y(), rNode.Z()};
        //
        //        // KRATOS_WATCH_3(coords);
        //
        //         //This function must color the positions in space defined by 'coords'.
        //        //coords is of dimension (3) normalized in (0,1)^3 space
        //
        //        typedef Element::GeometryType triangle_type;
        //        typedef std::vector<std::pair<double, triangle_type*> > intersections_container_type;
        //
        //        intersections_container_type intersections;
        //        std::vector<Node<3>*> nodes_array;
        //
        //
        //        const double epsilon = 1e-12;
        //
        //        double distance = 1.0;
        //
        //        // Creating the ray
        //        double ray[3] = {coords[0], coords[1], coords[2]};
        //        ray[i_direction] = 0; // starting from the lower extreme
        //
        //        // KRATOS_WATCH_3(ray)
        //        GetIntersectionsAndNodes(ray, i_direction, intersections, nodes_array);
        //        // KRATOS_WATCH(nodes_array.size())
        //        for (int i_node = 0; i_node < nodes_array.size() ; i_node++)
        //        {
        //            double coord = nodes_array[i_node]->Coordinates()[i_direction];
        //            // KRATOS_WATCH(intersections.size());
        //
        //            int ray_color= 1;
        //            std::vector<std::pair<double, Element::GeometryType*> >::iterator i_intersection = intersections.begin();
        //            while (i_intersection != intersections.end()) {
        //                double d = coord - i_intersection->first;
        //                if (d > epsilon) {
        //
        //                    ray_color = -ray_color;
        //                    distance = d;
        //                } else if (d > -epsilon) {//interface
        //                    distance = 0.00;
        //                    break;
        //                } else {
        //                    if(distance > -d)
        //                        distance = -d;
        //                    break;
        //                }
        //
        //                i_intersection++;
        //            }
        //
        //            distance *= ray_color;
        //
        //            double& node_distance = nodes_array[i_node]->GetSolutionStepValue(DISTANCE);
        //            if(fabs(distance) < fabs(node_distance))
        //                node_distance = distance;
        //            else if (distance*node_distance < 0.00) // assigning the correct sign
        //                node_distance = -node_distance;
        //
        //
        //        }
    }

    /// Seeds octree-corner distances: for every corner of a non-empty leaf,
    /// takes the minimum point-to-triangle distance over the cell's objects.
    // NOTE(review): cell_point[] is read UNINITIALIZED -- the three
    // assignments from the cell key are commented out below, so the distance
    // is computed against garbage coordinates. Re-enable them to fix.
    void CalculateNotEmptyLeavesDistance(CellType* pCell)
    {
        typedef Element::GeometryType triangle_type;
        typedef OctreeType::cell_type::object_container_type object_container_type;

        object_container_type* objects = (pCell->pGetObjects());

        // There are no intersection in empty cells
        if (objects->empty())
            return;

        for (int i_pos=0; i_pos < 8; i_pos++) // position 8 is for center
        {
            double distance = 1.00; // maximum distance is 1.00

            for(object_container_type::iterator i_object = objects->begin(); i_object != objects->end(); i_object++)
            {
                CellType::key_type keys[3];
                pCell->GetKey(i_pos,keys);

                double cell_point[3];
                //cell_point[0] = pCell->GetCoordinate(keys[0]);
                //cell_point[1] = pCell->GetCoordinate(keys[1]);
                //cell_point[2] = pCell->GetCoordinate(keys[2]);

                double d = GeometryUtils::PointDistanceToTriangle3D((*i_object)->GetGeometry()[0], (*i_object)->GetGeometry()[1], (*i_object)->GetGeometry()[2], Point(cell_point[0], cell_point[1], cell_point[2]));

                if(d < distance)
                    distance = d;
            }

            double& node_distance = (*(pCell->pGetData()))[i_pos]->Distance();
            if(distance < node_distance)
                node_distance = distance;
        }
    }

    /// Computes a node's signed distance by ray casting and keeps the
    /// smaller-magnitude value; flips the stored sign when the new estimate
    /// disagrees in sign.
    void CalculateNodeDistance(Node<3>& rNode)
    {
        double coord[3] = {rNode.X(), rNode.Y(), rNode.Z()};
        double distance = DistancePositionInSpace(coord);
        double& node_distance = rNode.GetSolutionStepValue(DISTANCE);

        const double epsilon = 1.00e-12;   // NOTE(review): unused local
        if(fabs(node_distance) > fabs(distance))
            node_distance = distance;
        else if (distance*node_distance < 0.00) // assigning the correct sign
            node_distance = -node_distance;
    }

    /// Colors a point of (0,1)^3 space: casts one axis-aligned ray per
    /// direction from the lower domain boundary, flips the sign at each
    /// skin crossing, and returns the smallest-magnitude signed distance.
    // NOTE(review): GetIntersections below is currently a no-op (its body is
    // commented out), so intersections stays empty and this returns 1.0.
    double DistancePositionInSpace(double* coords)
    {
        //This function must color the positions in space defined by 'coords'.
        //coords is of dimension (3) normalized in (0,1)^3 space

        typedef Element::GeometryType triangle_type;
        typedef std::vector<std::pair<double, triangle_type*> > intersections_container_type;

        intersections_container_type intersections;

        const int dimension = 3;
        const double epsilon = 1e-12;

        double distances[3] = {1.0, 1.0, 1.0};

        for (int i_direction = 0; i_direction < dimension; i_direction++)
        {
            // Creating the ray
            double ray[3] = {coords[0], coords[1], coords[2]};
            ray[i_direction] = 0; // starting from the lower extreme

            GetIntersections(ray, i_direction, intersections);

            //            if(intersections.size() == 1)
            //                KRATOS_WATCH_3(ray)

            //             KRATOS_WATCH(intersections.size());

            int ray_color= 1;
            std::vector<std::pair<double, Element::GeometryType*> >::iterator i_intersection = intersections.begin();
            while (i_intersection != intersections.end()) {
                double d = coords[i_direction] - i_intersection->first;
                if (d > epsilon) {   // intersection is below the point: crossing
                    ray_color = -ray_color;
                    distances[i_direction] = d;
                    //                    if(distances[i_direction] > d) // I think this is redundunt. Pooyan.
                    //                    {
                    //                        if(ray_color > 0.00)
                    //                            distances[i_direction] = d;
                    //                        else
                    //                            distances[i_direction] = -d;
                    //                    }
                } else if (d > -epsilon) {//interface
                    distances[i_direction] = 0.00;
                    break;
                } else {   // intersection is above the point
                    if(distances[i_direction] > -d)
                        distances[i_direction] = -d;
                    break;
                }

                i_intersection++;
            }

            distances[i_direction] *= ray_color;
        }

        //        if(distances[0]*distances[1] < 0.00 || distances[2]*distances[1] < 0.00)
        //            KRATOS_WATCH_3(distances);

#ifdef _DEBUG
        // NOTE(review): 'colors' is not declared anywhere -- this block does
        // not compile when _DEBUG is defined.
        std::cout << "colors : " << colors[0] << ", " << colors[1] << ", " << colors[2] << std::endl;
#endif
        // keep the smallest-magnitude directional distance
        double distance = (fabs(distances[0]) > fabs(distances[1])) ? distances[1] : distances[0];
        distance = (fabs(distance) > fabs(distances[2])) ? distances[2] : distance;

        return distance;
    }

    /// Walks the octree along a ray collecting skin intersections and the
    /// octree nodes the ray passes through.
    // NOTE(review): the whole body is commented out -- currently a no-op;
    // the commented code is kept below for reference.
    void GetIntersectionsAndNodes(double* ray, int direction, std::vector<std::pair<double,Element::GeometryType*> >& intersections, std::vector<Node<3>*>& rNodesArray)
    {
        //        //This function passes the ray through the model and gives the hit point to all objects in its way
        //        //ray is of dimension (3) normalized in (0,1)^3 space
        //        // direction can be 0,1,2 which are x,y and z respectively
        //
        //        const double epsilon = 1.00e-12;
        //
        //        // first clearing the intersections points vector
        //        intersections.clear();
        //
        //        OctreeType* octree = &mOctree;
        //
        //        OctreeType::key_type ray_key[3] = {octree->Key(ray[0]), octree->Key(ray[1]), octree->Key(ray[2])}; //ASK_TOKEN
        //        OctreeType::key_type cell_key[3];
        //
        //        // getting the entrance cell from lower extreme
        //        ray_key[direction] = 0;
        //        OctreeType::cell_type* cell = octree->pGetCell(ray_key);
        //
        //        while (cell) {
        //            std::size_t position = cell->GetLocalPosition(ray_key); // Is this the local position!?!?!?!
        //            OctreeType::key_type node_key[3];
        //            cell->GetKey(position, node_key);
        //            if((node_key[0] == ray_key[0]) && (node_key[1] == ray_key[1]) && (node_key[2] == ray_key[2]))
        //            {
        //                if(cell->pGetData())
        //                {
        //                    if(cell->pGetData()->size() > position)
        //                    {
        //                        Node<3>* p_node = (*cell->pGetData())[position];
        //                        if(p_node)
        //                        {
        //                            //KRATOS_WATCH(p_node->Id())
        //                            rNodesArray.push_back(p_node);
        //                        }
        //                    }
        //                    else
        //                        KRATOS_WATCH(cell->pGetData()->size())
        //                }
        //            }
        //
        //            //        std::cout << ".";
        //            GetCellIntersections(cell, ray, ray_key, direction, intersections);
        //
        //            // Add the cell's middle node if existed
        //            //      cell->GetKey(8, cell_key); // 8 is the central position
        //            //      ray_key[direction]=cell_key[direction]; // positioning the ray in the middle of cell in its direction
        //
        //            //      position = cell->GetLocalPosition(ray_key);
        //            //      if(position < 27) // principal nodes
        //            //      {
        //            //          if(cell->pGetData())
        //            //          {
        //            //              if(cell->pGetData()->size() > position)
        //            //              {
        //            //                  Node<3>* p_node = (*cell->pGetData())[position];
        //            //                  if(p_node)
        //            //                  {
        //            //                      //KRATOS_WATCH(p_node->Id())
        //            //                      rNodesArray.push_back(p_node);
        //            //                  }
        //            //              }
        //            //              else
        //            //                  KRATOS_WATCH(cell->pGetData()->size())
        //            //          }
        //            //      }
        //            //      else
        //            //      {
        //            //          KRATOS_WATCH(position);
        //            //          KRATOS_WATCH(*cell);
        //            //      }
        //
        //
        //            // go to the next cell
        //            if (cell->GetNeighbourKey(1 + direction * 2, cell_key)) {
        //                ray_key[direction] = cell_key[direction];
        //                cell = octree->pGetCell(ray_key);
        //                ray_key[direction] -= 1 ;//the key returned by GetNeighbourKey is inside the cell (minkey +1), to ensure that the corresponding
        //                //cell get in pGetCell is the right one.
        //#ifdef _DEBUG
        //                Octree_Pooyan::key_type min_key[3];
        //                cell->GetMinKey(min_key[0],min_key[1],min_key[2]);
        //                Octree_Pooyan::key_type tmp;
        //                tmp= min_key[direction];
        //                assert(ray_key[direction]==tmp);
        //#endif
        //            } else
        //                cell = NULL;
        //        }
        //
        //
        //
        //        //KRATOS_WATCH(rNodesArray.size());
        //        // now eliminating the repeated objects
        //        if (!intersections.empty()) {
        //            //sort
        //            std::sort(intersections.begin(), intersections.end());
        //            // unique
        //            std::vector<std::pair<double, Element::GeometryType*> >::iterator i_begin = intersections.begin();
        //            std::vector<std::pair<double, Element::GeometryType*> >::iterator i_intersection = intersections.begin();
        //            while (++i_begin != intersections.end()) {
        //                // considering the very near points as the same points
        //                if (fabs(i_begin->first - i_intersection->first) > epsilon) // if the hit points are far enough they are not the same
        //                    *(++i_intersection) = *i_begin;
        //            }
        //            intersections.resize((++i_intersection) - intersections.begin());
        //
        //        }
    }

    /// Walks the octree along a ray collecting skin intersections.
    // NOTE(review): the whole body is commented out -- currently a no-op, so
    // DistancePositionInSpace sees no intersections. Kept for reference.
    void GetIntersections(double* ray, int direction, std::vector<std::pair<double,Element::GeometryType*> >& intersections)
    {
        //        //This function passes the ray through the model and gives the hit point to all objects in its way
        //        //ray is of dimension (3) normalized in (0,1)^3 space
        //        // direction can be 0,1,2 which are x,y and z respectively
        //
        //        const double epsilon = 1.00e-12;
        //
        //        // first clearing the intersections points vector
        //        intersections.clear();
        //
        //        OctreeType* octree = &mOctree;
        //
        //        OctreeType::key_type ray_key[3] = {octree->Key(ray[0]), octree->Key(ray[1]), octree->Key(ray[2])};
        //        OctreeType::key_type cell_key[3];
        //
        //        // getting the entrance cell from lower extreme
        //        OctreeType::cell_type* cell = octree->pGetCell(ray_key);
        //
        //        while (cell) {
        //            //        std::cout << ".";
        //            GetCellIntersections(cell, ray, ray_key, direction, intersections);
        //            // go to the next cell
        //            if (cell->GetNeighbourKey(1 + direction * 2, cell_key)) {
        //                ray_key[direction] = cell_key[direction];
        //                cell = octree->pGetCell(ray_key);
        //                ray_key[direction] -= 1 ;//the key returned by GetNeighbourKey is inside the cell (minkey +1), to ensure that the corresponding
        //                //cell get in pGetCell is the right one.
        //#ifdef _DEBUG
        //                Octree_Pooyan::key_type min_key[3];
        //                cell->GetMinKey(min_key[0],min_key[1],min_key[2]);
        //                Octree_Pooyan::key_type tmp;
        //                tmp= min_key[direction];
        //                assert(ray_key[direction]==tmp);
        //#endif
        //            } else
        //                cell = NULL;
        //        }
        //
        //
        //        // now eliminating the repeated objects
        //        if (!intersections.empty()) {
        //            //sort
        //            std::sort(intersections.begin(), intersections.end());
        //            // unique
        //            std::vector<std::pair<double, Element::GeometryType*> >::iterator i_begin = intersections.begin();
        //            std::vector<std::pair<double, Element::GeometryType*> >::iterator i_intersection = intersections.begin();
        //            while (++i_begin != intersections.end()) {
        //                // considering the very near points as the same points
        //                if (fabs(i_begin->first - i_intersection->first) > epsilon) // if the hit points are far enough they are not the same
        //                    *(++i_intersection) = *i_begin;
        //            }
        //            intersections.resize((++i_intersection) - intersections.begin());
        //
        //        }
    }

    /// Intersects a ray segment with every object of one cell.
    // NOTE(review): the whole body is commented out AND the function is
    // declared to return int with no return statement left -- undefined
    // behavior if the return value is ever read. Kept for reference.
    int GetCellIntersections(OctreeType::cell_type* cell, double* ray, OctreeType::key_type* ray_key, int direction, std::vector<std::pair<double, Element::GeometryType*> >& intersections)
    {
        //        //This function passes the ray through the cell and gives the hit point to all objects in its way
        //        //ray is of dimension (3) normalized in (0,1)^3 space
        //        // direction can be 0,1,2 which are x,y and z respectively
        //
        //        typedef Element::GeometryType triangle_type;
        //        typedef OctreeType::cell_type::object_container_type object_container_type;
        //
        //        object_container_type* objects = (cell->pGetObjects());
        //
        //        // There are no intersection in empty cells
        //        if (objects->empty())
        //            return 0;
        //
        //        //        std::cout << "X";
        //        // calculating the two extreme of the ray segment inside the cell
        //        double ray_point1[3] = {ray[0], ray[1], ray[2]};
        //        double ray_point2[3] = {ray[0], ray[1], ray[2]};
        //        ray_point1[direction] = cell->GetCoordinate(ray_key[direction]);
        //        ray_point2[direction] = ray_point1[direction] + cell->GetSize();
        //
        //        for (object_container_type::iterator i_object = objects->begin(); i_object != objects->end(); i_object++) {
        //            double intersection[3]={0.00,0.00,0.00};
        //
        //            int is_intersected = IntersectionTriangleSegment((*i_object)->GetGeometry(), ray_point1, ray_point2, intersection); // This intersection has to be optimized for axis aligned rays
        //
        //            if (is_intersected == 1) // There is an intersection but not coplanar
        //                intersections.push_back(std::pair<double, Element::GeometryType*>(intersection[direction], &((*i_object)->GetGeometry())));
        //            //else if(is_intersected == 2) // coplanar case
        //        }
        //
        //        return 0;
    }

    /// Ray/segment vs. triangle intersection test.
    /** Adaptation of the softsurfer algorithm (see URL in the body).
     *  Returns: -1 degenerate triangle, 0 no intersection, 1 proper
     *  intersection (point written to IntersectionPoint), 2 coplanar.
     *  NOTE(review): the segment-length check (r > 1.0) is commented out,
     *  so hits beyond RayPoint2 along the ray are also accepted. */
    int IntersectionTriangleSegment(Element::GeometryType& rGeometry, double* RayPoint1, double* RayPoint2, double* IntersectionPoint)
    {
        // This is the adaption of the implemnetation provided in:
        // http://www.softsurfer.com/Archive/algorithm_0105/algorithm_0105.htm#intersect_RayTriangle()

        const double epsilon = 1.00e-12;

        array_1d<double,3> u, v, n; // triangle vectors
        array_1d<double,3> dir, w0, w; // ray vectors
        double r, a, b; // params to calc ray-plane intersect

        // get triangle edge vectors and plane normal
        u = rGeometry[1] - rGeometry[0];
        v = rGeometry[2] - rGeometry[0];

        MathUtils<double>::CrossProduct(n, u, v); // cross product
        if (norm_2(n) == 0) // triangle is degenerate
            return -1; // do not deal with this case

        for(int i = 0 ; i < 3 ; i++)
        {
            dir[i] = RayPoint2[i] - RayPoint1[i]; // ray direction vector
            w0[i] = RayPoint1[i] - rGeometry[0][i];
        }

        a = -inner_prod(n,w0);
        b = inner_prod(n,dir);

        if (fabs(b) < epsilon) { // ray is parallel to triangle plane
            if (a == 0) // ray lies in triangle plane
                return 2;
            else return 0; // ray disjoint from plane
        }

        // get intersect point of ray with triangle plane
        r = a / b;
        if (r < 0.0) // ray goes away from triangle
            return 0; // => no intersect
        // for a segment, also test if (r > 1.0) => no intersect

        for(int i = 0 ; i < 3 ; i++)
            IntersectionPoint[i] = RayPoint1[i] + r * dir[i]; // intersect point of ray and plane

        // is I inside T? (barycentric-coordinate test)
        double uu, uv, vv, wu, wv, D;
        uu = inner_prod(u,u);
        uv = inner_prod(u,v);
        vv = inner_prod(v,v);
        for(int i = 0 ; i < 3 ; i++)
            w[i] = IntersectionPoint[i] - rGeometry[0][i];
        wu = inner_prod(w,u);
        wv = inner_prod(w,v);
        D = uv * uv - uu * vv;

        // get and test parametric coords
        double s, t;
        s = (uv * wv - vv * wu) / D;
        if (s < 0.0 - epsilon || s > 1.0 + epsilon) // I is outside T
            return 0;
        t = (uv * wu - uu * wv) / D;
        if (t < 0.0 - epsilon || (s + t) > 1.0 + epsilon) // I is outside T
            return 0;

        return 1; // I is in T
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    virtual std::string Info() const
    {
        return "CalculateSignedDistanceTo3DSkinProcess";
    }

    /// Print information about this object.
    virtual void PrintInfo(std::ostream& rOStream) const
    {
        rOStream << "CalculateSignedDistanceTo3DSkinProcess";
    }

    /// Print object's data.
    virtual void PrintData(std::ostream& rOStream) const
    {
    }

    /// Writes the octree leaves as a GiD hexahedra mesh (coordinates from
    /// mOctreeNodes, one hexahedron per leaf with allocated data).
    void PrintGiDMesh(std::ostream & rOStream) const
    {
        std::vector<CellType*> leaves;
        mOctree.GetAllLeavesVector(leaves);

        std::cout << "writing " << leaves.size() << " leaves" << std::endl;
        rOStream << "MESH \"leaves\" dimension 3 ElemType Hexahedra Nnode 8" << std::endl;
        rOStream << "# color 96 96 96" << std::endl;
        rOStream << "Coordinates" << std::endl;
        rOStream << "# node_number coordinate_x coordinate_y coordinate_z " << std::endl;

        for(DistanceSpatialContainersConfigure::data_type::const_iterator i_node = mOctreeNodes.begin() ; i_node != mOctreeNodes.end() ; i_node++)
        {
            rOStream << (*i_node)->Id() << "  " << (*i_node)->X() << "  " << (*i_node)->Y() << "  " << (*i_node)->Z() << std::endl;
        }
        std::cout << "Nodes written..." << std::endl;
        rOStream << "end coordinates" << std::endl;
        rOStream << "Elements" << std::endl;
        rOStream << "# element node_1 node_2 node_3 node_4 node_5 node_6 node_7 node_8" << std::endl;

        for (std::size_t i = 0; i < leaves.size(); i++)
        {
            if ((leaves[i]->pGetData()))
            {
                DistanceSpatialContainersConfigure::data_type& nodes = (*(leaves[i]->pGetData()));

                // std::cout << "Leave - Level: " << nodes[0]->Id() << " " << nodes[1]->Id() << " " << nodes[2]->Id() << " etc... " << std::endl;
                rOStream << i + 1;
                for(int j = 0 ; j < 8 ; j++)
                    rOStream << "  " << nodes[j]->Id();
                rOStream << std::endl;
            }
        }
        rOStream << "end elements" << std::endl;
    }

    /// Writes the nodal DISTANCE values of the body model part as GiD results.
    void PrintGiDResults(std::ostream & rOStream) const
    {
        std::vector<CellType*> leaves;
        mOctree.GetAllLeavesVector(leaves);

        rOStream << "GiD Post Results File 1.0" << std::endl << std::endl;
        rOStream << "Result \"Distance\" \"Kratos\" 1 Scalar OnNodes" << std::endl;
        rOStream << "Values" << std::endl;

        for(ModelPart::NodeIterator i_node = mrBodyModelPart.NodesBegin() ; i_node != mrBodyModelPart.NodesEnd() ; i_node++)
        {
            rOStream << i_node->Id() << "  " << i_node->GetSolutionStepValue(DISTANCE) << std::endl;
        }
        rOStream << "End Values" << std::endl;
    }

    ///@}
    ///@name Friends
    ///@{

    ///@}

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    ///@}
    ///@name Protected  Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}

private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    ModelPart& mrSkinModelPart;   // surface mesh defining the skin
    ModelPart& mrBodyModelPart;   // volume mesh receiving DISTANCE values
    // All octree corner nodes created by GenerateCellNode (raw pointers;
    // see leak NOTE on GenerateCellNode).
    DistanceSpatialContainersConfigure::data_type mOctreeNodes;
    OctreeType mOctree;

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private  Access
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    /// Assignment operator.
    CalculateSignedDistanceTo3DSkinProcess& operator=(CalculateSignedDistanceTo3DSkinProcess const& rOther);

    /// Copy constructor.
    //CalculateSignedDistanceTo3DSkinProcess(CalculateSignedDistanceTo3DSkinProcess const& rOther);

    ///@}

}; // Class CalculateSignedDistanceTo3DSkinProcess

///@}
///@name Type Definitions
///@{

///@}
///@name Input and output
///@{

/// input stream function
inline std::istream& operator >> (std::istream& rIStream,
                                  CalculateSignedDistanceTo3DSkinProcess& rThis);

/// output stream function
inline std::ostream& operator << (std::ostream& rOStream,
                                  const CalculateSignedDistanceTo3DSkinProcess& rThis)
{
    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintData(rOStream);

    return rOStream;
}
///@}

}  // namespace Kratos.

#endif // KRATOS_CALCULATE_DISTANCE_PROCESS_H_INCLUDED  defined
random_forest.h
#ifndef __RANDOM_FOREST_H__
#define __RANDOM_FOREST_H__

#include "../utils/helper.h"
#include "../utils/random.h"
using namespace RandomNumbers;
#include "../patterns/classification_rule.h"

// Random-forest implementation (classification/regression) built from
// randomized decision trees.  Trees are trained in parallel via OpenMP;
// each thread draws from its own generator rng[omp_get_thread_num()]
// (rng is presumably declared in ../utils/random.h — TODO confirm).
// NOTE: the FOR(...) iteration macro, sqr, sign, and myAssert come from
// ../utils/helper.h — assumed semantics: iterator loop, square, sign test,
// and assert-with-message; verify against that header.
namespace RandomForestRelated {

// Accumulated (loss-reduction) importance per feature; sized lazily by
// RandomForest::train.  Shared by all trees.
vector<double> featureImportance;

// Number of random feature groups tried per split.
int RANDOM_FEATURES = 4;
// Number of random split positions tried per chosen feature.
int RANDOM_POSITIONS = 8;

// Task-type tags (arbitrary magic constants used only for comparison).
const int CLASSIFICATION = 0xa001;
const int REGRESSION = 0xa002;
const int SURVIVAL = 0xa003;

int TASK_TYPE = CLASSIFICATION; // default

// Entropy (in bits) of a binary distribution with p1 positives out of
// total.  Returns 0 for the degenerate all-one-class cases.
double binaryEntropy(int p1, int total) {
    if (p1 == 0 || p1 == total) {
        return 0;
    }
    double p = p1 / (double)total;
    return - p * log2(p) - (1 - p) * log2(1 - p);
}

// Impurity of the sample subset IDs under the global TASK_TYPE:
// entropy (natural log) for classification, mean squared error for
// regression.  SURVIVAL is unimplemented and aborts the process.
// NOTE(review): IDs is passed by value despite the const — a const
// reference would avoid a copy per call.
double calculateLoss(const vector<int> IDs, const vector<double> &labels) {
    if (TASK_TYPE == CLASSIFICATION) {
        // entropy
        unordered_map<int, int> hist;
        FOR (id, IDs) {
            // labels are truncated to int to form class buckets
            ++ hist[(int)labels[*id]];
        }
        double entropy = 0;
        FOR (iter, hist) {
            double p = iter->second / (double) IDs.size();
            entropy += -p * log(p);
        }
        return entropy;
    } else if (TASK_TYPE == REGRESSION) {
        // mean square error
        double avg = 0;
        FOR (id, IDs) {
            avg += labels[*id];
        }
        if (IDs.size()) {
            avg /= IDs.size();
        }
        double squareError = 0;
        FOR (id, IDs) {
            squareError += sqr(avg - labels[*id]);
        }
        // NOTE(review): divides by IDs.size() without the emptiness guard
        // used above — an empty IDs yields 0/0 here.
        return squareError / IDs.size();
    } else if (TASK_TYPE == SURVIVAL) {
        cerr << "TODO survival" << endl;
        exit(-1);
    } else {
        myAssert(false, "Unknown Task Type!");
    }
    return 0;
}

// One node of a decision tree.  Stored by value in a flat vector;
// children are referenced by index (left/right), -1 when unset.
struct TreeNode {
    bool leaf;              // true => result is the prediction
    int level, feature;     // depth in tree; feature index used to split
    double value, result;   // split threshold; leaf prediction (mean label)
    int left, right;        // child indices into DecisionTree::nodes

    TreeNode() {
        leaf = false;
        level = feature = left = right = -1;
        value = result = 0;
    }
};

// A single randomized decision tree over dense double feature vectors.
class DecisionTree {
public:
    vector<TreeNode> nodes;

    // Serialize the node array as raw bytes: count then contents.
    // Only valid for reload on the same platform/ABI (raw struct dump).
    void dump(FILE* out) {
        size_t size = nodes.size();
        fwrite(&size, sizeof(size), 1, out);
        if (size > 0) {
            fwrite(&nodes[0], sizeof(nodes[0]), size, out);
        }
    }

    // Inverse of dump().
    // NOTE(review): fread return values are ignored — a short/corrupt
    // file is silently accepted.
    void load(FILE* in) {
        size_t size;
        fread(&size, sizeof(size), 1, in);
        nodes.resize(size);
        if (size > 0) {
            fread(&nodes[0], sizeof(nodes[0]), size, in);
        }
    }

    DecisionTree() {}

    // Grow one tree.
    //   features     : row-per-instance feature matrix.
    //   results      : per-instance label (class id as double, or target).
    //   labelBuckets : label value -> indices of instances with that label
    //                  (used for class-balanced bootstrapping: 50 draws
    //                  with replacement per distinct label).
    //   minNodeSize  : minimum instances per leaf; a node with fewer than
    //                  2*minNodeSize instances becomes a leaf.
    //   maxLevel     : maximum depth.
    //   featureNames : optional; names containing '=' are grouped by the
    //                  prefix before '=' so one-hot columns of the same
    //                  categorical variable form one feature group.
    // Uses rng[omp_get_thread_num()], so results depend on the calling
    // thread; intended to be invoked from the parallel loop in
    // RandomForest::train.
    void train(const vector< vector<double> > &features,
               const vector<double> &results,
               unordered_map<double, vector<int> > &labelBuckets,
               int minNodeSize, int maxLevel = 18,
               vector<string> featureNames = vector<string>()) {
        int threadID = omp_get_thread_num();
        if (features.size() == 0) {
            return;
        }
        // Build feature groups: singleton groups are treated as
        // continuous variables, multi-member groups (one-hot columns
        // sharing a name prefix) as categorical.
        vector< vector<int> > featureGroups;
        if (featureNames.size() != 0) {
            unordered_map<string, int> name2id;
            // NOTE(review): signed/unsigned comparison (i vs size());
            // and find(...) != -1 relies on -1 converting to string::npos.
            for (int i = 0; i < featureNames.size(); ++ i) {
                string name = featureNames[i];
                if (name.find("=") != -1) {
                    name = name.substr(0, name.find("="));
                }
                if (!name2id.count(name)) {
                    name2id[name] = featureGroups.size();
                    featureGroups.push_back(vector<int>());
                }
                featureGroups[name2id[name]].push_back(i);
            }
        } else {
            for (int i = 0; i < features[0].size(); ++ i) {
                featureGroups.push_back(vector<int>(1, i));
            }
        }
        TreeNode root;
        root.level = 0;
        nodes.push_back(root);
        // bootstrapping
        vector<int> rootBag;
        // int samplesN = max((int)results.size(), 100);
        // for (int i = 0; i < samplesN; ++ i) {
        //     rootBag.push_back(rng[threadID].next(results.size()));
        // }
        vector<double> distinctLabels;
        for (auto kv : labelBuckets){
            distinctLabels.push_back(kv.first);
        }
        int numLabels = (int)distinctLabels.size();
        // Class-balanced bootstrap: 50 samples (with replacement)
        // from each label bucket.
        for (int i = 0; i < numLabels; ++i) {
            for (int j = 0; j < 50; ++j) {
                int ind = labelBuckets[distinctLabels[i]][rng[threadID].next(labelBuckets[distinctLabels[i]].size())];
                rootBag.push_back(ind);
            }
        }
        // NOTE(review): samplesN is computed but never used below.
        int samplesN = max((int)results.size(), 50 * numLabels);
        vector<vector<int>> nodeBags;
        nodeBags.push_back(rootBag);
        // Breadth-first growth: nodes appended during the loop extend it.
        for (int curNode = 0; curNode < (int)nodes.size(); ++ curNode) {
            // NOTE: `node` is a reference into `nodes`; all writes to it
            // happen before the push_back calls at the bottom of this
            // iteration, which may reallocate `nodes` and invalidate it.
            TreeNode &node = nodes[curNode];
            vector<int> &bag = nodeBags[curNode];
            myAssert(bag.size() > 0, "[ERROR] empty node in decision tree!");
            myAssert(bag.size() >= minNodeSize, "[ERROR] bag is too small!");
            // Stop if all labels in the bag are (numerically) identical.
            bool equal = true;
            double first = results[bag[0]];
            for (int i = 1; i < (int)bag.size(); ++ i) {
                if (sign(results[bag[i]] - first)) {
                    equal = false;
                    break;
                }
            }
            if (equal || (int)bag.size() < minNodeSize * 2 || node.level >= maxLevel) {
                // leaf: predict the mean label of the bag
                node.leaf = true;
                for (int i = 0; i < (int)bag.size(); ++ i) {
                    node.result += results[bag[i]];
                }
                node.result /= bag.size();
                continue;
            }
            double bagLoss = calculateLoss(bag, results);
            int bestFeature = -1;
            int bestLeft = 0, bestRight = 0;
            double bestValue = 0;
            double bestLoss = 1e100;
            vector<int> leftBag, rightBag;
            // Try RANDOM_FEATURES random groups x RANDOM_POSITIONS
            // random thresholds/members, keep the lowest weighted loss.
            for (int _ = 0; _ < RANDOM_FEATURES; ++ _) {
                int groupID = rng[threadID].next(featureGroups.size());
                int featureID = featureGroups[groupID][rng[threadID].next(featureGroups[groupID].size())];
                bool continuous = false;
                if (featureGroups[groupID].size() == 1) {
                    // continuous variable
                    continuous = true;
                } else {
                    // categorical variable
                    continuous = false;
                }
                for (int __ = 0; __ < RANDOM_POSITIONS; ++ __) {
                    double splitValue = 0.5; // for categorical variable
                    if (continuous) {
                        // continuous: threshold taken from a random
                        // instance's value of this feature
                        int instanceID = bag[rng[threadID].next(bag.size())];
                        splitValue = features[instanceID][featureID];
                    } else {
                        // categorical: keep threshold 0.5, vary the
                        // one-hot column instead
                        if (__) {
                            // get a new value
                            featureID = featureGroups[groupID][rng[threadID].next(featureGroups[groupID].size())];
                        }
                    }
                    vector<int> currentLeftBag, currentRightBag;
                    for (int i = 0; i < (int)bag.size(); ++ i) {
                        int id = bag[i];
                        if (features[id][featureID] < splitValue) {
                            currentLeftBag.push_back(id);
                        } else {
                            currentRightBag.push_back(id);
                        }
                    }
                    // NOTE(review): size_t vs int comparison here too.
                    if (currentLeftBag.size() < minNodeSize || currentRightBag.size() < minNodeSize) {
                        continue;
                    }
                    double currentLoss = (calculateLoss(currentLeftBag, results) * currentLeftBag.size() + calculateLoss(currentRightBag, results) * currentRightBag.size()) / bag.size();
                    if (currentLoss < bestLoss) {
                        bestLoss = currentLoss;
                        bestValue = splitValue;
                        bestFeature = featureID;
                        leftBag = currentLeftBag;
                        rightBag = currentRightBag;
                    }
                }
            }
            if (leftBag.size() < minNodeSize || rightBag.size() < minNodeSize) {
                // no acceptable split found: make a leaf
                node.leaf = true;
                for (int i = 0; i < (int)bag.size(); ++ i) {
                    node.result += results[bag[i]];
                }
                node.result /= bag.size();
                continue;
            }
            myAssert(leftBag.size() >= minNodeSize && rightBag.size() >= minNodeSize, "[ERROR] bag is too small");
            // Importance = impurity reduction achieved by this split.
            featureImportance[bestFeature] += bagLoss - bestLoss;
            // Midpoint between the chosen threshold and the largest
            // left-side value, so the boundary falls between samples.
            double nextValue = -1e100;
            for (int i = 0; i < (int)leftBag.size(); ++ i) {
                int id = leftBag[i];
                nextValue = max(nextValue, features[id][bestFeature]);
            }
            TreeNode left, right;
            left.level = right.level = node.level + 1;
            node.feature = bestFeature;
            node.value = (bestValue + nextValue) / 2;
            node.left = nodes.size();
            node.right = nodes.size() + 1;
            nodes.push_back(left);
            nodes.push_back(right);
            nodeBags.push_back(leftBag);
            nodeBags.push_back(rightBag);
        }
    }

    // Predict for one instance by walking from the root to a leaf.
    double estimate(vector<double> &features) {
        TreeNode *current = &nodes[0];
        while (!current->leaf) {
            if (features[current->feature] < current->value) {
                current = &nodes[current->left];
            } else {
                current = &nodes[current->right];
            }
        }
        return current->result;
    }

    // Depth-first extraction of every path (prefix) with support
    // >= MIN_SUP as a Rule; `current` carries the condition stack and
    // the training indices satisfying it.
    void traverse(int id, Rule &current, Rules &all,
                  vector< vector<double> > &train, vector<double> &trainY,
                  int MIN_SUP) {
        if (current.satisfiedTrainings.size() < MIN_SUP) {
            return;
        }
        if (id != 0) {
            // not root
            current.loss = calculateLoss(current.satisfiedTrainings, trainY);
            all.push_back(current);
        }
        if (nodes[id].leaf) {
            return;
        }
        vector<int> bag = current.satisfiedTrainings;
        //split left & right
        vector<int> leftBag, rightBag;
        int index = nodes[id].feature;
        double sep = nodes[id].value;
        FOR (tid, bag) {
            if (train[*tid][index] < sep) {
                leftBag.push_back(*tid);
            } else {
                rightBag.push_back(*tid);
            }
        }
        current.push_back(Condition(index, sep, true));
        current.satisfiedTrainings = leftBag;
        traverse(nodes[id].left, current, all, train, trainY, MIN_SUP);
        current.pop_back();

        current.push_back(Condition(index, sep, false));
        current.satisfiedTrainings = rightBag;
        traverse(nodes[id].right, current, all, train, trainY, MIN_SUP);
        current.pop_back();

        // restore the caller's support set
        current.satisfiedTrainings = bag;
    }

    // All rules of this tree with support >= MIN_SUP on `train`.
    Rules getRules(vector< vector<double> > &train, vector<double> &trainY,
                   int MIN_SUP) {
        Rule current;
        Rules all;
        for (int i = 0; i < train.size(); ++ i) {
            current.satisfiedTrainings.push_back(i);
        }
        traverse(0, current, all, train, trainY, MIN_SUP);
        return all;
    }
};

// An ensemble of DecisionTrees; prediction is the mean of tree outputs.
class RandomForest {
    vector<DecisionTree> trees;
    vector< vector<double> > features;  // retained copy of training X
    vector<double> results;             // retained copy of training y

public:
    // Raw binary serialization of all trees (see DecisionTree::dump).
    void dump(string filename) {
        FILE* out = fopen(filename.c_str(), "wb");
        size_t size = trees.size();
        fwrite(&size, sizeof(size), 1, out);
        for (size_t i = 0; i < trees.size(); ++ i) {
            trees[i].dump(out);
        }
        fclose(out);
    }

    // Inverse of dump().
    // NOTE(review): fread return ignored; fopen result not checked.
    void load(string filename) {
        FILE* in = fopen(filename.c_str(), "rb");
        size_t size;
        fread(&size, sizeof(size), 1, in);
        trees.resize(size);
        for (size_t i = 0; i < trees.size(); ++ i) {
            trees[i].load(in);
        }
        fclose(in);
    }

    void clear() {
        features.clear();
        results.clear();
        trees.clear();
    }

    // Train `treesNo` trees in parallel (one OpenMP task per tree).
    // The first call captures the training data and sizes
    // featureImportance; later calls reuse the stored copies only for
    // the size assertion — trees are always trained on _features/_results.
    void train(vector< vector<double> > &_features, vector<double> _results,
               int treesNo = 100, int minNodeSize = 100, int maxLevel = 100,
               vector<string> featureNames = vector<string>()) {
        if (features.size() == 0) {
            features = _features;
            results = _results;
            if (features.size() > 0) {
                featureImportance.resize(features[0].size(), 0);
            }
        }
        // label value -> instance indices, for balanced bootstrapping
        unordered_map<double, vector<int>> labelBuckets;
        for (int i = 0; i < (int)results.size(); ++i) {
            double lbl = results[i];
            labelBuckets[lbl].push_back(i);
        }
        myAssert(features.size() == results.size(), "[ERROR] wrong training data!");
        trees.resize(treesNo);
        #pragma omp parallel for
        for (int i = 0; i < treesNo; ++ i) {
            trees[i].train(_features, _results, labelBuckets, minNodeSize, maxLevel, featureNames);
        }
    }

    // Union of rules from all trees (see DecisionTree::getRules).
    Rules getRules(vector< vector<double> > &train, vector<double> &trainY,
                   int MIN_SUP) {
        Rules ret;
        for (int i = 0; i < (int)trees.size(); ++ i) {
            ret.extend(trees[i].getRules(train, trainY, MIN_SUP));
        }
        return ret;
    }

    // Mean prediction over all trees; 0.0 for an empty forest.
    double estimate(vector<double> &features) {
        if (trees.size() == 0) {
            return 0.0;
        }
        double sum = 0;
        for (int i = 0; i < (int)trees.size(); ++ i) {
            sum += trees[i].estimate(features);
        }
        return sum / trees.size();
    }
};

};
#endif
GB_unop__lnot_int16_int16.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__lnot_int16_int16)
// op(A') function:  GB (_unop_tran__lnot_int16_int16)

// C type:   int16_t
// A type:   int16_t
// cast:     int16_t cij = aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (logical NOT: result is 1 when x is zero, else 0)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting (A and C share the int16_t type, so this is an identity copy)
#define GB_CAST(z, aij) \
    int16_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int16_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int16_t z = aij ; \
    Cx [pC] = !(z != 0) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__lnot_int16_int16)
(
    int16_t *Cx,                // Cx and Ax may be aliased
    const int16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: apply the operator to every entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int16_t aij = Ax [p] ;
            int16_t z = aij ;
            Cx [p] = !(z != 0) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            int16_t aij = Ax [p] ;
            int16_t z = aij ;
            Cx [p] = !(z != 0) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__lnot_int16_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the loop body is generated by the shared transpose template,
    // specialized via the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
task.c
/* Copyright (C) 2007-2017 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the maintenance of tasks in response to task
   creation and termination.  */

#include "libgomp.h"
#include <stdlib.h>
#include <string.h>
#include "gomp-constants.h"

typedef struct gomp_task_depend_entry *hash_entry_type;

/* Allocator/deallocator hooks required by hashtab.h (included below);
   the hash table stores depend-clause entries keyed by address.  */

static inline void *
htab_alloc (size_t size)
{
  return gomp_malloc (size);
}

static inline void
htab_free (void *ptr)
{
  free (ptr);
}

#include "hashtab.h"

static inline hashval_t
htab_hash (hash_entry_type element)
{
  return hash_pointer (element->addr);
}

static inline bool
htab_eq (hash_entry_type x, hash_entry_type y)
{
  return x->addr == y->addr;
}

/* Create a new task data structure.  */

void
gomp_init_task (struct gomp_task *task, struct gomp_task *parent_task,
		struct gomp_task_icv *prev_icv)
{
  /* It would seem that using memset here would be a win, but it turns
     out that partially filling gomp_task allows us to keep the
     overhead of task creation low.  In the nqueens-1.c test, for a
     sufficiently large N, we drop the overhead from 5-6% to 1%.

     Note, the nqueens-1.c test in serial mode is a good test to
     benchmark the overhead of creating tasks as there are millions of
     tiny tasks created that all run undeferred.  */
  task->parent = parent_task;
  task->icv = *prev_icv;
  task->kind = GOMP_TASK_IMPLICIT;
  task->taskwait = NULL;
  task->in_tied_task = false;
  task->final_task = false;
  task->copy_ctors_done = false;
  task->parent_depends_on = false;
  priority_queue_init (&task->children_queue);
  task->taskgroup = NULL;
  task->dependers = NULL;
  task->depend_hash = NULL;
  task->depend_count = 0;
}

/* Clean up a task, after completing it.  */

void
gomp_end_task (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_task *task = thr->task;

  gomp_finish_task (task);
  /* Pop back to the parent: the current thread resumes it.  */
  thr->task = task->parent;
}

/* Clear the parent field of every task in LIST.  */

static inline void
gomp_clear_parent_in_list (struct priority_list *list)
{
  struct priority_node *p = list->tasks;
  /* The list is circular, so walk until we come back to the head.  */
  if (p)
    do
      {
	priority_node_to_task (PQ_CHILDREN, p)->parent = NULL;
	p = p->next;
      }
    while (p != list->tasks);
}

/* Splay tree version of gomp_clear_parent_in_list.

   Clear the parent field of every task in NODE within SP, and free
   the node when done.  */

static void
gomp_clear_parent_in_tree (prio_splay_tree sp, prio_splay_tree_node node)
{
  if (!node)
    return;
  prio_splay_tree_node left = node->left, right = node->right;
  gomp_clear_parent_in_list (&node->key.l);
#if _LIBGOMP_CHECKING_
  /* Poison the freed node so stale uses are caught in checking mode.  */
  memset (node, 0xaf, sizeof (*node));
#endif
  /* No need to remove the node from the tree.  We're nuking
     everything, so just free the nodes and our caller can clear the
     entire splay tree.  */
  free (node);
  gomp_clear_parent_in_tree (sp, left);
  gomp_clear_parent_in_tree (sp, right);
}

/* Clear the parent field of every task in Q and remove every task
   from Q.  */

static inline void
gomp_clear_parent (struct priority_queue *q)
{
  if (priority_queue_multi_p (q))
    {
      gomp_clear_parent_in_tree (&q->t, q->t.root);
      /* All the nodes have been cleared in gomp_clear_parent_in_tree.
	 No need to remove anything.  We can just nuke everything.  */
      q->t.root = NULL;
    }
  else
    gomp_clear_parent_in_list (&q->l);
}

/* Helper function for GOMP_task and gomp_create_target_task.

   For a TASK with in/out dependencies, fill in the various dependency
   queues.  PARENT is the parent of said task.  DEPEND is as in
   GOMP_task.  Must be called with PARENT's team task_lock held.  */

static void
gomp_task_handle_depend (struct gomp_task *task, struct gomp_task *parent,
			 void **depend)
{
  size_t ndepend = (uintptr_t) depend[0];
  size_t nout = (uintptr_t) depend[1];
  size_t i;
  hash_entry_type ent;

  task->depend_count = ndepend;
  task->num_dependees = 0;
  if (parent->depend_hash == NULL)
    parent->depend_hash = htab_create (2 * ndepend > 12 ? 2 * ndepend : 12);
  for (i = 0; i < ndepend; i++)
    {
      task->depend[i].addr = depend[2 + i];
      task->depend[i].next = NULL;
      task->depend[i].prev = NULL;
      task->depend[i].task = task;
      /* depend[2..nout+1] are out/inout, the rest are in.  */
      task->depend[i].is_in = i >= nout;
      task->depend[i].redundant = false;
      task->depend[i].redundant_out = false;

      hash_entry_type *slot = htab_find_slot (&parent->depend_hash,
					      &task->depend[i], INSERT);
      hash_entry_type out = NULL, last = NULL;
      if (*slot)
	{
	  /* If multiple depends on the same task are the same, all but the
	     first one are redundant.  As inout/out come first, if any of them
	     is inout/out, it will win, which is the right semantics.  */
	  if ((*slot)->task == task)
	    {
	      task->depend[i].redundant = true;
	      continue;
	    }
	  for (ent = *slot; ent; ent = ent->next)
	    {
	      if (ent->redundant_out)
		break;

	      last = ent;

	      /* depend(in:...) doesn't depend on earlier depend(in:...).  */
	      if (i >= nout && ent->is_in)
		continue;

	      if (!ent->is_in)
		out = ent;

	      struct gomp_task *tsk = ent->task;
	      if (tsk->dependers == NULL)
		{
		  /* First depender on TSK: allocate with room for 6.  */
		  tsk->dependers
		    = gomp_malloc (sizeof (struct gomp_dependers_vec)
				   + 6 * sizeof (struct gomp_task *));
		  tsk->dependers->n_elem = 1;
		  tsk->dependers->allocated = 6;
		  tsk->dependers->elem[0] = task;
		  task->num_dependees++;
		  continue;
		}
	      /* We already have some other dependency on tsk from earlier
		 depend clause.  */
	      else if (tsk->dependers->n_elem
		       && (tsk->dependers->elem[tsk->dependers->n_elem - 1]
			   == task))
		continue;
	      else if (tsk->dependers->n_elem == tsk->dependers->allocated)
		{
		  tsk->dependers->allocated
		    = tsk->dependers->allocated * 2 + 2;
		  tsk->dependers
		    = gomp_realloc (tsk->dependers,
				    sizeof (struct gomp_dependers_vec)
				    + (tsk->dependers->allocated
				       * sizeof (struct gomp_task *)));
		}
	      tsk->dependers->elem[tsk->dependers->n_elem++] = task;
	      task->num_dependees++;
	    }
	  /* Chain the new entry at the head of the per-address list.  */
	  task->depend[i].next = *slot;
	  (*slot)->prev = &task->depend[i];
	}
      *slot = &task->depend[i];

      /* There is no need to store more than one depend({,in}out:)
	 task per address in the hash table chain for the purpose of
	 creation of deferred tasks, because each out depends on all
	 earlier outs, thus it is enough to record just the last
	 depend({,in}out:).  For depend(in:), we need to keep all of
	 the previous ones not terminated yet, because a later
	 depend({,in}out:) might need to depend on all of them.  So, if
	 the new task's clause is depend({,in}out:), we know there is
	 at most one other depend({,in}out:) clause in the list (out).
	 For non-deferred tasks we want to see all outs, so they are
	 moved to the end of the chain, after first redundant_out
	 entry all following entries should be redundant_out.  */
      if (!task->depend[i].is_in && out)
	{
	  if (out != last)
	    {
	      /* Unlink OUT and re-link it right after LAST.  */
	      out->next->prev = out->prev;
	      out->prev->next = out->next;
	      out->next = last->next;
	      out->prev = last;
	      last->next = out;
	      if (out->next)
		out->next->prev = out;
	    }
	  out->redundant_out = true;
	}
    }
}

/* Called when encountering an explicit task directive.  If IF_CLAUSE is
   false, then we must not delay in executing the task.  If UNTIED is true,
   then the task may be executed by any member of the team.

   DEPEND is an array containing:
	depend[0]: number of depend elements.
	depend[1]: number of depend elements of type "out".
	depend[2..N+1]: address of [1..N]th depend element.  */

void
GOMP_task (void (*fn) (void *), void *data, void (*cpyfn) (void *, void *),
	   long arg_size, long arg_align, bool if_clause, unsigned flags,
	   void **depend, int priority)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;

#ifdef HAVE_BROKEN_POSIX_SEMAPHORES
  /* If pthread_mutex_* is used for omp_*lock*, then each task must be
     tied to one thread all the time.  This means UNTIED tasks must be
     tied and if CPYFN is non-NULL IF(0) must be forced, as CPYFN
     might be running on different thread than FN.  */
  if (cpyfn)
    if_clause = false;
  flags &= ~GOMP_TASK_FLAG_UNTIED;
#endif

  /* If parallel or taskgroup has been cancelled, don't start new tasks.  */
  if (team
      && (gomp_team_barrier_cancelled (&team->barrier)
	  || (thr->task->taskgroup && thr->task->taskgroup->cancelled)))
    return;

  if ((flags & GOMP_TASK_FLAG_PRIORITY) == 0)
    priority = 0;
  else if (priority > gomp_max_task_priority_var)
    priority = gomp_max_task_priority_var;

  /* Undeferred path: run the task immediately on this thread's stack
     when if(0), no team, inside a final task, or when the team already
     has too many queued tasks (throttling at 64 per thread).  */
  if (!if_clause || team == NULL
      || (thr->task && thr->task->final_task)
      || team->task_count > 64 * team->nthreads)
    {
      struct gomp_task task;

      /* If there are depend clauses and earlier deferred sibling tasks
	 with depend clauses, check if there isn't a dependency.  If there
	 is, we need to wait for them.  There is no need to handle
	 depend clauses for non-deferred tasks other than this, because
	 the parent task is suspended until the child task finishes and
	 thus it can't start further child tasks.  */
      if ((flags & GOMP_TASK_FLAG_DEPEND)
	  && thr->task && thr->task->depend_hash)
	gomp_task_maybe_wait_for_dependencies (depend);

      gomp_init_task (&task, thr->task, gomp_icv (false));
      task.kind = GOMP_TASK_UNDEFERRED;
      task.final_task = (thr->task && thr->task->final_task)
			|| (flags & GOMP_TASK_FLAG_FINAL);
      task.priority = priority;
      if (thr->task)
	{
	  task.in_tied_task = thr->task->in_tied_task;
	  task.taskgroup = thr->task->taskgroup;
	}
      thr->task = &task;
      if (__builtin_expect (cpyfn != NULL, 0))
	{
	  /* Over-allocate on the stack and round up so ARG meets the
	     requested alignment.  */
	  char buf[arg_size + arg_align - 1];
	  char *arg = (char *) (((uintptr_t) buf + arg_align - 1)
				& ~(uintptr_t) (arg_align - 1));
	  cpyfn (arg, data);
	  fn (arg);
	}
      else
	fn (data);
      /* Access to "children" is normally done inside a task_lock
	 mutex region, but the only way this particular task.children
	 can be set is if this thread's task work function (fn)
	 creates children.  So since the setter is *this* thread, we
	 need no barriers here when testing for non-NULL.  We can have
	 task.children set by the current thread then changed by a
	 child thread, but seeing a stale non-NULL value is not a
	 problem.  Once past the task_lock acquisition, this thread
	 will see the real value of task.children.  */
      if (!priority_queue_empty_p (&task.children_queue, MEMMODEL_RELAXED))
	{
	  gomp_mutex_lock (&team->task_lock);
	  gomp_clear_parent (&task.children_queue);
	  gomp_mutex_unlock (&team->task_lock);
	}
      gomp_end_task ();
    }
  else
    {
      /* Deferred path: heap-allocate the task (plus its depend array
	 and a copy of the argument block) and queue it.  */
      struct gomp_task *task;
      struct gomp_task *parent = thr->task;
      struct gomp_taskgroup *taskgroup = parent->taskgroup;
      char *arg;
      bool do_wake;
      size_t depend_size = 0;

      if (flags & GOMP_TASK_FLAG_DEPEND)
	depend_size = ((uintptr_t) depend[0]
		       * sizeof (struct gomp_task_depend_entry));
      task = gomp_malloc (sizeof (*task) + depend_size
			  + arg_size + arg_align - 1);
      arg = (char *) (((uintptr_t) (task + 1) + depend_size + arg_align - 1)
		      & ~(uintptr_t) (arg_align - 1));
      gomp_init_task (task, parent, gomp_icv (false));
      task->priority = priority;
      task->kind = GOMP_TASK_UNDEFERRED;
      task->in_tied_task = parent->in_tied_task;
      task->taskgroup = taskgroup;
      /* Temporarily swap in TASK so CPYFN's allocations/ICV reads see
	 the new task context.  */
      thr->task = task;
      if (cpyfn)
	{
	  cpyfn (arg, data);
	  task->copy_ctors_done = true;
	}
      else
	memcpy (arg, data, arg_size);
      thr->task = parent;
      task->kind = GOMP_TASK_WAITING;
      task->fn = fn;
      task->fn_data = arg;
      /* GOMP_TASK_FLAG_FINAL is bit 1, so shift it down to a bool.  */
      task->final_task = (flags & GOMP_TASK_FLAG_FINAL) >> 1;
      gomp_mutex_lock (&team->task_lock);
      /* If parallel or taskgroup has been cancelled, don't start new
	 tasks.  */
      if (__builtin_expect ((gomp_team_barrier_cancelled (&team->barrier)
			     || (taskgroup && taskgroup->cancelled))
			    && !task->copy_ctors_done, 0))
	{
	  gomp_mutex_unlock (&team->task_lock);
	  gomp_finish_task (task);
	  free (task);
	  return;
	}
      if (taskgroup)
	taskgroup->num_children++;
      if (depend_size)
	{
	  gomp_task_handle_depend (task, parent, depend);
	  if (task->num_dependees)
	    {
	      /* Tasks that depend on other tasks are not put into the
		 various waiting queues, so we are done for now.  Said
		 tasks are instead put into the queues via
		 gomp_task_run_post_handle_dependers() after their
		 dependencies have been satisfied.  After which, they
		 can be picked up by the various scheduling
		 points.  */
	      gomp_mutex_unlock (&team->task_lock);
	      return;
	    }
	}

      priority_queue_insert (PQ_CHILDREN, &parent->children_queue,
			     task, priority,
			     PRIORITY_INSERT_BEGIN,
			     /*adjust_parent_depends_on=*/false,
			     task->parent_depends_on);
      if (taskgroup)
	priority_queue_insert (PQ_TASKGROUP, &taskgroup->taskgroup_queue,
			       task, priority,
			       PRIORITY_INSERT_BEGIN,
			       /*adjust_parent_depends_on=*/false,
			       task->parent_depends_on);

      priority_queue_insert (PQ_TEAM, &team->task_queue, task, priority,
			     PRIORITY_INSERT_END,
			     /*adjust_parent_depends_on=*/false,
			     task->parent_depends_on);

      ++team->task_count;
      ++team->task_queued_count;
      gomp_team_barrier_set_task_pending (&team->barrier);
      /* Wake a thread only if some thread is idle (not already running
	 a task, accounting for this thread if it is in a tied task).  */
      do_wake = team->task_running_count + !parent->in_tied_task
		< team->nthreads;
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
	gomp_team_barrier_wake (&team->barrier, 1);
    }
}

ialias (GOMP_taskgroup_start)
ialias (GOMP_taskgroup_end)

/* Instantiate the taskloop implementation for signed long and for
   unsigned long long by textual inclusion.  */
#define TYPE long
#define UTYPE unsigned long
#define TYPE_is_long 1
#include "taskloop.c"
#undef TYPE
#undef UTYPE
#undef TYPE_is_long

#define TYPE unsigned long long
#define UTYPE TYPE
#define GOMP_taskloop GOMP_taskloop_ull
#include "taskloop.c"
#undef TYPE
#undef UTYPE
#undef GOMP_taskloop

/* Move TASK to the front of the priority list that holds it in HEAD.
   Caller must hold the owning team's task_lock.  */

static void inline
priority_queue_move_task_first (enum priority_queue_type type,
				struct priority_queue *head,
				struct gomp_task *task)
{
#if _LIBGOMP_CHECKING_
  if (!priority_queue_task_in_queue_p (type, head, task))
    gomp_fatal ("Attempt to move first missing task %p", task);
#endif
  struct priority_list *list;
  if (priority_queue_multi_p (head))
    {
      list = priority_queue_lookup_priority (head, task->priority);
#if _LIBGOMP_CHECKING_
      if (!list)
	gomp_fatal ("Unable to find priority %d", task->priority);
#endif
    }
  else
    list = &head->l;
  priority_list_remove (list, task_to_priority_node (type, task), 0);
  priority_list_insert (type, list, task, task->priority,
			PRIORITY_INSERT_BEGIN, type == PQ_CHILDREN,
			task->parent_depends_on);
}

/* Actual body of GOMP_PLUGIN_target_task_completion that is executed
   with team->task_lock held, or is executed in the thread that called
   gomp_target_task_fn if GOMP_PLUGIN_target_task_completion has been
   run before it acquires team->task_lock.  */

static void
gomp_target_task_completion (struct gomp_team *team, struct gomp_task *task)
{
  struct gomp_task *parent = task->parent;
  if (parent)
    priority_queue_move_task_first (PQ_CHILDREN, &parent->children_queue,
				    task);

  struct gomp_taskgroup *taskgroup = task->taskgroup;
  if (taskgroup)
    priority_queue_move_task_first (PQ_TASKGROUP, &taskgroup->taskgroup_queue,
				    task);

  priority_queue_insert (PQ_TEAM, &team->task_queue, task, task->priority,
			 PRIORITY_INSERT_BEGIN, false,
			 task->parent_depends_on);
  task->kind = GOMP_TASK_WAITING;
  if (parent && parent->taskwait)
    {
      if (parent->taskwait->in_taskwait)
	{
	  /* One more task has had its dependencies met.
	     Inform any waiters.  */
	  parent->taskwait->in_taskwait = false;
	  gomp_sem_post (&parent->taskwait->taskwait_sem);
	}
      else if (parent->taskwait->in_depend_wait)
	{
	  /* One more task has had its dependencies met.
	     Inform any waiters.  */
	  parent->taskwait->in_depend_wait = false;
	  gomp_sem_post (&parent->taskwait->taskwait_sem);
	}
    }
  if (taskgroup && taskgroup->in_taskgroup_wait)
    {
      /* One more task has had its dependencies met.
	 Inform any waiters.  */
      taskgroup->in_taskgroup_wait = false;
      gomp_sem_post (&taskgroup->taskgroup_sem);
    }

  ++team->task_queued_count;
  gomp_team_barrier_set_task_pending (&team->barrier);
  /* I'm afraid this can't be done after releasing team->task_lock,
     as gomp_target_task_completion is run from unrelated thread and
     therefore in between gomp_mutex_unlock and gomp_team_barrier_wake
     the team could be gone already.  */
  if (team->nthreads > team->task_running_count)
    gomp_team_barrier_wake (&team->barrier, 1);
}

/* Signal that a target task TTASK has completed the asynchronously
   running phase and should be requeued as a task to handle the
   variable unmapping.  */

void
GOMP_PLUGIN_target_task_completion (void *data)
{
  struct gomp_target_task *ttask = (struct gomp_target_task *) data;
  struct gomp_task *task = ttask->task;
  struct gomp_team *team = ttask->team;

  gomp_mutex_lock (&team->task_lock);
  if (ttask->state == GOMP_TARGET_TASK_READY_TO_RUN)
    {
      /* The spawning thread hasn't finished the mapping phase yet;
	 just record completion and let it do the requeue itself.  */
      ttask->state = GOMP_TARGET_TASK_FINISHED;
      gomp_mutex_unlock (&team->task_lock);
      return;
    }
  ttask->state = GOMP_TARGET_TASK_FINISHED;
  gomp_target_task_completion (team, task);
  gomp_mutex_unlock (&team->task_lock);
}

static void gomp_task_run_post_handle_depend_hash (struct gomp_task *);

/* Called for nowait target tasks.  */

bool
gomp_create_target_task (struct gomp_device_descr *devicep,
			 void (*fn) (void *), size_t mapnum, void **hostaddrs,
			 size_t *sizes, unsigned short *kinds,
			 unsigned int flags, void **depend, void **args,
			 enum gomp_target_task_state state)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;

  /* If parallel or taskgroup has been cancelled, don't start new tasks.  */
  if (team
      && (gomp_team_barrier_cancelled (&team->barrier)
	  || (thr->task->taskgroup && thr->task->taskgroup->cancelled)))
    return true;

  struct gomp_target_task *ttask;
  struct gomp_task *task;
  struct gomp_task *parent = thr->task;
  struct gomp_taskgroup *taskgroup = parent->taskgroup;
  bool do_wake;
  size_t depend_size = 0;
  uintptr_t depend_cnt = 0;
  size_t tgt_align = 0, tgt_size = 0;

  if (depend != NULL)
    {
      depend_cnt = (uintptr_t) depend[0];
      depend_size = depend_cnt * sizeof (struct gomp_task_depend_entry);
    }
  if (fn)
    {
      /* GOMP_MAP_FIRSTPRIVATE need to be copied first, as they are
	 firstprivate on the target task.  Compute the total size and
	 maximum alignment needed for the firstprivate copies; the
	 alignment for each map is encoded in the high byte of kinds[i].  */
      size_t i;
      for (i = 0; i < mapnum; i++)
	if ((kinds[i] & 0xff) == GOMP_MAP_FIRSTPRIVATE)
	  {
	    size_t align = (size_t) 1 << (kinds[i] >> 8);
	    if (tgt_align < align)
	      tgt_align = align;
	    tgt_size = (tgt_size + align - 1) & ~(align - 1);
	    tgt_size += sizes[i];
	  }
      if (tgt_align)
	tgt_size += tgt_align - 1;
      else
	tgt_size = 0;
    }

  /* Single allocation holding the task, its depend entries, the
     gomp_target_task, the hostaddrs/sizes/kinds copies, and the
     firstprivate data block.  */
  task = gomp_malloc (sizeof (*task) + depend_size
		      + sizeof (*ttask)
		      + mapnum * (sizeof (void *) + sizeof (size_t)
				  + sizeof (unsigned short))
		      + tgt_size);
  gomp_init_task (task, parent, gomp_icv (false));
  task->priority = 0;
  task->kind = GOMP_TASK_WAITING;
  task->in_tied_task = parent->in_tied_task;
  task->taskgroup = taskgroup;
  ttask = (struct gomp_target_task *) &task->depend[depend_cnt];
  ttask->devicep = devicep;
  ttask->fn = fn;
  ttask->mapnum = mapnum;
  ttask->args = args;
  memcpy (ttask->hostaddrs, hostaddrs, mapnum * sizeof (void *));
  ttask->sizes = (size_t *) &ttask->hostaddrs[mapnum];
  memcpy (ttask->sizes, sizes, mapnum * sizeof (size_t));
  ttask->kinds = (unsigned short *) &ttask->sizes[mapnum];
  memcpy (ttask->kinds, kinds, mapnum * sizeof (unsigned short));
  if (tgt_align)
    {
      /* Copy each firstprivate datum into the aligned tail block and
	 repoint hostaddrs[i] at the copy.  */
      char *tgt = (char *) &ttask->kinds[mapnum];
      size_t i;
      uintptr_t al = (uintptr_t) tgt & (tgt_align - 1);
      if (al)
	tgt += tgt_align - al;
      tgt_size = 0;
      for (i = 0; i < mapnum; i++)
	if ((kinds[i] & 0xff) == GOMP_MAP_FIRSTPRIVATE)
	  {
	    size_t align = (size_t) 1 << (kinds[i] >> 8);
	    tgt_size = (tgt_size + align - 1) & ~(align - 1);
	    memcpy (tgt + tgt_size, hostaddrs[i], sizes[i]);
	    ttask->hostaddrs[i] = tgt + tgt_size;
	    tgt_size = tgt_size + sizes[i];
	  }
    }
  ttask->flags = flags;
  ttask->state = state;
  ttask->task = task;
  ttask->team = team;
  task->fn = NULL;
  task->fn_data = ttask;
  task->final_task = 0;
  gomp_mutex_lock (&team->task_lock);
  /* If parallel or taskgroup has been cancelled, don't start new tasks.  */
  if (__builtin_expect (gomp_team_barrier_cancelled (&team->barrier)
			|| (taskgroup && taskgroup->cancelled), 0))
    {
      gomp_mutex_unlock (&team->task_lock);
      gomp_finish_task (task);
      free (task);
      return true;
    }
  if (depend_size)
    {
      gomp_task_handle_depend (task, parent, depend);
      if (task->num_dependees)
	{
	  if (taskgroup)
	    taskgroup->num_children++;
	  gomp_mutex_unlock (&team->task_lock);
	  return true;
	}
    }
  if (state == GOMP_TARGET_TASK_DATA)
    {
      gomp_task_run_post_handle_depend_hash (task);
      gomp_mutex_unlock (&team->task_lock);
      gomp_finish_task (task);
      free (task);
      return false;
    }
  if (taskgroup)
    taskgroup->num_children++;
  /* For async offloading, if we don't need to wait for dependencies,
     run the gomp_target_task_fn right away, essentially schedule the
     mapping part of the task in the current thread.  */
  if (devicep != NULL
      && (devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400))
    {
      priority_queue_insert (PQ_CHILDREN, &parent->children_queue, task, 0,
			     PRIORITY_INSERT_END,
			     /*adjust_parent_depends_on=*/false,
			     task->parent_depends_on);
      if (taskgroup)
	priority_queue_insert (PQ_TASKGROUP, &taskgroup->taskgroup_queue,
			       task, 0, PRIORITY_INSERT_END,
			       /*adjust_parent_depends_on=*/false,
			       task->parent_depends_on);
      task->pnode[PQ_TEAM].next = NULL;
      task->pnode[PQ_TEAM].prev = NULL;
      task->kind = GOMP_TASK_TIED;
      ++team->task_count;
      gomp_mutex_unlock (&team->task_lock);

      thr->task = task;
      gomp_target_task_fn (task->fn_data);
      thr->task = parent;

      gomp_mutex_lock (&team->task_lock);
      task->kind = GOMP_TASK_ASYNC_RUNNING;
      /* If GOMP_PLUGIN_target_task_completion has run already
	 in between gomp_target_task_fn and the mutex lock,
	 perform the requeuing here.  */
      if (ttask->state == GOMP_TARGET_TASK_FINISHED)
	gomp_target_task_completion (team, task);
      else
	ttask->state = GOMP_TARGET_TASK_RUNNING;
      gomp_mutex_unlock (&team->task_lock);
      return true;
    }
  priority_queue_insert (PQ_CHILDREN, &parent->children_queue, task, 0,
			 PRIORITY_INSERT_BEGIN,
			 /*adjust_parent_depends_on=*/false,
			 task->parent_depends_on);
  if (taskgroup)
    priority_queue_insert (PQ_TASKGROUP, &taskgroup->taskgroup_queue, task, 0,
			   PRIORITY_INSERT_BEGIN,
			   /*adjust_parent_depends_on=*/false,
			   task->parent_depends_on);
  priority_queue_insert (PQ_TEAM, &team->task_queue, task, 0,
			 PRIORITY_INSERT_END,
			 /*adjust_parent_depends_on=*/false,
			 task->parent_depends_on);
  ++team->task_count;
  ++team->task_queued_count;
  gomp_team_barrier_set_task_pending (&team->barrier);
  do_wake = team->task_running_count + !parent->in_tied_task
	    < team->nthreads;
  gomp_mutex_unlock (&team->task_lock);
  if (do_wake)
    gomp_team_barrier_wake (&team->barrier, 1);
  return true;
}

/* Given a parent_depends_on task in LIST, move it to the front of its
   priority so it is run as soon as possible.

   Care is taken to update the list's LAST_PARENT_DEPENDS_ON field.

   We rearrange the queue such that all parent_depends_on tasks are
   first, and last_parent_depends_on points to the last such task we
   rearranged.  For example, given the following tasks in a queue
   where PD[123] are the parent_depends_on tasks:

	task->children
	|
	V
	C1 -> C2 -> C3 -> PD1 -> PD2 -> PD3 -> C4

   We rearrange such that:

	task->children
	|	       +--- last_parent_depends_on
	|	       |
	V	       V
	PD1 -> PD2 -> PD3 -> C1 -> C2 -> C3 -> C4.
*/

static void inline
priority_list_upgrade_task (struct priority_list *list,
			    struct priority_node *node)
{
  struct priority_node *last_parent_depends_on
    = list->last_parent_depends_on;
  if (last_parent_depends_on)
    {
      /* Splice NODE out and reinsert it right after the last
	 parent_depends_on task already moved to the front.  */
      node->prev->next = node->next;
      node->next->prev = node->prev;
      node->prev = last_parent_depends_on;
      node->next = last_parent_depends_on->next;
      node->prev->next = node;
      node->next->prev = node;
    }
  else if (node != list->tasks)
    {
      /* No parent_depends_on task moved yet: splice NODE out and make
	 it the new list head.  */
      node->prev->next = node->next;
      node->next->prev = node->prev;
      node->prev = list->tasks->prev;
      node->next = list->tasks;
      list->tasks = node;
      node->prev->next = node;
      node->next->prev = node;
    }
  /* NODE is now the last rearranged parent_depends_on task.  */
  list->last_parent_depends_on = node;
}

/* Given a parent_depends_on TASK in its parent's children_queue, move
   it to the front of its priority so it is run as soon as possible.

   PARENT is passed as an optimization.

   (This function could be defined in priority_queue.c, but we want it
   inlined, and putting it in priority_queue.h is not an option, given
   that gomp_task has not been properly defined at that point).  */

static void inline
priority_queue_upgrade_task (struct gomp_task *task,
			     struct gomp_task *parent)
{
  struct priority_queue *head = &parent->children_queue;
  struct priority_node *node = &task->pnode[PQ_CHILDREN];
#if _LIBGOMP_CHECKING_
  if (!task->parent_depends_on)
    gomp_fatal ("priority_queue_upgrade_task: task must be a "
		"parent_depends_on task");
  if (!priority_queue_task_in_queue_p (PQ_CHILDREN, head, task))
    gomp_fatal ("priority_queue_upgrade_task: cannot find task=%p", task);
#endif
  if (priority_queue_multi_p (head))
    {
      struct priority_list *list
	= priority_queue_lookup_priority (head, task->priority);
      priority_list_upgrade_task (list, node);
    }
  else
    priority_list_upgrade_task (&head->l, node);
}

/* Given a CHILD_TASK in LIST that is about to be executed, move it out
   of the way in LIST so that other tasks can be considered for
   execution.  LIST contains tasks of type TYPE.

   Care is taken to update the queue's LAST_PARENT_DEPENDS_ON field
   if applicable.  */

static void inline
priority_list_downgrade_task (enum priority_queue_type type,
			      struct priority_list *list,
			      struct gomp_task *child_task)
{
  struct priority_node *node = task_to_priority_node (type, child_task);
  if (list->tasks == node)
    list->tasks = node->next;
  else if (node->next != list->tasks)
    {
      /* The task in NODE is about to become TIED and TIED tasks
	 cannot come before WAITING tasks.  If we're about to
	 leave the queue in such an indeterminate state, rewire
	 things appropriately.  However, a TIED task at the end is
	 perfectly fine.  */
      struct gomp_task *next_task = priority_node_to_task (type, node->next);
      if (next_task->kind == GOMP_TASK_WAITING)
	{
	  /* Remove from list.  */
	  node->prev->next = node->next;
	  node->next->prev = node->prev;
	  /* Rewire at the end.  */
	  node->next = list->tasks;
	  node->prev = list->tasks->prev;
	  list->tasks->prev->next = node;
	  list->tasks->prev = node;
	}
    }

  /* If the current task is the last_parent_depends_on for its
     priority, adjust last_parent_depends_on appropriately.  */
  if (__builtin_expect (child_task->parent_depends_on, 0)
      && list->last_parent_depends_on == node)
    {
      struct gomp_task *prev_child = priority_node_to_task (type, node->prev);
      if (node->prev != node
	  && prev_child->kind == GOMP_TASK_WAITING
	  && prev_child->parent_depends_on)
	list->last_parent_depends_on = node->prev;
      else
	{
	  /* There are no more parent_depends_on entries waiting
	     to run, clear the list.  */
	  list->last_parent_depends_on = NULL;
	}
    }
}

/* Given a TASK in HEAD that is about to be executed, move it out of
   the way so that other tasks can be considered for execution.  HEAD
   contains tasks of type TYPE.

   Care is taken to update the queue's LAST_PARENT_DEPENDS_ON field
   if applicable.

   (This function could be defined in priority_queue.c, but we want it
   inlined, and putting it in priority_queue.h is not an option, given
   that gomp_task has not been properly defined at that point).
*/

static void inline
priority_queue_downgrade_task (enum priority_queue_type type,
			       struct priority_queue *head,
			       struct gomp_task *task)
{
#if _LIBGOMP_CHECKING_
  if (!priority_queue_task_in_queue_p (type, head, task))
    gomp_fatal ("Attempt to downgrade missing task %p", task);
#endif
  if (priority_queue_multi_p (head))
    {
      struct priority_list *list
	= priority_queue_lookup_priority (head, task->priority);
      priority_list_downgrade_task (type, list, task);
    }
  else
    priority_list_downgrade_task (type, &head->l, task);
}

/* Setup CHILD_TASK to execute.  This is done by setting the task to
   TIED, and updating all relevant queues so that CHILD_TASK is no
   longer chosen for scheduling.  Also, remove CHILD_TASK from the
   overall team task queue entirely.

   Return TRUE if task or its containing taskgroup has been
   cancelled.  */

static inline bool
gomp_task_run_pre (struct gomp_task *child_task, struct gomp_task *parent,
		   struct gomp_team *team)
{
#if _LIBGOMP_CHECKING_
  if (child_task->parent)
    priority_queue_verify (PQ_CHILDREN,
			   &child_task->parent->children_queue, true);
  if (child_task->taskgroup)
    priority_queue_verify (PQ_TASKGROUP,
			   &child_task->taskgroup->taskgroup_queue, false);
  priority_queue_verify (PQ_TEAM, &team->task_queue, false);
#endif

  /* Task is about to go tied, move it out of the way.  */
  if (parent)
    priority_queue_downgrade_task (PQ_CHILDREN, &parent->children_queue,
				   child_task);

  /* Task is about to go tied, move it out of the way.  */
  struct gomp_taskgroup *taskgroup = child_task->taskgroup;
  if (taskgroup)
    priority_queue_downgrade_task (PQ_TASKGROUP, &taskgroup->taskgroup_queue,
				   child_task);

  priority_queue_remove (PQ_TEAM, &team->task_queue, child_task,
			 MEMMODEL_RELAXED);
  child_task->pnode[PQ_TEAM].next = NULL;
  child_task->pnode[PQ_TEAM].prev = NULL;
  child_task->kind = GOMP_TASK_TIED;

  if (--team->task_queued_count == 0)
    gomp_team_barrier_clear_task_pending (&team->barrier);
  if ((gomp_team_barrier_cancelled (&team->barrier)
       || (taskgroup && taskgroup->cancelled))
      && !child_task->copy_ctors_done)
    return true;
  return false;
}

/* Remove CHILD_TASK's depend entries from its parent's dependency hash
   table, unlinking each entry from its per-address chain and clearing
   the hash slot when the chain becomes empty.  */

static void
gomp_task_run_post_handle_depend_hash (struct gomp_task *child_task)
{
  struct gomp_task *parent = child_task->parent;
  size_t i;

  for (i = 0; i < child_task->depend_count; i++)
    if (!child_task->depend[i].redundant)
      {
	if (child_task->depend[i].next)
	  child_task->depend[i].next->prev = child_task->depend[i].prev;
	if (child_task->depend[i].prev)
	  child_task->depend[i].prev->next = child_task->depend[i].next;
	else
	  {
	    /* Head of the chain: the hash slot must point at this
	       entry; advance it or clear the slot.  */
	    hash_entry_type *slot
	      = htab_find_slot (&parent->depend_hash, &child_task->depend[i],
				NO_INSERT);
	    if (*slot != &child_task->depend[i])
	      abort ();
	    if (child_task->depend[i].next)
	      *slot = child_task->depend[i].next;
	    else
	      htab_clear_slot (parent->depend_hash, slot);
	  }
      }
}

/* After a CHILD_TASK has been run, adjust the dependency queue for
   each task that depends on CHILD_TASK, to record the fact that there
   is one less dependency to worry about.  If a task that depended on
   CHILD_TASK now has no dependencies, place it in the various queues
   so it gets scheduled to run.

   TEAM is the team to which CHILD_TASK belongs to.  */

static size_t
gomp_task_run_post_handle_dependers (struct gomp_task *child_task,
				     struct gomp_team *team)
{
  struct gomp_task *parent = child_task->parent;
  size_t i, count = child_task->dependers->n_elem, ret = 0;
  for (i = 0; i < count; i++)
    {
      struct gomp_task *task = child_task->dependers->elem[i];

      /* CHILD_TASK satisfies a dependency for TASK.  Keep track of
	 TASK's remaining dependencies.  Once TASK has no other
	 dependencies, put it into the various queues so it will get
	 scheduled for execution.  */
      if (--task->num_dependees != 0)
	continue;

      struct gomp_taskgroup *taskgroup = task->taskgroup;
      if (parent)
	{
	  priority_queue_insert (PQ_CHILDREN, &parent->children_queue,
				 task, task->priority,
				 PRIORITY_INSERT_BEGIN,
				 /*adjust_parent_depends_on=*/true,
				 task->parent_depends_on);
	  if (parent->taskwait)
	    {
	      if (parent->taskwait->in_taskwait)
		{
		  /* One more task has had its dependencies met.
		     Inform any waiters.  */
		  parent->taskwait->in_taskwait = false;
		  gomp_sem_post (&parent->taskwait->taskwait_sem);
		}
	      else if (parent->taskwait->in_depend_wait)
		{
		  /* One more task has had its dependencies met.
		     Inform any waiters.  */
		  parent->taskwait->in_depend_wait = false;
		  gomp_sem_post (&parent->taskwait->taskwait_sem);
		}
	    }
	}
      if (taskgroup)
	{
	  priority_queue_insert (PQ_TASKGROUP, &taskgroup->taskgroup_queue,
				 task, task->priority,
				 PRIORITY_INSERT_BEGIN,
				 /*adjust_parent_depends_on=*/false,
				 task->parent_depends_on);
	  if (taskgroup->in_taskgroup_wait)
	    {
	      /* One more task has had its dependencies met.
		 Inform any waiters.  */
	      taskgroup->in_taskgroup_wait = false;
	      gomp_sem_post (&taskgroup->taskgroup_sem);
	    }
	}
      priority_queue_insert (PQ_TEAM, &team->task_queue, task,
			     task->priority, PRIORITY_INSERT_END,
			     /*adjust_parent_depends_on=*/false,
			     task->parent_depends_on);
      ++team->task_count;
      ++team->task_queued_count;
      ++ret;
    }
  free (child_task->dependers);
  child_task->dependers = NULL;
  if (ret > 1)
    gomp_team_barrier_set_task_pending (&team->barrier);
  return ret;
}

/* Post-run dependency bookkeeping for CHILD_TASK.  Returns the number
   of dependent tasks that became runnable (and were enqueued).  */

static inline size_t
gomp_task_run_post_handle_depend (struct gomp_task *child_task,
				  struct gomp_team *team)
{
  if (child_task->depend_count == 0)
    return 0;

  /* If parent is gone already, the hash table is freed and nothing
     will use the hash table anymore, no need to remove anything from
     it.  */
  if (child_task->parent != NULL)
    gomp_task_run_post_handle_depend_hash (child_task);

  if (child_task->dependers == NULL)
    return 0;

  return gomp_task_run_post_handle_dependers (child_task, team);
}

/* Remove CHILD_TASK from its parent.  */

static inline void
gomp_task_run_post_remove_parent (struct gomp_task *child_task)
{
  struct gomp_task *parent = child_task->parent;
  if (parent == NULL)
    return;

  /* If this was the last task the parent was depending on,
     synchronize with gomp_task_maybe_wait_for_dependencies so it can
     clean up and return.  */
  if (__builtin_expect (child_task->parent_depends_on, 0)
      && --parent->taskwait->n_depend == 0
      && parent->taskwait->in_depend_wait)
    {
      parent->taskwait->in_depend_wait = false;
      gomp_sem_post (&parent->taskwait->taskwait_sem);
    }

  if (priority_queue_remove (PQ_CHILDREN, &parent->children_queue,
			     child_task, MEMMODEL_RELEASE)
      && parent->taskwait && parent->taskwait->in_taskwait)
    {
      /* Queue emptied: wake a parent blocked in GOMP_taskwait.  */
      parent->taskwait->in_taskwait = false;
      gomp_sem_post (&parent->taskwait->taskwait_sem);
    }
  child_task->pnode[PQ_CHILDREN].next = NULL;
  child_task->pnode[PQ_CHILDREN].prev = NULL;
}

/* Remove CHILD_TASK from its taskgroup.  */

static inline void
gomp_task_run_post_remove_taskgroup (struct gomp_task *child_task)
{
  struct gomp_taskgroup *taskgroup = child_task->taskgroup;
  if (taskgroup == NULL)
    return;
  bool empty = priority_queue_remove (PQ_TASKGROUP,
				      &taskgroup->taskgroup_queue,
				      child_task, MEMMODEL_RELAXED);
  child_task->pnode[PQ_TASKGROUP].next = NULL;
  child_task->pnode[PQ_TASKGROUP].prev = NULL;
  if (taskgroup->num_children > 1)
    --taskgroup->num_children;
  else
    {
      /* We access taskgroup->num_children in GOMP_taskgroup_end
	 outside of the task lock mutex region, so need a release
	 barrier here to ensure memory written by child_task->fn above
	 is flushed before the NULL is written.  */
      __atomic_store_n (&taskgroup->num_children, 0, MEMMODEL_RELEASE);
    }
  if (empty && taskgroup->in_taskgroup_wait)
    {
      taskgroup->in_taskgroup_wait = false;
      gomp_sem_post (&taskgroup->taskgroup_sem);
    }
}

/* Task-scheduling loop run by each thread at a team barrier: drain the
   team task queue, executing tasks until none remain.  The last thread
   to arrive flags the barrier as waiting-for-tasks; whichever thread
   retires the final task signals barrier completion.  STATE is the
   barrier arrival state.  */

void
gomp_barrier_handle_tasks (gomp_barrier_state_t state)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_task *task = thr->task;
  struct gomp_task *child_task = NULL;
  struct gomp_task *to_free = NULL;
  int do_wake = 0;

  gomp_mutex_lock (&team->task_lock);
  if (gomp_barrier_last_thread (state))
    {
      if (team->task_count == 0)
	{
	  gomp_team_barrier_done (&team->barrier, state);
	  gomp_mutex_unlock (&team->task_lock);
	  gomp_team_barrier_wake (&team->barrier, 0);
	  return;
	}
      gomp_team_barrier_set_waiting_for_tasks (&team->barrier);
    }

  while (1)
    {
      bool cancelled = false;
      if (!priority_queue_empty_p (&team->task_queue, MEMMODEL_RELAXED))
	{
	  bool ignored;
	  child_task
	    = priority_queue_next_task (PQ_TEAM, &team->task_queue,
					PQ_IGNORED, NULL,
					&ignored);
	  cancelled = gomp_task_run_pre (child_task, child_task->parent,
					 team);
	  if (__builtin_expect (cancelled, 0))
	    {
	      if (to_free)
		{
		  gomp_finish_task (to_free);
		  free (to_free);
		  to_free = NULL;
		}
	      goto finish_cancelled;
	    }
	  team->task_running_count++;
	  child_task->in_tied_task = true;
	}
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
	{
	  gomp_team_barrier_wake (&team->barrier, do_wake);
	  do_wake = 0;
	}
      /* Free the previously-retired task outside the lock.  */
      if (to_free)
	{
	  gomp_finish_task (to_free);
	  free (to_free);
	  to_free = NULL;
	}
      if (child_task)
	{
	  thr->task = child_task;
	  if (__builtin_expect (child_task->fn == NULL, 0))
	    {
	      /* NULL fn: this is a target task; run its mapping part
		 and, if it went asynchronous, leave it in flight.  */
	      if (gomp_target_task_fn (child_task->fn_data))
		{
		  thr->task = task;
		  gomp_mutex_lock (&team->task_lock);
		  child_task->kind = GOMP_TASK_ASYNC_RUNNING;
		  team->task_running_count--;
		  struct gomp_target_task *ttask
		    = (struct gomp_target_task *) child_task->fn_data;
		  /* If GOMP_PLUGIN_target_task_completion has run already
		     in between gomp_target_task_fn and the mutex lock,
		     perform the requeuing here.  */
		  if (ttask->state == GOMP_TARGET_TASK_FINISHED)
		    gomp_target_task_completion (team, child_task);
		  else
		    ttask->state = GOMP_TARGET_TASK_RUNNING;
		  child_task = NULL;
		  continue;
		}
	    }
	  else
	    child_task->fn (child_task->fn_data);
	  thr->task = task;
	}
      else
	return;
      gomp_mutex_lock (&team->task_lock);
      if (child_task)
	{
	 finish_cancelled:;
	  size_t new_tasks
	    = gomp_task_run_post_handle_depend (child_task, team);
	  gomp_task_run_post_remove_parent (child_task);
	  gomp_clear_parent (&child_task->children_queue);
	  gomp_task_run_post_remove_taskgroup (child_task);
	  to_free = child_task;
	  child_task = NULL;
	  if (!cancelled)
	    team->task_running_count--;
	  if (new_tasks > 1)
	    {
	      do_wake = team->nthreads - team->task_running_count;
	      if (do_wake > new_tasks)
		do_wake = new_tasks;
	    }
	  if (--team->task_count == 0
	      && gomp_team_barrier_waiting_for_tasks (&team->barrier))
	    {
	      gomp_team_barrier_done (&team->barrier, state);
	      gomp_mutex_unlock (&team->task_lock);
	      gomp_team_barrier_wake (&team->barrier, 0);
	      gomp_mutex_lock (&team->task_lock);
	    }
	}
    }
}

/* Called when encountering a taskwait directive.

   Wait for all children of the current task.  */

void
GOMP_taskwait (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_task *task = thr->task;
  struct gomp_task *child_task = NULL;
  struct gomp_task *to_free = NULL;
  struct gomp_taskwait taskwait;
  int do_wake = 0;

  /* The acquire barrier on load of task->children here synchronizes
     with the write of a NULL in gomp_task_run_post_remove_parent.  It is
     not necessary that we synchronize with other non-NULL writes at
     this point, but we must ensure that all writes to memory by a
     child thread task work function are seen before we exit from
     GOMP_taskwait.  */
  if (task == NULL
      || priority_queue_empty_p (&task->children_queue, MEMMODEL_ACQUIRE))
    return;

  memset (&taskwait, 0, sizeof (taskwait));
  bool child_q = false;
  gomp_mutex_lock (&team->task_lock);
  while (1)
    {
      bool cancelled = false;
      if (priority_queue_empty_p (&task->children_queue, MEMMODEL_RELAXED))
	{
	  /* All children done: tear down the taskwait and return.  */
	  bool destroy_taskwait = task->taskwait != NULL;
	  task->taskwait = NULL;
	  gomp_mutex_unlock (&team->task_lock);
	  if (to_free)
	    {
	      gomp_finish_task (to_free);
	      free (to_free);
	    }
	  if (destroy_taskwait)
	    gomp_sem_destroy (&taskwait.taskwait_sem);
	  return;
	}
      struct gomp_task *next_task
	= priority_queue_next_task (PQ_CHILDREN, &task->children_queue,
				    PQ_TEAM, &team->task_queue, &child_q);
      if (next_task->kind == GOMP_TASK_WAITING)
	{
	  child_task = next_task;
	  cancelled
	    = gomp_task_run_pre (child_task, task, team);
	  if (__builtin_expect (cancelled, 0))
	    {
	      if (to_free)
		{
		  gomp_finish_task (to_free);
		  free (to_free);
		  to_free = NULL;
		}
	      goto finish_cancelled;
	    }
	}
      else
	{
	  /* All tasks we are waiting for are either running in other
	     threads, or they are tasks that have not had their
	     dependencies met (so they're not even in the queue).  Wait
	     for them.  */
	  if (task->taskwait == NULL)
	    {
	      taskwait.in_depend_wait = false;
	      gomp_sem_init (&taskwait.taskwait_sem, 0);
	      task->taskwait = &taskwait;
	    }
	  taskwait.in_taskwait = true;
	}
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
	{
	  gomp_team_barrier_wake (&team->barrier, do_wake);
	  do_wake = 0;
	}
      if (to_free)
	{
	  gomp_finish_task (to_free);
	  free (to_free);
	  to_free = NULL;
	}
      if (child_task)
	{
	  thr->task = child_task;
	  if (__builtin_expect (child_task->fn == NULL, 0))
	    {
	      if (gomp_target_task_fn (child_task->fn_data))
		{
		  thr->task = task;
		  gomp_mutex_lock (&team->task_lock);
		  child_task->kind = GOMP_TASK_ASYNC_RUNNING;
		  struct gomp_target_task *ttask
		    = (struct gomp_target_task *) child_task->fn_data;
		  /* If GOMP_PLUGIN_target_task_completion has run already
		     in between gomp_target_task_fn and the mutex lock,
		     perform the requeuing here.  */
		  if (ttask->state == GOMP_TARGET_TASK_FINISHED)
		    gomp_target_task_completion (team, child_task);
		  else
		    ttask->state = GOMP_TARGET_TASK_RUNNING;
		  child_task = NULL;
		  continue;
		}
	    }
	  else
	    child_task->fn (child_task->fn_data);
	  thr->task = task;
	}
      else
	gomp_sem_wait (&taskwait.taskwait_sem);
      gomp_mutex_lock (&team->task_lock);
      if (child_task)
	{
	 finish_cancelled:;
	  size_t new_tasks
	    = gomp_task_run_post_handle_depend (child_task, team);

	  if (child_q)
	    {
	      priority_queue_remove (PQ_CHILDREN, &task->children_queue,
				     child_task, MEMMODEL_RELAXED);
	      child_task->pnode[PQ_CHILDREN].next = NULL;
	      child_task->pnode[PQ_CHILDREN].prev = NULL;
	    }

	  gomp_clear_parent (&child_task->children_queue);

	  gomp_task_run_post_remove_taskgroup (child_task);

	  to_free = child_task;
	  child_task = NULL;
	  team->task_count--;
	  if (new_tasks > 1)
	    {
	      do_wake = team->nthreads - team->task_running_count
			- !task->in_tied_task;
	      if (do_wake > new_tasks)
		do_wake = new_tasks;
	    }
	}
    }
}

/* An undeferred task is about to run.  Wait for all tasks that this
   undeferred task depends on.

   This is done by first putting all known ready dependencies
   (dependencies that have their own dependencies met) at the top of
   the scheduling queues.  Then we iterate through these imminently
   ready tasks (and possibly other high priority tasks), and run them.
   If we run out of ready dependencies to execute, we either wait for
   the remaining dependencies to finish, or wait for them to get
   scheduled so we can run them.

   DEPEND is as in GOMP_task.
*/

void
gomp_task_maybe_wait_for_dependencies (void **depend)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_task *task = thr->task;
  struct gomp_team *team = thr->ts.team;
  struct gomp_task_depend_entry elem, *ent = NULL;
  struct gomp_taskwait taskwait;
  size_t ndepend = (uintptr_t) depend[0];	/* total depend clauses */
  size_t nout = (uintptr_t) depend[1];		/* leading out/inout ones */
  size_t i;
  size_t num_awaited = 0;
  struct gomp_task *child_task = NULL;
  struct gomp_task *to_free = NULL;
  int do_wake = 0;

  gomp_mutex_lock (&team->task_lock);
  for (i = 0; i < ndepend; i++)
    {
      elem.addr = depend[i + 2];
      ent = htab_find (task->depend_hash, &elem);
      for (; ent; ent = ent->next)
	if (i >= nout && ent->is_in)
	  /* in-clause vs. earlier in: not a conflict, skip.  */
	  continue;
	else
	  {
	    struct gomp_task *tsk = ent->task;
	    if (!tsk->parent_depends_on)
	      {
		tsk->parent_depends_on = true;
		++num_awaited;
		/* If dependency TSK itself has no dependencies and is
		   ready to run, move it up front so that we run it as
		   soon as possible.  */
		if (tsk->num_dependees == 0 && tsk->kind == GOMP_TASK_WAITING)
		  priority_queue_upgrade_task (tsk, task);
	      }
	  }
    }
  if (num_awaited == 0)
    {
      gomp_mutex_unlock (&team->task_lock);
      return;
    }

  memset (&taskwait, 0, sizeof (taskwait));
  taskwait.n_depend = num_awaited;
  gomp_sem_init (&taskwait.taskwait_sem, 0);
  task->taskwait = &taskwait;

  while (1)
    {
      bool cancelled = false;
      if (taskwait.n_depend == 0)
	{
	  task->taskwait = NULL;
	  gomp_mutex_unlock (&team->task_lock);
	  if (to_free)
	    {
	      gomp_finish_task (to_free);
	      free (to_free);
	    }
	  gomp_sem_destroy (&taskwait.taskwait_sem);
	  return;
	}

      /* Theoretically when we have multiple priorities, we should
	 chose between the highest priority item in
	 task->children_queue and team->task_queue here, so we should
	 use priority_queue_next_task().  However, since we are
	 running an undeferred task, perhaps that makes all tasks it
	 depends on undeferred, thus a priority of INF?  This would
	 make it unnecessary to take anything into account here,
	 but the dependencies.

	 On the other hand, if we want to use
	 priority_queue_next_task(), care should be taken to only use
	 priority_queue_remove() below if the task was actually removed
	 from the children queue.  */
      bool ignored;
      struct gomp_task *next_task
	= priority_queue_next_task (PQ_CHILDREN, &task->children_queue,
				    PQ_IGNORED, NULL, &ignored);

      if (next_task->kind == GOMP_TASK_WAITING)
	{
	  child_task = next_task;
	  cancelled
	    = gomp_task_run_pre (child_task, task, team);
	  if (__builtin_expect (cancelled, 0))
	    {
	      if (to_free)
		{
		  gomp_finish_task (to_free);
		  free (to_free);
		  to_free = NULL;
		}
	      goto finish_cancelled;
	    }
	}
      else
	/* All tasks we are waiting for are either running in other
	   threads, or they are tasks that have not had their
	   dependencies met (so they're not even in the queue).  Wait
	   for them.  */
	taskwait.in_depend_wait = true;
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
	{
	  gomp_team_barrier_wake (&team->barrier, do_wake);
	  do_wake = 0;
	}
      if (to_free)
	{
	  gomp_finish_task (to_free);
	  free (to_free);
	  to_free = NULL;
	}
      if (child_task)
	{
	  thr->task = child_task;
	  if (__builtin_expect (child_task->fn == NULL, 0))
	    {
	      if (gomp_target_task_fn (child_task->fn_data))
		{
		  thr->task = task;
		  gomp_mutex_lock (&team->task_lock);
		  child_task->kind = GOMP_TASK_ASYNC_RUNNING;
		  struct gomp_target_task *ttask
		    = (struct gomp_target_task *) child_task->fn_data;
		  /* If GOMP_PLUGIN_target_task_completion has run already
		     in between gomp_target_task_fn and the mutex lock,
		     perform the requeuing here.  */
		  if (ttask->state == GOMP_TARGET_TASK_FINISHED)
		    gomp_target_task_completion (team, child_task);
		  else
		    ttask->state = GOMP_TARGET_TASK_RUNNING;
		  child_task = NULL;
		  continue;
		}
	    }
	  else
	    child_task->fn (child_task->fn_data);
	  thr->task = task;
	}
      else
	gomp_sem_wait (&taskwait.taskwait_sem);
      gomp_mutex_lock (&team->task_lock);
      if (child_task)
	{
	 finish_cancelled:;
	  size_t new_tasks
	    = gomp_task_run_post_handle_depend (child_task, team);
	  if (child_task->parent_depends_on)
	    --taskwait.n_depend;

	  priority_queue_remove (PQ_CHILDREN, &task->children_queue,
				 child_task, MEMMODEL_RELAXED);
	  child_task->pnode[PQ_CHILDREN].next = NULL;
	  child_task->pnode[PQ_CHILDREN].prev = NULL;

	  gomp_clear_parent (&child_task->children_queue);
	  gomp_task_run_post_remove_taskgroup (child_task);
	  to_free = child_task;
	  child_task = NULL;
	  team->task_count--;
	  if (new_tasks > 1)
	    {
	      do_wake = team->nthreads - team->task_running_count
			- !task->in_tied_task;
	      if (do_wake > new_tasks)
		do_wake = new_tasks;
	    }
	}
    }
}

/* Called when encountering a taskyield directive.  */

void
GOMP_taskyield (void)
{
  /* Nothing at the moment.  */
}

/* Called when entering a taskgroup region: push a fresh
   gomp_taskgroup onto the current task's taskgroup stack.  */

void
GOMP_taskgroup_start (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_task *task = thr->task;
  struct gomp_taskgroup *taskgroup;

  /* If team is NULL, all tasks are executed as
     GOMP_TASK_UNDEFERRED tasks and thus all children tasks of
     taskgroup and their descendant tasks will be finished
     by the time GOMP_taskgroup_end is called.  */
  if (team == NULL)
    return;
  taskgroup = gomp_malloc (sizeof (struct gomp_taskgroup));
  taskgroup->prev = task->taskgroup;
  priority_queue_init (&taskgroup->taskgroup_queue);
  taskgroup->in_taskgroup_wait = false;
  taskgroup->cancelled = false;
  taskgroup->num_children = 0;
  gomp_sem_init (&taskgroup->taskgroup_sem, 0);
  task->taskgroup = taskgroup;
}

/* Called when leaving a taskgroup region: run/wait for all tasks of
   the taskgroup (including descendants), then pop and free it.  */

void
GOMP_taskgroup_end (void)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_task *task = thr->task;
  struct gomp_taskgroup *taskgroup;
  struct gomp_task *child_task = NULL;
  struct gomp_task *to_free = NULL;
  int do_wake = 0;

  if (team == NULL)
    return;
  taskgroup = task->taskgroup;
  if (__builtin_expect (taskgroup == NULL, 0)
      && thr->ts.level == 0)
    {
      /* This can happen if GOMP_taskgroup_start is called when
	 thr->ts.team == NULL, but inside of the taskgroup there
	 is #pragma omp target nowait that creates an implicit
	 team with a single thread.  In this case, we want to wait
	 for all outstanding tasks in this team.  */
      gomp_team_barrier_wait (&team->barrier);
      return;
    }

  /* The acquire barrier on load of taskgroup->num_children here
     synchronizes with the write of 0 in
     gomp_task_run_post_remove_taskgroup.  It is not necessary that we
     synchronize with other non-0 writes at this point, but we must
     ensure that all writes to memory by a child thread task work
     function are seen before we exit from GOMP_taskgroup_end.  */
  if (__atomic_load_n (&taskgroup->num_children, MEMMODEL_ACQUIRE) == 0)
    goto finish;

  bool unused;
  gomp_mutex_lock (&team->task_lock);
  while (1)
    {
      bool cancelled = false;
      if (priority_queue_empty_p (&taskgroup->taskgroup_queue,
				  MEMMODEL_RELAXED))
	{
	  if (taskgroup->num_children)
	    {
	      /* Taskgroup queue empty but children still outstanding:
		 they are descendants queued on this task's children
		 queue; run those instead.  */
	      if (priority_queue_empty_p (&task->children_queue,
					  MEMMODEL_RELAXED))
		goto do_wait;
	      child_task
		= priority_queue_next_task (PQ_CHILDREN,
					    &task->children_queue,
					    PQ_TEAM, &team->task_queue,
					    &unused);
	    }
	  else
	    {
	      gomp_mutex_unlock (&team->task_lock);
	      if (to_free)
		{
		  gomp_finish_task (to_free);
		  free (to_free);
		}
	      goto finish;
	    }
	}
      else
	child_task
	  = priority_queue_next_task (PQ_TASKGROUP,
				      &taskgroup->taskgroup_queue,
				      PQ_TEAM, &team->task_queue, &unused);
      if (child_task->kind == GOMP_TASK_WAITING)
	{
	  cancelled
	    = gomp_task_run_pre (child_task, child_task->parent, team);
	  if (__builtin_expect (cancelled, 0))
	    {
	      if (to_free)
		{
		  gomp_finish_task (to_free);
		  free (to_free);
		  to_free = NULL;
		}
	      goto finish_cancelled;
	    }
	}
      else
	{
	  child_task = NULL;
	 do_wait:
	  /* All tasks we are waiting for are either running in other
	     threads, or they are tasks that have not had their
	     dependencies met (so they're not even in the queue).  Wait
	     for them.  */
	  taskgroup->in_taskgroup_wait = true;
	}
      gomp_mutex_unlock (&team->task_lock);
      if (do_wake)
	{
	  gomp_team_barrier_wake (&team->barrier, do_wake);
	  do_wake = 0;
	}
      if (to_free)
	{
	  gomp_finish_task (to_free);
	  free (to_free);
	  to_free = NULL;
	}
      if (child_task)
	{
	  thr->task = child_task;
	  if (__builtin_expect (child_task->fn == NULL, 0))
	    {
	      if (gomp_target_task_fn (child_task->fn_data))
		{
		  thr->task = task;
		  gomp_mutex_lock (&team->task_lock);
		  child_task->kind = GOMP_TASK_ASYNC_RUNNING;
		  struct gomp_target_task *ttask
		    = (struct gomp_target_task *) child_task->fn_data;
		  /* If GOMP_PLUGIN_target_task_completion has run already
		     in between gomp_target_task_fn and the mutex lock,
		     perform the requeuing here.  */
		  if (ttask->state == GOMP_TARGET_TASK_FINISHED)
		    gomp_target_task_completion (team, child_task);
		  else
		    ttask->state = GOMP_TARGET_TASK_RUNNING;
		  child_task = NULL;
		  continue;
		}
	    }
	  else
	    child_task->fn (child_task->fn_data);
	  thr->task = task;
	}
      else
	gomp_sem_wait (&taskgroup->taskgroup_sem);
      gomp_mutex_lock (&team->task_lock);
      if (child_task)
	{
	 finish_cancelled:;
	  size_t new_tasks
	    = gomp_task_run_post_handle_depend (child_task, team);
	  gomp_task_run_post_remove_parent (child_task);
	  gomp_clear_parent (&child_task->children_queue);
	  gomp_task_run_post_remove_taskgroup (child_task);
	  to_free = child_task;
	  child_task = NULL;
	  team->task_count--;
	  if (new_tasks > 1)
	    {
	      do_wake = team->nthreads - team->task_running_count
			- !task->in_tied_task;
	      if (do_wake > new_tasks)
		do_wake = new_tasks;
	    }
	}
    }

 finish:
  task->taskgroup = taskgroup->prev;
  gomp_sem_destroy (&taskgroup->taskgroup_sem);
  free (taskgroup);
}

/* OpenMP API: return nonzero if the calling task is a final task.  */

int
omp_in_final (void)
{
  struct gomp_thread *thr = gomp_thread ();
  return thr->task && thr->task->final_task;
}

ialias (omp_in_final)
irbuilder_for_unsigned_static_chunked.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=45 -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s // expected-no-diagnostics #ifndef HEADER #define HEADER // CHECK-LABEL: define {{.*}}@workshareloop_unsigned_static_chunked( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[I:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8 // CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4 // CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_LASTITER:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_LOWERBOUND:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_UPPERBOUND:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[P_STRIDE:.+]] = alloca i32, align 4 // CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8 // CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8 // CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8 // CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8 // CHECK-NEXT: store i32 33, i32* %[[I]], align 4 // CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0 // CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: store i32 %[[TMP2]], i32* %[[TMP1]], align 4 // CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]]) // 
CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4 // CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_PREHEADER]]: // CHECK-NEXT: store i32 0, i32* %[[P_LOWERBOUND]], align 4 // CHECK-NEXT: %[[TMP3:.+]] = sub i32 %[[DOTCOUNT]], 1 // CHECK-NEXT: store i32 %[[TMP3]], i32* %[[P_UPPERBOUND]], align 4 // CHECK-NEXT: store i32 1, i32* %[[P_STRIDE]], align 4 // CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1) // CHECK-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32 33, i32* %[[P_LASTITER]], i32* %[[P_LOWERBOUND]], i32* %[[P_UPPERBOUND]], i32* %[[P_STRIDE]], i32 1, i32 5) // CHECK-NEXT: %[[OMP_FIRSTCHUNK_LB:.+]] = load i32, i32* %[[P_LOWERBOUND]], align 4 // CHECK-NEXT: %[[OMP_FIRSTCHUNK_UB:.+]] = load i32, i32* %[[P_UPPERBOUND]], align 4 // CHECK-NEXT: %[[TMP4:.+]] = add i32 %[[OMP_FIRSTCHUNK_UB]], 1 // CHECK-NEXT: %[[OMP_CHUNK_RANGE:.+]] = sub i32 %[[TMP4]], %[[OMP_FIRSTCHUNK_LB]] // CHECK-NEXT: %[[OMP_DISPATCH_STRIDE:.+]] = load i32, i32* %[[P_STRIDE]], align 4 // CHECK-NEXT: %[[TMP5:.+]] = sub nuw i32 %[[DOTCOUNT]], %[[OMP_FIRSTCHUNK_LB]] // CHECK-NEXT: %[[TMP6:.+]] = icmp ule i32 %[[DOTCOUNT]], %[[OMP_FIRSTCHUNK_LB]] // CHECK-NEXT: %[[TMP7:.+]] = sub i32 %[[TMP5]], 1 // CHECK-NEXT: %[[TMP8:.+]] = udiv i32 %[[TMP7]], %[[OMP_DISPATCH_STRIDE]] // CHECK-NEXT: %[[TMP9:.+]] = add i32 %[[TMP8]], 1 // CHECK-NEXT: %[[TMP10:.+]] = icmp ule i32 %[[TMP5]], %[[OMP_DISPATCH_STRIDE]] // CHECK-NEXT: %[[TMP11:.+]] = select i1 %[[TMP10]], i32 1, i32 %[[TMP9]] // CHECK-NEXT: %[[OMP_DISPATCH_TRIPCOUNT:.+]] = select i1 %[[TMP6]], i32 0, i32 %[[TMP11]] // CHECK-NEXT: br label %[[OMP_DISPATCH_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_DISPATCH_PREHEADER]]: // CHECK-NEXT: br label %[[OMP_DISPATCH_HEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_DISPATCH_HEADER]]: // CHECK-NEXT: %[[OMP_DISPATCH_IV:.+]] = phi i32 [ 0, 
%[[OMP_DISPATCH_PREHEADER]] ], [ %[[OMP_DISPATCH_NEXT:.+]], %[[OMP_DISPATCH_INC:.+]] ] // CHECK-NEXT: br label %[[OMP_DISPATCH_COND:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_DISPATCH_COND]]: // CHECK-NEXT: %[[OMP_DISPATCH_CMP:.+]] = icmp ult i32 %[[OMP_DISPATCH_IV]], %[[OMP_DISPATCH_TRIPCOUNT]] // CHECK-NEXT: br i1 %[[OMP_DISPATCH_CMP]], label %[[OMP_DISPATCH_BODY:.+]], label %[[OMP_DISPATCH_EXIT:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_DISPATCH_BODY]]: // CHECK-NEXT: %[[TMP12:.+]] = mul i32 %[[OMP_DISPATCH_IV]], %[[OMP_DISPATCH_STRIDE]] // CHECK-NEXT: %[[TMP13:.+]] = add i32 %[[TMP12]], %[[OMP_FIRSTCHUNK_LB]] // CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER9:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_DISPATCH_INC]]: // CHECK-NEXT: %[[OMP_DISPATCH_NEXT]] = add nuw i32 %[[OMP_DISPATCH_IV]], 1 // CHECK-NEXT: br label %[[OMP_DISPATCH_HEADER]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_DISPATCH_EXIT]]: // CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]]) // CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM10:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1) // CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @2, i32 %[[OMP_GLOBAL_THREAD_NUM10]]) // CHECK-NEXT: br label %[[OMP_DISPATCH_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_DISPATCH_AFTER]]: // CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_PREHEADER9]]: // CHECK-NEXT: %[[TMP14:.+]] = add i32 %[[TMP13]], %[[OMP_CHUNK_RANGE]] // CHECK-NEXT: %[[OMP_CHUNK_IS_LAST:.+]] = icmp uge i32 %[[TMP14]], %[[DOTCOUNT]] // CHECK-NEXT: %[[TMP15:.+]] = sub i32 %[[DOTCOUNT]], %[[TMP13]] // CHECK-NEXT: %[[OMP_CHUNK_TRIPCOUNT:.+]] = select i1 %[[OMP_CHUNK_IS_LAST]], i32 %[[TMP15]], i32 %[[OMP_CHUNK_RANGE]] // CHECK-NEXT: br label %[[OMP_LOOP_HEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_HEADER]]: // CHECK-NEXT: %[[OMP_LOOP_IV:.+]] = phi i32 [ 0, %[[OMP_LOOP_PREHEADER9]] ], [ %[[OMP_LOOP_NEXT:.+]], %[[OMP_LOOP_INC:.+]] ] // CHECK-NEXT: br 
label %[[OMP_LOOP_COND:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_COND]]: // CHECK-NEXT: %[[OMP_LOOP_CMP:.+]] = icmp ult i32 %[[OMP_LOOP_IV]], %[[OMP_CHUNK_TRIPCOUNT]] // CHECK-NEXT: br i1 %[[OMP_LOOP_CMP]], label %[[OMP_LOOP_BODY:.+]], label %[[OMP_LOOP_EXIT:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_BODY]]: // CHECK-NEXT: %[[TMP16:.+]] = add i32 %[[OMP_LOOP_IV]], %[[TMP13]] // CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[TMP16]], %struct.anon.0* %[[AGG_CAPTURED1]]) // CHECK-NEXT: %[[TMP17:.+]] = load float*, float** %[[B_ADDR]], align 8 // CHECK-NEXT: %[[TMP18:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM:.+]] = zext i32 %[[TMP18]] to i64 // CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP17]], i64 %[[IDXPROM]] // CHECK-NEXT: %[[TMP19:.+]] = load float, float* %[[ARRAYIDX]], align 4 // CHECK-NEXT: %[[TMP20:.+]] = load float*, float** %[[C_ADDR]], align 8 // CHECK-NEXT: %[[TMP21:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM2:.+]] = zext i32 %[[TMP21]] to i64 // CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP20]], i64 %[[IDXPROM2]] // CHECK-NEXT: %[[TMP22:.+]] = load float, float* %[[ARRAYIDX3]], align 4 // CHECK-NEXT: %[[MUL:.+]] = fmul float %[[TMP19]], %[[TMP22]] // CHECK-NEXT: %[[TMP23:.+]] = load float*, float** %[[D_ADDR]], align 8 // CHECK-NEXT: %[[TMP24:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM4:.+]] = zext i32 %[[TMP24]] to i64 // CHECK-NEXT: %[[ARRAYIDX5:.+]] = getelementptr inbounds float, float* %[[TMP23]], i64 %[[IDXPROM4]] // CHECK-NEXT: %[[TMP25:.+]] = load float, float* %[[ARRAYIDX5]], align 4 // CHECK-NEXT: %[[MUL6:.+]] = fmul float %[[MUL]], %[[TMP25]] // CHECK-NEXT: %[[TMP26:.+]] = load float*, float** %[[A_ADDR]], align 8 // CHECK-NEXT: %[[TMP27:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM7:.+]] = zext i32 %[[TMP27]] to i64 // CHECK-NEXT: %[[ARRAYIDX8:.+]] = getelementptr inbounds float, 
float* %[[TMP26]], i64 %[[IDXPROM7]]
// CHECK-NEXT: store float %[[MUL6]], float* %[[ARRAYIDX8]], align 4
// CHECK-NEXT: br label %[[OMP_LOOP_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_INC]]:
// CHECK-NEXT: %[[OMP_LOOP_NEXT]] = add nuw i32 %[[OMP_LOOP_IV]], 1
// CHECK-NEXT: br label %[[OMP_LOOP_HEADER]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_EXIT]]:
// CHECK-NEXT: br label %[[OMP_DISPATCH_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_AFTER]]:
// CHECK-NEXT: ret void
// CHECK-NEXT: }

// Exercises OpenMPIRBuilder codegen for a worksharing loop with an
// unsigned induction variable and a chunked static schedule
// (schedule(static, 5)); the CHECK lines above/below are autogenerated
// by update_cc_test_checks.py -- do not edit them by hand.
extern "C" void workshareloop_unsigned_static_chunked(float *a, float *b,
                                                      float *c, float *d) {
#pragma omp for schedule(static, 5)
  for (unsigned i = 33; i < 32000000; i += 7) {
    a[i] = b[i] * c[i] * d[i];
  }
}

#endif // HEADER

// CHECK-LABEL: define {{.*}}@__captured_stmt(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8
// CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4
// CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4
// CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4
// CHECK-NEXT: store i32 32000000, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: store i32 7, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[TMP5:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[CMP:.+]] = icmp ult i32 %[[TMP4]], %[[TMP5]]
// CHECK-NEXT: br i1
%[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_TRUE]]: // CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[SUB:.+]] = sub i32 %[[TMP6]], %[[TMP7]] // CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[SUB1:.+]] = sub i32 %[[TMP8]], 1 // CHECK-NEXT: %[[ADD:.+]] = add i32 %[[SUB]], %[[SUB1]] // CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[TMP9]] // CHECK-NEXT: br label %[[COND_END:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_FALSE]]: // CHECK-NEXT: br label %[[COND_END]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_END]]: // CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ] // CHECK-NEXT: %[[TMP10:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP10]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LABEL: define {{.*}}@__captured_stmt.1( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8 // CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: %[[MUL:.+]] = mul i32 7, %[[TMP3]] // CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], 
%[[MUL]] // CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4} // CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 45} // CHECK: ![[META2:[0-9]+]] =
datatypes.h
#ifndef DATATYPES_H_
#define DATATYPES_H_

#include <stdbool.h>

#include "../tools.h"
#include "PlyDict.h"
#include "ObjDict.h"

#define MSG_HEAD_SEP "YGG_MSG_HEAD"
/*! @brief Size of COMM buffer. */
#define COMMBUFFSIZ 2000
#define FMT_LEN 100

#ifdef __cplusplus /* If this is a C++ compiler, use C linkage */
extern "C" {
#endif

/* NOTE(review): declared 'static' in a header, so every translation unit
   that includes this file gets its own copy; thread-local under OpenMP
   via the threadprivate pragma below. */
static char prefix_char = '#';
#ifdef _OPENMP
#pragma omp threadprivate(prefix_char)
#endif

/*! @brief Bit flags. */
#define HEAD_FLAG_VALID 0x00000001 //!< Set if the header is valid.
#define HEAD_FLAG_MULTIPART 0x00000002 //!< Set if the header is for a multipart message
#define HEAD_TYPE_IN_DATA 0x00000004 //!< Set if the type is stored with the data during serialization
#define HEAD_AS_ARRAY 0x00000008 //!< Set if messages will be serialized arrays

/*! @brief C-friendly definition of MetaschemaType. */
typedef struct dtype_t {
  char type[COMMBUFFSIZ]; //!< Type name
  bool use_generic; //!< Flag for empty dtypes to specify generic in/out
  void *obj; //!< MetaschemaType Pointer
} dtype_t;

/*! @brief C-friendly definition of YggGeneric. */
typedef struct generic_t {
  char prefix; //!< Prefix character for limited verification.
  void *obj; //!< Pointer to YggGeneric class.
} generic_t;

/*! @brief C-friendly definition of vector object. */
typedef generic_t json_array_t;

/*! @brief C-friendly definition of map object. */
typedef generic_t json_object_t;

/*! @brief C-friendly definition of schema object. */
typedef generic_t schema_t;

/*! @brief C-friendly definition of Python class object. */
typedef python_t python_class_t;

/*! @brief C-friendly definition of Python function object. */
typedef python_t python_function_t;

/*! @brief C-friendly definition of Python instance object. */
typedef generic_t python_instance_t;

/*! @brief Macro wrapping call to PyObject_CallFunction. */
#define call_python(x, format, ...) PyObject_CallFunction(x.obj, format, __VA_ARGS__)

/*! @brief Aliases to allow differentiation in parsing model definition.
*/
typedef char* unicode_t;
typedef char* string_t;
typedef char* bytes_t;

/*! @brief Header information passed by comms for multipart messages. */
typedef struct comm_head_t {
  size_t bodysiz; //!< Size of body.
  size_t bodybeg; //!< Start of body in header.
  int flags; //!< Bit flags encoding the status of the header.
  int nargs_populated; //!< Number of arguments populated during deserialization.
  //
  size_t size; //!< Size of incoming message.
  char address[COMMBUFFSIZ]; //!< Address that message will comm in on.
  char id[COMMBUFFSIZ]; //!< Unique ID associated with this message.
  char response_address[COMMBUFFSIZ]; //!< Response address.
  char request_id[COMMBUFFSIZ]; //!< Request id.
  char zmq_reply[COMMBUFFSIZ]; //!< Reply address for ZMQ sockets.
  char zmq_reply_worker[COMMBUFFSIZ]; //!< Reply address for worker socket.
  char model[COMMBUFFSIZ]; //!< Name of model that sent the header.
  // These should be removed once JSON fully implemented
  int serializer_type; //!< Code indicating the type of serializer.
  char format_str[COMMBUFFSIZ]; //!< Format string for serializer.
  char field_names[COMMBUFFSIZ]; //!< String containing field names.
  char field_units[COMMBUFFSIZ]; //!< String containing field units.
  //
  dtype_t* dtype; //!< Type structure.
} comm_head_t;

/*!
  @brief C wrapper for the C++ type_from_doc function.
  @param type_doc void* Pointer to const rapidjson::Value type doc.
  @param use_generic bool If true, the resulting type is expected to be
    generic (presumably mirrors dtype_t::use_generic -- confirm).
  @returns void* Pointer to MetaschemaType class.
 */
void* type_from_doc_c(const void* type_doc, const bool use_generic);

/*!
  @brief C wrapper for the C++ type_from_pyobj function.
  @param pyobj PyObject* Python object to build the type from.
  @param use_generic bool If true, the resulting type is expected to be
    generic (presumably mirrors dtype_t::use_generic -- confirm).
  @returns void* Pointer to MetaschemaType class.
 */
void* type_from_pyobj_c(PyObject* pyobj, const bool use_generic);

/*!
  @brief Determine if a datatype was created from a format.
  @param[in] type_struct dtype_t* Datatype structure.
  @returns int 1 if the datatype was created from a format, 0 if it was not,
    -1 if there is an error.
*/ int is_dtype_format_array(dtype_t* type_struct); /*! @brief Initialize an empty generic object. @returns generic_t New generic object structure. */ generic_t init_generic(); /*! @brief Initialize an empty array of mixed types with generic wrappers. @returns generic_t New generic object structure containing an empty array. */ generic_t init_generic_array(); /*! @brief Initialize an empty map (JSON object) of mixed types with generic wrappers. @returns generic_t New generic object structure contaiing an empty map (JSON object). */ generic_t init_generic_map(); /*! @brief Determine if the provided character matches the required generic prefix char. @param[in] x char Character to check. @returns int 1 if the character is the correct prefix, 0 otherwise. */ int is_generic_flag(char x); /*! @brief Determine if a generic structure is initialized. @param[in] x generic_t Generic structure to test. @returns int 1 if the structure is initialized, 0 otherwise. */ int is_generic_init(generic_t x); /*! @brief Create a generic object from the provided information. @param[in] type_class dtype_t* Type structure/class. @param[in] data void* Pointer to data. @param[in] nbytes size_t Size of data. @returns generic_t Pointer to new generic object structure. */ generic_t create_generic(dtype_t* type_class, void* data, size_t nbytes); /*! @brief Destroy a generic object. @param[in] x generic_t* Pointer to generic object structure to destory. @returns int -1 if unsuccessful, 0 otherwise. */ int destroy_generic(generic_t* x); /*! @brief Copy data from one generic object to the other. @param[in] src generic_t Generic structure that data should be copied from. @returns generic_t Copied structure. */ generic_t copy_generic(generic_t src); /*! @brief Display information about the generic type. @param[in] x generic_t* Wrapper for generic object. */ void display_generic(generic_t x); /*! @brief Return the recovered generic structure if one is present in the variable argument list. 
@param[in] nargs size_t Number of arguments present in ap.
  @param[in] ap va_list_t Variable argument list.
  @returns generic_t Generic structure if one is present.
 */
generic_t get_generic_va(size_t nargs, va_list_t ap);

/*!
  @brief Return the recovered generic structure if one is present in the
  variable argument list.
  @param[in] nargs size_t Number of arguments present in ap.
  @param[in] ap va_list_t Variable argument list.
  @returns generic_t* Generic structure if one is present, NULL otherwise.
 */
generic_t* get_generic_va_ptr(size_t nargs, va_list_t ap);

/*!
  @brief Return the recovered generic structure if one is present in the
  variable argument list by removing it.
  @param[in] nargs size_t* Pointer to number of arguments present in ap
  that will be decremented by 1.
  @param[in] ap va_list_t* Pointer to variable argument list.
  @returns generic_t Generic structure if one is present.
 */
generic_t pop_generic_va(size_t* nargs, va_list_t* ap);

/*!
  @brief Return the recovered generic structure if one is present in the
  variable argument list by removing it.
  @param[in] nargs size_t* Pointer to number of arguments present in ap
  that will be decremented by 1.
  @param[in] ap va_list_t* Pointer to variable argument list.
  @returns generic_t* Generic structure if one is present, NULL otherwise.
 */
generic_t* pop_generic_va_ptr(size_t* nargs, va_list_t* ap);

/*!
  @brief Add an element to the end of an array of generic elements.
  @param[in] arr generic_t Array to add element to.
  @param[in] x generic_t Element to add.
  @returns int Flag that is 1 if there is an error and 0 otherwise.
 */
int add_generic_array(generic_t arr, generic_t x);

/*!
  @brief Set an element in the array at a given index to a new value.
  @param[in] arr generic_t Array to set element in.
  @param[in] i size_t Index where element should be added.
  @param[in] x generic_t Element to add.
  @returns int Flag that is 1 if there is an error and 0 otherwise.
 */
int set_generic_array(generic_t arr, size_t i, generic_t x);

/*!
@brief Get an element from an array. @param[in] arr generic_t Array to get element from. @param[in] i size_t Index of element to get. @param[out] x generic_t* Pointer to address where element should be stored. @returns int Flag that is 1 if there is an error and 0 otherwise. */ int get_generic_array(generic_t arr, size_t i, generic_t *x); /*! @brief Set an element in the object at for a given key to a new value. @param[in] arr generic_t Object to add element to. @param[in] k const char* Key where element should be added. @param[in] x generic_t Element to add. @returns int Flag that is 1 if there is an error and 0 otherwise. */ int set_generic_object(generic_t arr, const char* k, generic_t x); /*! @brief Get an element from an object. @param[in] arr generic_t Object to get element from. @param[in] k const char* Key of element to return. @param[out] x generic_t* Pointer to address where element should be stored. @returns int Flag that is 1 if there is an error and 0 otherwise. */ int get_generic_object(generic_t arr, const char* k, generic_t *x); /*! @brief Get the number of elements in an array object. @param[in] x generic_t Generic object that is presumed to contain an array. @returns size_t Number of elements in array. */ size_t generic_array_get_size(generic_t x); /*! @brief Get an item from an array for types that don't require additional parameters. @param[in] x generic_t Generic object that is presumed to contain an array. @param[in] index size_t Index for value that should be returned. @param[in] type const char* Type of value expected. @returns void* Pointer to data for array item. 
*/ void* generic_array_get_item(generic_t x, const size_t index, const char *type); int generic_array_get_item_nbytes(generic_t x, const size_t index); bool generic_array_get_bool(generic_t x, const size_t index); int generic_array_get_integer(generic_t x, const size_t index); void* generic_array_get_null(generic_t x, const size_t index); double generic_array_get_number(generic_t x, const size_t index); char* generic_array_get_string(generic_t x, const size_t index); generic_t generic_array_get_object(generic_t x, const size_t index); generic_t generic_array_get_array(generic_t x, const size_t index); char* generic_array_get_direct(generic_t x, const size_t index); ply_t generic_array_get_ply(generic_t x, const size_t index); obj_t generic_array_get_obj(generic_t x, const size_t index); python_t generic_array_get_python_class(generic_t x, const size_t index); python_t generic_array_get_python_function(generic_t x, const size_t index); schema_t generic_array_get_schema(generic_t x, const size_t index); generic_t generic_array_get_any(generic_t x, const size_t index); /*! @brief Get a scalar value from an array. @param[in] x generic_t Generic object that is presumed to contain an array. @param[in] index size_t Index for value that should be returned. @param[in] subtype const char* Subtype of scalar expected. @param[in] precision const int Precision of scalar that is expected. @returns void* Pointer to scalar data. 
*/ void* generic_array_get_scalar(generic_t x, const size_t index, const char *subtype, const size_t precision); int8_t generic_array_get_int8(generic_t x, const size_t index); int16_t generic_array_get_int16(generic_t x, const size_t index); int32_t generic_array_get_int32(generic_t x, const size_t index); int64_t generic_array_get_int64(generic_t x, const size_t index); uint8_t generic_array_get_uint8(generic_t x, const size_t index); uint16_t generic_array_get_uint16(generic_t x, const size_t index); uint32_t generic_array_get_uint32(generic_t x, const size_t index); uint64_t generic_array_get_uint64(generic_t x, const size_t index); float generic_array_get_float(generic_t x, const size_t index); double generic_array_get_double(generic_t x, const size_t index); long double generic_array_get_long_double(generic_t x, const size_t index); complex_float_t generic_array_get_complex_float(generic_t x, const size_t index); complex_double_t generic_array_get_complex_double(generic_t x, const size_t index); complex_long_double_t generic_array_get_complex_long_double(generic_t x, const size_t index); char* generic_array_get_bytes(generic_t x, const size_t index); char* generic_array_get_unicode(generic_t x, const size_t index); /*! @brief Get a 1d array value from an array. @param[in] x generic_t Generic object that is presumed to contain an array. @param[in] index size_t Index for value that should be returned. @param[in] subtype const char* Subtype of array expected. @param[in] precision const size_t Precision of array that is expected. @param[out] data void** Pointer to pointer that should be reallocated to store the data. @returns size_t Number of elements in the data. 
*/ size_t generic_array_get_1darray(generic_t x, const size_t index, const char *subtype, const size_t precision, void** data); size_t generic_array_get_1darray_int8(generic_t x, const size_t index, int8_t** data); size_t generic_array_get_1darray_int16(generic_t x, const size_t index, int16_t** data); size_t generic_array_get_1darray_int32(generic_t x, const size_t index, int32_t** data); size_t generic_array_get_1darray_int64(generic_t x, const size_t index, int64_t** data); size_t generic_array_get_1darray_uint8(generic_t x, const size_t index, uint8_t** data); size_t generic_array_get_1darray_uint16(generic_t x, const size_t index, uint16_t** data); size_t generic_array_get_1darray_uint32(generic_t x, const size_t index, uint32_t** data); size_t generic_array_get_1darray_uint64(generic_t x, const size_t index, uint64_t** data); size_t generic_array_get_1darray_float(generic_t x, const size_t index, float** data); size_t generic_array_get_1darray_double(generic_t x, const size_t index, double** data); size_t generic_array_get_1darray_long_double(generic_t x, const size_t index, long double** data); size_t generic_array_get_1darray_complex_float(generic_t x, const size_t index, complex_float_t** data); size_t generic_array_get_1darray_complex_double(generic_t x, const size_t index, complex_double_t** data); size_t generic_array_get_1darray_complex_long_double(generic_t x, const size_t index, complex_long_double_t** data); size_t generic_array_get_1darray_bytes(generic_t x, const size_t index, char** data); size_t generic_array_get_1darray_unicode(generic_t x, const size_t index, char** data); /*! @brief Get a nd array value from an array. @param[in] x generic_t Generic object that is presumed to contain an array. @param[in] index size_t Index for value that should be returned. @param[in] subtype const char* Subtype of array expected. @param[in] precision const size_t Precision of array that is expected. 
@param[out] data void** Pointer to array that should be reallocated to store the data. @param[out] shape size_t** Pointer to array that should be reallocated to store the array shape in each dimension. @returns size_t Number of dimensions in the array. */ size_t generic_array_get_ndarray(generic_t x, const size_t index, const char *subtype, const size_t precision, void** data, size_t** shape); size_t generic_array_get_ndarray_int8(generic_t x, const size_t index, int8_t** data, size_t** shape); size_t generic_array_get_ndarray_int16(generic_t x, const size_t index, int16_t** data, size_t** shape); size_t generic_array_get_ndarray_int32(generic_t x, const size_t index, int32_t** data, size_t** shape); size_t generic_array_get_ndarray_int64(generic_t x, const size_t index, int64_t** data, size_t** shape); size_t generic_array_get_ndarray_uint8(generic_t x, const size_t index, uint8_t** data, size_t** shape); size_t generic_array_get_ndarray_uint16(generic_t x, const size_t index, uint16_t** data, size_t** shape); size_t generic_array_get_ndarray_uint32(generic_t x, const size_t index, uint32_t** data, size_t** shape); size_t generic_array_get_ndarray_uint64(generic_t x, const size_t index, uint64_t** data, size_t** shape); size_t generic_array_get_ndarray_float(generic_t x, const size_t index, float** data, size_t** shape); size_t generic_array_get_ndarray_double(generic_t x, const size_t index, double** data, size_t** shape); size_t generic_array_get_ndarray_long_double(generic_t x, const size_t index, long double** data, size_t** shape); size_t generic_array_get_ndarray_complex_float(generic_t x, const size_t index, complex_float_t** data, size_t** shape); size_t generic_array_get_ndarray_complex_double(generic_t x, const size_t index, complex_double_t** data, size_t** shape); size_t generic_array_get_ndarray_complex_long_double(generic_t x, const size_t index, complex_long_double_t** data, size_t** shape); size_t generic_array_get_ndarray_bytes(generic_t x, const 
size_t index, char** data, size_t** shape); size_t generic_array_get_ndarray_unicode(generic_t x, const size_t index, char** data, size_t** shape); /*! @brief Get the number of elements in an map object. @param[in] x generic_t Generic object that is presumed to contain a map. @returns size_t Number of elements in map. */ size_t generic_map_get_size(generic_t x); /*! @brief Determine if a map object has a certain key. @param[in] x generic_t Generic object that is presumed to contain a map. @param[in] key char* Key to check for. @returns int 1 if the key is present, 0 otherwise. */ int generic_map_has_key(generic_t x, char* key); /*! @brief Get the keys in a map object. @param[in] x generic_t Generic object that is presumed to contain a map. @param[out] keys char*** Pointer to memory where array of keys should be stored. @returns size_t Number of keys in map. */ size_t generic_map_get_keys(generic_t x, char*** keys); /*! @brief Get an item from a map for types that don't require additional parameters. @param[in] x generic_t Generic object that is presumed to contain a map. @param[in] key const char* Key string for value that should be returned. @param[in] type const char* Type of value expected. @returns void* Pointer to data for map item. 
*/ void* generic_map_get_item(generic_t x, const char* key, const char *type); int generic_map_get_item_nbytes(generic_t x, const char* key); bool generic_map_get_bool(generic_t x, const char* key); int generic_map_get_integer(generic_t x, const char* key); void* generic_map_get_null(generic_t x, const char* key); double generic_map_get_number(generic_t x, const char* key); char* generic_map_get_string(generic_t x, const char* key); generic_t generic_map_get_object(generic_t x, const char* key); generic_t generic_map_get_array(generic_t x, const char* key); char* generic_map_get_direct(generic_t x, const char* key); ply_t generic_map_get_ply(generic_t x, const char* key); obj_t generic_map_get_obj(generic_t x, const char* key); python_t generic_map_get_python_class(generic_t x, const char* key); python_t generic_map_get_python_function(generic_t x, const char* key); schema_t generic_map_get_schema(generic_t x, const char* key); generic_t generic_map_get_any(generic_t x, const char* key); /*! @brief Get a scalar value from a map. @param[in] x generic_t Generic object that is presumed to contain a map. @param[in] key const char* Key string for value that should be returned. @param[in] subtype const char* Subtype of scalar expected. @param[in] precision const int Precision of scalar that is expected. @returns void* Pointer to scalar data. 
*/ void* generic_map_get_scalar(generic_t x, const char* key, const char *subtype, const size_t precision); int8_t generic_map_get_int8(generic_t x, const char* key); int16_t generic_map_get_int16(generic_t x, const char* key); int32_t generic_map_get_int32(generic_t x, const char* key); int64_t generic_map_get_int64(generic_t x, const char* key); uint8_t generic_map_get_uint8(generic_t x, const char* key); uint16_t generic_map_get_uint16(generic_t x, const char* key); uint32_t generic_map_get_uint32(generic_t x, const char* key); uint64_t generic_map_get_uint64(generic_t x, const char* key); float generic_map_get_float(generic_t x, const char* key); double generic_map_get_double(generic_t x, const char* key); long double generic_map_get_long_double(generic_t x, const char* key); complex_float_t generic_map_get_complex_float(generic_t x, const char* key); complex_double_t generic_map_get_complex_double(generic_t x, const char* key); complex_long_double_t generic_map_get_complex_long_double(generic_t x, const char* key); char* generic_map_get_bytes(generic_t x, const char* key); char* generic_map_get_unicode(generic_t x, const char* key); /*! @brief Get a 1d array value from a map. @param[in] x generic_t Generic object that is presumed to contain a map. @param[in] key const char* Key string for value that should be returned. @param[in] subtype const char* Subtype of array expected. @param[in] precision const size_t Precision of array that is expected. @param[out] data void** Pointer to pointer that should be reallocated to store the data. @returns size_t Number of elements in the data. 
*/ size_t generic_map_get_1darray(generic_t x, const char* key, const char *subtype, const size_t precision, void** data); size_t generic_map_get_1darray_int8(generic_t x, const char* key, int8_t** data); size_t generic_map_get_1darray_int16(generic_t x, const char* key, int16_t** data); size_t generic_map_get_1darray_int32(generic_t x, const char* key, int32_t** data); size_t generic_map_get_1darray_int64(generic_t x, const char* key, int64_t** data); size_t generic_map_get_1darray_uint8(generic_t x, const char* key, uint8_t** data); size_t generic_map_get_1darray_uint16(generic_t x, const char* key, uint16_t** data); size_t generic_map_get_1darray_uint32(generic_t x, const char* key, uint32_t** data); size_t generic_map_get_1darray_uint64(generic_t x, const char* key, uint64_t** data); size_t generic_map_get_1darray_float(generic_t x, const char* key, float** data); size_t generic_map_get_1darray_double(generic_t x, const char* key, double** data); size_t generic_map_get_1darray_long_double(generic_t x, const char* key, long double** data); size_t generic_map_get_1darray_complex_float(generic_t x, const char* key, complex_float_t** data); size_t generic_map_get_1darray_complex_double(generic_t x, const char* key, complex_double_t** data); size_t generic_map_get_1darray_complex_long_double(generic_t x, const char* key, complex_long_double_t** data); size_t generic_map_get_1darray_bytes(generic_t x, const char* key, char** data); size_t generic_map_get_1darray_unicode(generic_t x, const char* key, char** data); /*! @brief Get a nd array value from a map. @param[in] x generic_t Generic object that is presumed to contain a map. @param[in] key const char* Key string for value that should be returned. @param[in] subtype const char* Subtype of array expected. @param[in] precision const size_t Precision of array that is expected. @param[out] data void** Pointer to array that should be reallocated to store the data. 
@param[out] shape size_t** Pointer to array that should be reallocated to store the array shape in each dimension. @returns size_t Number of dimensions in the array. */ size_t generic_map_get_ndarray(generic_t x, const char* key, const char *subtype, const size_t precision, void** data, size_t** shape); size_t generic_map_get_ndarray_int8(generic_t x, const char* key, int8_t** data, size_t** shape); size_t generic_map_get_ndarray_int16(generic_t x, const char* key, int16_t** data, size_t** shape); size_t generic_map_get_ndarray_int32(generic_t x, const char* key, int32_t** data, size_t** shape); size_t generic_map_get_ndarray_int64(generic_t x, const char* key, int64_t** data, size_t** shape); size_t generic_map_get_ndarray_uint8(generic_t x, const char* key, uint8_t** data, size_t** shape); size_t generic_map_get_ndarray_uint16(generic_t x, const char* key, uint16_t** data, size_t** shape); size_t generic_map_get_ndarray_uint32(generic_t x, const char* key, uint32_t** data, size_t** shape); size_t generic_map_get_ndarray_uint64(generic_t x, const char* key, uint64_t** data, size_t** shape); size_t generic_map_get_ndarray_float(generic_t x, const char* key, float** data, size_t** shape); size_t generic_map_get_ndarray_double(generic_t x, const char* key, double** data, size_t** shape); size_t generic_map_get_ndarray_long_double(generic_t x, const char* key, long double** data, size_t** shape); size_t generic_map_get_ndarray_complex_float(generic_t x, const char* key, complex_float_t** data, size_t** shape); size_t generic_map_get_ndarray_complex_double(generic_t x, const char* key, complex_double_t** data, size_t** shape); size_t generic_map_get_ndarray_complex_long_double(generic_t x, const char* key, complex_long_double_t** data, size_t** shape); size_t generic_map_get_ndarray_bytes(generic_t x, const char* key, char** data, size_t** shape); size_t generic_map_get_ndarray_unicode(generic_t x, const char* key, char** data, size_t** shape); /*! 
@brief Set an item in an array for types that don't require additional parameters. @param[in] x generic_t Generic object that is presumed to contain an array. @param[in] index size_t Index for value that should be set. @param[in] type const char* Type of value being set. @param[in] value void* Pointer to data that item should be set to. @returns int -1 if there is an error, 0 otherwise. */ int generic_array_set_item(generic_t x, const size_t index, const char *type, void* value); int generic_array_set_bool(generic_t x, const size_t index, bool value); int generic_array_set_integer(generic_t x, const size_t index, int value); int generic_array_set_null(generic_t x, const size_t index, void* value); int generic_array_set_number(generic_t x, const size_t index, double value); int generic_array_set_string(generic_t x, const size_t index, char* value); int generic_array_set_object(generic_t x, const size_t index, generic_t value); int generic_array_set_map(generic_t x, const size_t index, generic_t value); int generic_array_set_array(generic_t x, const size_t index, generic_t value); int generic_array_set_direct(generic_t x, const size_t index, char* value); int generic_array_set_ply(generic_t x, const size_t index, ply_t value); int generic_array_set_obj(generic_t x, const size_t index, obj_t value); int generic_array_set_python_class(generic_t x, const size_t index, python_t value); int generic_array_set_python_function(generic_t x, const size_t index, python_t value); int generic_array_set_schema(generic_t x, const size_t index, schema_t value); int generic_array_set_any(generic_t x, const size_t index, generic_t value); /*! @brief Set a scalar value in an array. @param[in] x generic_t Generic object that is presumed to contain an array. @param[in] index size_t Index for value that should be set. @param[in] value void* Pointer to scalar data. @param[in] subtype const char* Subtype of scalar in value. @param[in] precision const int Precision of scalar in value. 
@param[in] units const char* Units of value. @returns int -1 if there is an error, 0 otherwise. */ int generic_array_set_scalar(generic_t x, const size_t index, void* value, const char *subtype, const size_t precision, const char* units); int generic_array_set_int8(generic_t x, const size_t index, int8_t value, const char* units); int generic_array_set_int16(generic_t x, const size_t index, int16_t value, const char* units); int generic_array_set_int32(generic_t x, const size_t index, int32_t value, const char* units); int generic_array_set_int64(generic_t x, const size_t index, int64_t value, const char* units); int generic_array_set_uint8(generic_t x, const size_t index, uint8_t value, const char* units); int generic_array_set_uint16(generic_t x, const size_t index, uint16_t value, const char* units); int generic_array_set_uint32(generic_t x, const size_t index, uint32_t value, const char* units); int generic_array_set_uint64(generic_t x, const size_t index, uint64_t value, const char* units); int generic_array_set_float(generic_t x, const size_t index, float value, const char* units); int generic_array_set_double(generic_t x, const size_t index, double value, const char* units); int generic_array_set_long_double(generic_t x, const size_t index, long double value, const char* units); int generic_array_set_complex_float(generic_t x, const size_t index, complex_float_t value, const char* units); int generic_array_set_complex_double(generic_t x, const size_t index, complex_double_t value, const char* units); int generic_array_set_complex_long_double(generic_t x, const size_t index, complex_long_double_t value, const char* units); int generic_array_set_bytes(generic_t x, const size_t index, char* value, const char* units); int generic_array_set_unicode(generic_t x, const size_t index, char* value, const char* units); /*! @brief Set a 1d array value in an array. @param[in] x generic_t Generic object that is presumed to contain an array. 
@param[in] index size_t Index for value that should be set. @param[in] value void* Pointer to array data. @param[in] subtype const char* Subtype of array expected. @param[in] precision const size_t Precision of array that is expected. @param[in] length const size_t Number of elements in value. @param[in] units const char* Units of value. @returns int -1 if there is an error, 0 otherwise. */ int generic_array_set_1darray(generic_t x, const size_t index, void* value, const char *subtype, const size_t precision, const size_t length, const char* units); int generic_array_set_1darray_int8(generic_t x, const size_t index, int8_t* value, const size_t length, const char* units); int generic_array_set_1darray_int16(generic_t x, const size_t index, int16_t* value, const size_t length, const char* units); int generic_array_set_1darray_int32(generic_t x, const size_t index, int32_t* value, const size_t length, const char* units); int generic_array_set_1darray_int64(generic_t x, const size_t index, int64_t* value, const size_t length, const char* units); int generic_array_set_1darray_uint8(generic_t x, const size_t index, uint8_t* value, const size_t length, const char* units); int generic_array_set_1darray_uint16(generic_t x, const size_t index, uint16_t* value, const size_t length, const char* units); int generic_array_set_1darray_uint32(generic_t x, const size_t index, uint32_t* value, const size_t length, const char* units); int generic_array_set_1darray_uint64(generic_t x, const size_t index, uint64_t* value, const size_t length, const char* units); int generic_array_set_1darray_float(generic_t x, const size_t index, float* value, const size_t length, const char* units); int generic_array_set_1darray_double(generic_t x, const size_t index, double* value, const size_t length, const char* units); int generic_array_set_1darray_long_double(generic_t x, const size_t index, long double* value, const size_t length, const char* units); int 
generic_array_set_1darray_complex_float(generic_t x, const size_t index, complex_float_t* value, const size_t length, const char* units); int generic_array_set_1darray_complex_double(generic_t x, const size_t index, complex_double_t* value, const size_t length, const char* units); int generic_array_set_1darray_complex_long_double(generic_t x, const size_t index, complex_long_double_t* value, const size_t length, const char* units); int generic_array_set_1darray_bytes(generic_t x, const size_t index, char** value, const size_t length, const char* units); int generic_array_set_1darray_unicode(generic_t x, const size_t index, char** value, const size_t length, const char* units); /*! @brief Set a nd array value from an array. @param[in] x generic_t Generic object that is presumed to contain an array. @param[in] index size_t Index for value that should be set. @param[in] value void* Pointer to array data. @param[in] subtype const char* Subtype of array in value. @param[in] precision const size_t Precision of array that is in value. @param[in] ndim size_t Number of dimensions in the array. @param[in] shape size_t* Pointer to array containing the size of the array in each dimension. @returns int -1 if there is an error, 0 otherwise. 
*/ int generic_array_set_ndarray(generic_t x, const size_t index, void* data, const char *subtype, const size_t precision, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_int8(generic_t x, const size_t index, int8_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_int16(generic_t x, const size_t index, int16_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_int32(generic_t x, const size_t index, int32_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_int64(generic_t x, const size_t index, int64_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_uint8(generic_t x, const size_t index, uint8_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_uint16(generic_t x, const size_t index, uint16_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_uint32(generic_t x, const size_t index, uint32_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_uint64(generic_t x, const size_t index, uint64_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_float(generic_t x, const size_t index, float* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_double(generic_t x, const size_t index, double* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_long_double(generic_t x, const size_t index, long double* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_complex_float(generic_t x, const size_t index, complex_float_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_complex_double(generic_t x, 
const size_t index, complex_double_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_complex_long_double(generic_t x, const size_t index, complex_long_double_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_bytes(generic_t x, const size_t index, char** data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_unicode(generic_t x, const size_t index, char** data, const size_t ndim, const size_t* shape, const char* units); /*! @brief Set an item from a map for types that don't require additional parameters. @param[in] x generic_t Generic object that is presumed to contain a map. @param[in] key const char* Key string for value that should be set. @param[in] type const char* Type of value being set. @param[in] value void* Pointer to data that item should be set to. @returns int -1 if there is an error, 0 otherwise. */ int generic_map_set_item(generic_t x, const char* key, const char* type, void* value); int generic_map_set_bool(generic_t x, const char* key, bool value); int generic_map_set_integer(generic_t x, const char* key, int value); int generic_map_set_null(generic_t x, const char* key, void* value); int generic_map_set_number(generic_t x, const char* key, double value); int generic_map_set_string(generic_t x, const char* key, char* value); int generic_map_set_object(generic_t x, const char* key, generic_t value); int generic_map_set_map(generic_t x, const char* key, generic_t value); int generic_map_set_array(generic_t x, const char* key, generic_t value); int generic_map_set_direct(generic_t x, const char* key, char* value); int generic_map_set_ply(generic_t x, const char* key, ply_t value); int generic_map_set_obj(generic_t x, const char* key, obj_t value); int generic_map_set_python_class(generic_t x, const char* key, python_t value); int generic_map_set_python_function(generic_t x, const char* key, python_t value); int 
generic_map_set_schema(generic_t x, const char* key, schema_t value); int generic_map_set_any(generic_t x, const char* key, generic_t value); /*! @brief Set a scalar value in a map. @param[in] x generic_t Generic object that is presumed to contain a map. @param[in] key const char* Key string for value that should be set. @param[in] value void* Pointer to scalar data. @param[in] subtype const char* Subtype of scalar in value. @param[in] precision const int Precision of scalar in value. @param[in] units const char* Units of value. @returns int -1 if there is an error, 0 otherwise. */ int generic_map_set_scalar(generic_t x, const char* key, void* value, const char *subtype, const size_t precision, const char* units); int generic_map_set_int8(generic_t x, const char* key, int8_t value, const char* units); int generic_map_set_int16(generic_t x, const char* key, int16_t value, const char* units); int generic_map_set_int32(generic_t x, const char* key, int32_t value, const char* units); int generic_map_set_int64(generic_t x, const char* key, int64_t value, const char* units); int generic_map_set_uint8(generic_t x, const char* key, uint8_t value, const char* units); int generic_map_set_uint16(generic_t x, const char* key, uint16_t value, const char* units); int generic_map_set_uint32(generic_t x, const char* key, uint32_t value, const char* units); int generic_map_set_uint64(generic_t x, const char* key, uint64_t value, const char* units); int generic_map_set_float(generic_t x, const char* key, float value, const char* units); int generic_map_set_double(generic_t x, const char* key, double value, const char* units); int generic_map_set_long_double(generic_t x, const char* key, long double value, const char* units); int generic_map_set_complex_float(generic_t x, const char* key, complex_float_t value, const char* units); int generic_map_set_complex_double(generic_t x, const char* key, complex_double_t value, const char* units); int 
generic_map_set_complex_long_double(generic_t x, const char* key, complex_long_double_t value, const char* units); int generic_map_set_bytes(generic_t x, const char* key, char* value, const char* units); int generic_map_set_unicode(generic_t x, const char* key, char* value, const char* units); /*! @brief Set a 1d array value in a map. @param[in] x generic_t Generic object that is presumed to contain a map. @param[in] key const char* Key string for value that should be set. @param[in] value void* Pointer to array data. @param[in] subtype const char* Subtype of array expected. @param[in] precision const size_t Precision of array that is expected. @param[in] length const size_t Number of elements in value. @param[in] units const char* Units of value. @returns int -1 if there is an error, 0 otherwise. */ int generic_map_set_1darray(generic_t x, const char* key, void* value, const char *subtype, const size_t precision, const size_t length, const char* units); int generic_map_set_1darray_int8(generic_t x, const char* key, int8_t* value, const size_t length, const char* units); int generic_map_set_1darray_int16(generic_t x, const char* key, int16_t* value, const size_t length, const char* units); int generic_map_set_1darray_int32(generic_t x, const char* key, int32_t* value, const size_t length, const char* units); int generic_map_set_1darray_int64(generic_t x, const char* key, int64_t* value, const size_t length, const char* units); int generic_map_set_1darray_uint8(generic_t x, const char* key, uint8_t* value, const size_t length, const char* units); int generic_map_set_1darray_uint16(generic_t x, const char* key, uint16_t* value, const size_t length, const char* units); int generic_map_set_1darray_uint32(generic_t x, const char* key, uint32_t* value, const size_t length, const char* units); int generic_map_set_1darray_uint64(generic_t x, const char* key, uint64_t* value, const size_t length, const char* units); int generic_map_set_1darray_float(generic_t x, const char* 
key, float* value, const size_t length, const char* units); int generic_map_set_1darray_double(generic_t x, const char* key, double* value, const size_t length, const char* units); int generic_map_set_1darray_long_double(generic_t x, const char* key, long double* value, const size_t length, const char* units); int generic_map_set_1darray_complex_float(generic_t x, const char* key, complex_float_t* value, const size_t length, const char* units); int generic_map_set_1darray_complex_double(generic_t x, const char* key, complex_double_t* value, const size_t length, const char* units); int generic_map_set_1darray_complex_long_double(generic_t x, const char* key, complex_long_double_t* value, const size_t length, const char* units); int generic_map_set_1darray_bytes(generic_t x, const char* key, char** value, const size_t length, const char* units); int generic_map_set_1darray_unicode(generic_t x, const char* key, char** value, const size_t length, const char* units); /*! @brief Set a nd array value in a map. @param[in] x generic_t Generic object that is presumed to contain a map. @param[in] key const char* Key string for value that should be set. @param[in] value void* Pointer to array data. @param[in] subtype const char* Subtype of array in value. @param[in] precision const size_t Precision of array that is in value. @param[in] ndim size_t Number of dimensions in the array. @param[in] shape size_t* Pointer to array containing the size of the array in each dimension. @returns int -1 if there is an error, 0 otherwise. 
*/ int generic_map_set_ndarray(generic_t x, const char* key, void* data, const char *subtype, const size_t precision, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_int8(generic_t x, const char* key, int8_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_int16(generic_t x, const char* key, int16_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_int32(generic_t x, const char* key, int32_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_int64(generic_t x, const char* key, int64_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_uint8(generic_t x, const char* key, uint8_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_uint16(generic_t x, const char* key, uint16_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_uint32(generic_t x, const char* key, uint32_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_uint64(generic_t x, const char* key, uint64_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_float(generic_t x, const char* key, float* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_double(generic_t x, const char* key, double* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_long_double(generic_t x, const char* key, long double* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_complex_float(generic_t x, const char* key, complex_float_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_complex_double(generic_t x, const char* key, complex_double_t* data, const size_t ndim, const 
size_t* shape, const char* units); int
generic_map_set_ndarray_complex_long_double(generic_t x, const char* key, complex_long_double_t* data, const size_t ndim, const size_t* shape, const char* units);
int generic_map_set_ndarray_bytes(generic_t x, const char* key, char** data, const size_t ndim, const size_t* shape, const char* units);
int generic_map_set_ndarray_unicode(generic_t x, const char* key, char** data, const size_t ndim, const size_t* shape, const char* units);
/*!
  @brief Destroy a structure containing a Python object.
  @param[in] x python_t* Pointer to Python object structure that should be freed.
*/
void destroy_python(python_t *x);
/*!
  @brief Copy a Python object structure (NOTE: this doesn't copy the underlying Python object but does increment the reference count).
  @param[in] x python_t Structure containing Python object to copy.
  @returns python_t Copy of x.
*/
python_t copy_python(python_t x);
/*!
  @brief Display a Python object structure.
  @param[in] x python_t Structure containing Python object to display.
*/
void display_python(python_t x);
/*!
  @brief Destroy a structure containing a Python function object.
  @param[in] x python_function_t* Pointer to Python function structure that should be freed.
*/
void destroy_python_function(python_function_t *x);
/*!
  @brief Skip datatype arguments.
  @param[in] dtype dtype_t* Type structure to skip arguments for.
  @param[in, out] nargs Pointer to number of arguments in ap.
  @param[in, out] ap va_list_t Variable argument list.
  @returns int 0 if there are no errors, 1 otherwise.
*/
int skip_va_elements(const dtype_t* dtype, size_t *nargs, va_list_t *ap);
/*!
  @brief Determine if a datatype is empty.
  @param[in] dtype dtype_t* Type structure to test.
  @returns int 1 if dtype is empty, 0 otherwise.
*/
int is_empty_dtype(const dtype_t* dtype);
/*!
  @brief Get the name of the type from the class.
  @param[in] type_class dtype_t* Type structure/class.
  @returns const char* Type name.
*/
const char* dtype_name(const dtype_t* type_class);
/*!
  @brief Get the subtype of the type.
  @param[in] type_class dtype_t* Type structure/class.
  @returns const char* The subtype of the class, "" if there is an error.
*/
const char* dtype_subtype(const dtype_t* type_class);
/*!
  @brief Get the precision of the type.
  @param[in] type_class dtype_t* Type structure/class.
  @returns const size_t The precision of the class, 0 if there is an error.
*/
const size_t dtype_precision(const dtype_t* type_class);
/*!
  @brief Initialize a datatype structure including setting the type string.
  @param[in] dtype dtype_t* Type structure/class.
  @returns dtype_t* Initialized type structure/class.
*/
dtype_t* complete_dtype(dtype_t *dtype, const bool use_generic);
/*!
  @brief Construct an empty type object.
  @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes.
  @returns dtype_t* Type structure/class.
*/
dtype_t* create_dtype_empty(const bool use_generic);
/*!
  @brief Create a datatype based on a JSON document.
  @param type_doc void* Pointer to const rapidjson::Value type doc.
  @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes.
  @returns dtype_t* Type structure/class.
*/
dtype_t* create_dtype_doc(void* type_doc, const bool use_generic);
/*!
  @brief Create a datatype based on a Python dictionary.
  @param[in] pyobj PyObject* Python dictionary.
  @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes.
  @returns dtype_t* Type structure/class.
*/
dtype_t* create_dtype_python(PyObject* pyobj, const bool use_generic);
/*!
  @brief Construct a Direct type object.
  @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes.
  @returns dtype_t* Type structure/class.
*/
dtype_t* create_dtype_direct(const bool use_generic);
/*!
  @brief Construct a type object for one of the default JSON types.
@param[in] type char* Name of the type. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_default(const char* type, const bool use_generic); /*! @brief Construct a Scalar type object. @param[in] subtype char* Name of the scalar subtype (e.g. int, uint, float, bytes). @param[in] precision size_t Precision of the scalar in bits. @param[in] units char* Units for scalar. (e.g. "cm", "g", "" for unitless) @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_scalar(const char* subtype, const size_t precision, const char* units, const bool use_generic); /*! @brief Construct a 1D array type object. @param[in] subtype char* Name of the array subtype (e.g. int, uint, float, bytes). @param[in] precision size_t Precision of the array in bits. @param[in] length size_t Number of elements in the array. @param[in] units char* Units for array elements. (e.g. "cm", "g", "" for unitless) @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_1darray(const char* subtype, const size_t precision, const size_t length, const char* units, const bool use_generic); /*! @brief Construct a ND array type object. @param[in] subtype char* Name of the array subtype (e.g. int, uint, float, bytes). @param[in] precision size_t Precision of the array in bits. @param[in] ndim size_t Number of dimensions in the array (and therefore also the number of elements in shape). @param[in] shape size_t* Pointer to array where each element is the size of the array in that dimension. @param[in] units char* Units for array elements. (e.g. 
"cm", "g", "" for unitless) @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_ndarray(const char* subtype, const size_t precision, const size_t ndim, const size_t* shape, const char* units, const bool use_generic); /*! @brief Construct a ND array type object. @param[in] subtype char* Name of the array subtype (e.g. int, uint, float, bytes). @param[in] precision size_t Precision of the array in bits. @param[in] ndim size_t Number of dimensions in the array (and therefore also the number of elements in shape). @param[in] shape[] size_t Array where each element is the size of the array in that dimension. @param[in] units char* Units for array elements. (e.g. "cm", "g", "" for unitless) @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_ndarray_arr(const char* subtype, const size_t precision, const size_t ndim, const int64_t shape[], const char* units, const bool use_generic); /*! @brief Construct a JSON array type object. @param[in] nitems size_t Number of types in items. @param[in] items dtype_t** Pointer to array of types describing the array elements. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_json_array(const size_t nitems, dtype_t** items, const bool use_generic); /*! @brief Construct a JSON object type object. @param[in] nitems size_t Number of keys/types in keys and values. @param[in] keys char** Pointer to array of keys for each type. @param[in] values dtype_t** Pointer to array of types describing the values for each key. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. 
*/ dtype_t* create_dtype_json_object(const size_t nitems, char** keys, dtype_t** values, const bool use_generic); /*! @brief Construct a Ply type object. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_ply(const bool use_generic); /*! @brief Construct a Obj type object. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_obj(const bool use_generic); /*! @brief Construct an AsciiTable type object. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_ascii_table(const char *format_str, const int as_array, const bool use_generic); /*! @brief Construct a type object based on the provided format string. @param[in] format_str const char* C-style format string that will be used to determine the type of elements in arrays that will be serialized/deserialized using the resulting type. @param[in] as_array int If 1, the types will be arrays. Otherwise they will be scalars. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_format(const char *format_str, const int as_array, const bool use_generic); /*! @brief Construct a type object for Python objects. @param[in] type char* Type string. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_pyobj(const char* type, const bool use_generic); /*! @brief Construct a type object for Python object instances. @param[in] class_name char* Python class name. 
@param[in] args_dtype dtype_t* Datatype describing the arguments creating the instance. @param[in] kwargs_dtype dtype_t* Datatype describing the keyword arguments creating the instance. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_pyinst(const char* class_name, const dtype_t* args_dtype, const dtype_t* kwargs_dtype, const bool use_generic); /*! @brief Construct a type object for a schema. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_schema(const bool use_generic); /*! @brief Construct a type object for receiving any type. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_any(const bool use_generic); /*! @brief Wrapper for freeing MetaschemaType class wrapper struct. @param[in] dtype dtype_t** Wrapper struct for C++ Metaschema type class. @returns: int 0 if free was successfull, -1 if there was an error. */ int destroy_dtype(dtype_t** dtype); /*! @brief Initialize a header struct. @param[in] size size_t Size of message to be sent. @param[in] address char* Address that should be used for remainder of message following this header if it is a multipart message. @param[in] id char* Message ID. @returns comm_head_t Structure with provided information, char arrays correctly initialized to empty strings if NULLs provided. 
*/ static inline comm_head_t init_header(const size_t size, const char *address, const char *id) { comm_head_t out; // Parameters set during read out.bodysiz = 0; out.bodybeg = 0; out.flags = HEAD_FLAG_VALID; out.nargs_populated = 0; // Parameters sent in header out.size = size; if (address == NULL) out.address[0] = '\0'; else strncpy(out.address, address, COMMBUFFSIZ); if (id == NULL) out.id[0] = '\0'; else strncpy(out.id, id, COMMBUFFSIZ); out.response_address[0] = '\0'; out.request_id[0] = '\0'; out.zmq_reply[0] = '\0'; out.zmq_reply_worker[0] = '\0'; out.model[0] = '\0'; // Parameters that will be removed out.serializer_type = -1; out.format_str[0] = '\0'; // Parameters used for type out.dtype = NULL; return out; }; /*! @brief Destroy a header object. @param[in] x comm_head_t* Pointer to the header that should be destroyed. @returns int 0 if successful, -1 otherwise. */ static inline int destroy_header(comm_head_t* x) { int ret = 0; if (x->dtype != NULL) { ret = destroy_dtype(&(x->dtype)); } return ret; }; /*! @brief Split header and body of message. @param[in] buf const char* Message that should be split. @param[in] buf_siz size_t Size of buf. @param[out] head const char** pointer to buffer where the extracted header should be stored. @param[out] headsiz size_t reference to memory where size of extracted header should be stored. @returns: int 0 if split is successful, -1 if there was an error. 
*/ static inline int split_head_body(const char *buf, const size_t buf_siz, char **head, size_t *headsiz) { // Split buffer into head and body int ret; size_t sind, eind, sind_head, eind_head; sind = 0; eind = 0; #ifdef _WIN32 // Windows regex of newline is buggy UNUSED(buf_siz); size_t sind1, eind1, sind2, eind2; char re_head_tag[COMMBUFFSIZ]; sprintf(re_head_tag, "(%s)", MSG_HEAD_SEP); ret = find_match(re_head_tag, buf, &sind1, &eind1); if (ret > 0) { sind = sind1; ret = find_match(re_head_tag, buf + eind1, &sind2, &eind2); if (ret > 0) eind = eind1 + eind2; } #else // Extract just header char re_head[COMMBUFFSIZ] = MSG_HEAD_SEP; strcat(re_head, "(.*)"); strcat(re_head, MSG_HEAD_SEP); // strcat(re_head, ".*"); ret = find_match(re_head, buf, &sind, &eind); #endif if (ret < 0) { ygglog_error("split_head_body: Could not find header in '%.1000s'", buf); return -1; } else if (ret == 0) { ygglog_debug("split_head_body: No header in '%.1000s...'", buf); sind_head = 0; eind_head = 0; } else { sind_head = sind + strlen(MSG_HEAD_SEP); eind_head = eind - strlen(MSG_HEAD_SEP); } headsiz[0] = (eind_head - sind_head); char* temp = (char*)realloc(*head, *headsiz + 1); if (temp == NULL) { ygglog_error("split_head_body: Failed to reallocate header."); return -1; } *head = temp; memcpy(*head, buf + sind_head, *headsiz); (*head)[*headsiz] = '\0'; return 0; }; /*! @brief Format header to a string. @param[in] head comm_head_t* Pointer to header to be formatted. @param[out] buf char ** Pointer to buffer where header should be written. @param[in] buf_siz size_t Size of buf. @param[in] max_header_size size_t Maximum size that header can occupy before the type should be moved to the data portion of the message. @param[in] int no_type If 1, type information will not be added to the header. If 0, it will be. @returns: int Size of header written. */ int format_comm_header(comm_head_t *head, char **buf, size_t buf_siz, const size_t max_header_size, const int no_type); /*! 
@brief Extract type from data and updated header. @param[in] buf char** Pointer to data containing type. @param[in] buf_siz size_t Size of buf. @param[in,out] head comm_head_t* Pointer to header structure that should be updated. @returns: int -1 if there is an error, size of adjusted data that dosn't include type otherwise. */ int parse_type_in_data(char **buf, const size_t buf_siz, comm_head_t* head); /*! @brief Extract header information from a string. @param[in] buf const char* Message that header should be extracted from. @param[in] buf_siz size_t Size of buf. @returns: comm_head_t Header information structure. */ comm_head_t parse_comm_header(const char *buf, const size_t buf_siz); /*! @brief Get the ascii table data structure. @param[in] dtype dtype_t* Wrapper struct for C++ Metaschema type class. @returns: void* Cast pointer to ascii table. */ void* dtype_ascii_table(const dtype_t* dtype); /*! @brief Get a copy of a type structure. @param[in] dtype dtype_t* Wrapper struct for C++ Metaschema type class. @returns: dtype_t* Type class. */ dtype_t* copy_dtype(const dtype_t* dtype); /*! @brief Wrapper for updating a type object with information from another. @param[in] dtype1 dtype_t* Wrapper struct for C++ Metaschema type class that should be updated. @param[in] dtype2 dtype_t* Wrapper struct for C++ Metaschema type class that should be updated from. @returns: int 0 if successfull, -1 if there was an error. */ int update_dtype(dtype_t* dtype1, dtype_t* dtype2); /*! @brief Wrapper for updatining a type object with information from the provided variable arguments if a generic structure is present. @param[in] dtype1 dtype_t* Wrapper struct for C++ Metaschema type class that should be updated. @param[in] nargs size_t Number of arguments in ap. @param[in] ap va_list_t Variable argument list. @returns: int 0 if successfull, -1 if there was an error. */ int update_dtype_from_generic_ap(dtype_t* dtype1, size_t nargs, va_list_t ap); /*! 
@brief Wrapper for updating the precision of a bytes or unicode scalar type. @param[in] dtype dtype_t* Wrapper struct for C++ Metaschema type class. @param[in] new_precision size_t New precision. @returns: int 0 if free was successfull, -1 if there was an error. */ int update_precision_dtype(const dtype_t* dtype, const size_t new_precision); /*! @brief Wrapper for deserializing from a data type. @param[in] dtype dtype_t* Wrapper struct for C++ Metaschema type class. @param[in] buf character pointer to serialized message. @param[in] buf_siz size_t Size of buf. @param[in] allow_realloc int If 1, variables being filled are assumed to be pointers to pointers for heap memory. If 0, variables are assumed to be pointers to stack memory. If allow_realloc is set to 1, but stack variables are passed, a segfault can occur. @param[in, out] nargs int Number of arguments remaining in argument list. @param[in] ap va_list Arguments to be parsed from message. returns: int The number of populated arguments. -1 indicates an error. */ int deserialize_dtype(const dtype_t *dtype, const char *buf, const size_t buf_siz, const int allow_realloc, size_t *nargs, va_list_t ap); /*! @brief Wrapper for serializing from a data type. @param[in] dtype dtype_t* Wrapper struct for C++ Metaschema type class. @param[in] buf character pointer to pointer to memory where serialized message should be stored. @param[in] buf_siz size_t Size of memory allocated to buf. @param[in] allow_realloc int If 1, buf will be realloced if it is not big enough to hold the serialized emssage. If 0, an error will be returned. @param[in, out] nargs int Number of arguments remaining in argument list. @param[in] ap va_list Arguments to be formatted. returns: int The length of the serialized message or -1 if there is an error. */ int serialize_dtype(const dtype_t *dtype, char **buf, size_t *buf_siz, const int allow_realloc, size_t *nargs, va_list_t ap); /*! @brief Wrapper for displaying a data type. 
@param[in] dtype dtype_t* Wrapper struct for C++ Metaschema type class. @param[in] indent char* Indentation to add to display output. */ void display_dtype(const dtype_t *dtype, const char* indent); /*! @brief Wrapper for determining how many arguments a data type expects. @param[in] dtype dtype_t* Wrapper struct for C++ Metaschema type class. */ size_t nargs_exp_dtype(const dtype_t *dtype); #define free_generic destroy_generic #define init_json_object init_generic #define init_json_array init_generic #define init_schema init_generic #define free_json_object free_generic #define free_json_array free_generic #define free_schema free_generic #define copy_json_object copy_generic #define copy_json_array copy_generic #define copy_schema copy_generic #define display_json_object display_generic #define display_json_array display_generic #define display_schema display_generic #ifdef __cplusplus /* If this is a C++ compiler, end C linkage */ } #endif #endif /*DATATYPES_H_*/
data.c
#include "data.h"
#include "utils.h"
#include "image.h"
#include "opencl.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Guards rand() based path selection so concurrent loader threads draw
// from a consistent RNG stream.
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

// Rename C++ keywords so headers included after this point (e.g. OpenCL
// wrappers) do not clash when this C file is fed to a C++ compiler.
// NOTE(review): confirm which downstream header actually needs these.
#define class temp
#define new new_temp

// Read a newline-separated list file and return its lines as a list.
// The filename itself is stripped of trailing CR/LF before opening.
list *get_paths(char *filename)
{
    if (filename) {
        unsigned long br = strcspn(filename, "\n\r");
        unsigned long n = strlen(filename);
        // macos cannot write memory if br == n (prevent overflow buffer for security)
        // if keep no check br < n, bus error on macos; no impact on linux
        if (br < n) filename[strcspn(filename, "\n\r")] = 0;
    }
    // NOTE(review): a NULL filename passes the guard above but still reaches
    // strchr/fopen below; callers appear to always pass a valid string.
    char *pos;
    if ((pos=strchr(filename, '\r')) != NULL) *pos = '\0';
    if ((pos=strchr(filename, '\n')) != NULL) *pos = '\0';
    char *path;
    FILE *file = fopen(filename, "r");
    if(!file) file_error(filename);
    list *lines = make_list();
    while((path=fgetl(file))){
        if (path) path[strcspn(path, "\n\r")] = 0;
        list_insert(lines, path);
    }
    fclose(file);
    return lines;
}

/*
char **get_random_paths_indexes(char **paths, int n, int m, int *indexes)
{
    char **random_paths = calloc(n, sizeof(char*));
    int i;
    pthread_mutex_lock(&mutex);
    for(i = 0; i < n; ++i){
        int index = rand()%m;
        indexes[i] = index;
        random_paths[i] = paths[index];
        //if(i == 0) printf("%s\n", paths[index]);
    }
    pthread_mutex_unlock(&mutex);
    return random_paths;
}
*/

// Pick n paths uniformly at random (with replacement) from paths[0..m-1].
// Returned array is owned by the caller; the strings are shared.
char **get_random_paths(char **paths, int n, int m)
{
    pthread_mutex_lock(&mutex);
    char **random_paths = (char**)calloc(n, sizeof(char*));
    int i;
    for(i = 0; i < n; ++i){
        int index = rand()%m;
        random_paths[i] = paths[index];
        //if(i == 0) printf("%s\n", paths[index]);
    }
    pthread_mutex_unlock(&mutex);
    return random_paths;
}

// Return a new array of n paths with every occurrence of `find` replaced by
// `replace`; each resulting string is freshly allocated.
char **find_replace_paths(char **paths, int n, char *find, char *replace)
{
    char **replace_paths = (char**)calloc(n, sizeof(char*));
    int i;
    for(i = 0; i < n; ++i){
        char replaced[4096];
        find_replace(paths[i], find, replace, replaced);
        replace_paths[i] = copy_string(replaced);
    }
    return replace_paths;
}

// Load n images at w x h, converted to grayscale, one image per matrix row.
matrix load_image_paths_gray(char **paths, int n, int w, int h)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = (float**)calloc(X.rows, sizeof(float*));
    X.cols = 0;
    for(i = 0; i < n; ++i){
        image im = load_image(paths[i], w, h, 3);
        image gray = grayscale_image(im);
        free_image(im);
        im = gray;
        X.vals[i] = im.data;
        X.cols = im.h*im.w*im.c;
    }
    return X;
}

// Load n color images at w x h, one image per matrix row.
matrix load_image_paths(char **paths, int n, int w, int h)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = (float**)calloc(X.rows, sizeof(float*));
    X.cols = 0;
    for(i = 0; i < n; ++i){
        image im = load_image_color(paths[i], w, h);
        X.vals[i] = im.data;
        X.cols = im.h*im.w*im.c;
    }
    return X;
}

// Load n images with random (or centered) crop/rotate/flip/color
// augmentation; returns one size x size x 3 image per matrix row.
matrix load_image_augment_paths(char **paths, int n, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = (float**)calloc(X.rows, sizeof(float*));
    X.cols = 0;
    for(i = 0; i < n; ++i){
        image im = load_image_color(paths[i], 0, 0);
        image crop;
        if(center){
            crop = center_crop_image(im, size, size);
        } else {
            crop = random_augment_image(im, angle, aspect, min, max, size, size);
        }
        int flip = rand()%2;
        if (flip) flip_image(crop);
        random_distort_image(crop, hue, saturation, exposure);

        /*
        show_image(im, "orig");
        show_image(crop, "crop");
        cvWaitKey(0);
        */
        //grayscale_image_3c(crop);
        free_image(im);
        X.vals[i] = crop.data;
        X.cols = crop.h*crop.w*crop.c;
    }
    return X;
}

// Parse a label file of "id x y w h" rows into a heap-allocated box_label
// array; *n receives the number of boxes read.
box_label *read_boxes(char *filename, int *n)
{
    if (filename) filename[strcspn(filename, "\n\r")] = 0;
    char *pos;
    if ((pos=strchr(filename, '\r')) != NULL) *pos = '\0';
    if ((pos=strchr(filename, '\n')) != NULL) *pos = '\0';
    FILE *file = fopen(filename, "r");
    if(!file) file_error(filename);
    float x, y, h, w;
    int id;
    int count = 0;
    int size = 64;
    box_label *boxes = (box_label*)calloc(size, sizeof(box_label));
    while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){
        if(count == size) {
            // Grow geometrically as more boxes arrive.
            size = size * 2;
            boxes = (box_label*)realloc(boxes, size*sizeof(box_label));
        }
        boxes[count].id = id;
        boxes[count].x = x;
        boxes[count].y = y;
        boxes[count].h = h;
        boxes[count].w = w;
        boxes[count].left   = x - w/2;
        boxes[count].right  = x + w/2;
        boxes[count].top    = y - h/2;
        boxes[count].bottom = y + h/2;
        ++count;
    }
    fclose(file);
    *n = count;
    return boxes;
}

// Fisher-Yates style shuffle of the box array (uses rand()%n, so the
// permutation is slightly biased; matches original behavior).
void randomize_boxes(box_label *b, int n)
{
    int i;
    for(i = 0; i < n; ++i){
        box_label swap = b[i];
        int index = rand()%n;
        b[i] = b[index];
        b[index] = swap;
    }
}

// Map box coordinates through the crop transform (scale sx/sy, shift dx/dy,
// optional horizontal flip) and clamp the results to [0,1]. Boxes at the
// origin are pushed far out of range so they are ignored downstream.
void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip)
{
    int i;
    for(i = 0; i < n; ++i){
        if(boxes[i].x == 0 && boxes[i].y == 0) {
            boxes[i].x = 999999;
            boxes[i].y = 999999;
            boxes[i].w = 999999;
            boxes[i].h = 999999;
            continue;
        }
        boxes[i].left   = boxes[i].left  * sx - dx;
        boxes[i].right  = boxes[i].right * sx - dx;
        boxes[i].top    = boxes[i].top   * sy - dy;
        boxes[i].bottom = boxes[i].bottom* sy - dy;

        if(flip){
            float swap = boxes[i].left;
            boxes[i].left = 1. - boxes[i].right;
            boxes[i].right = 1. - swap;
        }

        boxes[i].left =  constrain(0, 1, boxes[i].left);
        boxes[i].right = constrain(0, 1, boxes[i].right);
        boxes[i].top =   constrain(0, 1, boxes[i].top);
        boxes[i].bottom =   constrain(0, 1, boxes[i].bottom);

        boxes[i].x = (boxes[i].left+boxes[i].right)/2;
        boxes[i].y = (boxes[i].top+boxes[i].bottom)/2;
        boxes[i].w = (boxes[i].right - boxes[i].left);
        boxes[i].h = (boxes[i].bottom - boxes[i].top);

        boxes[i].w = constrain(0, 1, boxes[i].w);
        boxes[i].h = constrain(0, 1, boxes[i].h);
    }
}

// Fill truth for SWAG-style detection: up to 90 boxes, each encoded as
// [x, y, w, h, one-hot class] in a (4+classes)-stride layout.
void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    find_replace(path, "images", "labels", labelpath);
    find_replace(labelpath, "JPEGImages", "labels", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);

    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;

    for (i = 0; i < count && i < 90; ++i) {
        x =  boxes[i].x;
        y =  boxes[i].y;
        w =  boxes[i].w;
        h =  boxes[i].h;
        id = boxes[i].id;

        if (w < .0 || h < .0) continue;

        int index = (4+classes) * i;

        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;

        if (id < classes) truth[index+id] = 1;
    }
    free(boxes);
}

// Fill truth for region (YOLOv1/v2 style) training: one cell per grid
// location, encoded as [objectness, one-hot class, x, y, w, h].
void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    find_replace(path, "images", "labels", labelpath);
    find_replace(labelpath, "JPEGImages", "labels", labelpath);

    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".png", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;

    for (i = 0; i < count; ++i) {
        x =  boxes[i].x;
        y =  boxes[i].y;
        w =  boxes[i].w;
        h =  boxes[i].h;
        id = boxes[i].id;

        if (w < .005 || h < .005) continue;

        // Locate the grid cell the box center falls in; x/y become offsets
        // within that cell.
        int col = (int)(x*num_boxes);
        int row = (int)(y*num_boxes);

        x = x*num_boxes - col;
        y = y*num_boxes - row;

        int index = (col+row*num_boxes)*(5+classes);
        if (truth[index]) continue;   // first box in a cell wins
        truth[index++] = 1;

        if (id < classes) truth[index+id] = 1;
        index += classes;

        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;
    }
    free(boxes);
}

// Decode a run-length-encoded mask into im.data (alternating 0/1 runs);
// any remaining pixels are filled with the final value.
void load_rle(image im, int *rle, int n)
{
    int count = 0;
    int curr = 0;
    int i,j;
    for(i = 0; i < n; ++i){
        for(j = 0; j < rle[i]; ++j){
            im.data[count++] = curr;
        }
        curr = 1 - curr;
    }
    for(; count < im.h*im.w*im.c; ++count){
        im.data[count] = curr;
    }
}

// Logical OR of a single-channel src into channel c of dest.
void or_image(image src, image dest, int c)
{
    int i;
    for(i = 0; i < src.w*src.h; ++i){
        if(src.data[i]) dest.data[dest.w*dest.h*c + i] = 1;
    }
}

// Make mask channels mutually exclusive: a pixel set in channel k clears
// that pixel in all later channels.
void exclusive_image(image src)
{
    int k, j, i;
    int s = src.w*src.h;
    for(k = 0; k < src.c-1; ++k){
        for(i = 0; i < s; ++i){
            if (src.data[k*s + i]){
                for(j = k+1; j < src.c; ++j){
                    src.data[j*s + i] = 0;
                }
            }
        }
    }
}

// Tight bounding box (x, y, w, h in pixels) of the nonzero region of a
// single-channel image.
box bound_image(image im)
{
    int x,y;
    int minx = im.w;
    int miny = im.h;
    int maxx = 0;
    int maxy = 0;
    for(y = 0; y < im.h; ++y){
        for(x = 0; x < im.w; ++x){
            if(im.data[y*im.w + x]){
                minx = (x < minx) ? x : minx;
                miny = (y < miny) ? y : miny;
                maxx = (x > maxx) ? x : maxx;
                maxy = (y > maxy) ? y : maxy;
            }
        }
    }
    box b = {minx, miny, maxx-minx + 1, maxy-miny + 1};
    //printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
    return b;
}

// Fill truth for instance segmentation: per instance, [class id, mw*mh mask]
// after applying the same crop/flip augmentation as the input image.
// A -1 id terminates the list when fewer than num_boxes instances exist.
void fill_truth_iseg(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh)
{
    char labelpath[4096];
    find_replace(path, "images", "mask", labelpath);
    find_replace(labelpath, "JPEGImages", "mask", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    char buff[32788];
    int id;
    int i = 0;
    int j;
    image part = make_image(w, h, 1);
    while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect);
        if(flip) flip_image(sized);

        image mask = resize_image(sized, mw, mh);
        truth[i*(mw*mh+1)] = id;
        for(j = 0; j < mw*mh; ++j){
            truth[i*(mw*mh + 1) + 1 + j] = mask.data[j];
        }
        ++i;

        free_image(mask);
        free_image(sized);
        free(rle);
    }
    if(i < num_boxes) truth[i*(mw*mh+1)] = -1;
    fclose(file);
    free_image(part);
}

// Fill truth for mask prediction: per instance, [cx, cy, w, h (relative),
// mw*mh mask crop, class id], after the same augmentation as the image.
void fill_truth_mask(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh)
{
    char labelpath[4096];
    find_replace(path, "images", "mask", labelpath);
    find_replace(labelpath, "JPEGImages", "mask", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    char buff[32788];
    int id;
    int i = 0;
    image part = make_image(w, h, 1);
    while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect);
        if(flip) flip_image(sized);
        box b = bound_image(sized);
        if(b.w > 0){
            image crop = crop_image(sized, b.x, b.y, b.w, b.h);
            image mask = resize_image(crop, mw, mh);
            truth[i*(4 + mw*mh + 1) + 0] = (b.x + b.w/2.)/sized.w;
            truth[i*(4 + mw*mh + 1) + 1] = (b.y + b.h/2.)/sized.h;
            truth[i*(4 + mw*mh + 1) + 2] = b.w/sized.w;
            truth[i*(4 + mw*mh + 1) + 3] = b.h/sized.h;
            int j;
            for(j = 0; j < mw*mh; ++j){
                truth[i*(4 + mw*mh + 1) + 4 + j] = mask.data[j];
            }
            truth[i*(4 + mw*mh + 1) + 4 + mw*mh] = id;
            free_image(crop);
            free_image(mask);
            ++i;
        }
        free_image(sized);
        free(rle);
    }
    fclose(file);
    free_image(part);
}

// Fill truth for detection training: up to num_boxes rows of
// [x, y, w, h, id]; degenerate boxes (< .001) are dropped.
void fill_truth_detection(char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    find_replace(path, "images", "labels", labelpath);
    find_replace(labelpath, "JPEGImages", "labels", labelpath);

    find_replace(labelpath, "raw", "labels", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".png", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    if(count > num_boxes) count = num_boxes;
    float x,y,w,h;
    int id;
    int i;
    int sub = 0;   // number of boxes skipped so far; keeps truth rows packed

    for (i = 0; i < count; ++i) {
        x =  boxes[i].x;
        y =  boxes[i].y;
        w =  boxes[i].w;
        h =  boxes[i].h;
        id = boxes[i].id;

        if ((w < .001 || h < .001)) {
            ++sub;
            continue;
        }

        truth[(i-sub)*5+0] = x;
        truth[(i-sub)*5+1] = y;
        truth[(i-sub)*5+2] = w;
        truth[(i-sub)*5+3] = h;
        truth[(i-sub)*5+4] = id;
    }
    free(boxes);
}

#define NUMCHARS 37

// Print the argmax character of each NUMCHARS-wide prediction block.
void print_letters(float *pred, int n)
{
    int i;
    for(i = 0; i < n; ++i){
        int index = max_index(pred+i*NUMCHARS, NUMCHARS);
        printf("%c", int_to_alphanum(index));
    }
    printf("\n");
}

// Fill one-hot captcha truth from the characters of the file's basename;
// unused positions are set to the blank class (NUMCHARS-1).
void fill_truth_captcha(char *path, int n, float *truth)
{
    char *begin = strrchr(path, '/');
    ++begin;
    int i;
    for(i = 0; i < strlen(begin) && i < n && begin[i] != '.'; ++i){
        int index = alphanum_to_int(begin[i]);
        if(index > 35) printf("Bad %c\n", begin[i]);
        truth[i*NUMCHARS+index] = 1;
    }
    for(;i < n; ++i){
        truth[i*NUMCHARS + NUMCHARS-1] = 1;
    }
}

// Load a captcha classification batch: n images of w x h with k-character
// one-hot labels derived from the filenames.
data load_data_captcha(char **paths, int n, int m, int k, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = make_matrix(n, k*NUMCHARS);
    int i;
    for(i = 0; i < n; ++i){
        fill_truth_captcha(paths[i], k, d.y.vals[i]);
    }
    if(m) free(paths);
    return d;
}

// Load a captcha autoencoder batch: targets are the inputs themselves.
// NOTE(review): 17100 is a hard-coded flattened image size; confirm it
// matches the network this feeds.
data load_data_captcha_encode(char **paths, int n, int m, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.X.cols = 17100;
    d.y = d.X;
    if(m) free(paths);
    return d;
}

// Set truth[i]=1 for every label whose name appears as a substring of the
// path; warns if the match count is not exactly one (except single-class).
void fill_truth(char *path, char **labels, int k, float *truth)
{
    int i;
    memset(truth, 0, k*sizeof(float));
    int count = 0;
    for(i = 0; i < k; ++i){
        if(strstr(path, labels[i])){
            truth[i] = 1;
            ++count;
            //printf("%s %s %d\n", path, labels[i], i);
        }
    }
    if(count != 1 && (k != 1 || count != 0)) printf("Too many or too few labels: %d, %s\n", count, path);
}

// Propagate positive labels to their ancestors in the class hierarchy, then
// mark every fully-unlabeled sibling group with SECRET_NUM so the loss can
// ignore it.
void fill_hierarchy(float *truth, int k, tree *hierarchy)
{
    int j;
    for(j = 0; j < k; ++j){
        if(truth[j]){
            int parent = hierarchy->parent[j];
            while(parent >= 0){
                truth[parent] = 1;
                parent = hierarchy->parent[parent];
            }
        }
    }
    int i;
    int count = 0;
    for(j = 0; j < hierarchy->groups; ++j){
        //printf("%d\n", count);
        int mask = 1;
        for(i = 0; i < hierarchy->group_size[j]; ++i){
            if(truth[count + i]){
                mask = 0;
                break;
            }
        }
        if (mask) {
            for(i = 0; i < hierarchy->group_size[j]; ++i){
                truth[count + i] = SECRET_NUM;
            }
        }
        count += hierarchy->group_size[j];
    }
}

// Read k float regression targets per image from the matching label file
// (image path mapped to labels/<name>.txt across many extensions).
matrix load_regression_labels_paths(char **paths, int n, int k)
{
    matrix y = make_matrix(n, k);
    int i,j;
    for(i = 0; i < n; ++i){
        char labelpath[4096];
        find_replace(paths[i], "images", "labels", labelpath);
        find_replace(labelpath, "JPEGImages", "labels", labelpath);
        find_replace(labelpath, ".BMP", ".txt", labelpath);
        find_replace(labelpath, ".JPEG", ".txt", labelpath);
        find_replace(labelpath, ".JPG", ".txt", labelpath);
        find_replace(labelpath, ".JPeG", ".txt", labelpath);
        find_replace(labelpath, ".Jpeg", ".txt", labelpath);
        find_replace(labelpath, ".PNG", ".txt", labelpath);
        find_replace(labelpath, ".TIF", ".txt", labelpath);
        find_replace(labelpath, ".bmp", ".txt", labelpath);
        find_replace(labelpath, ".jpeg", ".txt", labelpath);
        find_replace(labelpath, ".jpg", ".txt", labelpath);
        find_replace(labelpath, ".png", ".txt", labelpath);
        find_replace(labelpath, ".tif", ".txt", labelpath);

        FILE *file = fopen(labelpath, "r");
        // BUGFIX: fopen was unchecked here, unlike every other loader in this
        // file, so a missing label file dereferenced a NULL FILE* in fscanf.
        if(!file) file_error(labelpath);
        for(j = 0; j < k; ++j){
            fscanf(file, "%f", &(y.vals[i][j]));
        }
        fclose(file);
    }
    return y;
}

// One-hot (optionally hierarchy-expanded) labels for n paths.
matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy)
{
    matrix y = make_matrix(n, k);
    int i;
    for(i = 0; i < n && labels; ++i){
        fill_truth(paths[i], labels, k, y.vals[i]);
        if(hierarchy){
            fill_hierarchy(y.vals[i], k, hierarchy);
        }
    }
    return y;
}

// Multi-label tag matrix: each label file lists integer tag ids; missing
// files are skipped (all-zero row).
matrix load_tags_paths(char **paths, int n, int k)
{
    matrix y = make_matrix(n, k);
    int i;
    //int count = 0;
    for(i = 0; i < n; ++i){
        char label[4096];
        find_replace(paths[i], "images", "labels", label);
        find_replace(label, ".jpg", ".txt", label);
        FILE *file = fopen(label, "r");
        if (!file) continue;
        //++count;
        int tag;
        while(fscanf(file, "%d", &tag) == 1){
            if(tag < k){
                y.vals[i][tag] = 1;
            }
        }
        fclose(file);
    }
    //printf("%d/%d\n", count, n);
    return y;
}

// Read label names (one per line) from a file into a NULL-free array.
char **get_labels(char *filename)
{
    list *plist = get_paths(filename);
    char **labels = (char **)list_to_array(plist);
    free_list(plist);
    return labels;
}

// Free a data batch; shallow batches only own the row-pointer arrays.
void free_data(data d)
{
    if(!d.shallow){
        free_matrix(d.X);
        free_matrix(d.y);
    }else{
        free(d.X.vals);
        free(d.y.vals);
    }
}

// Build a w x h x classes mask image by OR-ing each RLE instance into the
// channel of its class id.
image get_segmentation_image(char *path, int w, int h, int classes)
{
    char labelpath[4096];
    find_replace(path, "images", "mask", labelpath);
    find_replace(labelpath, "JPEGImages", "mask", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    image mask = make_image(w, h, classes);
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    char buff[32788];
    int id;
    image part = make_image(w, h, 1);
    while(fscanf(file, "%d %s", &id, buff) == 2){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        or_image(part, mask, id);
        free(rle);
    }
    //exclusive_image(mask);
    fclose(file);
    free_image(part);
    return mask;
}

// Same as get_segmentation_image but with an extra trailing "background"
// channel that is 1 wherever no instance covers the pixel.
image get_segmentation_image2(char *path, int w, int h, int classes)
{
    char labelpath[4096];
    find_replace(path, "images", "mask", labelpath);
    find_replace(labelpath, "JPEGImages", "mask", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    image mask = make_image(w, h, classes+1);
    int i;
    for(i = 0; i < w*h; ++i){
        mask.data[w*h*classes + i] = 1;
    }
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    char buff[32788];
    int id;
    image part = make_image(w, h, 1);
    while(fscanf(file, "%d %s", &id, buff) == 2){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        or_image(part, mask, id);
        for(i = 0; i < w*h; ++i){
            if(part.data[i]) mask.data[w*h*classes + i] = 0;
        }
        free(rle);
    }
    //exclusive_image(mask);
    fclose(file);
    free_image(part);
    return mask;
}

// Load a semantic-segmentation batch: augmented images with matching
// per-class masks downsampled by `div`.
data load_data_seg(int n, char **paths, int m, int w, int h, int classes, int min, int max, float angle, float aspect, float hue, float saturation, float exposure, int div)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    d.y.rows = n;
    d.y.cols = h*w*classes/div/div;
    d.y.vals = (float**)calloc(d.X.rows, sizeof(float*));

    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
        image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);

        int flip = rand()%2;
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;

        image mask = get_segmentation_image(random_paths[i], orig.w, orig.h, classes);
        //image mask = make_image(orig.w, orig.h, classes+1);
        // Apply the identical geometric transform (scaled down by div) to
        // the mask so image and truth stay aligned.
        image sized_m = rotate_crop_image(mask, a.rad, a.scale/div, a.w/div, a.h/div, a.dx/div, a.dy/div, a.aspect);

        if(flip) flip_image(sized_m);
        d.y.vals[i] = sized_m.data;

        free_image(orig);
        free_image(mask);

        /*
        image rgb = mask_to_rgb(sized_m, classes);
        show_image(rgb, "part");
        show_image(sized, "orig");
        cvWaitKey(0);
        free_image(rgb);
        */
    }
    free(random_paths);
    return d;
}

// Load an instance-segmentation batch (see fill_truth_iseg for the truth
// layout).
data load_data_iseg(int n, char **paths, int m, int w, int h, int classes, int boxes, int div, int min, int max, float angle, float aspect, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    d.y = make_matrix(n, (((w/div)*(h/div))+1)*boxes);

    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
        image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);

        int flip = rand()%2;
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;
        //show_image(sized, "image");

        fill_truth_iseg(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, w/div, h/div);

        free_image(orig);

        /*
        image rgb = mask_to_rgb(sized_m, classes);
        show_image(rgb, "part");
        show_image(sized, "orig");
        cvWaitKey(0);
        free_image(rgb);
        */
    }
    free(random_paths);
    return d;
}

// Load a mask-prediction batch (see fill_truth_mask for the truth layout;
// masks are resampled to a fixed 14x14).
data load_data_mask(int n, char **paths, int m, int w, int h, int classes, int boxes, int coords, int min, int max, float angle, float aspect, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    d.y = make_matrix(n, (coords+1)*boxes);

    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
        image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);

        int flip = rand()%2;
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;
        //show_image(sized, "image");

        fill_truth_mask(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, 14, 14);

        free_image(orig);

        /*
        image rgb = mask_to_rgb(sized_m, classes);
        show_image(rgb, "part");
        show_image(sized, "orig");
        cvWaitKey(0);
        free_image(rgb);
        */
    }
    free(random_paths);
    return d;
}

// Load a region-training batch with random jittered crop, flip and color
// distortion; truth is laid out per grid cell (see fill_truth_region).
data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    int k = size*size*(5+classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);

        int oh = orig.h;
        int ow = orig.w;

        int dw = (ow*jitter);
        int dh = (oh*jitter);

        int pleft  = rand_uniform(-dw, dw);
        int pright = rand_uniform(-dw, dw);
        int ptop   = rand_uniform(-dh, dh);
        int pbot   = rand_uniform(-dh, dh);

        int swidth =  ow - pleft - pright;
        int sheight = oh - ptop - pbot;

        float sx = (float)swidth  / ow;
        float sy = (float)sheight / oh;

        int flip = rand()%2;
        image cropped = crop_image(orig, pleft, ptop, swidth, sheight);

        float dx = ((float)pleft/ow)/sx;
        float dy = ((float)ptop /oh)/sy;

        image sized = resize_image(cropped, w, h);
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;

        fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy);

        free_image(orig);
        free_image(cropped);
    }
    free(random_paths);
    return d;
}

// Load an image-pair comparison batch: X rows hold two stacked images;
// y holds per-class best-IOU pairs resolved to {0,1,SECRET_NUM} targets.
data load_data_compare(int n, char **paths, int m, int classes, int w, int h)
{
    if(m) paths = get_random_paths(paths, 2*n, m);
    int i,j;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*6;

    int k = 2*(classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image im1 = load_image_color(paths[i*2],   w, h);
        image im2 = load_image_color(paths[i*2+1], w, h);

        d.X.vals[i] = (float*)calloc(d.X.cols, sizeof(float));
        memcpy(d.X.vals[i],         im1.data, h*w*3*sizeof(float));
        memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float));

        int id;
        float iou;

        char imlabel1[4096];
        char imlabel2[4096];
        find_replace(paths[i*2],   "imgs", "labels", imlabel1);
        find_replace(imlabel1, "jpg", "txt", imlabel1);
        FILE *fp1 = fopen(imlabel1, "r");
        // BUGFIX: fp1/fp2 were passed to fscanf unchecked; fail loudly like
        // the other loaders instead of dereferencing NULL.
        if(!fp1) file_error(imlabel1);

        while(fscanf(fp1, "%d %f", &id, &iou) == 2){
            if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou;
        }

        find_replace(paths[i*2+1], "imgs", "labels", imlabel2);
        find_replace(imlabel2, "jpg", "txt", imlabel2);
        FILE *fp2 = fopen(imlabel2, "r");
        if(!fp2) file_error(imlabel2);

        while(fscanf(fp2, "%d %f", &id, &iou) == 2){
            if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou;
        }

        for (j = 0; j < classes; ++j){
            if (d.y.vals[i][2*j] > .5 &&  d.y.vals[i][2*j+1] < .5){
                d.y.vals[i][2*j] = 1;
                d.y.vals[i][2*j+1] = 0;
            } else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){
                d.y.vals[i][2*j] = 0;
                d.y.vals[i][2*j+1] = 1;
            } else {
                d.y.vals[i][2*j]   = SECRET_NUM;
                d.y.vals[i][2*j+1] = SECRET_NUM;
            }
        }
        fclose(fp1);
        fclose(fp2);

        free_image(im1);
        free_image(im2);
    }
    if(m) free(paths);
    return d;
}
/* Load one random image, jitter-crop and optionally flip it, and build
 * SWAG-format truth labels ((4+classes)*90 slots). */
data load_data_swag(char **paths, int n, int classes, float jitter)
{
    int index = rand()%n;
    char *random_path = paths[index];

    image orig = load_image_color(random_path, 0, 0);
    int h = orig.h;
    int w = orig.w;

    data d = {0};
    d.shallow = 0;
    d.w = w;
    d.h = h;

    d.X.rows = 1;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    int k = (4+classes)*90;  /* 90 truth slots, 4 box coords + class scores each */
    d.y = make_matrix(1, k);

    int dw = w*jitter;
    int dh = h*jitter;

    int pleft = rand_uniform(-dw, dw);
    int pright = rand_uniform(-dw, dw);
    int ptop = rand_uniform(-dh, dh);
    int pbot = rand_uniform(-dh, dh);

    int swidth = w - pleft - pright;
    int sheight = h - ptop - pbot;

    float sx = (float)swidth / w;
    float sy = (float)sheight / h;

    int flip = rand()%2;
    image cropped = crop_image(orig, pleft, ptop, swidth, sheight);

    /* crop offsets in crop-relative normalized units, used to remap truth */
    float dx = ((float)pleft/w)/sx;
    float dy = ((float)ptop /h)/sy;

    image sized = resize_image(cropped, w, h);
    if(flip) flip_image(sized);
    d.X.vals[0] = sized.data;  /* d takes ownership of the pixel buffer */

    fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy);
    free_image(orig);
    free_image(cropped);
    return d;
}

/* Load n random images for detection training: place each image with a
 * jittered aspect ratio onto a 0.5-gray canvas, distort colors, random
 * flip, and fill up to `boxes` truth boxes (5 floats each) per image. */
data load_data_detection(int n, char **paths, int m, int w, int h, int boxes, int classes, float jitter, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    d.y = make_matrix(n, 5*boxes);
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        image sized = make_image(w, h, orig.c);
        fill_image(sized, .5);  /* gray background for letterbox-style placement */

        float dw = jitter * orig.w;
        float dh = jitter * orig.h;

        float new_ar = (orig.w + rand_uniform(-dw, dw)) / (orig.h + rand_uniform(-dh, dh));
        //float scale = rand_uniform(.25, 2);
        float scale = 1;

        float nw, nh;

        if(new_ar < 1){
            nh = scale * h;
            nw = nh * new_ar;
        } else {
            nw = scale * w;
            nh = nw / new_ar;
        }

        float dx = rand_uniform(0, w - nw);
        float dy = rand_uniform(0, h - nh);

        place_image(orig, nw, nh, dx, dy, sized);

        random_distort_image(sized, hue, saturation, exposure);

        int flip = rand()%2;
        if(flip) flip_image(sized);
        d.X.vals[i] = sized.data;

        /* offsets/scales are normalized to the network input size */
        fill_truth_detection(random_paths[i], boxes, d.y.vals[i], classes, flip, -dx/w, -dy/h, nw/w, nh/h);

        free_image(orig);
    }
    free(random_paths);
    return d;
}

/* Thread entry point: dispatch one load_args request to the matching
 * load_data_* routine.  Frees the heap-allocated args before returning. */
void *load_thread(void *ptr)
{
    //printf("Loading data: %d\n", rand());
    load_args a = *(struct load_args*)ptr;
    /* zero means "unset": fall back to neutral augmentation factors */
    if(a.exposure == 0) a.exposure = 1;
    if(a.saturation == 0) a.saturation = 1;
    if(a.aspect == 0) a.aspect = 1;

    if (a.type == OLD_CLASSIFICATION_DATA){
        *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h);
    } else if (a.type == REGRESSION_DATA){
        *a.d = load_data_regression(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
    } else if (a.type == CLASSIFICATION_DATA){
        *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.center);
    } else if (a.type == SUPER_DATA){
        *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale);
    } else if (a.type == WRITING_DATA){
        *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h);
    } else if (a.type == ISEG_DATA){
        *a.d = load_data_iseg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.scale, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
    } else if (a.type == INSTANCE_DATA){
        *a.d = load_data_mask(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.coords, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
    } else if (a.type == SEGMENTATION_DATA){
        *a.d = load_data_seg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.scale);
    } else if (a.type == REGION_DATA){
        *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
    } else if (a.type == DETECTION_DATA){
        *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
    } else if (a.type == SWAG_DATA){
        *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter);
    } else if (a.type == COMPARE_DATA){
        *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h);
    } else if (a.type == IMAGE_DATA){
        *(a.im) = load_image_color(a.path, 0, 0);
        *(a.resized) = resize_image(*(a.im), a.w, a.h);
    } else if (a.type == LETTERBOX_DATA){
        *(a.im) = load_image_color(a.path, 0, 0);
        *(a.resized) = letterbox_image(*(a.im), a.w, a.h);
    } else if (a.type == TAG_DATA){
        *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
    }
    free(ptr);
    return 0;
}

/* Start one worker thread that fills args.d; caller must pthread_join the
 * returned thread before reading the result. */
pthread_t load_data_in_thread(load_args args)
{
    pthread_t thread;
    struct load_args *ptr = (load_args*)calloc(1, sizeof(struct load_args));
    *ptr = args;  /* heap copy; freed by load_thread */
    if(pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed");
    return thread;
}

/* Fan one load request out across args.threads workers, then concatenate
 * the per-worker blocks into *args.d (deep result, shallow buffers). */
void *load_threads(void *ptr)
{
    int i;
    load_args args = *(load_args *)ptr;
    if (args.threads == 0) args.threads = 1;
    data *out = args.d;
    int total = args.n;
    free(ptr);
    data *buffers = (data*)calloc(args.threads, sizeof(data));
    pthread_t *threads = (pthread_t*)calloc(args.threads, sizeof(pthread_t));
    for(i = 0; i < args.threads; ++i){
        args.d = buffers + i;
        /* split `total` rows as evenly as possible across the workers */
        args.n = (i+1) * total/args.threads - i * total/args.threads;
        threads[i] = load_data_in_thread(args);
    }
    for(i = 0; i < args.threads; ++i){
        pthread_join(threads[i], 0);
    }
    *out = concat_datas(buffers, args.threads);
    out->shallow = 0;
    for(i = 0; i < args.threads; ++i){
        buffers[i].shallow = 1;  /* row buffers are now owned by *out */
        free_data(buffers[i]);
    }
    free(buffers);
    free(threads);
    return 0;
}

/* Run one load request synchronously on the calling thread. */
void load_data_blocking(load_args args)
{
    struct load_args *ptr = (load_args*)calloc(1, sizeof(struct load_args));
    *ptr = args;
    load_thread(ptr);
}

/* Kick off an asynchronous multi-threaded load; join the returned thread
 * before using args.d. */
pthread_t load_data(load_args args)
{
    pthread_t thread;
    struct load_args *ptr = (load_args*)calloc(1, sizeof(struct load_args));
    *ptr = args;
    if(pthread_create(&thread, 0, load_threads, ptr)) error("Thread creation failed");
    return thread;
}

/* Pair each ".png" input with its "-label.png" ground truth, loaded as
 * grayscale at (out_w, out_h). */
data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h)
{
    if(m) paths = get_random_paths(paths, n, m);
    char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png");
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = load_image_paths_gray(replace_paths, n, out_w, out_h);
    if(m) free(paths);
    int i;
    for(i = 0; i < n; ++i) free(replace_paths[i]);
    free(replace_paths);
    return d;
}

/* Plain classification loading: resized images + labels matched by path. */
data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = load_labels_paths(paths, n, labels, k, 0);
    if(m) free(paths);
    return d;
}

/*
   data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
   {
   data d = {0};
   d.indexes = calloc(n, sizeof(int));
   if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes);
   d.shallow = 0;
   d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure);
   d.y = load_labels_paths(paths, n, labels, k);
   if(m) free(paths);
   return d;
   }
 */

/* Super-resolution pairs: X = downscaled crop (w x h), y = original crop
 * at (w*scale x h*scale); both row buffers are owned by d. */
data load_data_super(char **paths, int n, int m, int w, int h, int scale)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;

    int i;
    d.X.rows = n;
    d.X.vals = (float**)calloc(n, sizeof(float*));
    d.X.cols = w*h*3;

    d.y.rows = n;
    d.y.vals = (float**)calloc(n, sizeof(float*));
    d.y.cols = w*scale * h*scale * 3;

    for(i = 0; i < n; ++i){
        image im = load_image_color(paths[i], 0, 0);
        image crop = random_crop_image(im, w*scale, h*scale);
        int flip = rand()%2;
        if (flip) flip_image(crop);
        image resize = resize_image(crop, w, h);
        d.X.vals[i] = resize.data;
        d.y.vals[i] = crop.data;
        free_image(im);
    }

    if(m) free(paths);
    return d;
}

/* Augmented images paired with k per-file regression targets. */
data load_data_regression(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0);
    d.y = load_regression_labels_paths(paths, n, k);
    if(m) free(paths);
    return d;
}

/* Shallow-select row i from dataset orig[inds[i]] for every i.
 * NOTE(review): shapes are taken from orig[0]; assumes all datasets in
 * `orig` have the same dimensions -- confirm at call sites. */
data select_data(data *orig, int *inds)
{
    data d = {0};
    d.shallow = 1;
    d.w = orig[0].w;
    d.h = orig[0].h;

    d.X.rows = orig[0].X.rows;
    d.y.rows = orig[0].X.rows;
    d.X.cols = orig[0].X.cols;
    d.y.cols = orig[0].y.cols;

    d.X.vals = (float**)calloc(orig[0].X.rows, sizeof(float *));
    d.y.vals = (float**)calloc(orig[0].y.rows, sizeof(float *));
    int i;
    for(i = 0; i < d.X.rows; ++i){
        d.X.vals[i] = orig[inds[i]].X.vals[i];
        d.y.vals[i] = orig[inds[i]].y.vals[i];
    }
    return d;
}

/* Cut every image of `orig` into divs*divs tiles scaled by `size`; each of
 * the divs*divs returned datasets deep-copies orig's labels.
 * NOTE(review): `d` is not zero-initialized (only some fields are set), and
 * the inner `parallel for` is nested inside the outer one -- verify intent. */
data *tile_data(data orig, int divs, int size)
{
    data *ds = (data*)calloc(divs*divs, sizeof(data));
    int i, j;
#pragma omp parallel for
    for(i = 0; i < divs*divs; ++i){
        data d;
        d.shallow = 0;
        d.w = orig.w/divs * size;
        d.h = orig.h/divs * size;
        d.X.rows = orig.X.rows;
        d.X.cols = d.w*d.h*3;
        d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
        d.y = copy_matrix(orig.y);
#pragma omp parallel for
        for(j = 0; j < orig.X.rows; ++j){
            /* tile origin, centered so enlarged tiles overlap neighbors */
            int x = (i%divs) * orig.w / divs - (d.w - orig.w/divs)/2;
            int y = (i/divs) * orig.h / divs - (d.h - orig.h/divs)/2;
            image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[j]);
            d.X.vals[j] = crop_image(im, x, y, d.w, d.h).data;
        }
        ds[i] = d;
    }
    return ds;
}

/* Resize every image of `orig` to w x h; labels are deep-copied. */
data resize_data(data orig, int w, int h)
{
    data d = {0};
    d.shallow = 0;
    d.w = w;
    d.h = h;
    int i;
    d.X.rows = orig.X.rows;
    d.X.cols = w*h*3;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.y = copy_matrix(orig.y);
#pragma omp parallel for
    for(i = 0; i < orig.X.rows; ++i){
        image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[i]);
        d.X.vals[i] = resize_image(im, w, h).data;
    }
    return d;
}

/* Classification loading with full augmentation and optional label
 * hierarchy (hierarchy may be 0). */
data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.w=size;
    d.h=size;
    d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, center);
    d.y = load_labels_paths(paths, n, labels, k, hierarchy);
    if(m) free(paths);
    return d;
}

/* Multi-label ("tag") loading: augmented images + per-file tag vectors. */
data load_data_tag(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.w = size;
    d.h = size;
    d.shallow = 0;
    d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0);
    d.y = load_tags_paths(paths, n, k);
    if(m) free(paths);
    return d;
}

/* Stack the rows of m1 then m2 (shallow: row pointers are shared).
 * NOTE(review): assumes m1.cols == m2.cols -- not checked here. */
matrix concat_matrix(matrix m1, matrix m2)
{
    int i, count = 0;
    matrix m;
    m.cols = m1.cols;
    m.rows = m1.rows+m2.rows;
    m.vals = (float**)calloc(m1.rows + m2.rows, sizeof(float*));
    for(i = 0; i < m1.rows; ++i){
        m.vals[count++] = m1.vals[i];
    }
    for(i = 0; i < m2.rows; ++i){
        m.vals[count++] = m2.vals[i];
    }
    return m;
}

/* Shallow concatenation of two datasets (row buffers are shared). */
data concat_data(data d1, data d2)
{
    data d = {0};
    d.shallow = 1;
    d.X = concat_matrix(d1.X, d2.X);
    d.y = concat_matrix(d1.y, d2.y);
    d.w = d1.w;
    d.h = d1.h;
    return d;
}

/* Fold n datasets into one; the intermediate shallow shells are freed at
 * each step (the zero-initialized first `out` is safe to free). */
data concat_datas(data *d, int n)
{
    int i;
    data out = {0};
    for(i = 0; i < n; ++i){
        data new = concat_data(d[i], out);
        free_data(out);
        out = new;
    }
    return out;
}

/* Load a CSV file, pop column `target` as the class id, and one-hot encode
 * it into k label columns. */
data load_categorical_data_csv(char *filename, int target, int k)
{
    data d = {0};
    d.shallow = 0;
    matrix X = csv_to_matrix(filename);
    float *truth_1d = pop_column(&X, target);
    float **truth = one_hot_encode(truth_1d, X.rows, k);
    matrix y;
    y.rows = X.rows;
    y.cols = k;
    y.vals = truth;
    d.X = X;
    d.y = y;
    free(truth_1d);
    return d;
}

/* Load one CIFAR-10 binary batch (10000 records: 1 label byte followed by
 * 3072 pixel bytes); pixels are scaled to [0,1]. */
data load_cifar10_data(char *filename)
{
    data d = {0};
    d.shallow = 0;
    long i,j;
    matrix X = make_matrix(10000, 3072);
    matrix y = make_matrix(10000, 10);
    d.X = X;
    d.y = y;

    FILE *fp = fopen(filename, "rb");
    if(!fp) file_error(filename);
    for(i = 0; i < 10000; ++i){
        unsigned char bytes[3073];
        fread(bytes, 1, 3073, fp);  /* NOTE(review): return value unchecked */
        int class = bytes[0];
        y.vals[i][class] = 1;
        for(j = 0; j < X.cols; ++j){
            X.vals[i][j] = (double)bytes[j+1];
        }
    }
    scale_data_rows(d, 1./255);
    //normalize_data_rows(d);
    fclose(fp);
    return d;
}

/* Copy n random rows (with replacement) from d into flat buffers X and y. */
void get_random_batch(data d, int n, float *X, float *y)
{
    int j;
    for(j = 0; j < n; ++j){
        int index = rand()%d.X.rows;
        memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float));
        memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float));
    }
}

/* Copy rows [offset, offset+n) from d into X (and y if non-NULL). */
void get_next_batch(data d, int n, int offset, float *X, float *y)
{
    int j;
    for(j = 0; j < n; ++j){
        int index = offset + j;
        memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float));
        if(y) memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float));
    }
}

/* Label smoothing in place: y <- eps/k + (1-eps)*y, with eps = 0.1. */
void smooth_data(data d)
{
    int i, j;
    float scale = 1. / d.y.cols;
    float eps = .1;
    for(i = 0; i < d.y.rows; ++i){
        for(j = 0; j < d.y.cols; ++j){
            d.y.vals[i][j] = eps * scale + (1-eps) * d.y.vals[i][j];
        }
    }
}

/* Load all five CIFAR-10 training batches from the hard-coded path, scale
 * pixels to [0,1], and apply label smoothing. */
data load_all_cifar10()
{
    data d = {0};
    d.shallow = 0;
    int i,j,b;
    matrix X = make_matrix(50000, 3072);
    matrix y = make_matrix(50000, 10);
    d.X = X;
    d.y = y;

    for(b = 0; b < 5; ++b){
        char buff[256];
        sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1);
        FILE *fp = fopen(buff, "rb");
        if(!fp) file_error(buff);
        for(i = 0; i < 10000; ++i){
            unsigned char bytes[3073];
            fread(bytes, 1, 3073, fp);  /* NOTE(review): return value unchecked */
            int class = bytes[0];
            y.vals[i+b*10000][class] = 1;
            for(j = 0; j < X.cols; ++j){
                X.vals[i+b*10000][j] = (double)bytes[j+1];
            }
        }
        fclose(fp);
    }
    //normalize_data_rows(d);
    scale_data_rows(d, 1./255);
    smooth_data(d);
    return d;
}

/* Load Go training data: alternating lines of "row col" moves and 19x19
 * board strings ('1' and '2' map to +1/-1, anything else to 0).  The
 * matrices start at a hard-coded capacity and grow by doubling. */
data load_go(char *filename)
{
    FILE *fp = fopen(filename, "rb");
    matrix X = make_matrix(3363059, 361);
    matrix y = make_matrix(3363059, 361);
    int row, col;
    if(!fp) file_error(filename);
    char *label;
    int count = 0;
    while((label = fgetl(fp))){
        int i;
        if(count == X.rows){
            X = resize_matrix(X, count*2);
            y = resize_matrix(y, count*2);
        }
        sscanf(label, "%d %d", &row, &col);
        char *board = fgetl(fp);  /* NOTE(review): not NULL-checked (truncated file) */
        int index = row*19 + col;
        y.vals[count][index] = 1;
        for(i = 0; i < 19*19; ++i){
            float val = 0;
            if(board[i] == '1') val = 1;
            else if(board[i] == '2') val = -1;
            X.vals[count][i] = val;
        }
        ++count;
        free(label);
        free(board);
    }
    X = resize_matrix(X, count);
    y = resize_matrix(y, count);

    data d = {0};
    d.shallow = 0;
    d.X = X;
    d.y = y;
    fclose(fp);
    return d;
}

/* In-place shuffle of rows (X and y stay aligned). */
void randomize_data(data d)
{
    int i;
    for(i = d.X.rows-1; i > 0; --i){
        int index = rand()%i;  /* NOTE(review): rand()%(i+1) would be unbiased Fisher-Yates */
        float *swap = d.X.vals[index];
        d.X.vals[index] = d.X.vals[i];
        d.X.vals[i] = swap;

        swap = d.y.vals[index];
        d.y.vals[index] = d.y.vals[i];
        d.y.vals[i] = swap;
    }
}

/* Multiply every element of X by s. */
void scale_data_rows(data d, float s)
{
    int i;
    for(i = 0; i < d.X.rows; ++i){
        scale_array(d.X.vals[i], d.X.cols, s);
    }
}

/* Add s to every element of X. */
void translate_data_rows(data d, float s)
{
    int i;
    for(i = 0; i < d.X.rows; ++i){
        translate_array(d.X.vals[i], d.X.cols, s);
    }
}

/* Deep copy of X and y; the `boxes` pointer is shared, not duplicated. */
data copy_data(data d)
{
    data c = {0};
    c.w = d.w;
    c.h = d.h;
    c.shallow = 0;
    c.num_boxes = d.num_boxes;
    c.boxes = d.boxes;
    c.X = copy_matrix(d.X);
    c.y = copy_matrix(d.y);
    return c;
}

/* Normalize each row of X in place via normalize_array. */
void normalize_data_rows(data d)
{
    int i;
    for(i = 0; i < d.X.rows; ++i){
        normalize_array(d.X.vals[i], d.X.cols);
    }
}

/* Shallow view of slice `part` out of `total` near-equal partitions. */
data get_data_part(data d, int part, int total)
{
    data p = {0};
    p.shallow = 1;
    p.X.rows = d.X.rows * (part + 1) / total - d.X.rows * part / total;
    p.y.rows = d.y.rows * (part + 1) / total - d.y.rows * part / total;
    p.X.cols = d.X.cols;
    p.y.cols = d.y.cols;
    p.X.vals = d.X.vals + d.X.rows * part / total;
    p.y.vals = d.y.vals + d.y.rows * part / total;
    return p;
}

/* Shallow sample of `num` random rows (with replacement). */
data get_random_data(data d, int num)
{
    data r = {0};
    r.shallow = 1;

    r.X.rows = num;
    r.y.rows = num;

    r.X.cols = d.X.cols;
    r.y.cols = d.y.cols;

    r.X.vals = (float**)calloc(num, sizeof(float *));
    r.y.vals = (float**)calloc(num, sizeof(float *));

    int i;
    for(i = 0; i < num; ++i){
        int index = rand()%d.X.rows;
        r.X.vals[i] = d.X.vals[index];
        r.y.vals[i] = d.y.vals[index];
    }
    return r;
}

/* Shallow train/test split: slice `part` of `total` becomes the test set,
 * the remaining rows the train set.  Returns malloc'd {train, test}. */
data *split_data(data d, int part, int total)
{
    data *split = (data*)calloc(2, sizeof(data));
    int i;
    int start = part*d.X.rows/total;
    int end = (part+1)*d.X.rows/total;
    data train;
    data test;
    train.shallow = test.shallow = 1;
    test.X.rows = test.y.rows = end-start;
    train.X.rows = train.y.rows = d.X.rows - (end-start);
    train.X.cols = test.X.cols = d.X.cols;
    train.y.cols = test.y.cols = d.y.cols;

    train.X.vals = (float**)calloc(train.X.rows, sizeof(float*));
    test.X.vals = (float**)calloc(test.X.rows, sizeof(float*));
    train.y.vals = (float**)calloc(train.y.rows, sizeof(float*));
    test.y.vals = (float**)calloc(test.y.rows, sizeof(float*));

    for(i = 0; i < start; ++i){
        train.X.vals[i] = d.X.vals[i];
        train.y.vals[i] = d.y.vals[i];
    }
    for(i = start; i < end; ++i){
        test.X.vals[i-start] = d.X.vals[i];
        test.y.vals[i-start] = d.y.vals[i];
    }
    for(i = end; i < d.X.rows; ++i){
        train.X.vals[i-(end-start)] = d.X.vals[i];
        train.y.vals[i-(end-start)] = d.y.vals[i];
    }
    split[0] = train;
    split[1] = test;
    return split;
}
#undef class
#undef new
sharing-3.c
/* { dg-do compile } */ #define N 50 #define CHUNKSIZE 5 main () { int i, chunk; float c[N]; chunk = CHUNKSIZE; #pragma omp parallel for shared (c, chunk) schedule (dynamic, chunk) for (i = 0; i < N; i++) c[i] = i; return 0; }
GB_queue_insert.c
//------------------------------------------------------------------------------ // GB_queue_insert: insert a matrix at the head of the matrix queue //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2018, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // check if the matrix has pending computations (either pending tuples or // zombies, or both). If it has any, and if it is not already in the queue, // then insert it into the queue. #include "GB.h" void GB_queue_insert // insert matrix at the head of queue ( GrB_Matrix A // matrix to insert ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (A != NULL) ; //-------------------------------------------------------------------------- // insert the matrix at the head of the queue //-------------------------------------------------------------------------- if ((A->npending > 0 || A->nzombies > 0) && !(A->enqueued)) { // A is not in the queue yet, but needs to be there #pragma omp critical (GB_queue) { // check again to be safe, then add A to the head of the queue if ((A->npending > 0 || A->nzombies > 0) && !(A->enqueued)) { // add the matrix to the head of the queue GrB_Matrix Head = (GrB_Matrix) (GB_Global.queue_head) ; A->queue_next = Head ; A->queue_prev = NULL ; A->enqueued = true ; if (Head != NULL) { Head->queue_prev = A ; } GB_Global.queue_head = A ; } } } }
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 24; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution 
- Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
core_zhegst.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> c d s
 *
 **/

#include "core_blas.h"
#include "plasma_types.h"
#include "core_lapack.h"

/***************************************************************************//**
 *
 * @ingroup core_hegst
 *
 *  Reduces a complex Hermitian-definite generalized eigenproblem to standard
 *  form.
 *
 *  If ITYPE = 1, the problem is A*x = lambda*B*x,
 *  and A is overwritten by inv(U^H)*A*inv(U) or inv(L)*A*inv(L^H)
 *
 *  If ITYPE = 2 or 3, the problem is A*B*x = lambda*x or
 *  B*A*x = lambda*x, and A is overwritten by U*A*U^H or L^H*A*L.
 *
 *******************************************************************************
 *
 * @param[in] itype
 *          = 1: compute inv(U^H)*A*inv(U) or inv(L)*A*inv(L^H);
 *          = 2 or 3: compute U*A*U^H or L^H*A*L.
 *
 * @param[in] uplo
 *          If PlasmaUpper, the upper triangle of A is stored and B is
 *          factored as U^H*U;
 *          If PlasmaLower, the lower triangle of A is stored and B is
 *          factored as L*L^H.
 *
 * @param[in] n
 *          The order of the matrices A and B.  N >= 0.
 *
 * @param[in,out] A
 *          On entry, the Hermitian matrix A: the triangle selected by uplo
 *          holds the data and the opposite strict triangle is not
 *          referenced.  On exit, if INFO = 0, the transformed matrix,
 *          stored in the same format as A.
 *
 * @param[in] lda
 *          The leading dimension of the array A.  LDA >= max(1,N).
 *
 * @param[in,out] B
 *          The triangular factor from the Cholesky factorization of B,
 *          as returned by ZPOTRF.
 *
 * @param[in] ldb
 *          The leading dimension of the array B.  LDB >= max(1,N).
 *
 ******************************************************************************/
int core_zhegst(int itype, plasma_enum_t uplo,
                int n,
                plasma_complex64_t *A, int lda,
                plasma_complex64_t *B, int ldb)
{
    // Thin wrapper: forward straight to LAPACKE's workspace variant and
    // propagate its return code.
    return LAPACKE_zhegst_work(LAPACK_COL_MAJOR,
                               itype,
                               lapack_const(uplo),
                               n, A, lda, B, ldb);
}

/******************************************************************************/
void core_omp_zhegst(int itype, plasma_enum_t uplo,
                     int n,
                     plasma_complex64_t *A, int lda,
                     plasma_complex64_t *B, int ldb,
                     plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Task wrapper: A is read-write, B read-only; skip the work entirely
    // once the sequence has already recorded a failure.
    #pragma omp task depend(inout:A[0:lda*n]) \
                     depend(in:B[0:ldb*n])
    {
        if (sequence->status == PlasmaSuccess)
            core_zhegst(itype, uplo, n, A, lda, B, ldb);
    }
}
Fig_4.3_parReg1.c
#include <stdio.h> #include <omp.h> int main() { omp_set_num_threads(4); int size_of_team; #pragma omp parallel { int ID = omp_get_thread_num(); int NThrds = omp_get_num_threads(); if (ID == 0) size_of_team = NThrds; } // end of parallel region printf("We just did the join on a team of size \%d", size_of_team); }
stepwise_tangent.h
#ifndef __MIXTURE_MODEL_OPT_STEPWISE_TANGENT_H
#define __MIXTURE_MODEL_OPT_STEPWISE_TANGENT_H

#include <vector>
#include <functional>
#include <numeric>
#include <cassert>
#include <atomic>
#include <algorithm>
#include <iomanip>
#include <iostream>
#include <fstream>
#include <string>

#include <boost/thread.hpp>

#include "../kdtree-eigen/kdtree_eigen.h"
#include "../distribution.h"
#include "../mixture_model.h"
#include "../samples.h"
#include "../sphere_volume.h"
#include "util.h"
// #include "../gsl/gsl"

#define TANGENT_DEBUG 0
#define SPLIT_AND_MERGE 0

namespace jmm {

// Accumulator of per-component sufficient statistics for the stepwise EM
// optimizer below: mixture weights, first and second tangent-space moments,
// plus error-statistic sums used by the (disabled) split-and-merge code path.
template<
    typename Scalar,
    int t_meanDims,
    int t_covDims
>
struct SDMMParams {
    using Vectord = Eigen::Matrix<Scalar, t_meanDims, 1>;
    using Matrixd = Eigen::Matrix<Scalar, t_covDims, t_covDims>;

    EIGEN_MAKE_ALIGNED_OPERATOR_NEW

    // Allocates zero-initialized statistics for `size` mixture components.
    SDMMParams(int size) : size(size) {
        heuristicWeight = Scalar(0.f);
        weights.resize(size, Scalar(0.f));
        means.resize(size, Vectord::Zero());
        covs.resize(size, Matrixd::Zero());
        densitySum.resize(size, Scalar(0.f));
        logDensitySum.resize(size, Scalar(0.f));
        logPdfSum.resize(size, Scalar(0.f));
    }

    SDMMParams(SDMMParams&& other) = default;
    SDMMParams(const SDMMParams& other) = default;
    SDMMParams& operator=(SDMMParams&& other) = default;
    SDMMParams& operator=(const SDMMParams& other) = default;

    // Resets every accumulator (including the error sums) to zero.
    void setZero() {
        heuristicWeight = Scalar(0.f);
        std::fill(weights.begin(), weights.end(), Scalar(0.f));
        std::fill(means.begin(), means.end(), Vectord::Zero());
        std::fill(covs.begin(), covs.end(), Matrixd::Zero());
        std::fill(densitySum.begin(), densitySum.end(), Scalar(0.f));
        std::fill(logPdfSum.begin(), logPdfSum.end(), Scalar(0.f));
        std::fill(logDensitySum.begin(), logDensitySum.end(), Scalar(0.f));
    }

    // Writes invWeight-scaled copies of the weight/mean/cov accumulators into
    // `out`. The error sums are NOT normalized here.
    void normalize(SDMMParams& out, Scalar invWeight) {
        auto normalize = [&](const auto& value) {
            return value * invWeight;
        };
        out.heuristicWeight = normalize(heuristicWeight);
        std::transform(weights.begin(), weights.end(),
            out.weights.begin(), normalize);
        std::transform(means.begin(), means.end(),
            out.means.begin(), normalize);
        std::transform(covs.begin(), covs.end(),
            out.covs.begin(), normalize);
    }

    // In-place decay of the weight/mean/cov accumulators (used for the
    // (1 - eta) forgetting factor of the stepwise update).
    SDMMParams& operator*=(Scalar multiplier) {
        auto multiply = [multiplier](auto& range) {
            std::transform(
                range.begin(), range.end(), range.begin(),
                [multiplier](const auto& value) { return value * multiplier; }
            );
        };
        heuristicWeight *= multiplier;
        multiply(weights);
        multiply(means);
        multiply(covs);
        return *this;
    }

    // out += multiplier * this, element-wise, for the weight/mean/cov
    // accumulators (the eta-weighted merge of fresh statistics).
    void sumProductInto(SDMMParams& out, Scalar multiplier) {
        auto sumProduct = [&](const auto& value, const auto& outValue) {
            return multiplier * value + outValue;
        };
        auto sumProductIntoRange = [&](const auto& range, auto& outRange) {
            std::transform(
                range.begin(), range.end(),
                outRange.begin(), outRange.begin(),
                sumProduct
            );
        };
        out.heuristicWeight = sumProduct(heuristicWeight, out.heuristicWeight);
        sumProductIntoRange(weights, out.weights);
        sumProductIntoRange(means, out.means);
        sumProductIntoRange(covs, out.covs);
    }

    // out += this, for the error-statistic sums only (no eta weighting).
    void sumErrorStatsInto(SDMMParams& out) {
        auto sum = [&](const auto& value, const auto& outValue) {
            return value + outValue;
        };
        auto sumIntoRange = [&](const auto& range, auto& outRange) {
            std::transform(
                range.begin(), range.end(),
                outRange.begin(), outRange.begin(),
                sum
            );
        };
        sumIntoRange(densitySum, out.densitySum);
        sumIntoRange(logDensitySum, out.logDensitySum);
        sumIntoRange(logPdfSum, out.logPdfSum);
    }

    // Fills `error` with a per-component error estimate. The KL-style
    // estimator is commented out; the current code reports the per-sample
    // average of densitySum only.
    void calculateError(jmm::aligned_vector<Scalar>& error, Scalar nSamples) {
        if(error.size() != size) {
            error.resize(size, 0.f);
        }
        for(int component_i = 0; component_i < densitySum.size(); ++component_i) {
            // error[component_i] = (
            //     logDensitySum[component_i] -
            //     std::log(densitySum[component_i]) * densitySum[component_i] -
            //     logPdfSum[component_i]
            // ) / densitySum[component_i];
            // error[component_i] /= nSamples;
            error[component_i] = densitySum[component_i] / nSamples;
        }
    }

    int size;                                  // number of components
    Scalar heuristicWeight;                    // posterior mass of the heuristic (non-mixture) lobe
    jmm::aligned_vector<Scalar> weights;       // sum of posterior-weighted sample weights
    jmm::aligned_vector<Vectord> means;        // sum of weighted tangent vectors (first moment)
    jmm::aligned_vector<Matrixd> covs;         // sum of weighted outer products (second moment)
    jmm::aligned_vector<Scalar> densitySum;    // split-and-merge error statistic
    jmm::aligned_vector<Scalar> logDensitySum; // split-and-merge error statistic (unused path)
    jmm::aligned_vector<Scalar> logPdfSum;     // split-and-merge error statistic (unused path)
};

// Stepwise (online) expectation-maximization for a mixture model whose
// components live in tangent spaces (SDMM). Each call to optimize() performs
// one E-step over a batch of samples and one prior-regularized M-step,
// blending the fresh sufficient statistics into running global statistics
// with a decaying learning rate eta = (0.2 * iteration + 1)^-alpha.
template<
    int t_dims,
    int t_components,
    int t_conditionalDims,
    typename Scalar,
    template<int, int, typename> class Component_t,
    template<int, int, typename> class Marginal_t
>
class StepwiseTangentEM {
protected:
    using MM = MixtureModel<t_dims, t_components, t_conditionalDims, Scalar, Component_t, Marginal_t>;
    using Vectord = typename MM::Vectord;
    using Component = typename MM::Component;
    using JointTangentVectord = typename Component::JointTangentVectord;
    using Matrixd = typename MM::Matrixd;
    using TangentSpaced = TangentSpace<t_dims, t_conditionalDims, Scalar>;

    constexpr static int t_statDims = Component::t_jointTangentDims;

    int iterationsRun;                                        // optimize() calls completed
    jmm::aligned_vector<int> iterationsRunForMixture;         // per-component iteration counts
    jmm::aligned_vector<bool> startedTraining;                // component has ever received samples
    Scalar heuristicTotalWeight;                              // running eta-blended total sample weight
    jmm::aligned_vector<Scalar> totalWeightForMixture;        // per-component running total weight
    SDMMParams<Scalar, t_statDims, t_statDims> statsGlobal;           // running (unnormalized) statistics
    SDMMParams<Scalar, t_statDims, t_statDims> statsGlobalNormalized; // statsGlobal / heuristicTotalWeight
    SDMMParams<Scalar, t_statDims, t_statDims> newParams;             // M-step output parameters
    jmm::aligned_vector<TangentSpaced> tangentSpacesNew;
    jmm::aligned_vector<Scalar> samplesPerComponentGlobal;
    jmm::aligned_vector<Scalar> samplesPerComponentInIterationGlobal;
    Scalar sampleCountGlobal;
    jmm::aligned_vector<Matrixd> bPriors;                     // per-component covariance (inverse-Wishart-style) priors
    jmm::aligned_vector<Eigen::Matrix<Scalar, 3, 3>> bDepthPriors; // extra spatial prior added when t_dims == 6
    Scalar alpha;                                             // learning-rate decay exponent
    // NOTE(review): the scalar member bPrior is never initialized (the ctor
    // initializer is commented out) yet it is read in splitStatsSVD — the
    // matrix priors in bPriors are what the M-step actually uses. Confirm.
    Scalar bPrior;
    Scalar niPriorMinusOne;                                   // Dirichlet-style weight prior
    Scalar epsilon;
    bool decreasePrior;                                       // decay priors over iterations
    Scalar trainingBatch = 0;
    bool jacobianCorrection = false;
    int trainingCutoff = 32;
    Scalar minBPrior = 0; // 1e-8f;
    Scalar minNiPriorMinusOne = 0; // 1e-6;

public:
    // StepwiseTangentEM() : StepwiseTangentEM(0.9, 0.5, 6e-5, 1e-100, true) { }
    // alpha(0.9),
    // bPrior(0.5),
    // niPriorMinusOne(6e-5),
    // epsilon(1e-100),
    // decreasePrior(true) {}

    // All statistics start at zero; the diagonal covariance prior bPrior is
    // replicated for every component.
    StepwiseTangentEM(
        Scalar alpha=0.9,
        Eigen::Matrix<Scalar, 5, 1> bPrior=Eigen::Matrix<Scalar, 5, 1>::Constant(1e-5),
        Scalar niPriorMinusOne=6e-5,
        Scalar epsilon=1e-100,
        bool decreasePrior=true
    ) :
        statsGlobal(t_components),
        statsGlobalNormalized(t_components),
        newParams(t_components),
        alpha(alpha),
        // bPrior(bPrior),
        niPriorMinusOne(niPriorMinusOne),
        epsilon(epsilon),
        decreasePrior(decreasePrior)
    {
        iterationsRun = 0;
        iterationsRunForMixture.resize(t_components, 0);
        heuristicTotalWeight = 0.f;
        totalWeightForMixture.resize(t_components, 0.f);
        startedTraining.resize(t_components, false);
        samplesPerComponentGlobal.resize(t_components, 0);
        samplesPerComponentInIterationGlobal.resize(t_components);
        sampleCountGlobal = 0;
        Matrixd bPriorMatrix = bPrior.asDiagonal();
        bPriors.resize(t_components, bPriorMatrix);
        bDepthPriors.resize(t_components, Eigen::Matrix<Scalar, 3, 3>::Identity() * epsilon);
        tangentSpacesNew.resize(t_components);
    };

    void setJacobianCorrection(bool on) { jacobianCorrection = on; }
    jmm::aligned_vector<Matrixd>& getBPriors() { return bPriors; }
    auto& getStatsGlobal() { return statsGlobal; }
    jmm::aligned_vector<Eigen::Matrix<Scalar, 3, 3>>& getBDepthPriors() { return bDepthPriors; }

    // E-step: accumulates posterior-weighted tangent-space moments of the
    // batch into `stats`. Must run inside an enclosing `omp parallel`
    // (it contains a bare `#pragma omp for`). `samplesPerComponent` and
    // `countSamples` are currently not updated here (counting code lives,
    // commented out, in calculateStatsPrune).
    void calculateStats(
        MM& distribution,
        Samples<t_dims, Scalar>& samples,
        bool countSamples,
        SDMMParams<Scalar, t_statDims, t_statDims>& stats,
        jmm::aligned_vector<Scalar>& samplesPerComponent,
        Scalar weightSum
    ) {
        Eigen::Matrix<Scalar, Eigen::Dynamic, 1> posterior(distribution.nComponents(), 1);
        Eigen::Matrix<Scalar, Eigen::Dynamic, 1> pdf(distribution.nComponents(), 1);
        Eigen::Matrix<
            Scalar, Component::t_jointTangentDims, Eigen::Dynamic
        > tangentVectors(Component::t_jointTangentDims, distribution.nComponents());

        // Only used by the SPLIT_AND_MERGE error statistic below.
        Scalar weightNormalization = (Scalar) samples.size() / weightSum;

        #pragma omp for
        for(int sample_i = 0; sample_i < samples.size(); ++sample_i) {
            if(!isValidSample(samples, sample_i)) {
                continue;
            }
            if(samples.weights(sample_i) == 0) {
                continue;
            }
            Vectord sample = samples.samples.col(sample_i);
            Scalar heuristicPosterior = 0;
            bool useHeuristic = samples.isDiffuse(sample_i);
            distribution.posteriorAndLog(
                sample,
                useHeuristic,
                samples.heuristicPdfs(sample_i),
                pdf,
                posterior,
                tangentVectors,
                heuristicPosterior
            );
            stats.heuristicWeight += samples.weights(sample_i) * heuristicPosterior;
            const auto& components = distribution.components();
            for(int component_i = 0; component_i < distribution.nComponents(); ++component_i) {
                // Skip negligible responsibilities.
                if(posterior(component_i) < 1e-10) {
                    // TODO: still calculate marginals and normalization
                    continue;
                }
                // Scalar weightAugmented = std::sqrt(samples.weights(sample_i));
                Scalar weight = samples.weights(sample_i) * posterior(component_i);
#if TANGENT_DEBUG == 1
                if(weight == 0.f) {
                    std::cerr << "Zero weight * posterior: " << samples.weights(sample_i)
                        << " * " << posterior(component_i)
                        << " from component " << component_i
                        << ", with weight: " << distribution.weights()[component_i]
                        << ", and covariance determinant: " << components[component_i].cov().determinant()
                        << "\n";
                    continue;
                }
#endif // TANGENT_DEBUG == 1
                // Zeroth, first, and second weighted moments in tangent space.
                stats.weights[component_i] += weight;
                stats.means[component_i] += weight * tangentVectors.col(component_i);
                stats.covs[component_i] +=
                    weight * tangentVectors.col(component_i) * tangentVectors.col(component_i).transpose();
#if SPLIT_AND_MERGE == 1
                if(weight > 0 && pdf(component_i) > 0) {
                    Scalar samplingPdf = samples.samplingPdfs(sample_i);
                    Scalar Li = samples.weights(sample_i) * samplingPdf;
                    Scalar LiNormalized = Li * weightNormalization;
                    stats.densitySum[component_i] +=
                        LiNormalized * LiNormalized * posterior(component_i) / (pdf(component_i) * samplingPdf) - 1;
                    // stats.densitySum[component_i] += std::abs(weight * weightNormalization - pdf(component_i));
                    // stats.logDensitySum[component_i] += std::log(weight * weightNormalization);
                    // stats.logPdfSum[component_i] += weight * weightNormalization * std::log(pdf(component_i));
                }
#endif // SPLIT_AND_MERGE == 1
            }
        }
    }

    // E-step variant that evaluates only the components surviving the
    // mixture's pruned posterior query (indices returned in
    // posteriorIndices[0 .. posteriorLastIdx)). Same accumulators as
    // calculateStats; also requires an enclosing `omp parallel`.
    void calculateStatsPrune(
        MM& distribution,
        Samples<t_dims, Scalar>& samples,
        bool countSamples,
        SDMMParams<Scalar, t_statDims, t_statDims>& stats,
        jmm::aligned_vector<Scalar>& samplesPerComponent
    ) {
        Eigen::Matrix<Scalar, Eigen::Dynamic, 1> posterior(distribution.nComponents(), 1);
        Eigen::Matrix<
            Scalar, Component::t_jointTangentDims, Eigen::Dynamic
        > tangentVectors(Component::t_jointTangentDims, distribution.nComponents());
        Eigen::Matrix<int, Eigen::Dynamic, 1> posteriorIndices(distribution.nComponents(), 1);
        int posteriorLastIdx;

        #pragma omp for
        for(int sample_i = 0; sample_i < samples.size(); ++sample_i) {
            if(!isValidSample(samples, sample_i)) {
                continue;
            }
            if(samples.weights(sample_i) == 0) {
                continue;
            }
            Vectord sample = samples.samples.col(sample_i);
            Scalar heuristicPosterior = 0;
            bool useHeuristic = samples.isDiffuse(sample_i);
            // distribution.posterior(
            //     sample,
            //     useHeuristic,
            //     samples.heuristicPdfs(sample_i),
            //     posterior,
            //     heuristicPosterior
            // );
            distribution.posteriorPruneAndLog(
                sample,
                useHeuristic,
                samples.heuristicPdfs(sample_i),
                posterior,
                tangentVectors,
                posteriorIndices,
                posteriorLastIdx,
                heuristicPosterior
            );
            stats.heuristicWeight += samples.weights(sample_i) * heuristicPosterior;
            const auto& components = distribution.components();
            // posterior/tangentVectors are indexed by position in the pruned
            // list; posteriorIndices maps back to component ids.
            for(int found_i = 0; found_i < posteriorLastIdx; ++found_i) {
                Scalar weight = samples.weights(sample_i) * posterior(found_i);
                int component_i = posteriorIndices(found_i);
#if TANGENT_DEBUG == 1
                if(weight == 0.f) {
                    std::cerr << "Zero weight * posterior: " << samples.weights(sample_i)
                        << " * " << posterior(component_i)
                        << " from component " << component_i
                        << ", with weight: " << distribution.weights()[component_i]
                        << ", and covariance determinant: " << components[component_i].cov().determinant()
                        << "\n";
                    continue;
                }
#endif // TANGENT_DEBUG == 1
                // JointTangentVectord tangentSample;
                // Scalar jacobian;
                // bool logSuccess = components[component_i].tangentSpace().log(sample, tangentSample, jacobian);
                // if(!logSuccess) {
                //     continue;
                // }
                // bool isInside =
                //     components[component_i].isInside(samples.samples.col(sample_i), 0.95);
                // if(countSamples) {
                //     samplesPerComponent[component_i] += isInside ? 1 : 0;
                // }
                stats.weights[component_i] += weight;
                stats.means[component_i] += weight * tangentVectors.col(found_i);
                stats.covs[component_i] +=
                    weight * tangentVectors.col(found_i) * tangentVectors.col(found_i).transpose();
            }
        }
    }

    // A sample is valid iff its weight is finite; warns on stderr otherwise.
    bool isValidSample(
        const Samples<t_dims, Scalar>& samples,
        int sample_i,
        bool warn=true
    ) {
        if(std::isfinite(samples.weights(sample_i))) {
            return true;
        }
        if(warn) {
            std::cerr << "inf or nan sample, id=" << sample_i
                << ", value=" << samples.weights(sample_i) << '\n';
        }
        return false;
    }

    // Total weight of all finite-weight samples in the batch.
    Scalar sumWeights(const Samples<t_dims, Scalar>& samples) {
        Scalar weightSum = 0.f;
        #pragma omp parallel num_threads(1)
        {
            #pragma omp for reduction(+: weightSum)
            for(int sample_i = 0; sample_i < samples.size(); ++sample_i) {
                if(!isValidSample(samples, sample_i)) {
                    continue;
                }
                weightSum += samples.weights(sample_i);
            }
        }
        return weightSum;
    }

    // Parameters of a single mixture component (weight, tangent mean, cov).
    struct SDMMIndividualParams {
        Scalar weight;
        JointTangentVectord mean;
        Matrixd cov;
        Component distribution() {
            return Component(mean, cov);
        }
    };

    // Fraction of the parent's weight given to the first child in a split.
    constexpr static Scalar weightSplitWeight = 0.5;

    // Splits one Gaussian into two along its principal (directional) axis
    // using the SVD of its covariance; the first 3 (spatial) rows of the
    // split direction are zeroed, so the split happens in the directional
    // subspace only. Returns the two child parameter sets.
    std::pair<SDMMIndividualParams, SDMMIndividualParams> splitSVD(
        const SDMMIndividualParams& gaussian
    ) {
        constexpr static Scalar u = 0.5;
        constexpr static Scalar beta = 0.5;
        constexpr static int l = 0;
        constexpr static Scalar mean_j_const =
            std::sqrt((1 - weightSplitWeight) / weightSplitWeight) * u;
        constexpr static Scalar mean_k_const =
            std::sqrt(weightSplitWeight / (1 - weightSplitWeight)) * u;
        Scalar weight = gaussian.weight;
        Matrixd cov = gaussian.cov;
        const Eigen::JacobiSVD<Matrixd> svd(cov, Eigen::ComputeFullU | Eigen::ComputeFullV);
        Matrixd A = svd.matrixU() * svd.singularValues().cwiseSqrt().asDiagonal();
        JointTangentVectord a_l = A.col(l);
        a_l.topRows(3).setZero();
        std::cerr << a_l.transpose() << "\n";
        Scalar weight_j = weightSplitWeight * weight;
        Scalar weight_k = (1 - weightSplitWeight) * weight;
        Scalar sqrt_k_over_j = std::sqrt(weight_k / weight_j);
        Scalar sqrt_j_over_k = std::sqrt(weight_j / weight_k);
        JointTangentVectord mean_j = gaussian.mean - mean_j_const * a_l;
        JointTangentVectord mean_k = gaussian.mean + mean_k_const * a_l;
        Matrixd cov_j =
            (1 - weightSplitWeight) / weightSplitWeight * cov +
            ( (beta - beta * u * u - 1) / weightSplitWeight + 1 ) * a_l * a_l.transpose();
        Matrixd cov_k =
            weightSplitWeight / (1 - weightSplitWeight) * cov +
            ( (beta * u * u - beta - u * u) / (1 - weightSplitWeight) + 1 ) * a_l * a_l.transpose();
        return {
            {weight_j, mean_j, cov_j},
            {weight_k, mean_k, cov_k}
        };
    }

    // Splits component `statIdx` in place: grows the mixture by one
    // component, assigns the split parameters to the old slot and the new
    // last slot, and rebuilds statsGlobal so the running statistics remain
    // consistent with the new parameters.
    void splitStatsSVD(MM& distribution, int statIdx) {
        std::cerr << "Splitting component " << statIdx << ".\n";
        Scalar weight = distribution.weights()[statIdx];
        const Component& component = distribution.components()[statIdx];
        const auto& splits_pair = splitSVD(
            {weight, component.tangentMean(), component.cov()}
        );
        SDMMIndividualParams splits[2] = {splits_pair.first, splits_pair.second};
        std::cerr << "Increasing nComponents to " << distribution.nComponents() + 1 << '\n';
        assert(distribution.nComponents() + 1 < t_components);
        distribution.setNComponents(distribution.nComponents() + 1);
        int j = statIdx;
        int k = distribution.nComponents() - 1;
        // The parent's accumulated weight is shared between the children.
        const Scalar weightStat = statsGlobal.weights[statIdx];
        statsGlobal.weights[j] = weightSplitWeight * weightStat;
        statsGlobal.weights[k] = (1 - weightSplitWeight) * weightStat;
        for(int split_i = 0; split_i < 2; ++split_i) {
            int component_i = (split_i == 0) ? j : k;
            // NOTE(review): reads the (apparently uninitialized) scalar
            // member bPrior; result is unused below — confirm intent.
            Scalar decreasedBPrior = bPrior / (Scalar) (iterationsRunForMixture[component_i] + 1);
            // Vectord bPriorDiag = Vectord::Constant(decreasedBPrior);
            newParams.weights[component_i] = splits[split_i].weight;
            newParams.means[component_i] = splits[split_i].mean;
            newParams.covs[component_i] = splits[split_i].cov;
            // newParams.covs[component_i] += (
            //     bPriorDiag *
            //     totalWeightForMixture[component_i] /
            //     statsGlobal.weights[component_i]
            // ).asDiagonal();
            distribution.weights()[component_i] = newParams.weights[component_i];
            std::cerr << "Setting component " << component_i << " to: "
                << newParams.weights[component_i] << ".\n";
            // Map the child's tangent mean back onto the manifold.
            Vectord embeddedMean;
            Scalar expJacobianDet;
            bool success = distribution.components()[component_i].tangentSpace().exp(
                newParams.means[component_i], embeddedMean, expJacobianDet
            );
            assert(success);
            distribution.components()[component_i].set(
                embeddedMean, newParams.covs[component_i]
            );
            // Rebuild the first/second moment sums from the new parameters.
            statsGlobal.means[component_i] =
                statsGlobal.weights[component_i] * newParams.means[component_i];
            statsGlobal.covs[component_i] =
                statsGlobal.weights[component_i] * (
                    newParams.covs[component_i] +
                    newParams.means[component_i] * newParams.means[component_i].transpose()
                );
            // compareNewAndStats(component_i);
        }
        bool successfulCdfCreation = distribution.createCdf(true);
    }

    // One stepwise EM update on `distribution` from the given sample batch:
    // E-step (calculateStats), eta-weighted merge into the running global
    // statistics, then a prior-regularized M-step that re-fits every
    // component's weight, tangent mean, and covariance (with optional
    // tangent-space Jacobian correction). `maxError` is currently only
    // written by the commented-out split-and-merge branch.
    void optimize(
        MM& distribution,
        Samples<t_dims, Scalar>& samples,
        Scalar& maxError
    ) {
        int componentBegin = 0;
        int componentEnd = distribution.nComponents();
        // Sum up the weights
        Scalar weightSum = sumWeights(samples);
        if(weightSum == 0) {
            return;
        }
        #pragma omp parallel num_threads(1)
        {
#if TANGENT_DEBUG == 1
            #pragma omp critical
            {
                std::cerr << "Optimizer threadID=" << omp_get_thread_num() << "\n";
            }
            #pragma omp single
            {
                std::cerr << "Weights sum: " << weightSum << "\n";
            }
#endif
            jmm::aligned_vector<Scalar> eta_i(t_components);
            Scalar heuristicEta = 0;
            SDMMParams<Scalar, t_statDims, t_statDims> stats(t_components);
            jmm::aligned_vector<Scalar> samplesPerComponent(t_components);
            int iterations = 1;
            // if(iterationsRun < 3) {
            //     iterations = 2;
            // }
            for(int emIt = 0; emIt < iterations; ++emIt) {
                #pragma omp barrier
                #pragma omp single
                {
                    newParams.setZero();
                    std::fill(
                        samplesPerComponentInIterationGlobal.begin(),
                        samplesPerComponentInIterationGlobal.end(),
                        0.f
                    );
                }
                stats.setZero();
                std::fill(
                    samplesPerComponent.begin(),
                    samplesPerComponent.end(),
                    0.f
                );
                #pragma omp barrier
                // E-step: per-thread statistics over this thread's samples.
                calculateStats(
                    distribution,
                    samples,
                    emIt == 0,
                    stats,
                    samplesPerComponent,
                    weightSum
                );
                #pragma omp barrier
                #pragma omp critical
                {
                    for(int component_i = componentBegin; component_i < componentEnd; ++component_i) {
                        samplesPerComponentGlobal[component_i] += samplesPerComponent[component_i];
                        samplesPerComponentInIterationGlobal[component_i] += samplesPerComponent[component_i];
                        sampleCountGlobal += samplesPerComponent[component_i];
                    }
#if TANGENT_DEBUG == 1
                    std::cerr << "Thread ID=" << omp_get_thread_num()
                        << " finished calculating stats."
                        << " Sample count: " << samplesPerComponent[0] << "\n";
#endif
                }
                #pragma omp barrier
                // Decaying stepwise learning rate for the running averages.
                Scalar learningRate = 0.2;
                heuristicEta = std::pow(learningRate * iterationsRun + 1, -alpha);
                for(int component_i = componentBegin; component_i < componentEnd; ++component_i) {
                    // If a mixture gets 0 samples in the first iteration,
                    // eta_i interpolate between 0 and the new ss.
                    // If we add the samples per component before this method here,
                    // we risk that happening.
                    // Simply deciding eta_i based on iterationsRunForMixture also doesn't work,
                    // because then we don't accumulate anything from previous iterations since
                    // (iterationsRunForMixture + 1)^-1 = 1.
                    // TODO: COULD BE THE PROBLEM! Turn off after 3rd iteration or so?
                    eta_i[component_i] = std::pow(learningRate * iterationsRun + 1, -alpha);
                }
                #pragma omp barrier
                #pragma omp single
                {
#if TANGENT_DEBUG == 1
                    std::cerr << "eta_i=" << heuristicEta << '\n';
#endif // TANGENT_DEBUG == 1
                    // Blend the batch weight into the running totals.
                    heuristicTotalWeight *= (1.f - heuristicEta);
                    heuristicTotalWeight += heuristicEta * weightSum;
                    for(int component_i = componentBegin; component_i < componentEnd; ++component_i) {
                        totalWeightForMixture[component_i] *= (1 - eta_i[component_i]);
                        totalWeightForMixture[component_i] += eta_i[component_i] * weightSum;
                    }
                }
                #pragma omp barrier
                #pragma omp single
                {
                    // Decay the old global statistics ...
                    statsGlobal *= (1.f - heuristicEta);
                }
                #pragma omp barrier
                #pragma omp critical
                {
                    // ... and merge in each thread's fresh statistics.
                    stats.sumProductInto(statsGlobal, heuristicEta);
#if SPLIT_AND_MERGE == 1
                    stats.sumErrorStatsInto(statsGlobal);
#endif // SPLIT_AND_MERGE == 1
                }
                #pragma omp barrier
                #pragma omp single
                {
                    auto& components = distribution.components();
                    // Normalize distribution.
                    distribution.setNormalization(
                        (1.f - heuristicEta) * distribution.normalization() +
                        heuristicEta * weightSum / (Scalar) samples.size()
                    );
                    Scalar invTotalWeight = 1.f / heuristicTotalWeight;
                    statsGlobal.normalize(statsGlobalNormalized, invTotalWeight);
                    int weakGaussiansCount = 0;
                    int degenerateWeightsCount = 0;
                    int degenerateGaussiansCount = 0;
                    int untrainedGaussiansCount = 0;
                    auto killComponent = [&](int component_i) -> void {
                        newParams.weights[component_i] = 0;
                        statsGlobal.weights[component_i] = 0;
                    };
                    Scalar invGlobalDecreaseFactor =
                        1.f / Scalar(std::pow((Scalar) 3, (Scalar) std::min(trainingCutoff, iterationsRun)));
                    newParams.heuristicWeight =
                        niPriorMinusOne * invGlobalDecreaseFactor + statsGlobalNormalized.heuristicWeight;
                    // M-step: re-fit each component from the normalized
                    // running statistics plus decaying priors.
                    // #pragma omp for
                    for(int component_i = componentBegin; component_i < componentEnd; ++component_i) {
                        Scalar decreasedNiPriorMinusOne = niPriorMinusOne;
                        Scalar decreasedApriorMinusTwo = 100.f / (Scalar) distribution.nComponents();
                        Matrixd decreasedBPrior = decreasedApriorMinusTwo * bPriors[component_i];
                        Scalar invMixtureDecreaseFactor =
                            1.f / std::pow((Scalar) 2, (Scalar) std::min(trainingCutoff, iterationsRun));
                        if(decreasePrior) {
                            decreasedBPrior = decreasedBPrior * invMixtureDecreaseFactor;
                            decreasedApriorMinusTwo = decreasedApriorMinusTwo * invMixtureDecreaseFactor;
                            decreasedNiPriorMinusOne = niPriorMinusOne * invGlobalDecreaseFactor;
                        }
                        Scalar invWeightStatGlobal = 1.f / statsGlobalNormalized.weights[component_i];
                        Scalar invMatrixNormalization =
                            1.f / (0.05 * decreasedApriorMinusTwo + statsGlobalNormalized.weights[component_i]);
                        // THIS SHOULD NEVER HAPPEN
                        assert(isfinite(statsGlobalNormalized.weights[component_i]));
                        assert(totalWeightForMixture[component_i] > 0);
                        assert(isfinite(invMatrixNormalization));
                        // Dead components should stay dead:
                        // Zero mixture weight means a zero posterior, a zero mean, and a zero covariance.
                        // Equivalently, a zero weight component will never get
                        if(distribution.weights()[component_i] == 0.f) {
                            ++degenerateWeightsCount;
                            newParams.weights[component_i] = 0.f;
                            continue;
                        }
                        // Component received (essentially) no mass: keep only
                        // the prior-regularized weight, leave mean/cov as-is.
                        if(!std::isfinite(invWeightStatGlobal)) {
                            ++weakGaussiansCount;
#if TANGENT_DEBUG == 1
                            std::cerr << "!isfinite(invWeightStatGlobal) = 1.f / "
                                << statsGlobalNormalized.weights[component_i] << std::endl;
#endif // TANGENT_DEBUG == 1
                            newParams.weights[component_i] =
                                decreasedNiPriorMinusOne + statsGlobalNormalized.weights[component_i];
                            continue;
                        }
                        // Only allow components to wake up in the first few iterations.
                        // Otherwise very likely to be garbage.
                        if(samplesPerComponentGlobal[component_i] < trainingBatch && iterationsRun < 3) {
                            ++untrainedGaussiansCount;
                            newParams.weights[component_i] =
                                (iterationsRun < trainingCutoff) ?
                                decreasedNiPriorMinusOne + statsGlobalNormalized.weights[component_i] :
                                0;
                            continue;
                        }
                        // Standard EM updates in tangent space:
                        // mean = S1 / S0, cov = S2 - S1 * mean^T (regularized below).
                        newParams.weights[component_i] =
                            decreasedNiPriorMinusOne + statsGlobalNormalized.weights[component_i];
                        newParams.means[component_i] =
                            statsGlobalNormalized.means[component_i] * invWeightStatGlobal;
                        newParams.covs[component_i] =
                            statsGlobalNormalized.covs[component_i] -
                            statsGlobalNormalized.means[component_i] * newParams.means[component_i].transpose();
                        auto dumpDebugInfo = [&](const std::string& error, int component_i) {
                            std::cerr << error << ": " << component_i << ":\n"
                                << "weightStatsGlobal=" << statsGlobal.weights[component_i] << "\n"
                                << "statsGlobalNormalized.weights=" << statsGlobalNormalized.weights[component_i] << "\n"
                                << "invWeightStatGlobal=" << invWeightStatGlobal << "\n"
                                << "invMatrixNormalization=" << invMatrixNormalization << "\n"
                                << newParams.covs[component_i]
                                << "\n decreasedBPrior:\n" << decreasedBPrior
                                << "\n = decreasedAPrior: " << decreasedApriorMinusTwo
                                << " * bPriors:\n" << bPriors[component_i]
                                << "\n + bDepthPriors:\n" << bDepthPriors[component_i] << '\n';
                        };
#if TANGENT_DEBUG == 1
                        dumpDebugInfo("OPTIMIZATION DEBUG", component_i);
#endif // TANGENT_DEBUG == 1
                        // Covariance prior and MAP normalization.
                        newParams.covs[component_i] += decreasedBPrior;
                        newParams.covs[component_i] *= invMatrixNormalization;
                        if(t_dims == 6) {
                            newParams.covs[component_i].topLeftCorner(3, 3) += bDepthPriors[component_i];
                        }
                        // Move the component's mean back onto the manifold.
                        Vectord embeddedMean;
                        Scalar expJacobianDet;
                        bool success = components[component_i].tangentSpace().exp(
                            newParams.means[component_i], embeddedMean, expJacobianDet
                        );
                        assert(success);
                        Matrixd jointJacobian = Matrixd::Identity();
                        Matrixd jointInvJacobian = Matrixd::Identity();
                        if(jacobianCorrection) {
                            // Re-express the directional covariance block in
                            // the tangent space of the new mean.
                            const TangentSpaced& oldTangentSpace = components[component_i].tangentSpace();
                            TangentSpaced newTangentSpace(embeddedMean);
                            Eigen::Matrix<Scalar, 3, 2> expJacobian = oldTangentSpace.expJacobian(
                                newParams.means[component_i].bottomRows(2)
                            );
                            Eigen::Matrix<Scalar, 2, 3> logJacobian = newTangentSpace.logJacobian(
                                embeddedMean.bottomRows(3)
                            );
                            Eigen::Matrix<Scalar, 2, 1> meanNewDir =
                                newParams.means[component_i].bottomRows(2).normalized();
                            Eigen::Matrix<Scalar, 2, 1> meanNewPerpDir;
                            meanNewPerpDir << -meanNewDir[1], meanNewDir[0];
                            Eigen::Matrix<Scalar, 2, 2> hackobian =
                                meanNewDir * meanNewDir.transpose() +
                                expJacobianDet * meanNewPerpDir * meanNewPerpDir.transpose();
                            jointJacobian.bottomRightCorner(2, 2) =
                                logJacobian *
                                newTangentSpace.invRotation() *
                                oldTangentSpace.rotation() *
                                expJacobian;
#if TANGENT_DEBUG == 1
                            Eigen::Matrix<Scalar, 2, 3> invExpJacobian = oldTangentSpace.logJacobian(
                                embeddedMean.bottomRows(3)
                            );
                            Eigen::Matrix<Scalar, 3, 2> invLogJacobian = newTangentSpace.expJacobian(
                                {0.f, 0.f}
                            );
                            jointInvJacobian.bottomRightCorner(2, 2) =
                                invExpJacobian *
                                oldTangentSpace.invRotation() *
                                newTangentSpace.rotation() *
                                invLogJacobian;
                            std::cerr
                                << "EXP JACOBIAN VALIDATION:\n"
                                << expJacobian << "\n vs \n" << invExpJacobian
                                << "\n=\n" << invExpJacobian * expJacobian << "\n"
                                << "LOG JACOBIAN VALIDATION:\n"
                                << logJacobian << "\n vs \n" << invLogJacobian << "\n"
                                << "JOINT JACOBIAN VALIDATION:\n"
                                << jointJacobian << "\n vs \n" << jointInvJacobian
                                << "\n vs \n" << hackobian << "\n"
                            ;
#endif // TANGENT_DEBUG == 1
                        }
                        newParams.covs[component_i] =
                            jointJacobian * newParams.covs[component_i] * jointJacobian.transpose();
#if TANGENT_DEBUG == 1
                        std::cerr << "Mean " << component_i << ": "
                            << newParams.means[component_i].transpose()
                            << ", embedded: " << embeddedMean.transpose()
                            << ", jacobian: " << jacobian
                            << ", newParams.covs det: " << newParams.covs[component_i].determinant()
                            << "\n";
                        std::cerr << "Joint jacobian matrix det: "
                            << jointJacobian.determinant() << " vs. " << expJacobianDet << '\n';
#endif // TANGENT_DEBUG == 1
                        // Kill components whose covariance went non-PD.
                        if(!isPositiveDefinite(newParams.covs[component_i])) {
                            dumpDebugInfo("Non-PD Matrix", component_i);
                            Eigen::Matrix<Scalar, 3, 3> spatial =
                                newParams.covs[component_i].topLeftCorner(3, 3);
                            Eigen::Matrix<Scalar, 2, 2> directional =
                                newParams.covs[component_i].bottomRightCorner(2, 2);
                            if(!isPositiveDefinite(spatial)) {
                                std::cerr << "Non-PD Spatial Matrix:\n" << spatial << std::endl;
                            }
                            if(!isPositiveDefinite(directional)) {
                                std::cerr << "Non-PD Directional Matrix:\n" << directional << std::endl;
                            }
                            ++degenerateGaussiansCount;
                            newParams.weights[component_i] = 0.f;
                            continue;
                        }
                        components[component_i].set(
                            embeddedMean, newParams.covs[component_i]
                        );
                        // Re-center the running second-moment statistics on
                        // the new tangent space so future updates stay valid.
                        statsGlobalNormalized.covs[component_i] -=
                            statsGlobalNormalized.means[component_i] * newParams.means[component_i].transpose();
                        JointTangentVectord conditionMeanStat = statsGlobalNormalized.means[component_i];
                        conditionMeanStat.template bottomRows<Component::t_tangentDims>().setZero();
                        JointTangentVectord conditionMeanNew = newParams.means[component_i];
                        conditionMeanNew.template bottomRows<Component::t_tangentDims>().setZero();
                        statsGlobalNormalized.covs[component_i] +=
                            conditionMeanStat * conditionMeanNew.transpose();
                        statsGlobalNormalized.covs[component_i] =
                            jointJacobian * statsGlobalNormalized.covs[component_i] * jointJacobian.transpose();
                        statsGlobal.covs[component_i] =
                            statsGlobalNormalized.covs[component_i] * totalWeightForMixture[component_i];
                        statsGlobal.means[component_i].template bottomRows<Component::t_tangentDims>().setZero();
                    }
                    // Copy new distributions
                    // #if TANGENT_DEBUG == 1
                    if(weakGaussiansCount > 0) {
                        std::cerr << "weakGaussiansCount=" << weakGaussiansCount << '\n';
                        std::cerr << "degenerateWeightsCount=" << degenerateWeightsCount << '\n';
                        std::cerr << "degenerateGaussiansCount=" << degenerateGaussiansCount << '\n';
                        std::cerr << "untrainedGaussiansCount=" << untrainedGaussiansCount << '\n';
                    }
                    // #endif // TANGENT_DEBUG == 1
                    Scalar pdfNorm = std::accumulate(
                        std::begin(newParams.weights) + componentBegin,
                        std::begin(newParams.weights) + componentEnd,
                        0.f
                    ) + newParams.heuristicWeight;
#if TANGENT_DEBUG == 1
                    std::cerr << "heuristicWeightNew=" << heuristicWeightNew / pdfNorm << std::endl;
#endif // TANGENT_DEBUG == 1
                    // Renormalize the mixture weights and publish them.
                    jmm::normalizePdf(
                        std::begin(newParams.weights) + componentBegin,
                        std::begin(newParams.weights) + componentEnd
                    );
                    std::copy(
                        std::begin(newParams.weights) + componentBegin,
                        std::begin(newParams.weights) + componentEnd,
                        std::begin(distribution.weights()) + componentBegin
                    );
#if TANGENT_DEBUG == 1
                    std::cerr << "newParams.weights = [";
                    for(int component_i = componentBegin; component_i < componentEnd; ++component_i) {
                        std::cerr << newParams.weights[component_i] << ", ";
                    }
                    std::cerr << "]\n";
#endif // TANGENT_DEBUG == 1
                    // if(iterationsRun > 3) {
                    //     jmm::aligned_vector<Scalar> error(t_components);
                    //     statsGlobal.calculateError(error, samples.size());
                    //     maxError = *std::max_element(error.begin(), error.begin() + componentEnd);
                    //     distribution.setModelError(maxError);
                    //     for(int error_i = 0; error_i < componentEnd; ++error_i) {
                    //         if(error[error_i] > 100 && distribution.nComponents() < t_components - 1) {
                    //             splitStatsSVD(distribution, error_i);
                    //         }
                    //     }
                    // }
                    bool successfulCdfCreation = distribution.createCdf(false);
                    distribution.configure();
                    assert(successfulCdfCreation);
                    // Bookkeeping for the per-component warm-up logic.
                    for(int component_i = componentBegin; component_i < componentEnd; ++component_i) {
                        if(samplesPerComponentGlobal[component_i] > 0) {
                            startedTraining[component_i] = true;
                        }
                    }
                    for(int component_i = componentBegin; component_i < componentEnd; ++component_i) {
                        if(samplesPerComponentGlobal[component_i] >= trainingBatch) {
                            ++iterationsRunForMixture[component_i];
                        }
                    }
                    ++iterationsRun;
                }
                #pragma omp barrier
            }
        }
    }
};

}

#endif /* __MIXTURE_MODEL_OPT_STEPWISE_TANGENT_H */
raycaster.h
#ifndef INTEGRATORS_RAYCASTER_H
#define INTEGRATORS_RAYCASTER_H

#ifndef DEBUG
#include <omp.h>
#endif

#include "integrator.h"
#include "regular_pixel_sampler.h"
#include "timer.h"

/*
 * The RayCaster class template implements a common interface for all ray casters. It assumes that all ray
 * casters shoot only one ray per pixel, through their centers, and that each ray will store some
 * data to its corresponding position on a target buffer.
 *
 * Since all ray casters shoot rays through the centers of the corresponding pixels, the class template
 * instantiates a RegularPixelSampler, which is set up to use just one sample.
 *
 * Ray casters may differ, however, with respect to the type of data that may be returned by the rays.
 * For instance, depending on the ray caster, the ray may return the distance to, or the normal vector
 * at, the closest hit point. The template parameter refers exactly to the type of data that is to be returned by
 * the rays, and will be used, among other things, to define the type of the elements of the target buffer.
*/ template <class T> class RayCaster : public Integrator { public: RayCaster(Camera& camera, const Scene& scene, Integrator::Type integrator_type); virtual ~RayCaster() = 0; T traceRay(const Ray& ray, std::size_t& num_intersection_tests, std::size_t& num_intersections); void render(); virtual void saveImageToFile() = 0; protected: virtual T getHitValue(const IntersectionRecord& intersection_record, std::size_t num_intersection_tests, std::size_t num_intersections) const = 0; virtual T getMissValue(std::size_t num_intersection_tests) const = 0; std::vector<std::vector<T>> buffer_; }; template <class T> RayCaster<T>::RayCaster(Camera& camera, const Scene& scene, Integrator::Type integrator_type) : Integrator(camera, scene, integrator_type, std::make_unique<RegularPixelSampler>(1)), buffer_(camera_.getImageBuffer().getImageWidth(), std::vector<T>(camera_.getImageBuffer().getImageHeight())) {} template <class T> RayCaster<T>::~RayCaster() {} template <class T> T RayCaster<T>::traceRay(const Ray& ray, std::size_t& num_intersection_tests, std::size_t& num_intersections) { IntersectionRecord intersection_record; if (scene_.intersect(ray, intersection_record, num_intersection_tests, num_intersections)) return getHitValue(intersection_record, num_intersection_tests, num_intersections); else return getMissValue(num_intersection_tests); } template <class T> void RayCaster<T>::render() { Timer timer; int_tests_count_ = 0; int_count_ = 0; min_int_tests_count_pp_ = std::numeric_limits<std::size_t>::max(); max_int_tests_count_pp_ = 0; min_int_count_pp_ = std::numeric_limits<std::size_t>::max(); max_int_count_pp_ = 0; timer.start(); #ifndef DEBUG #pragma omp parallel for schedule(dynamic, 1) reduction(+ : int_tests_count_, int_count_) reduction(min: min_int_tests_count_pp_, min_int_count_pp_) reduction(max: max_int_tests_count_pp_, max_int_count_pp_) #endif for (unsigned int y = 0; y < camera_.getImageBuffer().getImageHeight(); ++y) for (unsigned int x = 0; x < 
camera_.getImageBuffer().getImageWidth(); ++x) { std::size_t curr_int_tests_count; std::size_t curr_int_count; float x_screen = x + pixel_sampler_->getSample(0).x; float y_screen = y + pixel_sampler_->getSample(0).y; Ray ray = camera_.getRay(x_screen, y_screen); buffer_[x][y] = traceRay(ray, curr_int_tests_count, curr_int_count); // update integration statistics... int_tests_count_ += curr_int_tests_count; int_count_ += curr_int_count; min_int_tests_count_pp_ = std::min(min_int_tests_count_pp_, curr_int_tests_count); max_int_tests_count_pp_ = std::max(max_int_tests_count_pp_, curr_int_tests_count); min_int_count_pp_ = std::min(min_int_count_pp_, curr_int_count); max_int_count_pp_ = std::max(max_int_count_pp_, curr_int_count); } timer.finish(); total_integration_time_ = timer.getElapsedTime(); std::cout << "====> Total rendering time: " << total_integration_time_ << " microseconds\n"; } #endif // INTEGRATORS_RAYCASTER_H
test.c
#include <stdio.h>
#include <omp.h>

#pragma omp requires unified_shared_memory

#include "../utilities/check.h"
#include "../utilities/utilities.h"

#define TRIALS (1)

#define N (1024*3)

#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i+1;})

#define ZERO(X) ZERO_ARRAY(N, X)

// Emits one DUMP_SUCCESS per TARGET_PARALLEL_FORn variant (nine of them) for
// the thread counts in (max_threads, gpu_threads] that are not exercised --
// presumably to keep the expected pass count identical on host and device
// runs; confirm against the harness in ../utilities.
#define DUMP_SUCCESS9() { \
  DUMP_SUCCESS(gpu_threads-max_threads); \
  DUMP_SUCCESS(gpu_threads-max_threads); \
  DUMP_SUCCESS(gpu_threads-max_threads); \
  DUMP_SUCCESS(gpu_threads-max_threads); \
  DUMP_SUCCESS(gpu_threads-max_threads); \
  DUMP_SUCCESS(gpu_threads-max_threads); \
  DUMP_SUCCESS(gpu_threads-max_threads); \
  DUMP_SUCCESS(gpu_threads-max_threads); \
  DUMP_SUCCESS(gpu_threads-max_threads); \
}

//
// FIXME:
// Add support for 'shared', 'lastprivate'
//

// Clause-coverage test for 'omp target parallel for'. Each group below
// redefines TARGET_PARALLEL_FOR_CLAUSES and re-includes tpf_defines.h, then
// runs the nine TARGET_PARALLEL_FORn(init, loop-body, check, verify) macro
// variants for every thread count up to max_threads. The macro expansions are
// defined in tpf_defines.h (not visible in this file).
int main(void) {
  check_offloading();

  double A[N], B[N], C[N], D[N], E[N];
  double S[N];   // S[0] accumulates each test's checksum
  double p[2];   // referenced only by the disabled PARALLEL_FOR tests below

  // Detect whether target regions actually offload or fall back to the host.
  int cpuExec = 0;
  #pragma omp target map(tofrom: cpuExec)
  {
    cpuExec = omp_is_initial_device();
  }
  int gpu_threads = 224;
  int cpu_threads = 32;
  int max_threads = cpuExec ? cpu_threads : gpu_threads;

  INIT();

  //
  // Test: proc_bind clause
  //
  #undef TARGET_PARALLEL_FOR_CLAUSES
  #define TARGET_PARALLEL_FOR_CLAUSES proc_bind(master)
  #include "tpf_defines.h"
  for (int t = 0; t <= max_threads; t++) {
    int threads[1]; threads[0] = t;
    TARGET_PARALLEL_FOR1(
      { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } },
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i]; \
        B[i] += D[i] + E[i]; \
      },
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR2(
      { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } },
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i]; \
        B[i] += D[i] + E[i]; \
      },
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR3(
      { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } },
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i]; \
        B[i] += D[i] + E[i]; \
      },
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR4(
      { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } },
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i]; \
        B[i] += D[i] + E[i]; \
      },
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR5(
      { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } },
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i]; \
        B[i] += D[i] + E[i]; \
      },
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR6(
      { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } },
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i]; \
        B[i] += D[i] + E[i]; \
      },
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR7(
      { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } },
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i]; \
        B[i] += D[i] + E[i]; \
      },
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR8(
      { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } },
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i]; \
        B[i] += D[i] + E[i]; \
      },
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR9(
      { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } },
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i]; \
        B[i] += D[i] + E[i]; \
      },
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
  }
  DUMP_SUCCESS9()

  #undef TARGET_PARALLEL_FOR_CLAUSES
  #define TARGET_PARALLEL_FOR_CLAUSES proc_bind(close)
  #include "tpf_defines.h"
  for (int t = 0; t <= max_threads; t++) {
    int threads[1]; threads[0] = t;
    TARGET_PARALLEL_FOR1(
      { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } },
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i]; \
        B[i] += D[i] + E[i]; \
      },
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR2(
      { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } },
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i]; \
        B[i] += D[i] + E[i]; \
      },
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR3(
      { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } },
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i]; \
        B[i] += D[i] + E[i]; \
      },
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR4(
      { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } },
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i]; \
        B[i] += D[i] + E[i]; \
      },
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR5(
      { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } },
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i]; \
        B[i] += D[i] + E[i]; \
      },
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR6(
      { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } },
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i]; \
        B[i] += D[i] + E[i]; \
      },
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR7(
      { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } },
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i]; \
        B[i] += D[i] + E[i]; \
      },
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR8(
      { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } },
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i]; \
        B[i] += D[i] + E[i]; \
      },
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR9(
      { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } },
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i]; \
        B[i] += D[i] + E[i]; \
      },
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
  }
  DUMP_SUCCESS9()

  #undef TARGET_PARALLEL_FOR_CLAUSES
  #define TARGET_PARALLEL_FOR_CLAUSES proc_bind(spread)
  #include "tpf_defines.h"
  for (int t = 0; t <= max_threads; t++) {
    int threads[1]; threads[0] = t;
    TARGET_PARALLEL_FOR1(
      { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } },
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i]; \
        B[i] += D[i] + E[i]; \
      },
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR2(
      { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } },
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i]; \
        B[i] += D[i] + E[i]; \
      },
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR3(
      { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } },
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i]; \
        B[i] += D[i] + E[i]; \
      },
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR4(
      { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } },
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i]; \
        B[i] += D[i] + E[i]; \
      },
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR5(
      { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } },
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i]; \
        B[i] += D[i] + E[i]; \
      },
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR6(
      { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } },
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i]; \
        B[i] += D[i] + E[i]; \
      },
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR7(
      { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } },
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i]; \
        B[i] += D[i] + E[i]; \
      },
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR8(
      { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } },
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i]; \
        B[i] += D[i] + E[i]; \
      },
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR9(
      { S[0] = 0; for (int i = 0; i < N; i++) { A[i] = B[i] = 0; } },
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i]; \
        B[i] += D[i] + E[i]; \
      },
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
  }
  DUMP_SUCCESS9()

  //
  // Test: private, shared clauses on omp target parallel for.
  //
  #undef TARGET_PARALLEL_FOR_CLAUSES
  #define TARGET_PARALLEL_FOR_CLAUSES private(p,q) shared(A,B,C,D,E)
  #include "tpf_defines.h"
  // FIXME: shared(a) where 'a' is an implicitly mapped scalar does not work.
  // FIXME: shared(A) private(A) does not generate correct results.
  for (int t = 0; t <= max_threads; t++) {
    int threads[1]; threads[0] = t;
    TARGET_PARALLEL_FOR1(
      double p = 2; \
      double q = 4; \
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < N; i++) { \
        p = C[i] + D[i]; \
        q = D[i] + E[i]; \
        A[i] += p; \
        B[i] += q; \
      },
      { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], 6 + N/2*(N+1)))
    TARGET_PARALLEL_FOR2(
      double p = 2; \
      double q = 4; \
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < N; i++) { \
        p = C[i] + D[i]; \
        q = D[i] + E[i]; \
        A[i] += p; \
        B[i] += q; \
      },
      { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], 6 + N/2*(N+1)))
    TARGET_PARALLEL_FOR3(
      double p = 2; \
      double q = 4; \
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < N; i++) { \
        p = C[i] + D[i]; \
        q = D[i] + E[i]; \
        A[i] += p; \
        B[i] += q; \
      },
      { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], 6 + N/2*(N+1)))
    TARGET_PARALLEL_FOR4(
      double p = 2; \
      double q = 4; \
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < N; i++) { \
        p = C[i] + D[i]; \
        q = D[i] + E[i]; \
        A[i] += p; \
        B[i] += q; \
      },
      { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], 6 + N/2*(N+1)))
    TARGET_PARALLEL_FOR5(
      double p = 2; \
      double q = 4; \
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < N; i++) { \
        p = C[i] + D[i]; \
        q = D[i] + E[i]; \
        A[i] += p; \
        B[i] += q; \
      },
      { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], 6 + N/2*(N+1)))
    TARGET_PARALLEL_FOR6(
      double p = 2; \
      double q = 4; \
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < N; i++) { \
        p = C[i] + D[i]; \
        q = D[i] + E[i]; \
        A[i] += p; \
        B[i] += q; \
      },
      { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], 6 + N/2*(N+1)))
    TARGET_PARALLEL_FOR7(
      double p = 2; \
      double q = 4; \
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < N; i++) { \
        p = C[i] + D[i]; \
        q = D[i] + E[i]; \
        A[i] += p; \
        B[i] += q; \
      },
      { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], 6 + N/2*(N+1)))
    TARGET_PARALLEL_FOR8(
      double p = 2; \
      double q = 4; \
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < N; i++) { \
        p = C[i] + D[i]; \
        q = D[i] + E[i]; \
        A[i] += p; \
        B[i] += q; \
      },
      { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], 6 + N/2*(N+1)))
    TARGET_PARALLEL_FOR9(
      double p = 2; \
      double q = 4; \
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < N; i++) { \
        p = C[i] + D[i]; \
        q = D[i] + E[i]; \
        A[i] += p; \
        B[i] += q; \
      },
      { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], 6 + N/2*(N+1)))
  }
  DUMP_SUCCESS9()

  //
  // Test: firstprivate clause on omp target parallel for.
  //
  #undef TARGET_PARALLEL_FOR_CLAUSES
  #define TARGET_PARALLEL_FOR_CLAUSES firstprivate(p,q)
  #include "tpf_defines.h"
  for (int t = 0; t <= max_threads; t++) {
    int threads[1]; threads[0] = t;
    TARGET_PARALLEL_FOR1(
      double p = -4; \
      double q = 4; \
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i] + p; \
        B[i] += D[i] + E[i] + q; \
        if (i == N-1) { \
          p += 6; \
          q += 9; \
        } \
      },
      { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR2(
      double p = -4; \
      double q = 4; \
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i] + p; \
        B[i] += D[i] + E[i] + q; \
        if (i == N-1) { \
          p += 6; \
          q += 9; \
        } \
      },
      { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR3(
      double p = -4; \
      double q = 4; \
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i] + p; \
        B[i] += D[i] + E[i] + q; \
        if (i == N-1) { \
          p += 6; \
          q += 9; \
        } \
      },
      { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR4(
      double p = -4; \
      double q = 4; \
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i] + p; \
        B[i] += D[i] + E[i] + q; \
        if (i == N-1) { \
          p += 6; \
          q += 9; \
        } \
      },
      { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR5(
      double p = -4; \
      double q = 4; \
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i] + p; \
        B[i] += D[i] + E[i] + q; \
        if (i == N-1) { \
          p += 6; \
          q += 9; \
        } \
      },
      { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR6(
      double p = -4; \
      double q = 4; \
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i] + p; \
        B[i] += D[i] + E[i] + q; \
        if (i == N-1) { \
          p += 6; \
          q += 9; \
        } \
      },
      { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR7(
      double p = -4; \
      double q = 4; \
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i] + p; \
        B[i] += D[i] + E[i] + q; \
        if (i == N-1) { \
          p += 6; \
          q += 9; \
        } \
      },
      { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR8(
      double p = -4; \
      double q = 4; \
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i] + p; \
        B[i] += D[i] + E[i] + q; \
        if (i == N-1) { \
          p += 6; \
          q += 9; \
        } \
      },
      { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR9(
      double p = -4; \
      double q = 4; \
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i] + p; \
        B[i] += D[i] + E[i] + q; \
        if (i == N-1) { \
          p += 6; \
          q += 9; \
        } \
      },
      { double tmp = p + q; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N/2*(N+1)))
  }
  DUMP_SUCCESS9()

#if 0
FIXME
  //
  // Test: lastprivate clause on omp target parallel for.
  //
  #undef TARGET_PARALLEL_FOR_CLAUSES
  #define TARGET_PARALLEL_FOR_CLAUSES lastprivate(q)
  #include "tpf_defines.h"
  // FIXME: modify to t=1 and in tpf_defines.h to use host after bug fix.
  // FIXME: variable is not private.
  for (int t = 2; t <= max_threads; t++) {
    int threads[1]; threads[0] = t;
    TARGET_PARALLEL_FOR1(
      double p[1]; \
      double q[1]; \
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < N; i++) { \
        p[0] = C[i] + D[i]; \
        q[0] = D[i] + E[i]; \
        A[i] = p[0]; \
        B[i] = q[0]; \
      },
      { double tmp = p[0] + q[0]; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], N+1+ N/2*(N+1)))
  }

FIXME: private of non-scalar does not work.
  //
  // Test: private clause on omp parallel for.
  //
  #undef PARALLEL_FOR_CLAUSES
  #define PARALLEL_FOR_CLAUSES private(p)
  #include "tpf_defines.h"
  for (int t = 0; t <= 224; t++) {
    int threads[1]; threads[0] = t;
    PARALLEL_FOR(
      p[0] = 2; p[1] = 4; \
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < N; i++) { \
        p[0] = C[i] + D[i]; \
        p[1] = D[i] + E[i]; \
        A[i] += p[0]; \
        B[i] += p[1]; \
      }
      ,
      { double tmp = p[0] + p[1]; for (int i = 0; i < N; i++) { tmp += A[i] + B[i]; } S[0] += tmp; },
      VERIFY(0, 1, S[0], 6 + SUMS * (N/2*(N+1))))
  }

FIXME: private of non-scalar does not work.
  //
  // Test: firstprivate clause on omp parallel for.
  //
  #undef PARALLEL_FOR_CLAUSES
  #define PARALLEL_FOR_CLAUSES firstprivate(p)
  #include "tpf_defines.h"
  for (int t = 0; t <= 224; t++) {
    int threads[1]; threads[0] = t;
    PARALLEL_FOR(
      p[0] = -4; p[1] = 4; \
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < N; i++) { \
        A[i] += C[i] + D[i] + p[0]; \
        B[i] += D[i] + E[i] + p[1]; \
        if (i == N-1) { \
          p[0] += 6; \
          p[1] += 9; \
        } \
      }
      ,
      { double tmp = p[0] + p[1]; for (int i = 0; i < N; i++) { tmp += A[i] + B[i]; } S[0] += tmp; },
      VERIFY(0, 1, S[0], SUMS * (N/2*(N+1))))
  }
#endif

  //
  // Test: collapse clause on omp target parallel for.
  //
  #undef TARGET_PARALLEL_FOR_CLAUSES
  #define TARGET_PARALLEL_FOR_CLAUSES collapse(2)
  #include "tpf_defines.h"
  for (int t = 0; t <= max_threads; t++) {
    int threads[1]; threads[0] = t;
    TARGET_PARALLEL_FOR1(
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < 1024; i++) { \
        for (int j = 0; j < 3; j++) { \
          A[i*3+j] += C[i*3+j] + D[i*3+j]; \
          B[i*3+j] += D[i*3+j] + E[i*3+j]; \
        } \
      }
      ,
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], (N/2*(N+1))))
    TARGET_PARALLEL_FOR2(
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < 1024; i++) { \
        for (int j = 0; j < 3; j++) { \
          A[i*3+j] += C[i*3+j] + D[i*3+j]; \
          B[i*3+j] += D[i*3+j] + E[i*3+j]; \
        } \
      }
      ,
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], (N/2*(N+1))))
    TARGET_PARALLEL_FOR3(
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < 1024; i++) { \
        for (int j = 0; j < 3; j++) { \
          A[i*3+j] += C[i*3+j] + D[i*3+j]; \
          B[i*3+j] += D[i*3+j] + E[i*3+j]; \
        } \
      }
      ,
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], (N/2*(N+1))))
    TARGET_PARALLEL_FOR4(
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < 1024; i++) { \
        for (int j = 0; j < 3; j++) { \
          A[i*3+j] += C[i*3+j] + D[i*3+j]; \
          B[i*3+j] += D[i*3+j] + E[i*3+j]; \
        } \
      }
      ,
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], (N/2*(N+1))))
    TARGET_PARALLEL_FOR5(
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < 1024; i++) { \
        for (int j = 0; j < 3; j++) { \
          A[i*3+j] += C[i*3+j] + D[i*3+j]; \
          B[i*3+j] += D[i*3+j] + E[i*3+j]; \
        } \
      }
      ,
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], (N/2*(N+1))))
    TARGET_PARALLEL_FOR6(
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < 1024; i++) { \
        for (int j = 0; j < 3; j++) { \
          A[i*3+j] += C[i*3+j] + D[i*3+j]; \
          B[i*3+j] += D[i*3+j] + E[i*3+j]; \
        } \
      }
      ,
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], (N/2*(N+1))))
    TARGET_PARALLEL_FOR7(
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < 1024; i++) { \
        for (int j = 0; j < 3; j++) { \
          A[i*3+j] += C[i*3+j] + D[i*3+j]; \
          B[i*3+j] += D[i*3+j] + E[i*3+j]; \
        } \
      }
      ,
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], (N/2*(N+1))))
    TARGET_PARALLEL_FOR8(
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < 1024; i++) { \
        for (int j = 0; j < 3; j++) { \
          A[i*3+j] += C[i*3+j] + D[i*3+j]; \
          B[i*3+j] += D[i*3+j] + E[i*3+j]; \
        } \
      }
      ,
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], (N/2*(N+1))))
    TARGET_PARALLEL_FOR9(
      S[0] = 0; \
      for (int i = 0; i < N; i++) { \
        A[i] = B[i] = 0; \
      }
      ,
      for (int i = 0; i < 1024; i++) { \
        for (int j = 0; j < 3; j++) { \
          A[i*3+j] += C[i*3+j] + D[i*3+j]; \
          B[i*3+j] += D[i*3+j] + E[i*3+j]; \
        } \
      }
      ,
      { double tmp = 0; for (int i = 0; i < N; i++) { tmp += A[i] + B[i] - 1; } S[0] += tmp; },
      VERIFY(0, 1, S[0], (N/2*(N+1))))
  }
  DUMP_SUCCESS9()

  //
  // Test: ordered clause on omp target parallel for.
  //
  #undef TARGET_PARALLEL_FOR_CLAUSES
  #define TARGET_PARALLEL_FOR_CLAUSES ordered
  #include "tpf_defines.h"
  for (int t = 0; t <= max_threads; t++) {
    int threads[1]; threads[0] = t;
    TARGET_PARALLEL_FOR1(
      S[0] = 0; \
      ,
      for (int i = 0; i < N; i++) { \
        _Pragma("omp ordered") \
        S[0] += C[i] + D[i]; \
      }
      ,
      { },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR2(
      S[0] = 0; \
      ,
      for (int i = 0; i < N; i++) { \
        _Pragma("omp ordered") \
        S[0] += C[i] + D[i]; \
      }
      ,
      { },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR3(
      S[0] = 0; \
      ,
      for (int i = 0; i < N; i++) { \
        _Pragma("omp ordered") \
        S[0] += C[i] + D[i]; \
      }
      ,
      { },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR4(
      S[0] = 0; \
      ,
      for (int i = 0; i < N; i++) { \
        _Pragma("omp ordered") \
        S[0] += C[i] + D[i]; \
      }
      ,
      { },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR5(
      S[0] = 0; \
      ,
      for (int i = 0; i < N; i++) { \
        _Pragma("omp ordered") \
        S[0] += C[i] + D[i]; \
      }
      ,
      { },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR6(
      S[0] = 0; \
      ,
      for (int i = 0; i < N; i++) { \
        _Pragma("omp ordered") \
        S[0] += C[i] + D[i]; \
      }
      ,
      { },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR7(
      S[0] = 0; \
      ,
      for (int i = 0; i < N; i++) { \
        _Pragma("omp ordered") \
        S[0] += C[i] + D[i]; \
      }
      ,
      { },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR8(
      S[0] = 0; \
      ,
      for (int i = 0; i < N; i++) { \
        _Pragma("omp ordered") \
        S[0] += C[i] + D[i]; \
      }
      ,
      { },
      VERIFY(0, 1, S[0], N/2*(N+1)))
    TARGET_PARALLEL_FOR9(
      S[0] = 0; \
      ,
      for (int i = 0; i < N; i++) { \
        _Pragma("omp ordered") \
        S[0] += C[i] + D[i]; \
      }
      ,
      { },
      VERIFY(0, 1, S[0], N/2*(N+1)))
  }
  DUMP_SUCCESS9()

  //
  // Test: Ensure coalesced scheduling on GPU.
  //
  // Only meaningful when actually offloaded; with 32 threads over 96
  // iterations, A[i] records the distance between iteration index and the
  // executing thread's id, giving the closed-form sum checked by VERIFY.
  if (cpuExec == 0) {
    #undef TARGET_PARALLEL_FOR_CLAUSES
    #define TARGET_PARALLEL_FOR_CLAUSES
    #include "tpf_defines.h"
    int threads[1]; threads[0] = 32;
    TARGET_PARALLEL_FOR1(
      S[0] = 0; \
      for (int i = 0; i < 96; i++) { \
        A[i] = 0; \
      } \
      ,
      for (int i = 0; i < 96; i++) { \
        A[i] += i - omp_get_thread_num(); \
      }
      ,
      { double tmp = 0; for (int i = 0; i < 96; i++) { tmp += A[i]; } S[0] = tmp; },
      VERIFY(0, 1, S[0], (32*32 + 64*32) ))
    TARGET_PARALLEL_FOR2(
      S[0] = 0; \
      for (int i = 0; i < 96; i++) { \
        A[i] = 0; \
      } \
      ,
      for (int i = 0; i < 96; i++) { \
        A[i] += i - omp_get_thread_num(); \
      }
      ,
      { double tmp = 0; for (int i = 0; i < 96; i++) { tmp += A[i]; } S[0] = tmp; },
      VERIFY(0, 1, S[0], (32*32 + 64*32) ))
    TARGET_PARALLEL_FOR7(
      S[0] = 0; \
      for (int i = 0; i < 96; i++) { \
        A[i] = 0; \
      } \
      ,
      for (int i = 0; i < 96; i++) { \
        A[i] += i - omp_get_thread_num(); \
      }
      ,
      { double tmp = 0; for (int i = 0; i < 96; i++) { tmp += A[i]; } S[0] = tmp; },
      VERIFY(0, 1, S[0], (32*32 + 64*32) ))
  } else {
    DUMP_SUCCESS(3);
  }

  return 0;
}
DRB013-nowait-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
This example is extracted from a paper:
Ma etc. Symbolic Analysis of Concurrency Errors in OpenMP Programs, ICPP 2013

Some threads may finish the for loop early and execute error = a[9]+1
while another thread may still be simultaneously executing the for worksharing
region by writing to a[9], causing data races.
Data race pair: a[i]@72:7 vs. a[9]@75:13.
*/
#include <stdio.h>

/* Intentionally racy kernel (DataRaceBench "-yes" case): the race below is
 * the point of this benchmark, so it must NOT be "fixed". */
int main()
{
  int i,error;            /* 'error' is read after the parallel region */
  int len = 1000;
  int a[len], b=5;        /* VLA; len is a positive compile-time-known value */

  /* Sequential initialization: a[i] = i. */
  for (i=0; i<len; i++)
    a[i]= i;

#pragma omp parallel shared(b, error)
  {
    /* 'nowait' removes the implicit barrier at the end of the worksharing
       loop, so one thread can reach the 'single' construct below while other
       threads are still writing elements of a[]. */
#pragma omp for nowait
    for(i = 0; i < len; i++)
      a[i] = b + a[i]*5;

    /* Reads a[9] without synchronization against the writes above -> race. */
#pragma omp single
    error = a[9] + 1;
  }
  printf ("error = %d\n", error);
  return 0;
}
SoaDistanceTableAB.h
//////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//                    Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp.
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
#ifndef QMCPLUSPLUS_DTDIMPL_AB_H
#define QMCPLUSPLUS_DTDIMPL_AB_H

#include "Utilities/FairDivide.h"
#include "Message/OpenMP.h"

namespace qmcplusplus
{
/**@ingroup nnlist
 * @brief A derived classe from DistacneTableData, specialized for AB using a transposed form
 *
 * Stores target(A)-to-source(B) pair quantities.  From the accesses below,
 * distances_ / displacements_ are indexed as [target][source] (N_targets rows,
 * each padded to an aligned multiple of N_sources entries); temp_r_ / temp_dr_
 * hold the pair data for a single proposed target position.
 */
template<typename T, unsigned D, int SC>
struct SoaDistanceTableAB : public DTD_BConds<T, D, SC>, public DistanceTableData
{
  /** Construct the AB table between a fixed @c source set and a movable
   *  @c target set.  Registers three fine-grained timers (evaluate/move/update)
   *  named after both particle sets, then allocates all storage via resize().
   */
  SoaDistanceTableAB(const ParticleSet& source, ParticleSet& target)
      : DTD_BConds<T, D, SC>(source.Lattice),
        DistanceTableData(source, target),
        evaluate_timer_(*timer_manager.createTimer(std::string("SoaDistanceTableAB::evaluate_") + target.getName() +
                                                       "_" + source.getName(),
                                                   timer_level_fine)),
        move_timer_(*timer_manager.createTimer(std::string("SoaDistanceTableAB::move_") + target.getName() + "_" +
                                                   source.getName(),
                                               timer_level_fine)),
        update_timer_(*timer_manager.createTimer(std::string("SoaDistanceTableAB::update_") + target.getName() + "_" +
                                                     source.getName(),
                                                 timer_level_fine))
  {
    resize();
  }

  /// (Re)allocate per-target rows; a no-op when either particle set is empty.
  void resize()
  {
    if (N_sources * N_targets == 0)
      return;

    // initialize memory containers and views
    // each row is padded to the SIMD-aligned size for type T
    const int Nsources_padded = getAlignedSize<T>(N_sources);
    distances_.resize(N_targets);
    displacements_.resize(N_targets);
    for (int i = 0; i < N_targets; ++i)
    {
      distances_[i].resize(Nsources_padded);
      displacements_[i].resize(Nsources_padded);
    }

    // The padding of temp_r_ and temp_dr_ is necessary for the memory copy in the update function
    // temp_r_ is padded explicitly while temp_dr_ is padded internally
    temp_r_.resize(Nsources_padded);
    temp_dr_.resize(N_sources);
  }

  SoaDistanceTableAB() = delete;
  SoaDistanceTableAB(const SoaDistanceTableAB&) = delete;

  /** evaluate the full table
   *  Each OpenMP thread handles an aligned [first,last) slice of the sources
   *  for every target row, so threads never write the same row segment.
   */
  inline void evaluate(ParticleSet& P) override
  {
    ScopedTimer local_timer(evaluate_timer_);
#pragma omp parallel
    {
      int first, last;
      FairDivideAligned(N_sources, getAlignment<T>(), omp_get_num_threads(), omp_get_thread_num(), first, last);

      //be aware of the sign of Displacement
      for (int iat = 0; iat < N_targets; ++iat)
        DTD_BConds<T, D, SC>::computeDistances(P.R[iat], Origin->getCoordinates().getAllParticlePos(),
                                               distances_[iat].data(), displacements_[iat], first, last);
    }
  }

  ///evaluate the temporary pair relations
  /// Fills temp_r_/temp_dr_ for the proposed position rnew of target iat.
  inline void move(const ParticleSet& P, const PosType& rnew, const IndexType iat, bool prepare_old) override
  {
    ScopedTimer local_timer(move_timer_);
    DTD_BConds<T, D, SC>::computeDistances(rnew, Origin->getCoordinates().getAllParticlePos(), temp_r_.data(),
                                           temp_dr_, 0, N_sources);
    // If the full table is not ready all the time, overwrite the current value.
    // If this step is missing, DT values can be undefined in case a move is rejected.
    if (!need_full_table_ && prepare_old)
      DTD_BConds<T, D, SC>::computeDistances(P.R[iat], Origin->getCoordinates().getAllParticlePos(),
                                             distances_[iat].data(), displacements_[iat], 0, N_sources);
  }

  ///update the stripe for jat-th particle
  /// Commits the temporaries from move() into row iat (accepted move).
  inline void update(IndexType iat) override
  {
    ScopedTimer local_timer(update_timer_);
    std::copy_n(temp_r_.data(), N_sources, distances_[iat].data());
    for (int idim = 0; idim < D; ++idim)
      std::copy_n(temp_dr_.data(idim), N_sources, displacements_[iat].data(idim));
  }

  /** Collect all targets within rcut of source iat into the compact lists
   *  jid/dist/displ; returns the neighbor count.  Displacements are negated
   *  (cminus) because the stored sign convention is target-relative.
   *  NOTE(review): here iat indexes a SOURCE (column), unlike
   *  get_first_neighbor below where iat indexes a target row -- confirm
   *  against callers before changing either.
   */
  size_t get_neighbors(int iat, RealType rcut, int* restrict jid, RealType* restrict dist, PosType* restrict displ) const override
  {
    constexpr T cminus(-1);
    size_t nn = 0;
    for (int jat = 0; jat < N_targets; ++jat)
    {
      const RealType rij = distances_[jat][iat];
      if (rij < rcut)
      { //make the compact list
        jid[nn]   = jat;
        dist[nn]  = rij;
        displ[nn] = cminus * displacements_[jat][iat];
        nn++;
      }
    }
    return nn;
  }

  /** Find the closest source to target iat.
   *  @param newpos when true, scan the proposed-position temporaries
   *         (temp_r_/temp_dr_); otherwise scan the committed row iat.
   *  @return index of the nearest source, or -1 when no source exists;
   *          r/dr are written only when a neighbor was found.
   */
  int get_first_neighbor(IndexType iat, RealType& r, PosType& dr, bool newpos) const override
  {
    RealType min_dist = std::numeric_limits<RealType>::max();
    int index         = -1;
    if (newpos)
    {
      for (int jat = 0; jat < N_sources; ++jat)
        if (temp_r_[jat] < min_dist)
        {
          min_dist = temp_r_[jat];
          index    = jat;
        }
      if (index >= 0)
      {
        r  = min_dist;
        dr = temp_dr_[index];
      }
    }
    else
    {
      for (int jat = 0; jat < N_sources; ++jat)
        if (distances_[iat][jat] < min_dist)
        {
          min_dist = distances_[iat][jat];
          index    = jat;
        }
      if (index >= 0)
      {
        r  = min_dist;
        dr = displacements_[iat][index];
      }
    }
    return index;
  }

  /** Distance-only variant of get_neighbors(): fills dist with the distances
   *  of all targets within rcut of source iat and returns the count.
   */
  size_t get_neighbors(int iat, RealType rcut, RealType* restrict dist) const
  {
    size_t nn = 0;
    for (int jat = 0; jat < N_targets; ++jat)
    {
      const RealType rij = distances_[jat][iat];
      if (rij < rcut)
      { //make the compact list
        dist[nn] = rij;
        nn++;
      }
    }
    return nn;
  }

private:
  /// timer for evaluate()
  NewTimer& evaluate_timer_;
  /// timer for move()
  NewTimer& move_timer_;
  /// timer for update()
  NewTimer& update_timer_;
};
} // namespace qmcplusplus
#endif
DRB037-truedepseconddimension-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
Only the outmost loop can be parallelized in this program.
The inner loop has true dependence.
Data race pair: b[i][j]@63:7 vs. b[i][j-1]@63:15
*/
#include <stdlib.h>
#include <stdio.h>

/* 1000x1000 matrix in static storage, mapped to/from the target device */
double b[1000][1000];

/* DataRaceBench DRB037 (orig-yes): initialize b, then run a second nest
 * whose inner j-loop carries a true dependence (b[i][j] reads b[i][j-1]).
 * The header above declares b[i][j] vs b[i][j-1] as the race pair; this
 * race is the benchmark's intentional payload and must NOT be "fixed".
 * NOTE(review): in this variant the pragma parallelizes the OUTER i-loop
 * with j private, which would serialize each row's j-dependence -- confirm
 * the pragma placement matches the intended "-yes" benchmark variant. */
int main(int argc, char* argv[])
{
  int i, j;
  int n = 1000, m = 1000;

  /* offload the initialization nest: b[i][j] = i*m + j */
#pragma omp target data map(tofrom:b[0:1000][0:1000])
#pragma omp target parallel for private(j)
  for (i = 0; i < n; i++)
    for (j = 1; j < m; j++)
      b[i][j] = i * m + j;

  /* second nest: inner loop has a true (loop-carried) dependence on j */
#pragma omp target data map(tofrom:b[0:1000][0:1000])
#pragma omp target parallel for private(j)
  for (i = 0; i < n; i++)
    for (j = 1; j < m; j++)
      b[i][j] = b[i][j-1];

  /* sequential dump of the result on the host */
  for (i = 0; i < n; i++)
    for (j = 1; j < m; j++)
      printf("%lf\n", b[i][j]);

  return 0;
}
rwpng.c
/* ** PNG read/write functions ** ** © 1998-2000 by Greg Roelofs. ** © 2009-2017 by Kornel Lesiński. ** ** See COPYRIGHT file for license. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <limits.h> #include "png.h" /* if this include fails, you need to install libpng (e.g. libpng-devel package) and run ./configure */ #include "rwpng.h" #if USE_LCMS #include "lcms2.h" #endif #ifndef Z_BEST_COMPRESSION #define Z_BEST_COMPRESSION 9 #endif #ifndef Z_BEST_SPEED #define Z_BEST_SPEED 1 #endif #ifdef _OPENMP #include <omp.h> #else #define omp_get_max_threads() 1 #endif #if PNG_LIBPNG_VER < 10400 #error libpng version 1.4 or later is required. 1.6 is recommended. You have an obsolete version of libpng or compiling on an outdated/unsupported operating system. Please upgrade. #endif #if PNG_LIBPNG_VER < 10500 typedef png_const_charp png_const_bytep; #endif static void rwpng_error_handler(png_structp png_ptr, png_const_charp msg); pngquant_error rwpng_read_image32_cocoa(FILE *infile, uint32_t *width, uint32_t *height, size_t *file_size, rwpng_rgba **image_data); void rwpng_version_info(FILE *fp) { const char *pngver = png_get_header_ver(NULL); #if USE_COCOA fprintf(fp, " Color profiles are supported via Cocoa. Using libpng %s.\n", pngver); #elif USE_LCMS fprintf(fp, " Color profiles are supported via Little CMS. Using libpng %s.\n", pngver); #else fprintf(fp, " Compiled with no support for color profiles. 
Using libpng %s.\n", pngver); #endif #if PNG_LIBPNG_VER < 10600 if (strcmp(pngver, "1.3.") < 0) { fputs("\nWARNING: Your version of libpng is outdated and may produce corrupted files.\n" "Please recompile pngquant with the current version of libpng (1.6 or later).\n", fp); } else if (strcmp(pngver, "1.6.") < 0) { #if defined(PNG_UNKNOWN_CHUNKS_SUPPORTED) fputs("\nWARNING: Your version of libpng is old and has buggy support for custom chunks.\n" "Please recompile pngquant with the current version of libpng (1.6 or later).\n", fp); #endif } #endif } struct rwpng_read_data { FILE *const fp; png_size_t bytes_read; }; #if !USE_COCOA static void user_read_data(png_structp png_ptr, png_bytep data, png_size_t length) { struct rwpng_read_data *read_data = (struct rwpng_read_data *)png_get_io_ptr(png_ptr); png_size_t read = fread(data, 1, length, read_data->fp); if (!read) { png_error(png_ptr, "Read error"); } read_data->bytes_read += read; } #endif struct rwpng_write_state { FILE *outfile; png_size_t maximum_file_size; png_size_t bytes_written; pngquant_error retval; }; static void user_write_data(png_structp png_ptr, png_bytep data, png_size_t length) { struct rwpng_write_state *write_state = (struct rwpng_write_state *)png_get_io_ptr(png_ptr); if (SUCCESS != write_state->retval) { return; } if (!fwrite(data, length, 1, write_state->outfile)) { write_state->retval = CANT_WRITE_ERROR; } write_state->bytes_written += length; } static void user_flush_data(png_structp png_ptr) { // libpng never calls this :( } static png_bytepp rwpng_create_row_pointers(png_infop info_ptr, png_structp png_ptr, unsigned char *base, size_t height, png_size_t rowbytes) { if (!rowbytes) { rowbytes = png_get_rowbytes(png_ptr, info_ptr); } png_bytepp row_pointers = malloc(height * sizeof(row_pointers[0])); if (!row_pointers) return NULL; for(size_t row = 0; row < height; row++) { row_pointers[row] = base + row * rowbytes; } return row_pointers; } #if !USE_COCOA static int 
read_chunk_callback(png_structp png_ptr, png_unknown_chunkp in_chunk) { if (0 == memcmp("iCCP", in_chunk->name, 5) || 0 == memcmp("cHRM", in_chunk->name, 5) || 0 == memcmp("gAMA", in_chunk->name, 5)) { return 0; // not handled } if (in_chunk->location == 0 ) { return 1; // ignore chunks with invalid location } struct rwpng_chunk **head = (struct rwpng_chunk **)png_get_user_chunk_ptr(png_ptr); struct rwpng_chunk *chunk = malloc(sizeof(struct rwpng_chunk)); memcpy(chunk->name, in_chunk->name, 5); chunk->size = in_chunk->size; chunk->location = in_chunk->location; chunk->data = in_chunk->size ? malloc(in_chunk->size) : NULL; if (in_chunk->size) { memcpy(chunk->data, in_chunk->data, in_chunk->size); } chunk->next = *head; *head = chunk; return 1; // marks as "handled", libpng won't store it } #endif /* retval: 0 = success 21 = bad sig 22 = bad IHDR 24 = insufficient memory 25 = libpng error (via longjmp()) 26 = wrong PNG color type (no alpha channel) */ #if !USE_COCOA static void rwpng_warning_stderr_handler(png_structp png_ptr, png_const_charp msg) { fprintf(stderr, " libpng warning: %s\n", msg); } static void rwpng_warning_silent_handler(png_structp png_ptr, png_const_charp msg) { } static pngquant_error rwpng_read_image24_libpng(FILE *infile, png24_image *mainprog_ptr, int strip, int verbose) { png_structp png_ptr = NULL; png_infop info_ptr = NULL; png_size_t rowbytes; int color_type, bit_depth; png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, mainprog_ptr, rwpng_error_handler, verbose ? 
rwpng_warning_stderr_handler : rwpng_warning_silent_handler); if (!png_ptr) { return PNG_OUT_OF_MEMORY_ERROR; /* out of memory */ } info_ptr = png_create_info_struct(png_ptr); if (!info_ptr) { png_destroy_read_struct(&png_ptr, NULL, NULL); return PNG_OUT_OF_MEMORY_ERROR; /* out of memory */ } /* setjmp() must be called in every function that calls a non-trivial * libpng function */ if (setjmp(mainprog_ptr->jmpbuf)) { png_destroy_read_struct(&png_ptr, &info_ptr, NULL); return LIBPNG_FATAL_ERROR; /* fatal libpng error (via longjmp()) */ } #if defined(PNG_SKIP_sRGB_CHECK_PROFILE) && defined(PNG_SET_OPTION_SUPPORTED) png_set_option(png_ptr, PNG_SKIP_sRGB_CHECK_PROFILE, PNG_OPTION_ON); #endif #if PNG_LIBPNG_VER >= 10500 && defined(PNG_UNKNOWN_CHUNKS_SUPPORTED) if (!strip) { /* copy standard chunks too */ png_set_keep_unknown_chunks(png_ptr, PNG_HANDLE_CHUNK_IF_SAFE, (png_const_bytep)"pHYs\0iTXt\0tEXt\0zTXt", 4); } #endif if (!strip) { png_set_read_user_chunk_fn(png_ptr, &mainprog_ptr->chunks, read_chunk_callback); } struct rwpng_read_data read_data = {infile, 0}; png_set_read_fn(png_ptr, &read_data, user_read_data); png_read_info(png_ptr, info_ptr); /* read all PNG info up to image data */ /* alternatively, could make separate calls to png_get_image_width(), * etc., but want bit_depth and color_type for later [don't care about * compression_type and filter_type => NULLs] */ png_get_IHDR(png_ptr, info_ptr, &mainprog_ptr->width, &mainprog_ptr->height, &bit_depth, &color_type, NULL, NULL, NULL); /* expand palette images to RGB, low-bit-depth grayscale images to 8 bits, * transparency chunks to full alpha channel; strip 16-bit-per-sample * images to 8 bits per sample; and convert grayscale to RGB[A] */ /* GRR TO DO: preserve all safe-to-copy ancillary PNG chunks */ if (!(color_type & PNG_COLOR_MASK_ALPHA)) { #ifdef PNG_READ_FILLER_SUPPORTED png_set_expand(png_ptr); png_set_filler(png_ptr, 65535L, PNG_FILLER_AFTER); #else fprintf(stderr, "pngquant readpng: image is neither 
RGBA nor GA\n"); png_destroy_read_struct(&png_ptr, &info_ptr, NULL); mainprog_ptr->retval = WRONG_INPUT_COLOR_TYPE; return mainprog_ptr->retval; #endif } if (bit_depth == 16) { png_set_strip_16(png_ptr); } if (!(color_type & PNG_COLOR_MASK_COLOR)) { png_set_gray_to_rgb(png_ptr); } /* get source gamma for gamma correction, or use sRGB default */ double gamma = 0.45455; if (png_get_valid(png_ptr, info_ptr, PNG_INFO_sRGB)) { mainprog_ptr->input_color = RWPNG_SRGB; mainprog_ptr->output_color = RWPNG_SRGB; } else { png_get_gAMA(png_ptr, info_ptr, &gamma); if (gamma > 0 && gamma <= 1.0) { mainprog_ptr->input_color = RWPNG_GAMA_ONLY; mainprog_ptr->output_color = RWPNG_GAMA_ONLY; } else { fprintf(stderr, "pngquant readpng: ignored out-of-range gamma %f\n", gamma); mainprog_ptr->input_color = RWPNG_NONE; mainprog_ptr->output_color = RWPNG_NONE; gamma = 0.45455; } } mainprog_ptr->gamma = gamma; png_set_interlace_handling(png_ptr); /* all transformations have been registered; now update info_ptr data, * get rowbytes and channels, and allocate image memory */ png_read_update_info(png_ptr, info_ptr); rowbytes = png_get_rowbytes(png_ptr, info_ptr); // For overflow safety reject images that won't fit in 32-bit if (rowbytes > INT_MAX/mainprog_ptr->height) { png_destroy_read_struct(&png_ptr, &info_ptr, NULL); return PNG_OUT_OF_MEMORY_ERROR; } if ((mainprog_ptr->rgba_data = malloc(rowbytes * mainprog_ptr->height)) == NULL) { fprintf(stderr, "pngquant readpng: unable to allocate image data\n"); png_destroy_read_struct(&png_ptr, &info_ptr, NULL); return PNG_OUT_OF_MEMORY_ERROR; } png_bytepp row_pointers = rwpng_create_row_pointers(info_ptr, png_ptr, mainprog_ptr->rgba_data, mainprog_ptr->height, 0); /* now we can go ahead and just read the whole image */ png_read_image(png_ptr, row_pointers); /* and we're done! (png_read_end() can be omitted if no processing of * post-IDAT text/time/etc. 
is desired) */ png_read_end(png_ptr, NULL); #if USE_LCMS #if PNG_LIBPNG_VER < 10500 png_charp ProfileData; #else png_bytep ProfileData; #endif png_uint_32 ProfileLen; cmsHPROFILE hInProfile = NULL; /* color_type is read from the image before conversion to RGBA */ int COLOR_PNG = color_type & PNG_COLOR_MASK_COLOR; /* embedded ICC profile */ if (png_get_iCCP(png_ptr, info_ptr, &(png_charp){0}, &(int){0}, &ProfileData, &ProfileLen)) { hInProfile = cmsOpenProfileFromMem(ProfileData, ProfileLen); cmsColorSpaceSignature colorspace = cmsGetColorSpace(hInProfile); /* only RGB (and GRAY) valid for PNGs */ if (colorspace == cmsSigRgbData && COLOR_PNG) { mainprog_ptr->input_color = RWPNG_ICCP; mainprog_ptr->output_color = RWPNG_SRGB; } else { if (colorspace == cmsSigGrayData && !COLOR_PNG) { mainprog_ptr->input_color = RWPNG_ICCP_WARN_GRAY; mainprog_ptr->output_color = RWPNG_SRGB; } cmsCloseProfile(hInProfile); hInProfile = NULL; } } /* build RGB profile from cHRM and gAMA */ if (hInProfile == NULL && COLOR_PNG && !png_get_valid(png_ptr, info_ptr, PNG_INFO_sRGB) && png_get_valid(png_ptr, info_ptr, PNG_INFO_gAMA) && png_get_valid(png_ptr, info_ptr, PNG_INFO_cHRM)) { cmsCIExyY WhitePoint; cmsCIExyYTRIPLE Primaries; png_get_cHRM(png_ptr, info_ptr, &WhitePoint.x, &WhitePoint.y, &Primaries.Red.x, &Primaries.Red.y, &Primaries.Green.x, &Primaries.Green.y, &Primaries.Blue.x, &Primaries.Blue.y); WhitePoint.Y = Primaries.Red.Y = Primaries.Green.Y = Primaries.Blue.Y = 1.0; cmsToneCurve *GammaTable[3]; GammaTable[0] = GammaTable[1] = GammaTable[2] = cmsBuildGamma(NULL, 1/gamma); hInProfile = cmsCreateRGBProfile(&WhitePoint, &Primaries, GammaTable); cmsFreeToneCurve(GammaTable[0]); mainprog_ptr->input_color = RWPNG_GAMA_CHRM; mainprog_ptr->output_color = RWPNG_SRGB; } /* transform image to sRGB colorspace */ if (hInProfile != NULL) { cmsHPROFILE hOutProfile = cmsCreate_sRGBProfile(); cmsHTRANSFORM hTransform = cmsCreateTransform(hInProfile, TYPE_RGBA_8, hOutProfile, TYPE_RGBA_8, 
INTENT_PERCEPTUAL, omp_get_max_threads() > 1 ? cmsFLAGS_NOCACHE : 0); #pragma omp parallel for \ if (mainprog_ptr->height*mainprog_ptr->width > 8000) \ schedule(static) for (unsigned int i = 0; i < mainprog_ptr->height; i++) { /* It is safe to use the same block for input and output, when both are of the same TYPE. */ cmsDoTransform(hTransform, row_pointers[i], row_pointers[i], mainprog_ptr->width); } cmsDeleteTransform(hTransform); cmsCloseProfile(hOutProfile); cmsCloseProfile(hInProfile); mainprog_ptr->gamma = 0.45455; } #endif png_destroy_read_struct(&png_ptr, &info_ptr, NULL); mainprog_ptr->file_size = read_data.bytes_read; mainprog_ptr->row_pointers = (unsigned char **)row_pointers; return SUCCESS; } #endif static void rwpng_free_chunks(struct rwpng_chunk *chunk) { if (!chunk) return; rwpng_free_chunks(chunk->next); free(chunk->data); free(chunk); } void rwpng_free_image24(png24_image *image) { free(image->row_pointers); image->row_pointers = NULL; free(image->rgba_data); image->rgba_data = NULL; rwpng_free_chunks(image->chunks); image->chunks = NULL; } void rwpng_free_image8(png8_image *image) { free(image->indexed_data); image->indexed_data = NULL; free(image->row_pointers); image->row_pointers = NULL; rwpng_free_chunks(image->chunks); image->chunks = NULL; } pngquant_error rwpng_read_image24(FILE *infile, png24_image *out, int strip, int verbose) { #if USE_COCOA rwpng_rgba *pixel_data; pngquant_error res = rwpng_read_image32_cocoa(infile, &out->width, &out->height, &out->file_size, &pixel_data); if (res != SUCCESS) { return res; } out->gamma = 0.45455; out->input_color = RWPNG_COCOA; out->output_color = RWPNG_SRGB; out->rgba_data = (unsigned char *)pixel_data; out->row_pointers = malloc(sizeof(out->row_pointers[0])*out->height); for(int i=0; i < out->height; i++) { out->row_pointers[i] = (unsigned char *)&pixel_data[out->width*i]; } return SUCCESS; #else return rwpng_read_image24_libpng(infile, out, strip, verbose); #endif } static pngquant_error 
rwpng_write_image_init(rwpng_png_image *mainprog_ptr, png_structpp png_ptr_p, png_infopp info_ptr_p, int fast_compression) { /* could also replace libpng warning-handler (final NULL), but no need: */ *png_ptr_p = png_create_write_struct(PNG_LIBPNG_VER_STRING, mainprog_ptr, rwpng_error_handler, NULL); if (!(*png_ptr_p)) { return LIBPNG_INIT_ERROR; /* out of memory */ } *info_ptr_p = png_create_info_struct(*png_ptr_p); if (!(*info_ptr_p)) { png_destroy_write_struct(png_ptr_p, NULL); return LIBPNG_INIT_ERROR; /* out of memory */ } /* setjmp() must be called in every function that calls a PNG-writing * libpng function, unless an alternate error handler was installed-- * but compatible error handlers must either use longjmp() themselves * (as in this program) or exit immediately, so here we go: */ if (setjmp(mainprog_ptr->jmpbuf)) { png_destroy_write_struct(png_ptr_p, info_ptr_p); return LIBPNG_INIT_ERROR; /* libpng error (via longjmp()) */ } png_set_compression_level(*png_ptr_p, fast_compression ? Z_BEST_SPEED : Z_BEST_COMPRESSION); png_set_compression_mem_level(*png_ptr_p, fast_compression ? 
9 : 5); // judging by optipng results, smaller mem makes libpng compress slightly better return SUCCESS; } static void rwpng_write_end(png_infopp info_ptr_p, png_structpp png_ptr_p, png_bytepp row_pointers) { png_write_info(*png_ptr_p, *info_ptr_p); png_set_packing(*png_ptr_p); png_write_image(*png_ptr_p, row_pointers); png_write_end(*png_ptr_p, NULL); png_destroy_write_struct(png_ptr_p, info_ptr_p); } static void rwpng_set_gamma(png_infop info_ptr, png_structp png_ptr, double gamma, rwpng_color_transform color) { if (color != RWPNG_GAMA_ONLY && color != RWPNG_NONE) { png_set_gAMA(png_ptr, info_ptr, gamma); } if (color == RWPNG_SRGB) { png_set_sRGB(png_ptr, info_ptr, 0); // 0 = Perceptual } } pngquant_error rwpng_write_image8(FILE *outfile, png8_image *mainprog_ptr) { png_structp png_ptr; png_infop info_ptr; if (mainprog_ptr->num_palette > 256) return INVALID_ARGUMENT; pngquant_error retval = rwpng_write_image_init((rwpng_png_image*)mainprog_ptr, &png_ptr, &info_ptr, mainprog_ptr->fast_compression); if (retval) return retval; struct rwpng_write_state write_state; write_state = (struct rwpng_write_state){ .outfile = outfile, .maximum_file_size = mainprog_ptr->maximum_file_size, .retval = SUCCESS, }; png_set_write_fn(png_ptr, &write_state, user_write_data, user_flush_data); // Palette images generally don't gain anything from filtering png_set_filter(png_ptr, PNG_FILTER_TYPE_BASE, PNG_FILTER_VALUE_NONE); rwpng_set_gamma(info_ptr, png_ptr, mainprog_ptr->gamma, mainprog_ptr->output_color); /* set the image parameters appropriately */ int sample_depth; #if PNG_LIBPNG_VER > 10400 /* old libpng corrupts files with low depth */ if (mainprog_ptr->num_palette <= 2) sample_depth = 1; else if (mainprog_ptr->num_palette <= 4) sample_depth = 2; else if (mainprog_ptr->num_palette <= 16) sample_depth = 4; else #endif sample_depth = 8; struct rwpng_chunk *chunk = mainprog_ptr->chunks; mainprog_ptr->metadata_size = 0; int chunk_num=0; while(chunk) { png_unknown_chunk pngchunk = { 
.size = chunk->size, .data = chunk->data, .location = chunk->location, }; memcpy(pngchunk.name, chunk->name, 5); png_set_unknown_chunks(png_ptr, info_ptr, &pngchunk, 1); #if defined(PNG_HAVE_IHDR) && PNG_LIBPNG_VER < 10600 png_set_unknown_chunk_location(png_ptr, info_ptr, chunk_num, pngchunk.location ? pngchunk.location : PNG_HAVE_IHDR); #endif mainprog_ptr->metadata_size += chunk->size + 12; chunk = chunk->next; chunk_num++; } png_set_IHDR(png_ptr, info_ptr, mainprog_ptr->width, mainprog_ptr->height, sample_depth, PNG_COLOR_TYPE_PALETTE, 0, PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_BASE); png_color palette[256]; png_byte trans[256]; unsigned int num_trans = 0; for(unsigned int i = 0; i < mainprog_ptr->num_palette; i++) { palette[i] = (png_color){ .red = mainprog_ptr->palette[i].r, .green = mainprog_ptr->palette[i].g, .blue = mainprog_ptr->palette[i].b, }; trans[i] = mainprog_ptr->palette[i].a; if (mainprog_ptr->palette[i].a < 255) { num_trans = i+1; } } png_set_PLTE(png_ptr, info_ptr, palette, mainprog_ptr->num_palette); if (num_trans > 0) { png_set_tRNS(png_ptr, info_ptr, trans, num_trans, NULL); } rwpng_write_end(&info_ptr, &png_ptr, mainprog_ptr->row_pointers); if (SUCCESS == write_state.retval && write_state.maximum_file_size && write_state.bytes_written > write_state.maximum_file_size) { return TOO_LARGE_FILE; } return write_state.retval; } pngquant_error rwpng_write_image24(FILE *outfile, const png24_image *mainprog_ptr) { png_structp png_ptr; png_infop info_ptr; pngquant_error retval = rwpng_write_image_init((rwpng_png_image*)mainprog_ptr, &png_ptr, &info_ptr, 0); if (retval) return retval; png_init_io(png_ptr, outfile); rwpng_set_gamma(info_ptr, png_ptr, mainprog_ptr->gamma, mainprog_ptr->output_color); png_set_IHDR(png_ptr, info_ptr, mainprog_ptr->width, mainprog_ptr->height, 8, PNG_COLOR_TYPE_RGB_ALPHA, 0, PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_BASE); png_bytepp row_pointers = rwpng_create_row_pointers(info_ptr, png_ptr, 
mainprog_ptr->rgba_data, mainprog_ptr->height, 0); rwpng_write_end(&info_ptr, &png_ptr, row_pointers); free(row_pointers); return SUCCESS; } static void rwpng_error_handler(png_structp png_ptr, png_const_charp msg) { rwpng_png_image *mainprog_ptr; /* This function, aside from the extra step of retrieving the "error * pointer" (below) and the fact that it exists within the application * rather than within libpng, is essentially identical to libpng's * default error handler. The second point is critical: since both * setjmp() and longjmp() are called from the same code, they are * guaranteed to have compatible notions of how big a jmp_buf is, * regardless of whether _BSD_SOURCE or anything else has (or has not) * been defined. */ fprintf(stderr, " error: %s (libpng failed)\n", msg); fflush(stderr); mainprog_ptr = png_get_error_ptr(png_ptr); if (mainprog_ptr == NULL) abort(); longjmp(mainprog_ptr->jmpbuf, 1); }
segment.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS EEEEE GGGG M M EEEEE N N TTTTT % % SS E G MM MM E NN N T % % SSS EEE G GGG M M M EEE N N N T % % SS E G G M M E N NN T % % SSSSS EEEEE GGGG M M EEEEE N N T % % % % % % MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means % % % % Software Design % % John Cristy % % April 1993 % % % % % % Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Segment segments an image by analyzing the histograms of the color % components and identifying units that are homogeneous with the fuzzy % c-means technique. The scale-space filter analyzes the histograms of % the three color components of the image and identifies a set of % classes. The extents of each class is used to coarsely segment the % image with thresholding. The color associated with each class is % determined by the mean color of all pixels within the extents of a % particular class. Finally, any unclassified pixels are assigned to % the closest class with the fuzzy c-means technique. % % The fuzzy c-Means algorithm can be summarized as follows: % % o Build a histogram, one for each color component of the image. 
% % o For each histogram, successively apply the scale-space filter and % build an interval tree of zero crossings in the second derivative % at each scale. Analyze this scale-space ``fingerprint'' to % determine which peaks and valleys in the histogram are most % predominant. % % o The fingerprint defines intervals on the axis of the histogram. % Each interval contains either a minima or a maxima in the original % signal. If each color component lies within the maxima interval, % that pixel is considered ``classified'' and is assigned an unique % class number. % % o Any pixel that fails to be classified in the above thresholding % pass is classified using the fuzzy c-Means technique. It is % assigned to one of the classes discovered in the histogram analysis % phase. % % The fuzzy c-Means technique attempts to cluster a pixel by finding % the local minima of the generalized within group sum of squared error % objective function. A pixel is assigned to the closest class of % which the fuzzy membership has a maximum value. % % Segment is strongly based on software written by Andy Gallo, % University of Delaware. % % The following reference was used in creating this program: % % Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation % Algorithm Based on the Thresholding and the Fuzzy c-Means % Techniques", Pattern Recognition, Volume 23, Number 9, pages % 935-952, 1990. 
% % */ #include "magick/studio.h" #include "magick/cache.h" #include "magick/color.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/string_.h" #include "magick/thread-private.h" /* Define declarations. */ #define MaxDimension 3 #define DeltaTau 0.5f #if defined(FastClassify) #define WeightingExponent 2.0 #define SegmentPower(ratio) (ratio) #else #define WeightingExponent 2.5 #define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0))); #endif #define Tau 5.2f /* Typedef declarations. */ typedef struct _ExtentPacket { MagickRealType center; ssize_t index, left, right; } ExtentPacket; typedef struct _Cluster { struct _Cluster *next; ExtentPacket red, green, blue; ssize_t count, id; } Cluster; typedef struct _IntervalTree { MagickRealType tau; ssize_t left, right; MagickRealType mean_stability, stability; struct _IntervalTree *sibling, *child; } IntervalTree; typedef struct _ZeroCrossing { MagickRealType tau, histogram[256]; short crossings[256]; } ZeroCrossing; /* Constant declarations. */ static const int Blue = 2, Green = 1, Red = 0, SafeMargin = 3, TreeLength = 600; /* Method prototypes. 
*/ static MagickRealType OptimalTau(const ssize_t *,const double,const double,const double, const double,short *); static ssize_t DefineRegion(const short *,ExtentPacket *); static void InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *), ScaleSpace(const ssize_t *,const MagickRealType,MagickRealType *), ZeroCrossHistogram(MagickRealType *,const MagickRealType,short *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l a s s i f y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Classify() defines one or more classes. Each pixel is thresholded to % determine which class it belongs to. If the class is not identified it is % assigned to the closest class based on the fuzzy c-Means technique. % % The format of the Classify method is: % % MagickBooleanType Classify(Image *image,short **extrema, % const MagickRealType cluster_threshold, % const MagickRealType weighting_exponent, % const MagickBooleanType verbose) % % A description of each parameter follows. % % o image: the image. % % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. % % o cluster_threshold: This MagickRealType represents the minimum number of % pixels contained in a hexahedra before it can be considered valid % (expressed as a percentage). % % o weighting_exponent: Specifies the membership weighting exponent. % % o verbose: A value greater than zero prints detailed information about % the identified classes. 
%
*/
static MagickBooleanType Classify(Image *image,short **extrema,
  const MagickRealType cluster_threshold,
  const MagickRealType weighting_exponent,const MagickBooleanType verbose)
{
#define SegmentImageTag "Segment/Image"

  CacheView
    *image_view;

  Cluster
    *cluster,
    *head,
    *last_cluster,
    *next_cluster;

  ExceptionInfo
    *exception;

  ExtentPacket
    blue,
    green,
    red;

  MagickOffsetType
    progress;

  MagickRealType
    *free_squares;

  MagickStatusType
    status;

  register ssize_t
    i;

  register MagickRealType
    *squares;

  size_t
    number_clusters;

  ssize_t
    count,
    y;

  /*
    Form clusters: every (red,green,blue) triple of histogram peak regions
    found by DefineRegion() becomes one candidate cluster in a linked list.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) ResetMagickMemory(&red,0,sizeof(red));
  (void) ResetMagickMemory(&green,0,sizeof(green));
  (void) ResetMagickMemory(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        /*
          NOTE(review): on allocation failure the clusters already linked
          from head are leaked by this early return -- TODO confirm/fix
          upstream.
        */
        if (cluster == (Cluster *) NULL)
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified -- create one covering the default
        (zeroed) extents so the code below always has at least one cluster.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster: a pixel belongs to the first cluster
    whose (widened-by-SafeMargin) RGB extents contain it; its channel values
    are accumulated into the cluster centers for later averaging.
  */
  status=MagickTrue;
  count=0;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel.
            */
            count++;
            cluster->red.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelRed(p));
            cluster->green.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelGreen(p));
            cluster->blue.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelBlue(p));
            cluster->count++;
            break;
          }
      p++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_Classify)
#endif
        /* progress spans both passes, hence the 2*image->rows span. */
        proceed=SetImageProgress(image,SegmentImageTag,progress++,
          2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Remove clusters that do not meet minimum cluster threshold.  The
    threshold is a percentage of `count', the number of surviving clusters
    so far (not of total pixels).
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster: assign its id and convert the accumulated
          channel sums into mean center values.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  number_clusters=(size_t) count;
  if (verbose != MagickFalse)
    {
      /*
        Print cluster statistics.
      */
      (void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n");
      (void) FormatLocaleFile(stdout,"===================\n\n");
      (void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double)
        cluster_threshold);
      (void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double)
        weighting_exponent);
      (void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n",
        (double) number_clusters);
      /*
        Print the total number of points per cluster.
      */
      (void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n");
      (void) FormatLocaleFile(stdout,"=============================\n\n");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        (void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double)
          cluster->id,(double) cluster->count);
      /*
        Print the cluster extents.
      */
      (void) FormatLocaleFile(stdout,
        "\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension);
      (void) FormatLocaleFile(stdout,"================");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
          cluster->id);
        (void) FormatLocaleFile(stdout,
          "%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double)
          cluster->red.left,(double) cluster->red.right,(double)
          cluster->green.left,(double) cluster->green.right,(double)
          cluster->blue.left,(double) cluster->blue.right);
      }
      /*
        Print the cluster center values.
      */
      (void) FormatLocaleFile(stdout,
        "\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension);
      (void) FormatLocaleFile(stdout,"=====================");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
          cluster->id);
        (void) FormatLocaleFile(stdout,"%g %g %g\n",(double)
          cluster->red.center,(double) cluster->green.center,(double)
          cluster->blue.center);
      }
      (void) FormatLocaleFile(stdout,"\n");
    }
  if (number_clusters > 256)
    ThrowBinaryException(ImageError,"TooManyClusters",image->filename);
  /*
    Speed up distance calculations: squares[-255..255] caches i*i so the
    fuzzy-membership loop below avoids per-pixel multiplies.
  */
  squares=(MagickRealType *) AcquireQuantumMemory(513UL,sizeof(*squares));
  if (squares == (MagickRealType *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  squares+=255;  /* shift base so indices -255..255 are valid */
  for (i=(-255); i <= 255; i++)
    squares[i]=(MagickRealType) i*(MagickRealType) i;
  /*
    Allocate image colormap: one entry per cluster, set to the rounded
    cluster center color.
  */
  if (AcquireImageColormap(image,number_clusters) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  i=0;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
  {
    image->colormap[i].red=ScaleCharToQuantum((unsigned char)
      (cluster->red.center+0.5));
    image->colormap[i].green=ScaleCharToQuantum((unsigned char)
      (cluster->green.center+0.5));
    image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
      (cluster->blue.center+0.5));
    i++;
  }
  /*
    Do coarse grain classes: assign each pixel to the first cluster whose
    extents contain it; pixels matching no cluster fall through to the fuzzy
    c-means membership computation below.
  */
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Cluster
      *cluster;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelIndex(indexes+x,0);
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        if (((ssize_t) ScaleQuantumToChar(q->red) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(q->red) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(q->green) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(q->green) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(q->blue) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(q->blue) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Classify this pixel.
            */
            SetPixelIndex(indexes+x,cluster->id);
            break;
          }
      }
      if (cluster == (Cluster *) NULL)
        {
          MagickRealType
            distance_squared,
            local_minima,
            numerator,
            ratio,
            sum;

          register ssize_t
            j,
            k;

          /*
            Compute fuzzy membership: pick the colormap entry j that
            maximizes 1/sum over the weighted squared-distance ratios
            (standard fuzzy c-means membership).
          */
          local_minima=0.0;
          for (j=0; j < (ssize_t) image->colors; j++)
          {
            sum=0.0;
            p=image->colormap+j;
            distance_squared=squares[(ssize_t) ScaleQuantumToChar(q->red)-
              (ssize_t) ScaleQuantumToChar(GetPixelRed(p))]+
              squares[(ssize_t) ScaleQuantumToChar(q->green)-
              (ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]+
              squares[(ssize_t) ScaleQuantumToChar(q->blue)-
              (ssize_t) ScaleQuantumToChar(GetPixelBlue(p))];
            numerator=distance_squared;
            for (k=0; k < (ssize_t) image->colors; k++)
            {
              p=image->colormap+k;
              distance_squared=squares[(ssize_t) ScaleQuantumToChar(q->red)-
                (ssize_t) ScaleQuantumToChar(GetPixelRed(p))]+
                squares[(ssize_t) ScaleQuantumToChar(q->green)-
                (ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]+
                squares[(ssize_t) ScaleQuantumToChar(q->blue)-
                (ssize_t) ScaleQuantumToChar(GetPixelBlue(p))];
              ratio=numerator/distance_squared;
              sum+=SegmentPower(ratio);
            }
            if ((sum != 0.0) && ((1.0/sum) > local_minima))
              {
                /*
                  Classify this pixel.
                */
                local_minima=1.0/sum;
                SetPixelIndex(indexes+x,j);
              }
          }
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_Classify)
#endif
        proceed=SetImageProgress(image,SegmentImageTag,progress++,
          2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  status&=SyncImage(image);
  /*
    Relinquish resources.
    NOTE(review): `status' is accumulated above but the function always
    returns MagickTrue -- cache/progress failures are silently dropped;
    confirm whether returning `status' is intended.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  squares-=255;  /* undo the base shift before freeing */
  free_squares=squares;
  free_squares=(MagickRealType *) RelinquishMagickMemory(free_squares);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n s o l i d a t e  C r o s s i n g s                                  %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConsolidateCrossings() guarantees that an even number of zero crossings
%  always lie between two crossings.
%
%  The format of the ConsolidateCrossings method is:
%
%      ConsolidateCrossings(ZeroCrossing *zero_crossing,
%        const size_t number_crossings)
%
%  A description of each parameter follows.
%
%    o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
%    o number_crossings: This size_t specifies the number of elements
%      in the zero_crossing array.
%
*/

/* Absolute value for ssize_t (local helper; avoids labs() width issues). */
static inline ssize_t MagickAbsoluteValue(const ssize_t x)
{
  if (x < 0)
    return(-x);
  return(x);
}

/* Maximum of two ssize_t values. */
static inline ssize_t MagickMax(const ssize_t x,const ssize_t y)
{
  if (x > y)
    return(x);
  return(y);
}

/* Minimum of two ssize_t values. */
static inline ssize_t MagickMin(const ssize_t x,const ssize_t y)
{
  if (x < y)
    return(x);
  return(y);
}

static void ConsolidateCrossings(ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  register ssize_t
    i,
    j,
    k,
    l;

  ssize_t
    center,
    correct,
    count,
    left,
    right;

  /*
    Consolidate zero crossings: walk from the coarsest scale toward the
    finest, moving each crossing at scale i to the nearest crossing at the
    finer scale i+1 that preserves an even crossing count per interval.
  */
  for (i=(ssize_t) number_crossings-1; i >= 0; i--)
    for (j=0; j <= 255; j++)
    {
      if (zero_crossing[i].crossings[j] == 0)
        continue;
      /*
        Find the entry that is closest to j and still preserves the
        property that there are an even number of crossings between
        intervals.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      left=MagickMax(k,0);
      center=j;
      for (k=j+1; k < 255; k++)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      right=MagickMin(k,255);
      /*
        K is the zero crossing just left of j (at this scale).
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i].crossings[k] != 0)
          break;
      if (k < 0)
        k=0;
      /*
        Check center for an even number of crossings between k and j.
      */
      correct=(-1);
      if (zero_crossing[i+1].crossings[j] != 0)
        {
          count=0;
          for (l=k+1; l < center; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (center != k))
            correct=center;
        }
      /*
        Check left for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < left; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (left != k))
            correct=left;
        }
      /*
        Check right for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < right; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (right != k))
            correct=right;
        }
      /* Move the crossing (or drop it if no valid position was found). */
      l=(ssize_t) zero_crossing[i].crossings[j];
      zero_crossing[i].crossings[j]=0;
      if (correct != -1)
        zero_crossing[i].crossings[correct]=(short) l;
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e f i n e R e g i o n                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DefineRegion() defines the left and right boundaries of a peak region.
%
%  The format of the DefineRegion method is:
%
%      ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
%
%  A description of each parameter follows.
%
%    o extrema:  Specifies a pointer to an array of integers.  They
%      represent the peaks and valleys of the histogram for each color
%      component.
%
%    o extents:  This pointer to an ExtentPacket represents the extents
%      of a particular peak or valley of a color component.
%
*/
static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
{
  /*
    Initialize to default values.
  */
  extents->left=0;
  extents->center=0.0;
  extents->right=255;
  /*
    Find the left side (maxima).  Scanning resumes from extents->index, so
    repeated calls enumerate successive regions.
  */
  for ( ; extents->index <= 255; extents->index++)
    if (extrema[extents->index] > 0)
      break;
  if (extents->index > 255)
    return(MagickFalse);  /* no left side - no region exists */
  extents->left=extents->index;
  /*
    Find the right side (minima).
  */
  for ( ; extents->index <= 255; extents->index++)
    if (extrema[extents->index] < 0)
      break;
  extents->right=extents->index-1;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e r i v a t i v e H i s t o g r a m                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DerivativeHistogram() determines the derivative of the histogram using
%  central differencing.
%
%  The format of the DerivativeHistogram method is:
%
%      DerivativeHistogram(const MagickRealType *histogram,
%        MagickRealType *derivative)
%
%  A description of each parameter follows.
%
%    o histogram: Specifies an array of MagickRealTypes representing the number
%      of pixels for each intensity of a particular color component.
%
%    o derivative: This array of MagickRealTypes is initialized by
%      DerivativeHistogram to the derivative of the histogram using central
%      differencing.
%
*/
static void DerivativeHistogram(const MagickRealType *histogram,
  MagickRealType *derivative)
{
  register ssize_t
    i,
    n;

  /*
    Compute endpoints using second order polynomial interpolation.
  */
  n=255;
  derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]);
  derivative[n]=(0.5*histogram[n-2]-2.0*histogram[n-1]+1.5*histogram[n]);
  /*
    Compute derivative using central differencing.
  */
  for (i=1; i < n; i++)
    derivative[i]=(histogram[i+1]-histogram[i-1])/2.0;
  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+  G e t I m a g e D y n a m i c T h r e s h o l d                            %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageDynamicThreshold() returns the dynamic threshold for an image.
%
%  The format of the GetImageDynamicThreshold method is:
%
%      MagickBooleanType GetImageDynamicThreshold(const Image *image,
%        const double cluster_threshold,const double smooth_threshold,
%        MagickPixelPacket *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cluster_threshold:  This MagickRealType represents the minimum number of
%      pixels contained in a hexahedra before it can be considered valid
%      (expressed as a percentage).
%
%    o smooth_threshold: the smoothing threshold eliminates noise in the second
%      derivative of the histogram.  As the value is increased, you can expect a
%      smoother second derivative.
%
%    o pixel: return the dynamic threshold here.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image,
  const double cluster_threshold,const double smooth_threshold,
  MagickPixelPacket *pixel,ExceptionInfo *exception)
{
  Cluster
    *background,
    *cluster,
    *object,
    *head,
    *last_cluster,
    *next_cluster;

  ExtentPacket
    blue,
    green,
    red;

  MagickBooleanType
    proceed;

  MagickRealType
    threshold;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  short
    *extrema[MaxDimension];

  ssize_t
    count,
    *histogram[MaxDimension],
    y;

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  GetMagickPixelPacket(image,pixel);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /* unwind the channels already allocated before failing */
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
        return(MagickFalse);
      }
  }
  /*
    Initialize histogram and find the peak/valley extrema per channel.
  */
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]);
  /*
    Form clusters (same scheme as Classify()).
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) ResetMagickMemory(&red,0,sizeof(red));
  (void) ResetMagickMemory(&green,0,sizeof(green));
  (void) ResetMagickMemory(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        /*
          NOTE(review): this failure path leaks histogram[], extrema[] and
          any clusters already linked from head -- TODO confirm/fix.
        */
        if (cluster == (Cluster *) NULL)
          {
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              image->filename);
            return(MagickFalse);
          }
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified -- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",
            image->filename);
          return(MagickFalse);
        }
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster.
  */
  count=0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel.
            */
            count++;
            cluster->red.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelRed(p));
            cluster->green.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelGreen(p));
            cluster->blue.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelBlue(p));
            cluster->count++;
            break;
          }
      p++;
    }
    proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y,
      2*image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Remove clusters that do not meet minimum cluster threshold.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster: convert channel sums to mean centers.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  /*
    Pick the smallest cluster (after head) as the object and the largest as
    the background; the threshold is the midpoint of their centers.
  */
  object=head;
  background=head;
  if (count > 1)
    {
      object=head->next;
      for (cluster=object; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count < object->count)
          object=cluster;
        cluster=cluster->next;
      }
      background=head->next;
      for (cluster=background; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count > background->count)
          background=cluster;
        cluster=cluster->next;
      }
    }
  threshold=(background->red.center+object->red.center)/2.0;
  pixel->red=(MagickRealType) ScaleCharToQuantum((unsigned char)
    (threshold+0.5));
  threshold=(background->green.center+object->green.center)/2.0;
  pixel->green=(MagickRealType) ScaleCharToQuantum((unsigned char)
    (threshold+0.5));
  threshold=(background->blue.center+object->blue.center)/2.0;
  pixel->blue=(MagickRealType) ScaleCharToQuantum((unsigned char)
    (threshold+0.5));
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   I n i t i a l i z e H i s t o g r a m                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InitializeHistogram() computes the histogram for an image.
%
%  The format of the InitializeHistogram method is:
%
%      InitializeHistogram(const Image *image,ssize_t **histogram)
%
%  A description of each parameter follows.
%
%    o image: Specifies a pointer to an Image structure;  returned from
%      ReadImage.
%
%    o histogram: Specifies an array of integers representing the number
%      of pixels for each intensity of a particular color component.
%
*/
static void InitializeHistogram(const Image *image,ssize_t **histogram,
  ExceptionInfo *exception)
{
  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Initialize histogram: zero all 256 bins for each channel.
  */
  for (i=0; i <= 255; i++)
  {
    histogram[Red][i]=0;
    histogram[Green][i]=0;
    histogram[Blue][i]=0;
  }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(p))]++;
      histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]++;
      histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(p))]++;
      p++;
    }
  }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   I n i t i a l i z e  I n t e r v a l T r e e                              %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InitializeIntervalTree() initializes an interval tree from the lists of
%  zero crossings.
%
%  The format of the InitializeIntervalTree method is:
%
%      InitializeIntervalTree(IntervalTree **list,ssize_t *number_nodes,
%        IntervalTree *node)
%
%  A description of each parameter follows.
%
%    o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
%    o number_crossings: This size_t specifies the number of elements
%      in the zero_crossing array.
% */ static void InitializeList(IntervalTree **list,ssize_t *number_nodes, IntervalTree *node) { if (node == (IntervalTree *) NULL) return; if (node->child == (IntervalTree *) NULL) list[(*number_nodes)++]=node; InitializeList(list,number_nodes,node->sibling); InitializeList(list,number_nodes,node->child); } static void MeanStability(IntervalTree *node) { register IntervalTree *child; if (node == (IntervalTree *) NULL) return; node->mean_stability=0.0; child=node->child; if (child != (IntervalTree *) NULL) { register ssize_t count; register MagickRealType sum; sum=0.0; count=0; for ( ; child != (IntervalTree *) NULL; child=child->sibling) { sum+=child->stability; count++; } node->mean_stability=sum/(MagickRealType) count; } MeanStability(node->sibling); MeanStability(node->child); } static void Stability(IntervalTree *node) { if (node == (IntervalTree *) NULL) return; if (node->child == (IntervalTree *) NULL) node->stability=0.0; else node->stability=node->tau-(node->child)->tau; Stability(node->sibling); Stability(node->child); } static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing, const size_t number_crossings) { IntervalTree *head, **list, *node, *root; register ssize_t i; ssize_t j, k, left, number_nodes; /* Allocate interval tree. */ list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength, sizeof(*list)); if (list == (IntervalTree **) NULL) return((IntervalTree *) NULL); /* The root is the entire histogram. */ root=(IntervalTree *) AcquireMagickMemory(sizeof(*root)); root->child=(IntervalTree *) NULL; root->sibling=(IntervalTree *) NULL; root->tau=0.0; root->left=0; root->right=255; for (i=(-1); i < (ssize_t) number_crossings; i++) { /* Initialize list with all nodes with no children. */ number_nodes=0; InitializeList(list,&number_nodes,root); /* Split list. 
*/ for (j=0; j < number_nodes; j++) { head=list[j]; left=head->left; node=head; for (k=head->left+1; k < head->right; k++) { if (zero_crossing[i+1].crossings[k] != 0) { if (node == head) { node->child=(IntervalTree *) AcquireMagickMemory( sizeof(*node->child)); node=node->child; } else { node->sibling=(IntervalTree *) AcquireMagickMemory( sizeof(*node->sibling)); node=node->sibling; } node->tau=zero_crossing[i+1].tau; node->child=(IntervalTree *) NULL; node->sibling=(IntervalTree *) NULL; node->left=left; node->right=k; left=k; } } if (left != head->left) { node->sibling=(IntervalTree *) AcquireMagickMemory( sizeof(*node->sibling)); node=node->sibling; node->tau=zero_crossing[i+1].tau; node->child=(IntervalTree *) NULL; node->sibling=(IntervalTree *) NULL; node->left=left; node->right=head->right; } } } /* Determine the stability: difference between a nodes tau and its child. */ Stability(root->child); MeanStability(root->child); list=(IntervalTree **) RelinquishMagickMemory(list); return(root); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + O p t i m a l T a u % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OptimalTau() finds the optimal tau for each band of the histogram. % % The format of the OptimalTau method is: % % MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau, % const double min_tau,const double delta_tau, % const double smooth_threshold,short *extrema) % % A description of each parameter follows. % % o histogram: Specifies an array of integers representing the number % of pixels for each intensity of a particular color component. % % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. 
% */ static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes, IntervalTree *node) { if (node == (IntervalTree *) NULL) return; if (node->stability >= node->mean_stability) { list[(*number_nodes)++]=node; ActiveNodes(list,number_nodes,node->sibling); } else { ActiveNodes(list,number_nodes,node->sibling); ActiveNodes(list,number_nodes,node->child); } } static void FreeNodes(IntervalTree *node) { if (node == (IntervalTree *) NULL) return; FreeNodes(node->sibling); FreeNodes(node->child); node=(IntervalTree *) RelinquishMagickMemory(node); } static MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau, const double min_tau,const double delta_tau,const double smooth_threshold, short *extrema) { IntervalTree **list, *node, *root; MagickBooleanType peak; MagickRealType average_tau, *derivative, *second_derivative, tau, value; register ssize_t i, x; size_t count, number_crossings; ssize_t index, j, k, number_nodes; ZeroCrossing *zero_crossing; /* Allocate interval tree. */ list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength, sizeof(*list)); if (list == (IntervalTree **) NULL) return(0.0); /* Allocate zero crossing list. */ count=(size_t) ((max_tau-min_tau)/delta_tau)+2; zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count, sizeof(*zero_crossing)); if (zero_crossing == (ZeroCrossing *) NULL) return(0.0); for (i=0; i < (ssize_t) count; i++) zero_crossing[i].tau=(-1.0); /* Initialize zero crossing list. 
*/ derivative=(MagickRealType *) AcquireQuantumMemory(256,sizeof(*derivative)); second_derivative=(MagickRealType *) AcquireQuantumMemory(256, sizeof(*second_derivative)); if ((derivative == (MagickRealType *) NULL) || (second_derivative == (MagickRealType *) NULL)) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDerivatives"); i=0; for (tau=max_tau; tau >= min_tau; tau-=delta_tau) { zero_crossing[i].tau=tau; ScaleSpace(histogram,tau,zero_crossing[i].histogram); DerivativeHistogram(zero_crossing[i].histogram,derivative); DerivativeHistogram(derivative,second_derivative); ZeroCrossHistogram(second_derivative,smooth_threshold, zero_crossing[i].crossings); i++; } /* Add an entry for the original histogram. */ zero_crossing[i].tau=0.0; for (j=0; j <= 255; j++) zero_crossing[i].histogram[j]=(MagickRealType) histogram[j]; DerivativeHistogram(zero_crossing[i].histogram,derivative); DerivativeHistogram(derivative,second_derivative); ZeroCrossHistogram(second_derivative,smooth_threshold, zero_crossing[i].crossings); number_crossings=(size_t) i; derivative=(MagickRealType *) RelinquishMagickMemory(derivative); second_derivative=(MagickRealType *) RelinquishMagickMemory(second_derivative); /* Ensure the scale-space fingerprints form lines in scale-space, not loops. */ ConsolidateCrossings(zero_crossing,number_crossings); /* Force endpoints to be included in the interval. */ for (i=0; i <= (ssize_t) number_crossings; i++) { for (j=0; j < 255; j++) if (zero_crossing[i].crossings[j] != 0) break; zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]); for (j=255; j > 0; j--) if (zero_crossing[i].crossings[j] != 0) break; zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]); } /* Initialize interval tree. */ root=InitializeIntervalTree(zero_crossing,number_crossings); if (root == (IntervalTree *) NULL) return(0.0); /* Find active nodes: stability is greater (or equal) to the mean stability of its children. 
*/ number_nodes=0; ActiveNodes(list,&number_nodes,root->child); /* Initialize extrema. */ for (i=0; i <= 255; i++) extrema[i]=0; for (i=0; i < number_nodes; i++) { /* Find this tau in zero crossings list. */ k=0; node=list[i]; for (j=0; j <= (ssize_t) number_crossings; j++) if (zero_crossing[j].tau == node->tau) k=j; /* Find the value of the peak. */ peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue : MagickFalse; index=node->left; value=zero_crossing[k].histogram[index]; for (x=node->left; x <= node->right; x++) { if (peak != MagickFalse) { if (zero_crossing[k].histogram[x] > value) { value=zero_crossing[k].histogram[x]; index=x; } } else if (zero_crossing[k].histogram[x] < value) { value=zero_crossing[k].histogram[x]; index=x; } } for (x=node->left; x <= node->right; x++) { if (index == 0) index=256; if (peak != MagickFalse) extrema[x]=(short) index; else extrema[x]=(short) (-index); } } /* Determine the average tau. */ average_tau=0.0; for (i=0; i < number_nodes; i++) average_tau+=list[i]->tau; average_tau/=(MagickRealType) number_nodes; /* Relinquish resources. */ FreeNodes(root); zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing); list=(IntervalTree **) RelinquishMagickMemory(list); return(average_tau); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S c a l e S p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleSpace() performs a scale-space filter on the 1D histogram. % % The format of the ScaleSpace method is: % % ScaleSpace(const ssize_t *histogram,const MagickRealType tau, % MagickRealType *scale_histogram) % % A description of each parameter follows. % % o histogram: Specifies an array of MagickRealTypes representing the number % of pixels for each intensity of a particular color component. 
% */

static void ScaleSpace(const ssize_t *histogram,const MagickRealType tau,
  MagickRealType *scale_histogram)
{
  double
    exponent,
    *kernel,
    scale,
    weighted_sum;

  register ssize_t
    i,
    j;

  /*
    Build a truncated Gaussian kernel for standard deviation tau; taps are
    filled outward from zero lag and the remainder stays zero once a tap
    drops below MagickEpsilon.
  */
  kernel=(double *) AcquireQuantumMemory(256,sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAllocateGammaMap");
  scale=1.0/(tau*sqrt(2.0*MagickPI));
  exponent=(-1.0/(2.0*tau*tau));
  for (i=0; i <= 255; i++)
    kernel[i]=0.0;
  for (i=0; i <= 255; i++)
  {
    kernel[i]=exp((double) exponent*i*i);
    if (kernel[i] < MagickEpsilon)
      break;
  }
  /*
    Convolve the histogram with the kernel (mirror-symmetric taps).
  */
  for (i=0; i <= 255; i++)
  {
    weighted_sum=0.0;
    for (j=0; j <= 255; j++)
      weighted_sum+=(double) histogram[j]*kernel[MagickAbsoluteValue(i-j)];
    scale_histogram[i]=(MagickRealType) (scale*weighted_sum);
  }
  kernel=(double *) RelinquishMagickMemory(kernel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  S e g m e n t I m a g e                                                    %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SegmentImage() segment an image by analyzing the histograms of the color
%  components and identifying units that are homogeneous with the fuzzy
%  C-means technique.
%
%  The format of the SegmentImage method is:
%
%      MagickBooleanType SegmentImage(Image *image,
%        const ColorspaceType colorspace,const MagickBooleanType verbose,
%        const double cluster_threshold,const double smooth_threshold)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o colorspace: Indicate the colorspace.
%
%    o verbose:  Set to MagickTrue to print detailed information about the
%      identified classes.
%
%    o cluster_threshold:  This represents the minimum number of pixels
%      contained in a hexahedra before it can be considered valid (expressed
%      as a percentage).
%
%    o smooth_threshold: the smoothing threshold eliminates noise in the second
%      derivative of the histogram.  As the value is increased, you can expect a
%      smoother second derivative.
% */

/*
  SegmentImage() allocates per-channel histograms and extrema tables,
  converts the image to the requested colorspace, locates the histogram
  extrema of the red, green, and blue channels in scale-space, classifies
  the pixels with the fuzzy c-means technique, and restores the original
  colorspace before returning the classification status.
*/
MagickExport MagickBooleanType SegmentImage(Image *image,
  const ColorspaceType colorspace,const MagickBooleanType verbose,
  const double cluster_threshold,const double smooth_threshold)
{
  ColorspaceType
    previous_colorspace;

  MagickBooleanType
    status;

  register ssize_t
    i;

  short
    *extrema[MaxDimension];

  ssize_t
    *histogram[MaxDimension];

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /* release whatever was allocated so far before throwing */
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        /* no trailing semicolon: ThrowBinaryException is a statement macro */
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename)
      }
  }
  /*
    Initialize histogram.
  */
  previous_colorspace=image->colorspace;
  (void) TransformImageColorspace(image,colorspace);
  InitializeHistogram(image,histogram,&image->exception);
  /* a smooth_threshold of 0.0 falls back to 1.0 for each channel */
  (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]);
  /*
    Classify using the fuzzy c-Means technique.
  */
  status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose);
  (void) TransformImageColorspace(image,previous_colorspace);
  /*
    Relinquish resources.
  */
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+  Z e r o C r o s s H i s t o g r a m                                        %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroCrossHistogram() find the zero crossings in a histogram and marks
%  directions as:  1 is negative to positive; 0 is zero crossing; and -1
%  is positive to negative.
%
%  The format of the ZeroCrossHistogram method is:
%
%      ZeroCrossHistogram(MagickRealType *second_derivative,
%        const MagickRealType smooth_threshold,short *crossings)
%
%  A description of each parameter follows.
%
%    o second_derivative: Specifies an array of MagickRealTypes representing the
%      second derivative of the histogram of a particular color component.
%
%    o crossings:  This array of integers is initialized with
%      -1, 0, or 1 representing the slope of the first derivative of the
%      of a particular color component.
%
*/
static void ZeroCrossHistogram(MagickRealType *second_derivative,
  const MagickRealType smooth_threshold,short *crossings)
{
  register ssize_t
    i;

  ssize_t
    parity;

  /*
    Merge low numbers to zero to help prevent noise.
  */
  for (i=0; i <= 255; i++)
    if ((second_derivative[i] < smooth_threshold) &&
        (second_derivative[i] >= -smooth_threshold))
      second_derivative[i]=0.0;
  /*
    Mark zero crossings:  parity remembers the sign of the last non-zero
    sample, so a crossing is reported on the sample where the sign flips.
  */
  parity=0;
  for (i=0; i <= 255; i++)
  {
    crossings[i]=0;
    if (second_derivative[i] < 0.0)
      {
        if (parity > 0)
          crossings[i]=(-1);
        parity=1;
      }
    else
      if (second_derivative[i] > 0.0)
        {
          if (parity < 0)
            crossings[i]=1;
          parity=(-1);
        }
  }
}
wave_generator.h
// // Project Name: Kratos // Last Modified by: $Author: pooyan $ // Date: $Date: 2006-11-27 16:07:33 $ // Revision: $Revision: 1.1.1.1 $ // // #if !defined(KRATOS_WAVEGENERATOR_H_INCLUDED ) #define KRATOS_WAVEGENERATOR_H_INCLUDED // System includes #include <string> #include <iostream> // External includes // Project includes #include "includes/define.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /// Short class definition. /** Detail class definition. */ class WaveGenerator { public: ///@name Type Definitions ///@{ /// Pointer definition of WaveGenerator KRATOS_CLASS_POINTER_DEFINITION(WaveGenerator); ///@} ///@name Life Cycle ///@{ /// Default constructor. WaveGenerator() {} /// Destructor. virtual ~WaveGenerator() {} void GenerateWaveXYPlane(ModelPart::NodesContainerType& rNodes, const double d, const double H, const double T, const double Z0, const double t, const double g) { double C0=g*T/( 6.2831853071795862 ); double L0=C0*T; double teta = (-6.2831853071795862*t / T) - (3.1415926535897931 / 2.0); double aux = cosh(6.2831853071795862*d/L0); double ux= 0.5 * H * (g * T / L0) * cos(teta) / aux; double uy= 0.5 * H * (g * T / L0) * sin(teta) / aux; double h=0.5 * H * cos(teta); const PointerVector< Node<3> >::iterator it_begin = rNodes.begin(); array_1d<double,3> temp; #pragma omp parallel for private(temp) for(int i=0; i<static_cast<int>(rNodes.size()); i++) { PointerVector< Node<3> >::iterator it = it_begin + i; const double Z = it->Z(); temp[0] = ux * cosh(6.2831853071795862*(Z - Z0 + d)/L0); temp[1] = 0.0; temp[2] = uy * sinh(6.2831853071795862*(Z - Z0 + d)/L0); const double distance = -h+Z - Z0; it->FastGetSolutionStepValue(DISTANCE,1)= distance; if(distance <= 0) // applying velocity only to dense fluid part noalias(it->FastGetSolutionStepValue(VELOCITY))=temp; } } /** * * @param rNodes Nodes of the plane we want to 
apply the wave * @param d Depth of water * @param H height of wave (2 * a) * @param T Period of wave * @param Z0 Level of natural free surface * @param X0 Plane x * @param t current time * @param g gravity */ void GenerateVolumeWaveXYPlane(ModelPart::NodesContainerType& rNodes, const double d, const double H, const double T, const double Z0, const double X0, const double t, const double g) { double C0=g*T/( 6.2831853071795862 ); double L0=C0*T; const PointerVector< Node<3> >::iterator it_begin = rNodes.begin(); array_1d<double,3> temp; array_1d<double,3> velocity; #pragma omp parallel for private(temp, velocity) for(int i=0; i<static_cast<int>(rNodes.size()); i++) { PointerVector< Node<3> >::iterator it = it_begin + i; const double x = it->X(); const double Z = it->Z(); const double alpha = (x - X0) / L0; double beta = 0.8 + 0.199 * alpha * 2.; if(alpha < 0.01) beta = 0.00; double teta = (-6.2831853071795862*t / T) + (3.1415926535897931 * 2.0 * alpha) + (3.1415926535897931 / 2.0); double aux = cosh(6.2831853071795862*d/L0); double ux= 0.5 * H * (g * T / L0) * cos(teta) / aux; double uy= 0.5 * H * (g * T / L0) * sin(teta) / aux; double h=0.5 * H * cos(teta); temp[0] = ux * cosh(6.2831853071795862*(Z - Z0 + d)/L0); temp[1] = 0.0; temp[2] = uy * sinh(6.2831853071795862*(Z - Z0 + d)/L0); const double node_distance = it->FastGetSolutionStepValue(DISTANCE,1); const double distance = -h+Z - Z0; if((t / T) > alpha ) it->FastGetSolutionStepValue(DISTANCE,1)= node_distance * beta + (1-beta) * distance; // if(distance <= 0) // correcting the pressure only to dense fluid part // it->FastGetSolutionStepValue(PRESSURE)= -distance * 1000; noalias(velocity) = it->FastGetSolutionStepValue(VELOCITY) * beta; if((t / T) > alpha ) if(distance <= 0) // applying velocity only to dense fluid part noalias(it->FastGetSolutionStepValue(VELOCITY))=velocity + (1 - beta) * temp; } } void GenerateComposedVolumeWaveXYPlane(ModelPart::NodesContainerType& rNodes, const double d, const Vector& 
HVector, const Vector& TVector, const Vector& PhaseVector, const double Z0, const double X0, const double t, const double Length) { unsigned int number_of_waves; if (HVector.size() <= TVector.size()) number_of_waves = HVector.size(); else number_of_waves = TVector.size(); const double g = 9.81; const PointerVector< Node < 3 > >::iterator it_begin = rNodes.begin(); array_1d<double, 3 > wave_velocity; array_1d<double, 3 > velocity; #pragma omp parallel for private(wave_velocity,velocity) for (int i = 0; i<static_cast<int> (rNodes.size()); i++) { PointerVector< Node < 3 > >::iterator it = it_begin + i; const double x = it->X(); const double Z = it->Z(); wave_velocity = ZeroVector(3); double wave_h = 0.00; double beta = (x - X0) / Length; double gamma = 0.8 + 0.199 * beta * 2.0; if (gamma > 1.00) gamma = 1.00; if (beta < 0.1) gamma = 8.00 * beta; bool wave_arrived = false; for (unsigned int i_wave = 0; i_wave < number_of_waves; i_wave++) { const double T = TVector[i_wave]; const double H = HVector[i_wave]; double C0 = g * T / (6.2831853071795862); double L0 = C0*T; const double alpha = (x - X0) / L0; if((t / T) > alpha) wave_arrived = true; double teta = (-6.2831853071795862 * t / T) + (3.1415926535897931 * 2.0 * alpha) + (3.1415926535897931 / 2.0) + PhaseVector[i_wave]; double aux = cosh(6.2831853071795862 * d / L0); double ux = 0.5 * H * (g * T / L0) * cos(teta) / aux; double uy = 0.5 * H * (g * T / L0) * sin(teta) / aux; double h = 0.5 * H * cos(teta); if ((t / T) > alpha) { wave_velocity[0] += ux * cosh(6.2831853071795862 * (Z - Z0 + d) / L0); wave_velocity[1] = 0.0; wave_velocity[2] += uy * sinh(6.2831853071795862 * (Z - Z0 + d) / L0); } if ((t / T) > alpha) wave_h += h; } const double node_distance = it->FastGetSolutionStepValue(DISTANCE, 1); double distance = -wave_h + Z - Z0; if(wave_arrived) it->FastGetSolutionStepValue(DISTANCE, 1) = node_distance * gamma + (1 - gamma) * distance; // if(distance <= 0) // correcting the pressure only to dense fluid part // 
it->FastGetSolutionStepValue(PRESSURE)= -distance * 1000; noalias(velocity) = it->FastGetSolutionStepValue(VELOCITY) * gamma; if(wave_arrived) if (distance <= 0) // applying velocity only to dense fluid part noalias(it->FastGetSolutionStepValue(VELOCITY)) = velocity + (1 - gamma) * wave_velocity; } } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. virtual std::string Info() const { std::stringstream buffer; buffer << "WaveGenerator" ; return buffer.str(); } /// Print information about this object. virtual void PrintInfo(std::ostream& rOStream) const { rOStream << "WaveGenerator"; } /// Print object's data. virtual void PrintData(std::ostream& rOStream) const {} ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. WaveGenerator& operator=(WaveGenerator const& rOther) { return *this; } /// Copy constructor. 
WaveGenerator(WaveGenerator const& rOther) {}; ///@} }; // Class WaveGenerator ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ /// input stream function inline std::istream& operator >> (std::istream& rIStream, WaveGenerator& rThis) { return rIStream; } /// output stream function inline std::ostream& operator << (std::ostream& rOStream, const WaveGenerator& rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; rThis.PrintData(rOStream); return rOStream; } ///@} } // namespace Kratos. #endif // KRATOS_WAVEGENERATOR_H_INCLUDED defined
cpalamem_simple.c
#include <stdio.h> #include <string.h> #include <stdlib.h> #include <spllt_iface.h> #include <omp.h> #include <cpalamem_macro.h> #include <mat_csr.h> #include <mat_load_mm.h> #include <cpalamem_handler.h> #include <cpalamem_instrumentation.h> #define USAGE "Usage %s -m <matrixFileName> [--nrhs <integer DEFAULT:%d>] [--nb <integer DEFAULT:%d>] [--ncpu <integer DEFAULT:%d]" #define FILENAMESIZE 256 int main(int argc, char ** argv){ void *akeep = NULL; void *fkeep = NULL; void *tm = NULL; int *ptr = NULL; int *row = NULL; double *val = NULL; int *order = NULL; double *x = NULL; double *rhs = NULL; double *y = NULL; double *workspace = NULL; int n, nnz, nrhs; int nb = 32; long worksize; spllt_inform_t info; spllt_options_t options = SPLLT_OPTIONS_NULL(); int stat; CPLM_Mat_CSR_t A = CPLM_MatCSRNULL(); CPLM_Mat_CSR_t U = CPLM_MatCSRNULL(); int ierr = 0; int rank = 0; int matrixSet = 0; int ncpu = 1; long size = 0; char matrixFileName[FILENAMESIZE]; CPLM_Init(&argc, &argv); CPLM_PUSH CPLM_BEGIN_TIME CPLM_OPEN_TIMER //Handle parameters if(argc>0) { for(int i = 1; i < argc; i++) { if(!strcmp(argv[i],"-h")) { CPLM_Abort(USAGE, argv[0], nrhs, nb); } else if(!strcmp(argv[i],"--nb")) { i++; nb = atoi(argv[i]); } else if(!strcmp(argv[i],"--ncpu")) { i++; ncpu = atoi(argv[i]); } else if(!strcmp(argv[i],"--nrhs")) { i++; nrhs = atoi(argv[i]); } else if(!strcmp(argv[i],"-m")) { i++; strcpy(matrixFileName, argv[i]); matrixSet = 1; } else{ if(!rank) { CPLM_Abort(USAGE, argv[0], nrhs, nb); } } } } options.ncpu = ncpu; options.nb = nb; if(!matrixSet) { CPLM_Abort("Error, you have to provide a matrix to factor"); } ierr = CPLM_LoadMatrixMarket(matrixFileName, &A);CPLM_CHKERR(ierr); ierr = CPLM_MatCSRGetTriU(&A, &U);CPLM_CHKERR(ierr); CPLM_MatCSRConvertTo1BasedIndexing(&U); nnz = U.info.nnz; n = U.info.n; ptr = U.rowPtr; row = U.colInd; val = U.val; order = malloc(n * sizeof(int)); //Create RHS //nrhs = 1; x = malloc(n * nrhs * sizeof(double)); rhs = malloc(n * nrhs * sizeof(double)); 
for(int j = 0; j < nrhs; j++) for(int i = 0; i < n; i++) rhs[i + j * nrhs] = 1.0 * (j + 1); memcpy(x, rhs, n * nrhs * sizeof(double)); #pragma omp parallel #pragma omp single { spllt_task_manager_init(&tm); printf("Analysis\n"); spllt_analyse(&akeep, &fkeep, &options, n, ptr, row, &info, order); printf("Factor\n"); spllt_factor(akeep, fkeep, &options, nnz, val, &info); spllt_wait(); printf("Prepare solve\n"); spllt_prepare_solve(akeep, fkeep, nb, nrhs, &worksize, &info); printf("Need a workspace of size %ld\n", worksize); printf("Need a y vector of size %d\n", n * nrhs); y = calloc( n * nrhs, sizeof(double)); workspace = calloc( worksize, sizeof(double)); spllt_set_mem_solve(akeep, fkeep, nb, nrhs, worksize, y, workspace, &info); #if 0 spllt_solve_worker(fkeep, &options, order, nrhs, x, &info, 1, workspace, size, tm); spllt_solve_worker(fkeep, &options, order, nrhs, x, &info, 2, workspace, size, tm); #else spllt_solve(fkeep, &options, order, nrhs, x, &info, 7); spllt_solve(fkeep, &options, order, nrhs, x, &info, 8); spllt_wait(); //spllt_solve(fkeep, &options, order, nrhs, x, &info, 2); #endif spllt_chkerr(n, ptr, row, val, nrhs, x, rhs); } spllt_deallocate_akeep(&akeep, &stat); spllt_deallocate_fkeep(&fkeep, &stat); spllt_task_manager_deallocate(&tm, &stat); CPLM_MatCSRFree(&A); free(x); free(rhs); free(order); free(workspace); CPLM_CLOSE_TIMER CPLM_END_TIME CPLM_POP //CPLM_Finalize(); return 0; }
OrderedEndLink.c
/* NOTE(review): this reads as a compiler/analysis test case for the OpenMP
   `ordered` construct: the work-sharing loop is orphaned (no enclosing
   `parallel` region), the loop has no `ordered` clause, and the body
   contains two `ordered` regions in one iteration -- confirm the intended
   diagnostic against the owning test suite before "fixing" anything. */
int x;

int main()
{
  int i;

#pragma omp for
  for (i = 0; i < 10; i++)
  {
#pragma omp ordered
    {
      100;      /* expression statement with no effect */
    }
#pragma omp ordered
    {
      int x;    /* local shadows the global x; unused */
    }
  }
}
DRB050-functionparameter-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#include <stdio.h>
#include <stdlib.h>

/* Arrays passed as function parameters */

/* foo1() stores 0.5 * c[i] into o1[i] for i in [0, len).  As called from
   main() below, o1 and c alias the same array shifted by one element, so
   iteration i reads the value written by iteration i-1 (a loop-carried
   dependence).  The loop here runs sequentially. */
void foo1(double o1[], double c[], int len)
{
  int i ;
  /* NOTE(review): upstream DataRaceBench DRB050 parallelizes this loop,
     which with the aliased arguments is the seeded data race the file name
     ("orig-yes") advertises; no pragma is present here -- confirm against
     the original benchmark before treating this copy as race-free. */
  for (i = 0; i < len; ++i)
  {
    double volnew_o8 = 0.5 * c[i];
    o1[i] = volnew_o8;
  }
}

int main()
{
  double o1[101];
  double c[101];
  int i;
  int len = 100;

  /* Each iteration writes distinct elements of c and o1. */
#pragma omp parallel for private(i)
  for (i = 0; i < len; ++i)
  {
    c[i] = i + 1.01;
    o1[i] = i + 1.01;
  }

  /* Aliased call: destination is the same array shifted by one element. */
  foo1 (&o1[1], &o1[0], 100);

  for (i = 0; i < len; ++i)
  {
    printf("%lf\n",o1[i]);
  }
  return 0;
}
TSDFVoxelGridImpl.h
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. 
// ----------------------------------------------------------------------------

#include <atomic>
#include <cmath>

#include "open3d/core/Dispatch.h"
#include "open3d/core/Dtype.h"
#include "open3d/core/MemoryManager.h"
#include "open3d/core/SizeVector.h"
#include "open3d/core/Tensor.h"
#include "open3d/t/geometry/Utility.h"
#include "open3d/t/geometry/kernel/GeometryIndexer.h"
#include "open3d/t/geometry/kernel/GeometryMacros.h"
#include "open3d/t/geometry/kernel/TSDFVoxel.h"
#include "open3d/t/geometry/kernel/TSDFVoxelGrid.h"
#include "open3d/utility/Console.h"
#include "open3d/utility/Timer.h"

namespace open3d {
namespace t {
namespace geometry {
namespace kernel {
namespace tsdf {

// Integrates one depth (and optionally color) frame into the selected TSDF
// voxel blocks.  This file is compiled twice: under nvcc the CUDA variant is
// emitted, otherwise the CPU variant -- the body is shared.
#if defined(__CUDACC__)
void IntegrateCUDA
#else
void IntegrateCPU
#endif
        (const core::Tensor& depth,
         const core::Tensor& color,
         const core::Tensor& indices,
         const core::Tensor& block_keys,
         core::Tensor& block_values,
         // Transforms
         const core::Tensor& intrinsics,
         const core::Tensor& extrinsics,
         // Parameters
         int64_t resolution,
         float voxel_size,
         float sdf_trunc,
         float depth_scale,
         float depth_max) {
    // Parameters
    int64_t resolution3 = resolution * resolution * resolution;

    // Shape / transform indexers, no data involved
    NDArrayIndexer voxel_indexer({resolution, resolution, resolution});
    TransformIndexer transform_indexer(intrinsics, extrinsics, voxel_size);

    // Real data indexer
    NDArrayIndexer depth_indexer(depth, 2);
    NDArrayIndexer block_keys_indexer(block_keys, 1);
    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);

    // Optional color integration (enabled when a non-empty color image is
    // supplied)
    NDArrayIndexer color_indexer;
    bool integrate_color = false;
    if (color.NumElements() != 0) {
        color_indexer = NDArrayIndexer(color, 2);
        integrate_color = true;
    }

    // Plain arrays that does not require indexers
    const int64_t* indices_ptr =
            static_cast<const int64_t*>(indices.GetDataPtr());

    // One workload item per voxel of every selected block.
    int64_t n = indices.GetLength() * resolution3;

#if defined(__CUDACC__)
    core::kernel::CUDALauncher launcher;
#else
    core::kernel::CPULauncher launcher;
#endif

    DISPATCH_BYTESIZE_TO_VOXEL(
            voxel_block_buffer_indexer.ElementByteSize(), [&]() {
                launcher.LaunchGeneralKernel(n, [=] OPEN3D_DEVICE(
                                                        int64_t workload_idx) {
                    // Natural index (0, N) -> (block_idx, voxel_idx)
                    int64_t block_idx =
                            indices_ptr[workload_idx / resolution3];
                    int64_t voxel_idx = workload_idx % resolution3;

                    /// Coordinate transform
                    // block_idx -> (x_block, y_block, z_block)
                    int* block_key_ptr =
                            block_keys_indexer.GetDataPtrFromCoord<int>(
                                    block_idx);
                    int64_t xb = static_cast<int64_t>(block_key_ptr[0]);
                    int64_t yb = static_cast<int64_t>(block_key_ptr[1]);
                    int64_t zb = static_cast<int64_t>(block_key_ptr[2]);

                    // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                    int64_t xv, yv, zv;
                    voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);

                    // coordinate in world (in voxel)
                    int64_t x = (xb * resolution + xv);
                    int64_t y = (yb * resolution + yv);
                    int64_t z = (zb * resolution + zv);

                    // coordinate in camera (in voxel -> in meter)
                    float xc, yc, zc, u, v;
                    transform_indexer.RigidTransform(
                            static_cast<float>(x), static_cast<float>(y),
                            static_cast<float>(z), &xc, &yc, &zc);

                    // coordinate in image (in pixel); skip voxels that
                    // project outside the depth image
                    transform_indexer.Project(xc, yc, zc, &u, &v);
                    if (!depth_indexer.InBoundary(u, v)) {
                        return;
                    }

                    // Associate image workload and compute SDF and TSDF.
                    float depth = *depth_indexer.GetDataPtrFromCoord<float>(
                                          static_cast<int64_t>(u),
                                          static_cast<int64_t>(v)) /
                                  depth_scale;

                    float sdf = (depth - zc);
                    // Reject invalid depth, out-of-range depth, voxels
                    // behind the camera, and voxels beyond the negative
                    // truncation band.
                    if (depth <= 0 || depth > depth_max || zc <= 0 ||
                        sdf < -sdf_trunc) {
                        return;
                    }
                    // Truncate and normalize the SDF to [-1, 1].
                    sdf = sdf < sdf_trunc ? sdf : sdf_trunc;
                    sdf /= sdf_trunc;

                    // Associate voxel workload and update TSDF/Weights
                    voxel_t* voxel_ptr =
                            voxel_block_buffer_indexer
                                    .GetDataPtrFromCoord<voxel_t>(
                                            xv, yv, zv, block_idx);

                    if (integrate_color) {
                        float* color_ptr =
                                color_indexer.GetDataPtrFromCoord<float>(
                                        static_cast<int64_t>(u),
                                        static_cast<int64_t>(v));

                        voxel_ptr->Integrate(sdf, color_ptr[0], color_ptr[1],
                                             color_ptr[2]);
                    } else {
                        voxel_ptr->Integrate(sdf);
                    }
                });
            });
#if defined(__CUDACC__)
    OPEN3D_CUDA_CHECK(cudaDeviceSynchronize());
#endif
}

// Extracts surface points (zero crossings of the TSDF) from the selected
// voxel blocks.  When valid_size is negative, an extra counting pass
// estimates the output size first.
#if defined(__CUDACC__)
void ExtractSurfacePointsCUDA
#else
void ExtractSurfacePointsCPU
#endif
        (const core::Tensor& indices,
         const core::Tensor& nb_indices,
         const core::Tensor& nb_masks,
         const core::Tensor& block_keys,
         const core::Tensor& block_values,
         core::Tensor& points,
         utility::optional<std::reference_wrapper<core::Tensor>> normals,
         utility::optional<std::reference_wrapper<core::Tensor>> colors,
         int64_t resolution,
         float voxel_size,
         float weight_threshold,
         int& valid_size) {
    // Parameters
    int64_t resolution3 = resolution * resolution * resolution;

    // Shape / transform indexers, no data involved
    NDArrayIndexer voxel_indexer({resolution, resolution, resolution});

    // Real data indexer
    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);
    NDArrayIndexer block_keys_indexer(block_keys, 1);
    NDArrayIndexer nb_block_masks_indexer(nb_masks, 2);
    NDArrayIndexer nb_block_indices_indexer(nb_indices, 2);

    // Plain arrays that does not require indexers
    const int64_t* indices_ptr =
            static_cast<const int64_t*>(indices.GetDataPtr());

    int64_t n_blocks = indices.GetLength();
    int64_t n = n_blocks * resolution3;

    // Output point counter: a device tensor on CUDA, an atomic on CPU.
    #if defined(__CUDACC__)
    core::Tensor count(std::vector<int>{0}, {1}, core::Dtype::Int32,
                       block_values.GetDevice());
    int* count_ptr = count.GetDataPtr<int>();
#else
    std::atomic<int> count_atomic(0);
    std::atomic<int>* count_ptr = &count_atomic;
#endif

#if defined(__CUDACC__)
    core::kernel::CUDALauncher launcher;
#else
    core::kernel::CPULauncher launcher;
#endif

    // Optional counting pass: when the caller gives no size estimate,
    // count the zero crossings first so the output tensors can be sized
    // exactly.
    if (valid_size < 0) {
        utility::LogWarning(
                "No estimated max point cloud size provided, using a 2-pass "
                "estimation. Surface extraction could be slow.");
        // This pass determines valid number of points.
        DISPATCH_BYTESIZE_TO_VOXEL(
                voxel_block_buffer_indexer.ElementByteSize(), [&]() {
                    launcher.LaunchGeneralKernel(
                            n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
                                // Neighbor-aware voxel fetch; returns nullptr
                                // when the coordinate falls in a missing
                                // neighbor block.
                                auto GetVoxelAt =
                                        [&] OPEN3D_DEVICE(
                                                int xo, int yo, int zo,
                                                int curr_block_idx)
                                                -> voxel_t* {
                                    return DeviceGetVoxelAt<voxel_t>(
                                            xo, yo, zo, curr_block_idx,
                                            static_cast<int>(resolution),
                                            nb_block_masks_indexer,
                                            nb_block_indices_indexer,
                                            voxel_block_buffer_indexer);
                                };

                                // Natural index (0, N) -> (block_idx,
                                // voxel_idx)
                                int64_t workload_block_idx =
                                        workload_idx / resolution3;
                                int64_t block_idx =
                                        indices_ptr[workload_block_idx];
                                int64_t voxel_idx = workload_idx % resolution3;

                                // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                                int64_t xv, yv, zv;
                                voxel_indexer.WorkloadToCoord(voxel_idx, &xv,
                                                              &yv, &zv);

                                voxel_t* voxel_ptr =
                                        voxel_block_buffer_indexer
                                                .GetDataPtrFromCoord<voxel_t>(
                                                        xv, yv, zv, block_idx);
                                float tsdf_o = voxel_ptr->GetTSDF();
                                float weight_o = voxel_ptr->GetWeight();
                                if (weight_o <= weight_threshold) return;

                                // Enumerate x-y-z directions: a surface point
                                // exists on an edge whose endpoints have
                                // opposite TSDF signs.
                                for (int i = 0; i < 3; ++i) {
                                    voxel_t* ptr = GetVoxelAt(
                                            static_cast<int>(xv) + (i == 0),
                                            static_cast<int>(yv) + (i == 1),
                                            static_cast<int>(zv) + (i == 2),
                                            static_cast<int>(
                                                    workload_block_idx));
                                    if (ptr == nullptr) continue;

                                    float tsdf_i = ptr->GetTSDF();
                                    float weight_i = ptr->GetWeight();

                                    if (weight_i > weight_threshold &&
                                        tsdf_i * tsdf_o < 0) {
                                        OPEN3D_ATOMIC_ADD(count_ptr, 1);
                                    }
                                }
                            });
                });

#if defined(__CUDACC__)
        valid_size = count[0].Item<int>();
        count[0] = 0;
#else
        valid_size = (*count_ptr).load();
        (*count_ptr) = 0;
#endif
    }

    int max_count = valid_size;
    if (points.GetLength() == 0) {
        points = core::Tensor({max_count, 3}, core::Dtype::Float32,
                              block_values.GetDevice());
    }
    NDArrayIndexer point_indexer(points, 1);

    // Normals: allocated lazily, only when the caller passed a tensor.
    bool extract_normal = false;
    NDArrayIndexer normal_indexer;
    if (normals.has_value()) {
        extract_normal = true;
        if (normals.value().get().GetLength() == 0) {
            normals.value().get() =
                    core::Tensor({max_count, 3}, core::Dtype::Float32,
                                 block_values.GetDevice());
        }
        normal_indexer = NDArrayIndexer(normals.value().get(), 1);
    }

    // This pass extracts exact surface points.
    DISPATCH_BYTESIZE_TO_VOXEL(
            voxel_block_buffer_indexer.ElementByteSize(), [&]() {
                // Colors: only meaningful when the voxel type stores color.
                bool extract_color = false;
                NDArrayIndexer color_indexer;
                if (voxel_t::HasColor() && colors.has_value()) {
                    extract_color = true;
                    if (colors.value().get().GetLength() == 0) {
                        colors.value().get() = core::Tensor(
                                {max_count, 3}, core::Dtype::Float32,
                                block_values.GetDevice());
                    }
                    color_indexer = NDArrayIndexer(colors.value().get(), 1);
                }

                launcher.LaunchGeneralKernel(n, [=] OPEN3D_DEVICE(
                                                        int64_t workload_idx) {
                    auto GetVoxelAt = [&] OPEN3D_DEVICE(
                                              int xo, int yo, int zo,
                                              int curr_block_idx) -> voxel_t* {
                        return DeviceGetVoxelAt<voxel_t>(
                                xo, yo, zo, curr_block_idx,
                                static_cast<int>(resolution),
                                nb_block_masks_indexer,
                                nb_block_indices_indexer,
                                voxel_block_buffer_indexer);
                    };
                    // Central-difference TSDF gradient at a voxel, written
                    // into n[3].
                    auto GetNormalAt = [&] OPEN3D_DEVICE(int xo, int yo,
                                                         int zo,
                                                         int curr_block_idx,
                                                         float* n) {
                        return DeviceGetNormalAt<voxel_t>(
                                xo, yo, zo, curr_block_idx, n,
                                static_cast<int>(resolution), voxel_size,
                                nb_block_masks_indexer,
                                nb_block_indices_indexer,
                                voxel_block_buffer_indexer);
                    };

                    // Natural index (0, N) -> (block_idx, voxel_idx)
                    int64_t workload_block_idx = workload_idx / resolution3;
                    int64_t block_idx = indices_ptr[workload_block_idx];
                    int64_t voxel_idx = workload_idx % resolution3;

                    /// Coordinate transform
                    // block_idx -> (x_block, y_block, z_block)
                    int* block_key_ptr =
                            block_keys_indexer.GetDataPtrFromCoord<int>(
                                    block_idx);
                    int64_t xb = static_cast<int64_t>(block_key_ptr[0]);
                    int64_t yb = static_cast<int64_t>(block_key_ptr[1]);
                    int64_t zb = static_cast<int64_t>(block_key_ptr[2]);

                    // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                    int64_t xv, yv, zv;
                    voxel_indexer.WorkloadToCoord(voxel_idx,
                                                  &xv, &yv, &zv);

                    voxel_t* voxel_ptr =
                            voxel_block_buffer_indexer
                                    .GetDataPtrFromCoord<voxel_t>(xv, yv, zv,
                                                                  block_idx);
                    float tsdf_o = voxel_ptr->GetTSDF();
                    float weight_o = voxel_ptr->GetWeight();

                    if (weight_o <= weight_threshold) return;

                    // Global voxel coordinate (in voxels).
                    int64_t x = xb * resolution + xv;
                    int64_t y = yb * resolution + yv;
                    int64_t z = zb * resolution + zv;

                    float no[3] = {0}, ni[3] = {0};
                    if (extract_normal) {
                        GetNormalAt(static_cast<int>(xv),
                                    static_cast<int>(yv),
                                    static_cast<int>(zv),
                                    static_cast<int>(workload_block_idx), no);
                    }

                    // Enumerate x-y-z axis: emit one interpolated point per
                    // sign-changing edge.
                    for (int i = 0; i < 3; ++i) {
                        voxel_t* ptr = GetVoxelAt(
                                static_cast<int>(xv) + (i == 0),
                                static_cast<int>(yv) + (i == 1),
                                static_cast<int>(zv) + (i == 2),
                                static_cast<int>(workload_block_idx));
                        if (ptr == nullptr) continue;

                        float tsdf_i = ptr->GetTSDF();
                        float weight_i = ptr->GetWeight();

                        if (weight_i > weight_threshold &&
                            tsdf_i * tsdf_o < 0) {
                            // Linear zero-crossing position along the edge.
                            float ratio = (0 - tsdf_o) / (tsdf_i - tsdf_o);

                            int idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);
                            // Guard against an under-estimated valid_size.
                            if (idx >= valid_size) {
                                printf("Point cloud size larger than "
                                       "estimated, please increase the "
                                       "estimation!\n");
                                return;
                            }

                            float* point_ptr =
                                    point_indexer.GetDataPtrFromCoord<float>(
                                            idx);
                            point_ptr[0] =
                                    voxel_size * (x + ratio * int(i == 0));
                            point_ptr[1] =
                                    voxel_size * (y + ratio * int(i == 1));
                            point_ptr[2] =
                                    voxel_size * (z + ratio * int(i == 2));

                            if (extract_color) {
                                float* color_ptr =
                                        color_indexer
                                                .GetDataPtrFromCoord<float>(
                                                        idx);

                                float r_o = voxel_ptr->GetR();
                                float g_o = voxel_ptr->GetG();
                                float b_o = voxel_ptr->GetB();

                                float r_i = ptr->GetR();
                                float g_i = ptr->GetG();
                                float b_i = ptr->GetB();

                                // Interpolate color at the crossing and
                                // normalize from [0, 255] to [0, 1].
                                color_ptr[0] =
                                        ((1 - ratio) * r_o + ratio * r_i) /
                                        255.0f;
                                color_ptr[1] =
                                        ((1 - ratio) * g_o + ratio * g_i) /
                                        255.0f;
                                color_ptr[2] =
                                        ((1 - ratio) * b_o + ratio * b_i) /
                                        255.0f;
                            }

                            if (extract_normal) {
                                GetNormalAt(
                                        static_cast<int>(xv) + (i == 0),
                                        static_cast<int>(yv) + (i == 1),
                                        static_cast<int>(zv) + (i == 2),
                                        static_cast<int>(workload_block_idx),
                                        ni);

                                float* normal_ptr =
                                        normal_indexer
                                                .GetDataPtrFromCoord<float>(
                                                        idx);
                                // Interpolate the two endpoint gradients and
                                // normalize (+1e-5 avoids divide-by-zero).
                                float nx = (1 - ratio) * no[0] + ratio * ni[0];
                                float ny = (1 - ratio) * no[1] + ratio * ni[1];
                                float nz = (1 - ratio) * no[2] + ratio * ni[2];
                                float norm = static_cast<float>(
                                        sqrt(nx * nx + ny * ny + nz * nz) +
                                        1e-5);
                                normal_ptr[0] = nx / norm;
                                normal_ptr[1] = ny / norm;
                                normal_ptr[2] = nz / norm;
                            }
                        }
                    }
                });
            });
#if defined(__CUDACC__)
    int total_count = count.Item<int>();
#else
    int total_count = (*count_ptr).load();
#endif

    utility::LogDebug("{} vertices extracted", total_count);
    valid_size = total_count;

#if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__)
    OPEN3D_CUDA_CHECK(cudaDeviceSynchronize());
#endif
}

// Marching-Cubes surface mesh extraction over the active voxel blocks.
// inv_indices maps a physical block index back to its position in
// `indices`, so neighbor blocks can address each other's mesh structure.
#if defined(__CUDACC__)
void ExtractSurfaceMeshCUDA
#else
void ExtractSurfaceMeshCPU
#endif
        (const core::Tensor& indices,
         const core::Tensor& inv_indices,
         const core::Tensor& nb_indices,
         const core::Tensor& nb_masks,
         const core::Tensor& block_keys,
         const core::Tensor& block_values,
         core::Tensor& vertices,
         core::Tensor& triangles,
         core::Tensor& normals,
         core::Tensor& colors,
         int64_t resolution,
         float voxel_size,
         float weight_threshold) {
    int64_t resolution3 = resolution * resolution * resolution;

    // Shape / transform indexers, no data involved
    NDArrayIndexer voxel_indexer({resolution, resolution, resolution});

    // Output
#if defined(__CUDACC__)
    core::CUDACachedMemoryManager::ReleaseCache();
#endif

    int n_blocks = static_cast<int>(indices.GetLength());
    // Voxel-wise mesh info. 4 channels correspond to:
    // 3 edges' corresponding vertex index + 1 table index.
    core::Tensor mesh_structure;
    try {
        mesh_structure = core::Tensor::Zeros(
                {n_blocks, resolution, resolution, resolution, 4},
                core::Dtype::Int32, block_keys.GetDevice());
    } catch (const std::runtime_error&) {
        utility::LogError(
                "[MeshExtractionKernel] Unable to allocate assistance mesh "
                "structure for Marching "
                "Cubes with {} active voxel blocks. Please consider using a "
                "larger voxel size (currently {}) for TSDF "
                "integration, or using tsdf_volume.cpu() to perform mesh "
                "extraction on CPU.",
                n_blocks, voxel_size);
    }

    // Real data indexer
    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);
    NDArrayIndexer mesh_structure_indexer(mesh_structure, 4);
    NDArrayIndexer nb_block_masks_indexer(nb_masks, 2);
    NDArrayIndexer nb_block_indices_indexer(nb_indices, 2);

    // Plain arrays that do not require indexers
    const int64_t* indices_ptr = indices.GetDataPtr<int64_t>();
    const int64_t* inv_indices_ptr = inv_indices.GetDataPtr<int64_t>();

    int64_t n = n_blocks * resolution3;

#if defined(__CUDACC__)
    core::kernel::CUDALauncher launcher;
#else
    core::kernel::CPULauncher launcher;
#endif

    // Pass 0: analyze mesh structure, set up one-on-one correspondences
    // from edges to vertices.
    DISPATCH_BYTESIZE_TO_VOXEL(
            voxel_block_buffer_indexer.ElementByteSize(), [&]() {
                launcher.LaunchGeneralKernel(n, [=] OPEN3D_DEVICE(
                                                        int64_t workload_idx) {
                    auto GetVoxelAt = [&] OPEN3D_DEVICE(
                                              int xo, int yo, int zo,
                                              int curr_block_idx) -> voxel_t* {
                        return DeviceGetVoxelAt<voxel_t>(
                                xo, yo, zo, curr_block_idx,
                                static_cast<int>(resolution),
                                nb_block_masks_indexer,
                                nb_block_indices_indexer,
                                voxel_block_buffer_indexer);
                    };

                    // Natural index (0, N) -> (block_idx, voxel_idx)
                    int64_t workload_block_idx = workload_idx / resolution3;
                    int64_t voxel_idx = workload_idx % resolution3;

                    // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                    int64_t xv, yv, zv;
                    voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);

                    // Check per-vertex sign in the cube to determine cube
                    // type
                    int table_idx = 0;
                    for (int i = 0; i < 8; ++i) {
                        voxel_t* voxel_ptr_i = GetVoxelAt(
                                static_cast<int>(xv) + vtx_shifts[i][0],
                                static_cast<int>(yv) + vtx_shifts[i][1],
                                static_cast<int>(zv) + vtx_shifts[i][2],
                                static_cast<int>(workload_block_idx));
                        if (voxel_ptr_i == nullptr) return;

                        float tsdf_i = voxel_ptr_i->GetTSDF();
                        float weight_i = voxel_ptr_i->GetWeight();
                        if (weight_i <=
                            weight_threshold)
                            return;

                        table_idx |= ((tsdf_i < 0) ? (1 << i) : 0);
                    }

                    int* mesh_struct_ptr =
                            mesh_structure_indexer.GetDataPtrFromCoord<int>(
                                    xv, yv, zv, workload_block_idx);
                    mesh_struct_ptr[3] = table_idx;

                    // All-inside or all-outside cubes generate no surface.
                    if (table_idx == 0 || table_idx == 255) return;

                    // Check per-edge sign in the cube to determine cube type
                    int edges_with_vertices = edge_table[table_idx];
                    for (int i = 0; i < 12; ++i) {
                        if (edges_with_vertices & (1 << i)) {
                            int64_t xv_i = xv + edge_shifts[i][0];
                            int64_t yv_i = yv + edge_shifts[i][1];
                            int64_t zv_i = zv + edge_shifts[i][2];
                            int edge_i = edge_shifts[i][3];

                            // The edge may belong to a neighbor block;
                            // resolve it through the 3x3x3 neighbor table.
                            int dxb = static_cast<int>(xv_i / resolution);
                            int dyb = static_cast<int>(yv_i / resolution);
                            int dzb = static_cast<int>(zv_i / resolution);

                            int nb_idx =
                                    (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9;

                            int64_t block_idx_i =
                                    *nb_block_indices_indexer
                                             .GetDataPtrFromCoord<int64_t>(
                                                     workload_block_idx,
                                                     nb_idx);
                            int* mesh_ptr_i =
                                    mesh_structure_indexer.GetDataPtrFromCoord<
                                            int>(xv_i - dxb * resolution,
                                                 yv_i - dyb * resolution,
                                                 zv_i - dzb * resolution,
                                                 inv_indices_ptr[block_idx_i]);

                            // Non-atomic write, but we are safe: every
                            // writer stores the same marker value.
                            mesh_ptr_i[edge_i] = -1;
                        }
                    }
                });
            });

    // Pass 1: determine valid number of vertices.
#if defined(__CUDACC__)
    core::Tensor vtx_count(std::vector<int>{0}, {}, core::Dtype::Int32,
                           block_values.GetDevice());
    int* vtx_count_ptr = vtx_count.GetDataPtr<int>();
#else
    std::atomic<int> vtx_count_atomic(0);
    std::atomic<int>* vtx_count_ptr = &vtx_count_atomic;
#endif

#if defined(__CUDACC__)
    core::kernel::CUDALauncher::LaunchGeneralKernel(
            n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
#else
    core::kernel::CPULauncher::LaunchGeneralKernel(
            n, [&](int64_t workload_idx) {
#endif
                // Natural index (0, N) -> (block_idx, voxel_idx)
                int64_t workload_block_idx = workload_idx / resolution3;
                int64_t voxel_idx = workload_idx % resolution3;

                // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                int64_t xv, yv, zv;
                voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);

                // Obtain voxel's mesh struct ptr
                int* mesh_struct_ptr =
                        mesh_structure_indexer.GetDataPtrFromCoord<int>(
                                xv, yv, zv, workload_block_idx);

                // Early quit -- no allocated vertex to compute
                if (mesh_struct_ptr[0] != -1 && mesh_struct_ptr[1] != -1 &&
                    mesh_struct_ptr[2] != -1) {
                    return;
                }

                // Enumerate 3 edges in the voxel: each -1 marker becomes
                // one mesh vertex.
                for (int e = 0; e < 3; ++e) {
                    int vertex_idx = mesh_struct_ptr[e];
                    if (vertex_idx != -1) continue;

                    OPEN3D_ATOMIC_ADD(vtx_count_ptr, 1);
                }
            });

    // Reset count_ptr so Pass 2 can reuse it for vertex index assignment.
#if defined(__CUDACC__)
    int total_vtx_count = vtx_count.Item<int>();
    vtx_count = core::Tensor(std::vector<int>{0}, {}, core::Dtype::Int32,
                             block_values.GetDevice());
    vtx_count_ptr = vtx_count.GetDataPtr<int>();
#else
    int total_vtx_count = (*vtx_count_ptr).load();
    (*vtx_count_ptr) = 0;
#endif

    utility::LogDebug("Total vertex count = {}", total_vtx_count);
    vertices = core::Tensor({total_vtx_count, 3}, core::Dtype::Float32,
                            block_values.GetDevice());
    normals = core::Tensor({total_vtx_count, 3}, core::Dtype::Float32,
                           block_values.GetDevice());

    NDArrayIndexer block_keys_indexer(block_keys, 1);
    NDArrayIndexer vertex_indexer(vertices, 1);
    NDArrayIndexer normal_indexer(normals, 1);

    // Pass 2: extract vertices.
    DISPATCH_BYTESIZE_TO_VOXEL(
            voxel_block_buffer_indexer.ElementByteSize(), [&]() {
                bool extract_color = false;
                NDArrayIndexer color_indexer;
                if (voxel_t::HasColor()) {
                    extract_color = true;
                    colors = core::Tensor({total_vtx_count, 3},
                                          core::Dtype::Float32,
                                          block_values.GetDevice());
                    color_indexer = NDArrayIndexer(colors, 1);
                }

                launcher.LaunchGeneralKernel(n, [=] OPEN3D_DEVICE(
                                                        int64_t workload_idx) {
                    auto GetVoxelAt = [&] OPEN3D_DEVICE(
                                              int xo, int yo, int zo,
                                              int curr_block_idx) -> voxel_t* {
                        return DeviceGetVoxelAt<voxel_t>(
                                xo, yo, zo, curr_block_idx,
                                static_cast<int>(resolution),
                                nb_block_masks_indexer,
                                nb_block_indices_indexer,
                                voxel_block_buffer_indexer);
                    };
                    auto GetNormalAt = [&] OPEN3D_DEVICE(int xo, int yo,
                                                         int zo,
                                                         int curr_block_idx,
                                                         float* n) {
                        return DeviceGetNormalAt<voxel_t>(
                                xo, yo, zo, curr_block_idx, n,
                                static_cast<int>(resolution), voxel_size,
                                nb_block_masks_indexer,
                                nb_block_indices_indexer,
                                voxel_block_buffer_indexer);
                    };

                    // Natural index (0, N) -> (block_idx, voxel_idx)
                    int64_t workload_block_idx = workload_idx / resolution3;
                    int64_t block_idx = indices_ptr[workload_block_idx];
                    int64_t voxel_idx = workload_idx % resolution3;

                    // block_idx -> (x_block, y_block, z_block)
                    int* block_key_ptr =
                            block_keys_indexer.GetDataPtrFromCoord<int>(
                                    block_idx);
                    int64_t xb = static_cast<int64_t>(block_key_ptr[0]);
                    int64_t yb = static_cast<int64_t>(block_key_ptr[1]);
                    int64_t zb = static_cast<int64_t>(block_key_ptr[2]);

                    // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                    int64_t xv, yv, zv;
                    voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);

                    // global coordinate (in voxels)
                    int64_t x = xb * resolution + xv;
                    int64_t y = yb * resolution + yv;
                    int64_t z = zb * resolution + zv;

                    // Obtain voxel's mesh struct ptr
                    int* mesh_struct_ptr =
                            mesh_structure_indexer.GetDataPtrFromCoord<int>(
                                    xv, yv, zv, workload_block_idx);

                    // Early quit -- no allocated vertex to compute
                    if (mesh_struct_ptr[0] != -1 && mesh_struct_ptr[1] != -1 &&
                        mesh_struct_ptr[2] != -1) {
                        return;
                    }

                    // Obtain voxel ptr
                    voxel_t* voxel_ptr =
                            voxel_block_buffer_indexer
                                    .GetDataPtrFromCoord<voxel_t>(xv, yv, zv,
                                                                  block_idx);
                    float tsdf_o = voxel_ptr->GetTSDF();

                    float no[3] = {0}, ne[3] = {0};
                    // Gradient (normal) at the voxel's own corner.
                    GetNormalAt(static_cast<int>(xv), static_cast<int>(yv),
                                static_cast<int>(zv),
                                static_cast<int>(workload_block_idx), no);

                    // Enumerate 3 edges in the voxel; each -1 marker gets a
                    // globally unique vertex index and an interpolated
                    // position/normal/color.
                    for (int e = 0; e < 3; ++e) {
                        int vertex_idx = mesh_struct_ptr[e];
                        if (vertex_idx != -1) continue;

                        voxel_t* voxel_ptr_e = GetVoxelAt(
                                static_cast<int>(xv) + (e == 0),
                                static_cast<int>(yv) + (e == 1),
                                static_cast<int>(zv) + (e == 2),
                                static_cast<int>(workload_block_idx));
                        float tsdf_e = voxel_ptr_e->GetTSDF();
                        // Linear zero-crossing position along the edge.
                        float ratio = (0 - tsdf_o) / (tsdf_e - tsdf_o);

                        int idx = OPEN3D_ATOMIC_ADD(vtx_count_ptr, 1);
                        mesh_struct_ptr[e] = idx;

                        float ratio_x = ratio * int(e == 0);
                        float ratio_y = ratio * int(e == 1);
                        float ratio_z = ratio * int(e == 2);

                        float* vertex_ptr =
                                vertex_indexer.GetDataPtrFromCoord<float>(idx);
                        vertex_ptr[0] = voxel_size * (x + ratio_x);
                        vertex_ptr[1] = voxel_size * (y + ratio_y);
                        vertex_ptr[2] = voxel_size * (z + ratio_z);

                        float* normal_ptr =
                                normal_indexer.GetDataPtrFromCoord<float>(idx);
                        GetNormalAt(static_cast<int>(xv) + (e == 0),
                                    static_cast<int>(yv) + (e == 1),
                                    static_cast<int>(zv) + (e == 2),
                                    static_cast<int>(workload_block_idx), ne);
                        // Interpolate endpoint gradients and normalize
                        // (+1e-5 avoids divide-by-zero).
                        float nx = (1 - ratio) * no[0] + ratio * ne[0];
                        float ny = (1 - ratio) * no[1] + ratio * ne[1];
                        float nz = (1 - ratio) * no[2] + ratio * ne[2];
                        float norm = static_cast<float>(
                                sqrt(nx * nx + ny * ny + nz * nz) + 1e-5);
                        normal_ptr[0] = nx / norm;
                        normal_ptr[1] = ny / norm;
                        normal_ptr[2] = nz / norm;

                        if (extract_color) {
                            float* color_ptr =
                                    color_indexer.GetDataPtrFromCoord<float>(
                                            idx);
                            float r_o = voxel_ptr->GetR();
                            float g_o = voxel_ptr->GetG();
                            float b_o = voxel_ptr->GetB();

                            float r_e = voxel_ptr_e->GetR();
                            float g_e = voxel_ptr_e->GetG();
                            float b_e = voxel_ptr_e->GetB();
                            // Interpolate color and normalize from
                            // [0, 255] to [0, 1].
                            color_ptr[0] =
                                    ((1 - ratio) * r_o + ratio * r_e) / 255.0f;
                            color_ptr[1] =
                                    ((1 - ratio) * g_o + ratio * g_e) / 255.0f;
                            color_ptr[2] =
                                    ((1 - ratio) * b_o + ratio * b_e) / 255.0f;
                        }
                    }
                });
            });

    // Pass 3: connect vertices and form triangles.
#if defined(__CUDACC__)
    core::Tensor triangle_count(std::vector<int>{0}, {}, core::Dtype::Int32,
                                block_values.GetDevice());
    int* tri_count_ptr = triangle_count.GetDataPtr<int>();
#else
    std::atomic<int> tri_count_atomic(0);
    std::atomic<int>* tri_count_ptr = &tri_count_atomic;
#endif

    // Worst-case allocation; sliced down to the real count afterwards.
    triangles = core::Tensor({total_vtx_count * 3, 3}, core::Dtype::Int64,
                             block_values.GetDevice());
    NDArrayIndexer triangle_indexer(triangles, 1);

#if defined(__CUDACC__)
    core::kernel::CUDALauncher::LaunchGeneralKernel(
            n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
#else
    core::kernel::CPULauncher::LaunchGeneralKernel(
            n, [&](int64_t workload_idx) {
#endif
                // Natural index (0, N) -> (block_idx, voxel_idx)
                int64_t workload_block_idx = workload_idx / resolution3;
                int64_t voxel_idx = workload_idx % resolution3;

                // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                int64_t xv, yv, zv;
                voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);

                // Obtain voxel's mesh struct ptr
                int* mesh_struct_ptr =
                        mesh_structure_indexer.GetDataPtrFromCoord<int>(
                                xv, yv, zv, workload_block_idx);

                int table_idx = mesh_struct_ptr[3];
                if (tri_count[table_idx] == 0) return;

                // tri_table rows are -1 terminated, up to 15 entries
                // (5 triangles).
                for (size_t tri = 0; tri < 16; tri += 3) {
                    if (tri_table[table_idx][tri] == -1) return;

                    int tri_idx = OPEN3D_ATOMIC_ADD(tri_count_ptr, 1);

                    for (size_t vertex = 0; vertex < 3; ++vertex) {
                        int edge = tri_table[table_idx][tri + vertex];

                        int64_t xv_i = xv + edge_shifts[edge][0];
                        int64_t yv_i = yv + edge_shifts[edge][1];
                        int64_t zv_i = zv + edge_shifts[edge][2];
                        int64_t edge_i = edge_shifts[edge][3];

                        // The owning voxel of this edge may live in a
                        // neighbor block.
                        int dxb = static_cast<int>(xv_i / resolution);
                        int dyb = static_cast<int>(yv_i / resolution);
                        int dzb = static_cast<int>(zv_i / resolution);

                        int nb_idx = (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9;

                        int64_t block_idx_i =
                                *nb_block_indices_indexer
                                         .GetDataPtrFromCoord<int64_t>(
                                                 workload_block_idx, nb_idx);
                        int* mesh_struct_ptr_i =
                                mesh_structure_indexer.GetDataPtrFromCoord<
                                        int>(xv_i - dxb * resolution,
                                             yv_i - dyb * resolution,
                                             zv_i - dzb * resolution,
                                             inv_indices_ptr[block_idx_i]);

                        int64_t* triangle_ptr =
                                triangle_indexer.GetDataPtrFromCoord<int64_t>(
                                        tri_idx);
                        // Reverse winding order.
                        triangle_ptr[2 - vertex] = mesh_struct_ptr_i[edge_i];
                    }
                }
            });

#if defined(__CUDACC__)
    int total_tri_count = triangle_count.Item<int>();
#else
    int total_tri_count = (*tri_count_ptr).load();
#endif

    utility::LogDebug("Total triangle count = {}", total_tri_count);
    triangles = triangles.Slice(0, 0, total_tri_count);
}

// Estimates, per downsampled pixel, the (min, max) depth range covered by
// the active voxel blocks; used to bound the ray-marching interval in
// RayCast.
#if defined(__CUDACC__)
void EstimateRangeCUDA
#else
void EstimateRangeCPU
#endif
        (const core::Tensor& block_keys,
         core::Tensor& range_minmax_map,
         const core::Tensor& intrinsics,
         const core::Tensor& extrinsics,
         int h,
         int w,
         int down_factor,
         int64_t block_resolution,
         float voxel_size,
         float depth_min,
         float depth_max) {
    // TODO(wei): reserve it in a reusable buffer

    // Every 2 channels: (min, max)
    int h_down = h / down_factor;
    int w_down = w / down_factor;
    range_minmax_map = core::Tensor({h_down, w_down, 2}, core::Dtype::Float32,
                                    block_keys.GetDevice());
    NDArrayIndexer range_map_indexer(range_minmax_map, 2);

    // Every 6 channels: (v_min, u_min, v_max, u_max, z_min, z_max)
    const int fragment_size = 16;
    const int frag_buffer_size = 65535;

    // TODO(wei): explicit buffer
    core::Tensor fragment_buffer = core::Tensor(
            {frag_buffer_size, 6}, core::Dtype::Float32,
            block_keys.GetDevice());

    NDArrayIndexer frag_buffer_indexer(fragment_buffer, 1);
    NDArrayIndexer block_keys_indexer(block_keys, 1);
    TransformIndexer w2c_transform_indexer(intrinsics, extrinsics);

#if defined(__CUDACC__)
    core::Tensor count(std::vector<int>{0}, {1}, core::Dtype::Int32,
                       block_keys.GetDevice());
    int* count_ptr = count.GetDataPtr<int>();
#else
    std::atomic<int> count_atomic(0);
    std::atomic<int>* count_ptr = &count_atomic;
#endif

#if defined(__CUDACC__)
    core::kernel::CUDALauncher launcher;
#else
    core::kernel::CPULauncher launcher;
    using std::ceil;
    using std::floor;
using std::max; using std::min; #endif // Pass 0: iterate over blocks, fill-in an rendering fragment array launcher.LaunchGeneralKernel( block_keys.GetLength(), [=] OPEN3D_DEVICE(int64_t workload_idx) { int* key = block_keys_indexer.GetDataPtrFromCoord<int>( workload_idx); int u_min = w_down - 1, v_min = h_down - 1, u_max = 0, v_max = 0; float z_min = depth_max, z_max = depth_min; float xc, yc, zc, u, v; // Project 8 corners to low-res image and form a rectangle for (int i = 0; i < 8; ++i) { float xw = (key[0] + ((i & 1) > 0)) * block_resolution * voxel_size; float yw = (key[1] + ((i & 2) > 0)) * block_resolution * voxel_size; float zw = (key[2] + ((i & 4) > 0)) * block_resolution * voxel_size; w2c_transform_indexer.RigidTransform(xw, yw, zw, &xc, &yc, &zc); if (zc <= 0) continue; // Project to the down sampled image buffer w2c_transform_indexer.Project(xc, yc, zc, &u, &v); u /= down_factor; v /= down_factor; v_min = min(static_cast<int>(floor(v)), v_min); v_max = max(static_cast<int>(ceil(v)), v_max); u_min = min(static_cast<int>(floor(u)), u_min); u_max = max(static_cast<int>(ceil(u)), u_max); z_min = min(z_min, zc); z_max = max(z_max, zc); } v_min = max(0, v_min); v_max = min(h_down - 1, v_max); u_min = max(0, u_min); u_max = min(w_down - 1, u_max); if (v_min >= v_max || u_min >= u_max || z_min >= z_max) return; // Divide the rectangle into small 16x16 fragments int frag_v_count = ceil(float(v_max - v_min + 1) / float(fragment_size)); int frag_u_count = ceil(float(u_max - u_min + 1) / float(fragment_size)); int frag_count = frag_v_count * frag_u_count; int frag_count_start = OPEN3D_ATOMIC_ADD(count_ptr, 1); int frag_count_end = frag_count_start + frag_count; if (frag_count_end >= frag_buffer_size) { printf("Fragment count exceeding buffer size, abort!\n"); } int offset = 0; for (int frag_v = 0; frag_v < frag_v_count; ++frag_v) { for (int frag_u = 0; frag_u < frag_u_count; ++frag_u, ++offset) { float* frag_ptr = frag_buffer_indexer.GetDataPtrFromCoord<float>( 
frag_count_start + offset); // zmin, zmax frag_ptr[0] = z_min; frag_ptr[1] = z_max; // vmin, umin frag_ptr[2] = v_min + frag_v * fragment_size; frag_ptr[3] = u_min + frag_u * fragment_size; // vmax, umax frag_ptr[4] = min(frag_ptr[2] + fragment_size - 1, static_cast<float>(v_max)); frag_ptr[5] = min(frag_ptr[3] + fragment_size - 1, static_cast<float>(u_max)); } } }); #if defined(__CUDACC__) int frag_count = count[0].Item<int>(); #else int frag_count = (*count_ptr).load(); #endif // Pass 0.5: Fill in range map to prepare for atomic min/max launcher.LaunchGeneralKernel( h_down * w_down, [=] OPEN3D_DEVICE(int64_t workload_idx) { int v = workload_idx / w_down; int u = workload_idx % w_down; float* range_ptr = range_map_indexer.GetDataPtrFromCoord<float>(u, v); range_ptr[0] = depth_max; range_ptr[1] = depth_min; }); // Pass 1: iterate over rendering fragment array, fill-in range launcher.LaunchGeneralKernel( frag_count * fragment_size * fragment_size, [=] OPEN3D_DEVICE(int64_t workload_idx) { int frag_idx = workload_idx / (fragment_size * fragment_size); int local_idx = workload_idx % (fragment_size * fragment_size); int dv = local_idx / fragment_size; int du = local_idx % fragment_size; float* frag_ptr = frag_buffer_indexer.GetDataPtrFromCoord<float>( frag_idx); int v_min = static_cast<int>(frag_ptr[2]); int u_min = static_cast<int>(frag_ptr[3]); int v_max = static_cast<int>(frag_ptr[4]); int u_max = static_cast<int>(frag_ptr[5]); int v = v_min + dv; int u = u_min + du; if (v > v_max || u > u_max) return; float z_min = frag_ptr[0]; float z_max = frag_ptr[1]; float* range_ptr = range_map_indexer.GetDataPtrFromCoord<float>(u, v); #ifdef __CUDACC__ atomicMinf(&(range_ptr[0]), z_min); atomicMaxf(&(range_ptr[1]), z_max); #else #pragma omp critical { range_ptr[0] = min(z_min, range_ptr[0]); range_ptr[1] = max(z_max, range_ptr[1]); } #endif }); #if defined(__CUDACC__) OPEN3D_CUDA_CHECK(cudaDeviceSynchronize()); #endif } struct BlockCache { int x; int y; int z; int block_idx; 
    // Returns the cached block index when (xin, yin, zin) matches the
    // cached coordinate, -1 otherwise.
    inline int OPEN3D_DEVICE Check(int xin, int yin, int zin) {
        return (xin == x && yin == y && zin == z) ? block_idx : -1;
    }

    inline void OPEN3D_DEVICE Update(int xin,
                                     int yin,
                                     int zin,
                                     int block_idx_in) {
        x = xin;
        y = yin;
        z = zin;
        block_idx = block_idx_in;
    }
};

// Ray casting: marches one ray per pixel through the TSDF volume, bounded
// by the precomputed range map, and writes the requested outputs (depth,
// vertex, color, normal maps — each enabled when non-empty).
#if defined(__CUDACC__)
void RayCastCUDA
#else
void RayCastCPU
#endif
        (std::shared_ptr<core::DeviceHashmap>& hashmap,
         const core::Tensor& block_values,
         const core::Tensor& range_map,
         core::Tensor& vertex_map,
         core::Tensor& depth_map,
         core::Tensor& color_map,
         core::Tensor& normal_map,
         const core::Tensor& intrinsics,
         const core::Tensor& extrinsics,
         int h,
         int w,
         int64_t block_resolution,
         float voxel_size,
         float sdf_trunc,
         float depth_scale,
         float depth_min,
         float depth_max,
         float weight_threshold) {
    using Key = core::Block<int, 3>;
    using Hash = core::BlockHash<int, 3>;

    // Grab the backend-specific hashmap implementation so device code can
    // query it directly.
#if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__)
    auto cuda_hashmap =
            std::dynamic_pointer_cast<core::StdGPUHashmap<Key, Hash>>(hashmap);
    if (cuda_hashmap == nullptr) {
        utility::LogError(
                "Unsupported backend: CUDA raycasting only supports STDGPU.");
    }
    auto hashmap_impl = cuda_hashmap->GetImpl();
#else
    auto cpu_hashmap =
            std::dynamic_pointer_cast<core::TBBHashmap<Key, Hash>>(hashmap);
    auto hashmap_impl = *cpu_hashmap->GetImpl();
#endif

    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);
    NDArrayIndexer range_map_indexer(range_map, 2);

    NDArrayIndexer vertex_map_indexer;
    NDArrayIndexer depth_map_indexer;
    NDArrayIndexer color_map_indexer;
    NDArrayIndexer normal_map_indexer;

    // An output is enabled iff the caller passed a non-empty tensor.
    bool enable_vertex = (vertex_map.GetLength() != 0);
    bool enable_depth = (depth_map.GetLength() != 0);
    bool enable_color = (color_map.GetLength() != 0);
    bool enable_normal = (normal_map.GetLength() != 0);

    if (!enable_vertex && !enable_depth && !enable_color && !enable_normal) {
        utility::LogWarning("No output specified for ray casting, exit.");
        return;
    }

    if (enable_vertex) {
        vertex_map_indexer = NDArrayIndexer(vertex_map, 2);
    }
    if (enable_depth) {
        depth_map_indexer = NDArrayIndexer(depth_map, 2);
    }
    if (enable_color) {
        color_map_indexer = NDArrayIndexer(color_map, 2);
    }
    if (enable_normal) {
        normal_map_indexer = NDArrayIndexer(normal_map, 2);
    }

    TransformIndexer c2w_transform_indexer(
            intrinsics, t::geometry::InverseTransformation(extrinsics));
    TransformIndexer w2c_transform_indexer(intrinsics, extrinsics);

    int64_t rows = h;
    int64_t cols = w;

    float block_size = voxel_size * block_resolution;
#if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__)
    core::kernel::CUDALauncher launcher;
#else
    core::kernel::CPULauncher launcher;
    using std::max;
#endif

    DISPATCH_BYTESIZE_TO_VOXEL(voxel_block_buffer_indexer.ElementByteSize(),
                               [&]() {
        launcher.LaunchGeneralKernel(
                rows * cols, [=] OPEN3D_DEVICE(int64_t workload_idx) {
                    // Fetch the voxel at block (x_b, y_b, z_b) + local voxel
                    // offset (x_v, y_v, z_v); the local offset may step into
                    // a neighbor block, resolved via the hashmap + cache.
                    auto GetVoxelAtP = [&] OPEN3D_DEVICE(
                                               int x_b, int y_b, int z_b,
                                               int x_v, int y_v, int z_v,
                                               core::addr_t block_addr,
                                               BlockCache& cache) -> voxel_t* {
                        int x_vn = (x_v + block_resolution) % block_resolution;
                        int y_vn = (y_v + block_resolution) % block_resolution;
                        int z_vn = (z_v + block_resolution) % block_resolution;

                        // Sign of the carry indicates which neighbor block
                        // (if any) the offset crossed into.
                        int dx_b = Sign(x_v - x_vn);
                        int dy_b = Sign(y_v - y_vn);
                        int dz_b = Sign(z_v - z_vn);

                        if (dx_b == 0 && dy_b == 0 && dz_b == 0) {
                            return voxel_block_buffer_indexer
                                    .GetDataPtrFromCoord<voxel_t>(x_v, y_v,
                                                                  z_v,
                                                                  block_addr);
                        } else {
                            Key key;
                            key(0) = x_b + dx_b;
                            key(1) = y_b + dy_b;
                            key(2) = z_b + dz_b;

                            int block_addr =
                                    cache.Check(key(0), key(1), key(2));
                            if (block_addr < 0) {
                                auto iter = hashmap_impl.find(key);
                                if (iter == hashmap_impl.end()) return nullptr;
                                block_addr = iter->second;
                                cache.Update(key(0), key(1), key(2),
                                             block_addr);
                            }

                            return voxel_block_buffer_indexer
                                    .GetDataPtrFromCoord<voxel_t>(
                                            x_vn, y_vn, z_vn, block_addr);
                        }
                    };

                    // Fetch the voxel at ray parameter t from origin
                    // (x_o, y_o, z_o) along direction (x_d, y_d, z_d);
                    // nullptr when the containing block is not allocated.
                    auto GetVoxelAtT = [&] OPEN3D_DEVICE(
                                               float x_o, float y_o, float z_o,
                                               float x_d, float y_d, float z_d,
                                               float t,
                                               BlockCache& cache) -> voxel_t* {
                        float x_g = x_o + t * x_d;
                        float y_g = y_o + t * y_d;
                        float z_g = z_o + t * z_d;

                        // Block coordinate and look up
                        int x_b = static_cast<int>(floor(x_g / block_size));
                        int y_b = static_cast<int>(floor(y_g / block_size));
                        int z_b = static_cast<int>(floor(z_g / block_size));

                        Key key;
                        key(0) = x_b;
                        key(1) = y_b;
                        key(2) = z_b;

                        int block_addr = cache.Check(x_b, y_b, z_b);
                        if (block_addr < 0) {
                            auto iter = hashmap_impl.find(key);
                            if (iter == hashmap_impl.end()) return nullptr;
                            block_addr = iter->second;
                            cache.Update(x_b, y_b, z_b, block_addr);
                        }

                        // Voxel coordinate and look up
                        int x_v = int((x_g - x_b * block_size) / voxel_size);
                        int y_v = int((y_g - y_b * block_size) / voxel_size);
                        int z_v = int((z_g - z_b * block_size) / voxel_size);

                        return voxel_block_buffer_indexer
                                .GetDataPtrFromCoord<voxel_t>(x_v, y_v, z_v,
                                                              block_addr);
                    };

                    int64_t y = workload_idx / cols;
                    int64_t x = workload_idx % cols;

                    // Zero-initialize the enabled outputs for this pixel.
                    float *depth_ptr = nullptr, *vertex_ptr = nullptr,
                          *normal_ptr = nullptr, *color_ptr = nullptr;
                    if (enable_depth) {
                        depth_ptr =
                                depth_map_indexer.GetDataPtrFromCoord<float>(
                                        x, y);
                        *depth_ptr = 0;
                    }
                    if (enable_vertex) {
                        vertex_ptr =
                                vertex_map_indexer.GetDataPtrFromCoord<float>(
                                        x, y);
                        vertex_ptr[0] = 0;
                        vertex_ptr[1] = 0;
                        vertex_ptr[2] = 0;
                    }
                    if (enable_color) {
                        color_ptr =
                                color_map_indexer.GetDataPtrFromCoord<float>(
                                        x, y);
                        color_ptr[0] = 0;
                        color_ptr[1] = 0;
                        color_ptr[2] = 0;
                    }
                    if (enable_normal) {
                        normal_ptr =
                                normal_map_indexer.GetDataPtrFromCoord<float>(
                                        x, y);
                        normal_ptr[0] = 0;
                        normal_ptr[1] = 0;
                        normal_ptr[2] = 0;
                    }

                    // March only within the per-pixel depth interval from
                    // the range map. NOTE(review): the /8 assumes the range
                    // map was built with down_factor 8 — confirm against
                    // the EstimateRange caller.
                    const float* range =
                            range_map_indexer.GetDataPtrFromCoord<float>(
                                    x / 8, y / 8);
                    float t = range[0];
                    const float t_max = range[1];
                    if (t >= t_max) return;

                    // Coordinates in camera and global
                    float x_c = 0, y_c = 0, z_c = 0;
                    float x_g = 0, y_g = 0, z_g = 0;
                    float x_o = 0, y_o = 0, z_o = 0;

                    // Iterative ray intersection check
                    float t_prev = t;
                    float tsdf_prev = -1.0f;
                    float tsdf = 1.0;
                    float w = 0.0;

                    // Camera origin
                    c2w_transform_indexer.RigidTransform(0, 0, 0, &x_o, &y_o,
                                                         &z_o);

                    // Direction
                    c2w_transform_indexer.Unproject(static_cast<float>(x),
                                                    static_cast<float>(y),
                                                    1.0f, &x_c,
                                                    &y_c, &z_c);
                    c2w_transform_indexer.RigidTransform(x_c, y_c, z_c, &x_g,
                                                         &y_g, &z_g);
                    float x_d = (x_g - x_o);
                    float y_d = (y_g - y_o);
                    float z_d = (z_g - z_o);

                    BlockCache cache{0, 0, 0, -1};
                    bool surface_found = false;
                    // March the ray: skip a whole block when unallocated,
                    // otherwise step by the truncated SDF (at least one
                    // voxel); stop at the first +/- TSDF zero crossing with
                    // sufficient weight.
                    while (t < t_max) {
                        voxel_t* voxel_ptr = GetVoxelAtT(x_o, y_o, z_o, x_d,
                                                         y_d, z_d, t, cache);
                        if (!voxel_ptr) {
                            t_prev = t;
                            t += block_size;
                        } else {
                            tsdf_prev = tsdf;
                            tsdf = voxel_ptr->GetTSDF();
                            w = voxel_ptr->GetWeight();
                            if (tsdf_prev > 0 && w >= weight_threshold &&
                                tsdf <= 0) {
                                surface_found = true;
                                break;
                            }
                            t_prev = t;
                            float delta = tsdf * sdf_trunc;
                            t += delta < voxel_size ? voxel_size : delta;
                        }
                    }

                    if (surface_found) {
                        // Linear interpolation of the exact zero crossing
                        // between the last two samples.
                        float t_intersect = (t * tsdf_prev - t_prev * tsdf) /
                                            (tsdf_prev - tsdf);
                        x_g = x_o + t_intersect * x_d;
                        y_g = y_o + t_intersect * y_d;
                        z_g = z_o + t_intersect * z_d;

                        // Trivial vertex assignment
                        if (enable_depth) {
                            *depth_ptr = t_intersect * depth_scale;
                        }
                        if (enable_vertex) {
                            w2c_transform_indexer.RigidTransform(
                                    x_g, y_g, z_g, vertex_ptr + 0,
                                    vertex_ptr + 1, vertex_ptr + 2);
                        }

                        // Trilinear interpolation
                        // TODO(wei): simplify the flow by splitting the
                        // functions given what is enabled
                        if (enable_color || enable_normal) {
                            int x_b = static_cast<int>(floor(x_g / block_size));
                            int y_b = static_cast<int>(floor(y_g / block_size));
                            int z_b = static_cast<int>(floor(z_g / block_size));
                            float x_v = (x_g - float(x_b) * block_size) /
                                        voxel_size;
                            float y_v = (y_g - float(y_b) * block_size) /
                                        voxel_size;
                            float z_v = (z_g - float(z_b) * block_size) /
                                        voxel_size;

                            Key key;
                            key(0) = x_b;
                            key(1) = y_b;
                            key(2) = z_b;

                            int block_addr = cache.Check(x_b, y_b, z_b);
                            if (block_addr < 0) {
                                auto iter = hashmap_impl.find(key);
                                if (iter == hashmap_impl.end()) return;
                                block_addr = iter->second;
                                cache.Update(x_b, y_b, z_b, block_addr);
                            }

                            int x_v_floor = static_cast<int>(floor(x_v));
                            int y_v_floor = static_cast<int>(floor(y_v));
                            int z_v_floor = static_cast<int>(floor(z_v));

                            float ratio_x = x_v - float(x_v_floor);
                            float ratio_y = y_v - float(y_v_floor);
                            float ratio_z = z_v - float(z_v_floor);

                            float sum_weight_color = 0.0;
                            float sum_weight_normal = 0.0;
                            // Visit the 8 corners of the containing cell;
                            // ratio is the trilinear weight of each corner.
                            for (int k = 0; k < 8; ++k) {
                                int dx_v = (k & 1) > 0 ? 1 : 0;
                                int dy_v = (k & 2) > 0 ? 1 : 0;
                                int dz_v = (k & 4) > 0 ? 1 : 0;
                                float ratio = (dx_v * (ratio_x) +
                                               (1 - dx_v) * (1 - ratio_x)) *
                                              (dy_v * (ratio_y) +
                                               (1 - dy_v) * (1 - ratio_y)) *
                                              (dz_v * (ratio_z) +
                                               (1 - dz_v) * (1 - ratio_z));

                                voxel_t* voxel_ptr_k = GetVoxelAtP(
                                        x_b, y_b, z_b, x_v_floor + dx_v,
                                        y_v_floor + dy_v, z_v_floor + dz_v,
                                        block_addr, cache);

                                if (enable_color && voxel_ptr_k &&
                                    voxel_ptr_k->GetWeight() > 0) {
                                    sum_weight_color += ratio;
                                    color_ptr[0] +=
                                            ratio * voxel_ptr_k->GetR();
                                    color_ptr[1] +=
                                            ratio * voxel_ptr_k->GetG();
                                    color_ptr[2] +=
                                            ratio * voxel_ptr_k->GetB();
                                }

                                if (enable_normal) {
                                    // Central difference of TSDF per axis,
                                    // accumulated with trilinear weights.
                                    for (int dim = 0; dim < 3; ++dim) {
                                        voxel_t* voxel_ptr_k_plus = GetVoxelAtP(
                                                x_b, y_b, z_b,
                                                x_v_floor + dx_v + (dim == 0),
                                                y_v_floor + dy_v + (dim == 1),
                                                z_v_floor + dz_v + (dim == 2),
                                                block_addr, cache);
                                        voxel_t* voxel_ptr_k_minus =
                                                GetVoxelAtP(x_b, y_b, z_b,
                                                            x_v_floor + dx_v -
                                                                    (dim == 0),
                                                            y_v_floor + dy_v -
                                                                    (dim == 1),
                                                            z_v_floor + dz_v -
                                                                    (dim == 2),
                                                            block_addr, cache);

                                        bool valid = false;
                                        if (voxel_ptr_k_plus &&
                                            voxel_ptr_k_plus->GetWeight() >
                                                    0) {
                                            normal_ptr[dim] +=
                                                    ratio *
                                                    voxel_ptr_k_plus
                                                            ->GetTSDF() /
                                                    (2 * voxel_size);
                                            valid = true;
                                        }

                                        if (voxel_ptr_k_minus &&
                                            voxel_ptr_k_minus->GetWeight() >
                                                    0) {
                                            normal_ptr[dim] -=
                                                    ratio *
                                                    voxel_ptr_k_minus
                                                            ->GetTSDF() /
                                                    (2 * voxel_size);
                                            valid = true;
                                        }
                                        sum_weight_normal += valid ? ratio : 0;
                                    }
                                }  // if (enable_normal)
                            }      // loop over 8 neighbors

                            if (enable_color && sum_weight_color > 0) {
                                // Also rescales colors from [0, 255] to
                                // [0, 1].
                                sum_weight_color *= 255.0;
                                color_ptr[0] /= sum_weight_color;
                                color_ptr[1] /= sum_weight_color;
                                color_ptr[2] /= sum_weight_color;
                            }
                            if (enable_normal && sum_weight_normal > 0) {
                                normal_ptr[0] /= sum_weight_normal;
                                normal_ptr[1] /= sum_weight_normal;
                                normal_ptr[2] /= sum_weight_normal;
                                float norm = sqrt(normal_ptr[0] *
                                                          normal_ptr[0] +
                                                  normal_ptr[1] *
                                                          normal_ptr[1] +
                                                  normal_ptr[2] *
                                                          normal_ptr[2]);
                                // Rotate the normal into camera space.
                                w2c_transform_indexer.Rotate(
                                        normal_ptr[0] / norm,
                                        normal_ptr[1] / norm,
                                        normal_ptr[2] / norm, normal_ptr + 0,
                                        normal_ptr + 1, normal_ptr + 2);
                            }
                        }  // if (color or normal)
                    }      // if (tsdf < 0)
                });
    });

#if defined(__CUDACC__)
    OPEN3D_CUDA_CHECK(cudaDeviceSynchronize());
#endif
}

}  // namespace tsdf
}  // namespace kernel
}  // namespace geometry
}  // namespace t
}  // namespace open3d
// ===== File: hip_runtime.h (hipCPU) =====
/*
 * This file is part of hipCPU, a HIP implementation based on OpenMP
 *
 * Copyright (c) 2018,2019 Aksel Alpay
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef HIPCPU_RUNTIME_H
#define HIPCPU_RUNTIME_H

// Marker macro: lets client code detect that it is being compiled against the
// hipCPU (OpenMP host) implementation rather than a real GPU HIP runtime.
#define __HIPCPU__

// The GPU execution-space qualifiers are meaningless on the host; define them
// away so HIP device code compiles unchanged.
#ifndef __global__
#define __global__
#endif

#ifndef __device__
#define __device__
#endif

#ifndef __host__
#define __host__
#endif

// __constant__ memory is emulated as ordinary const data.
#ifndef __constant__
#define __constant__ const
#endif

// NOTE(review): block-shared memory is emulated with a function-local static;
// presumably each block executes its work-items within one thread/team so the
// static is not raced on — confirm against detail/runtime.hpp.
#ifndef __shared__
#define __shared__ static
#endif

#include <cstddef>
#include <climits>
#include <cstring>
#include <limits>
#include <memory>
#include <cmath>
#include <stdexcept>

#include "detail/runtime.hpp"

using hipcpu::dim3;

// HIP's kernel-name wrapper is a no-op here.
#define HIP_KERNEL_NAME(...) __VA_ARGS__

typedef int hipLaunchParm;

// Shorthand for the global runtime singleton used throughout this header.
#define _hipcpu_runtime (hipcpu::runtime::get())

// Use a macro instead of a function with variadic template arguments
// to avoid different properties of kernel template argument deduction
// based on kernel arguments compared to AMDs implementation
#define hipLaunchKernelGGL(f, grid, block, shared_mem, stream, ...) \
  _hipcpu_runtime.submit_kernel(grid, block, shared_mem, stream, \
  [=](){ \
    f(__VA_ARGS__); \
  })

// Legacy launch API: forwards to hipLaunchKernelGGL with a dummy
// hipLaunchParm argument (the leading 0).
#define hipLaunchKernel(f, grid, block, shared_mem, stream, ...) \
  hipLaunchKernelGGL(f, grid, block, shared_mem, stream, 0, __VA_ARGS__)

// Enqueues a plain host task (no grid/block decomposition) on a stream.
#define hipLaunchTask(f, stream, ...) \
  _hipcpu_runtime.submit_operation([=](){\
    f(__VA_ARGS__); \
  }, stream)

// Runs a kernel without intra-kernel parallelism, using caller-provided
// scratch memory.
#define hipLaunchSequentialKernel(f, stream, scratch_mem, ...) \
  _hipcpu_runtime.submit_unparallelized_kernel(scratch_mem, stream, \
  [=](){ \
    f(__VA_ARGS__); \
  })

// TODO
#define hipLaunchKernelNoBarrier(f, grid, block, stream, ...)

// Pointer to the dynamically-sized shared memory block of the current kernel.
#define HIP_DYNAMIC_SHARED_MEMORY _hipcpu_runtime.dev().get_dynamic_shared_memory()

// TODO This dev() may be different if changed during kernel execution?
// This is not a problem at the moment since we only treat the case of
// one effective host device for now.
#define hipThreadIdx_x (_hipcpu_runtime.dev().get_block().get_thread_id().x) #define hipThreadIdx_y (_hipcpu_runtime.dev().get_block().get_thread_id().y) #define hipThreadIdx_z (_hipcpu_runtime.dev().get_block().get_thread_id().z) #define hipBlockIdx_x (_hipcpu_runtime.dev().get_grid().get_block_id().x) #define hipBlockIdx_y (_hipcpu_runtime.dev().get_grid().get_block_id().y) #define hipBlockIdx_z (_hipcpu_runtime.dev().get_grid().get_block_id().z) #define hipBlockDim_x (_hipcpu_runtime.dev().get_block().get_block_dim().x) #define hipBlockDim_y (_hipcpu_runtime.dev().get_block().get_block_dim().y) #define hipBlockDim_z (_hipcpu_runtime.dev().get_block().get_block_dim().z) #define hipGridDim_x (_hipcpu_runtime.dev().get_grid().get_grid_dim().x) #define hipGridDim_y (_hipcpu_runtime.dev().get_grid().get_grid_dim().y) #define hipGridDim_z (_hipcpu_runtime.dev().get_grid().get_grid_dim().z) #define HIP_SYMBOL(X) X typedef enum hipMemcpyKind { hipMemcpyHostToHost, hipMemcpyHostToDevice, hipMemcpyDeviceToHost, hipMemcpyDeviceToDevice, hipMemcpyDefault } hipMemcpyKind; /* Textures are unimplemented // hipTextureAddressMode #define hipTextureAddressMode 0 #define hipAddressModeWrap 0 #define hipAddressModeClamp 0 #define hipAddressModeMirror 0 #define hipAddressModeBorder 0 // hipTextureFilterMode #define hipTextureFilterMode 0 #define hipFilterModePoint 0 #define hipFilterModeLinear 0 // hipTextureReadMode enum hipTextureReadMode {}; #define hipReadModeElementType 0 #define hipReadModeNormalizedFloat 0 template<class T, int dim, hipTextureReadMode readMode> struct texture {}; typedef enum hipChannelFormatKind { hipChannelFormatKindSigned = 0, hipChannelFormatKindUnsigned = 1, hipChannelFormatKindFloat = 2, hipChannelFormatKindNone = 3 } hipChannelFormatKind; #define hipSurfaceBoundaryMode 0 #define hipBoundaryModeZero 0 #define hipBoundaryModeTrap 0 #define hipBoundaryModeClamp 0 // hipResourceType #define hipResourceType 0 #define hipResourceTypeArray 0 #define 
hipResourceTypeMipmappedArray 0 #define hipResourceTypeLinear 0 #define hipResourceTypePitch2D 0 */ #define hipEventDefault hipEvent_t() #define hipEventBlockingSync 0 #define hipEventDisableTiming 0 #define hipEventInterprocess 0 #define hipEventReleaseToDevice 0 #define hipEventReleaseToSystem 0 #define hipHostMallocDefault 0x0 #define hipHostMallocPortable 0x1 #define hipHostMallocMapped 0x2 #define hipHostMallocWriteCombined 0x4 #define hipHostMallocCoherent 0x40000000 #define hipHostMallocNonCoherent 0x80000000 #define hipHostRegisterPortable 0 #define hipHostRegisterMapped 0 typedef int hipEvent_t; typedef int hipStream_t; typedef int hipIpcEventHandle_t; typedef int hipIpcMemHandle_t; typedef int hipLimit_t; typedef int hipFuncCache_t; typedef int hipCtx_t; typedef int hipSharedMemConfig; typedef int hipFuncCache; typedef int hipJitOption; typedef int hipDevice_t; typedef int hipModule_t; typedef int hipFunction_t; typedef void* hipDeviceptr_t; typedef int hipArray; typedef int* hipArray_const_t; typedef int hipFuncAttributes; typedef int hipCtx_t; typedef int hipTextureObject_t; typedef int hipSurfaceObject_t; typedef int hipResourceDesc; typedef int hipTextureDesc; typedef int hipResourceViewDesc; typedef int textureReference; enum hipError_t { hipSuccess, hipErrorInvalidContext, hipErrorInvalidKernelFile, hipErrorMemoryAllocation, hipErrorInitializationError, hipErrorLaunchFailure, hipErrorLaunchOutOfResources, hipErrorInvalidDevice, hipErrorInvalidValue, hipErrorInvalidDevicePointer, hipErrorInvalidMemcpyDirection, hipErrorUnknown, hipErrorInvalidResourceHandle, hipErrorNotReady, hipErrorNoDevice, hipErrorPeerAccessAlreadyEnabled, hipErrorPeerAccessNotEnabled, hipErrorRuntimeMemory, hipErrorRuntimeOther, hipErrorHostMemoryAlreadyRegistered, hipErrorHostMemoryNotRegistered, hipErrorMapBufferObjectFailed, hipErrorTbd }; typedef void* hipPitchedPtr; //struct hipExtent {}; //struct hipChannelFormatDesc {}; struct hipDeviceArch_t { unsigned 
hasGlobalInt32Atomics : 1; unsigned hasGlobalFloatAtomicExch : 1; unsigned hasSharedInt32Atomics : 1; unsigned hasSharedFloatAtomicExch : 1; unsigned hasFloatAtomicAdd : 1; // 64-bit Atomics unsigned hasGlobalInt64Atomics : 1; unsigned hasSharedInt64Atomics : 1; // Doubles unsigned hasDoubles : 1; // Warp cross-lane operations unsigned hasWarpVote : 1; unsigned hasWarpBallot : 1; unsigned hasWarpShuffle : 1; unsigned hasFunnelShift : 1; // Sync unsigned hasThreadFenceSystem : 1; unsigned hasSyncThreadsExt : 1; // Misc unsigned hasSurfaceFuncs : 1; unsigned has3dGrid : 1; unsigned hasDynamicParallelism : 1; }; struct hipDeviceProp_t { char name[256]; size_t totalGlobalMem; size_t sharedMemPerBlock; int regsPerBlock; int warpSize; int maxThreadsPerBlock; int maxThreadsDim[3]; int maxGridSize[3]; int clockRate; int memoryClockRate; int memoryBusWidth; size_t totalConstMem; int major; int minor; int multiProcessorCount; int l2CacheSize; int maxThreadsPerMultiProcessor; int computeMode; int clockInstructionRate; hipDeviceArch_t arch; int concurrentKernels; int pciBusID; int pciDeviceID; size_t maxSharedMemoryPerMultiProcessor; int isMultiGpuBoard; int canMapHostMemory; int gcnArch; }; struct hipMemcpy3DParms {}; enum hipDeviceAttribute_t { hipDeviceAttributeMaxThreadsPerBlock, hipDeviceAttributeMaxBlockDimX, hipDeviceAttributeMaxBlockDimY, hipDeviceAttributeMaxBlockDimZ, hipDeviceAttributeMaxGridDimX, hipDeviceAttributeMaxGridDimY, hipDeviceAttributeMaxGridDimZ, hipDeviceAttributeMaxSharedMemoryPerBlock, hipDeviceAttributeTotalConstantMemory, hipDeviceAttributeWarpSize, hipDeviceAttributeMaxRegistersPerBlock, hipDeviceAttributeClockRate, hipDeviceAttributeMemoryClockRate, hipDeviceAttributeMemoryBusWidth, hipDeviceAttributeMultiprocessorCount, hipDeviceAttributeComputeMode, hipDeviceAttributeL2CacheSize, hipDeviceAttributeMaxThreadsPerMultiProcessor, hipDeviceAttributeComputeCapabilityMajor, hipDeviceAttributeComputeCapabilityMinor, hipDeviceAttributeConcurrentKernels, 
hipDeviceAttributePciBusId, hipDeviceAttributePciDeviceId, hipDeviceAttributeMaxSharedMemoryPerMultiprocessor, hipDeviceAttributeIsMultiGpuBoard, hipDeviceAttributeIntegrated, }; struct hipPointerAttribute_t { hipDevice_t device; hipDeviceptr_t devicePointer; void* hostPointer; bool isManaged; int allocationFlags; }; #define hipStreamDefault 0 #define hipStreamNonBlocking 0 #define hipSharedMemBankSizeDefault 0 #define hipSharedMemBankSizeFourByte 0 #define hipSharedMemBankSizeEightByte 0 typedef void(*hipStreamCallback_t)(hipStream_t, hipError_t, void*); /* hipError_t hipDeviceReset(); hipError_t hipGetLastError(); hipError_t hipPeekAtLastError(); */ inline hipError_t hipMalloc(void** ptr, size_t size) { *ptr = hipcpu::detail::aligned_malloc(hipcpu::detail::default_alignment, size); if(*ptr == nullptr) return hipErrorMemoryAllocation; return hipSuccess; } //hipError_t hipMallocPitch(void** ptr, size_t* pitch, size_t width, size_t height); //hipError_t hipMalloc3D(hipPitchedPtr* pitchedDevPtr, hipExtent extent); inline hipError_t hipFree(void* ptr) { hipcpu::detail::aligned_free(ptr); return hipSuccess; } inline hipError_t hipMallocHost(void** ptr, size_t size) { return hipMalloc(ptr, size); } #define hipMemAttachGlobal 0 #define hipMemAttachHost 1 template<class T> inline hipError_t hipMallocManaged(T** ptr, size_t size, unsigned flags = hipMemAttachGlobal) { return hipMalloc(reinterpret_cast<void**>(ptr), size); } inline hipError_t hipHostAlloc(void** ptr, size_t size, unsigned int flags) { return hipMalloc(ptr, size); } inline hipError_t hipHostMalloc(void** ptr, size_t size, unsigned int flags) { return hipMalloc(ptr, size); } /* hipError_t hipMallocArray(hipArray** array, const hipChannelFormatDesc* desc, size_t width, size_t height, unsigned int flags); hipError_t hipMalloc3DArray(hipArray** array, const struct hipChannelFormatDesc* desc, struct hipExtent extent, unsigned int flags); hipError_t hipFreeArray(hipArray* array); hipError_t 
hipHostGetDevicePointer(void** devPtr, void* hostPtr, unsigned int flags); hipError_t hipHostGetFlags(unsigned int* flagsPtr, void* hostPtr); hipError_t hipHostRegister(void* ptr, size_t size, unsigned int flags); hipError_t hipHostUnregister(void* ptr);*/ inline hipError_t hipFreeHost(void* ptr) { return hipFree(ptr); } inline hipError_t hipHostFree(void* ptr) { return hipFree(ptr); } inline hipError_t hipSetDevice(int device) { if(device != 0) return hipErrorInvalidDevice; _hipcpu_runtime.set_device(device); return hipSuccess; } //hipError_t hipChooseDevice(int* device, const hipDeviceProp_t* prop); inline hipError_t hipStreamCreate(hipStream_t* stream) { *stream = _hipcpu_runtime.create_blocking_stream(); return hipSuccess; } //TODO Make sure semantics are correct for all allowed values of flags inline hipError_t hipStreamCreateWithFlags(hipStream_t* stream, unsigned int flags) { if(flags == hipStreamDefault) return hipStreamCreate(stream); else if (flags == hipStreamNonBlocking) { *stream = _hipcpu_runtime.create_async_stream(); return hipSuccess; } return hipErrorInvalidValue; } inline hipError_t hipStreamSynchronize(hipStream_t stream) { _hipcpu_runtime.streams().get(stream)->wait(); return hipSuccess; } inline hipError_t hipStreamDestroy(hipStream_t stream) { _hipcpu_runtime.destroy_stream(stream); return hipSuccess; } //TODO Make sure semantics are correct for all allowed values of flags inline hipError_t hipStreamWaitEvent(hipStream_t stream, hipEvent_t event, unsigned int flags) { std::shared_ptr<hipcpu::event> evt = _hipcpu_runtime.events().get_shared(event); _hipcpu_runtime.submit_operation([evt](){ // TODO store error code evt->wait(); }, stream); return hipSuccess; } inline hipError_t hipStreamQuery(hipStream_t stream) { hipcpu::stream* s = _hipcpu_runtime.streams().get(stream); if(s->is_idle()) return hipSuccess; return hipErrorNotReady; } //TODO Make sure semantics are correct for all allowed values of flags inline hipError_t 
hipStreamAddCallback(hipStream_t stream, hipStreamCallback_t callback, void *userData, unsigned int flags) { _hipcpu_runtime.submit_operation([stream, callback, userData](){ // TODO guarantee correct error propagation callback(stream, hipSuccess, userData); }, stream); return hipSuccess; } inline hipError_t hipMemcpyAsync(void* dst, const void* src, size_t sizeBytes, hipMemcpyKind copyKind, hipStream_t stream = 0) { if(!_hipcpu_runtime.streams().is_valid(stream)) return hipErrorInvalidValue; _hipcpu_runtime.submit_operation([=](){ memcpy(dst, src, sizeBytes); }, stream); return hipSuccess; } inline hipError_t hipMemcpy(void* dst, const void* src, size_t sizeBytes, hipMemcpyKind copyKind) { hipMemcpyAsync(dst, src, sizeBytes, copyKind, 0); _hipcpu_runtime.streams().get(0)->wait(); return hipSuccess; } inline hipError_t hipMemcpyHtoD(hipDeviceptr_t dst, void* src, size_t size) { return hipMemcpy(dst, src, size, hipMemcpyHostToDevice); } inline hipError_t hipMemcpyDtoH(void* dst, hipDeviceptr_t src, size_t size) { return hipMemcpy(dst, src, size, hipMemcpyDeviceToHost); } inline hipError_t hipMemcpyDtoD(hipDeviceptr_t dst, hipDeviceptr_t src, size_t size) { return hipMemcpy(dst, src, size, hipMemcpyDeviceToDevice); } inline hipError_t hipMemcpyHtoDAsync(hipDeviceptr_t dst, void* src, size_t size, hipStream_t stream) { return hipMemcpyAsync(dst, src, size, hipMemcpyHostToDevice, stream); } inline hipError_t hipMemcpyDtoHAsync(void* dst, hipDeviceptr_t src, size_t size, hipStream_t stream) { return hipMemcpyAsync(dst, src, size, hipMemcpyDeviceToHost, stream); } inline hipError_t hipMemcpyDtoDAsync(hipDeviceptr_t dst, hipDeviceptr_t src, size_t size, hipStream_t stream) { return hipMemcpyAsync(dst, src, size, hipMemcpyDeviceToDevice, stream); } inline hipError_t hipMemcpyToSymbolAsync(const void* symbol, const void* src, size_t sizeBytes, size_t offset, hipMemcpyKind copyType, hipStream_t stream = 0) { char* base_ptr = static_cast<char*>(const_cast<void*>(symbol)); 
void* ptr = static_cast<void*>(base_ptr + offset); return hipMemcpyAsync(ptr, src, sizeBytes, copyType, stream); } inline hipError_t hipMemcpyFromSymbolAsync(void* dst, const void* symbolName, size_t sizeBytes, size_t offset, hipMemcpyKind kind, hipStream_t stream = 0) { const void* ptr = static_cast<const void*>(static_cast<const char*>(symbolName)+offset); return hipMemcpyAsync(dst, ptr, sizeBytes, kind, stream); } inline hipError_t hipMemcpyToSymbol(const void* symbol, const void* src, size_t sizeBytes, size_t offset = 0, hipMemcpyKind copyType = hipMemcpyHostToDevice) { hipError_t err = hipMemcpyToSymbolAsync(symbol, src, sizeBytes, offset, copyType, 0); if(err != hipSuccess) return err; _hipcpu_runtime.streams().get(0)->wait(); return err; } inline hipError_t hipMemcpyFromSymbol(void *dst, const void *symbolName, size_t sizeBytes, size_t offset = 0, hipMemcpyKind kind = hipMemcpyDeviceToHost) { hipError_t err = hipMemcpyFromSymbolAsync(dst, symbolName, sizeBytes, offset, kind, 0); if(err != hipSuccess) return err; _hipcpu_runtime.streams().get(0)->wait(); return err; } hipError_t hipMemcpy3D(const struct hipMemcpy3DParms *p); inline hipError_t hipMemcpy2DAsync(void* dst, size_t dpitch, const void* src, size_t spitch, size_t width, size_t height, hipMemcpyKind kind, hipStream_t stream) { if(!_hipcpu_runtime.streams().is_valid(stream)) return hipErrorInvalidValue; _hipcpu_runtime.submit_operation([=](){ for(size_t row = 0; row < height; ++row) { void* row_dst_begin = reinterpret_cast<char*>(dst) + row * dpitch; const void* row_src_begin = reinterpret_cast<const char*>(src) + row * spitch; memcpy(row_dst_begin, row_src_begin, width); } }, stream); return hipSuccess; } inline hipError_t hipMemcpy2D(void* dst, size_t dpitch, const void* src, size_t spitch, size_t width, size_t height, hipMemcpyKind kind) { hipError_t err = hipMemcpy2DAsync(dst, dpitch, src, spitch, width, height, kind, 0); if(err != hipSuccess) return err; _hipcpu_runtime.streams().get(0)->wait(); 
return err; } hipError_t hipMemcpy2DToArray(hipArray* dst, size_t wOffset, size_t hOffset, const void* src, size_t spitch, size_t width, size_t height, hipMemcpyKind kind); hipError_t hipMemcpyToArray(hipArray* dst, size_t wOffset, size_t hOffset, const void* src, size_t count, hipMemcpyKind kind); hipError_t hipMemcpyFromArray(void* dst, hipArray_const_t srcArray, size_t wOffset, size_t hOffset, size_t count, hipMemcpyKind kind); hipError_t hipMemcpyAtoH(void* dst, hipArray* srcArray, size_t srcOffset, size_t count); hipError_t hipMemcpyHtoA(hipArray* dstArray, size_t dstOffset, const void* srcHost, size_t count); inline hipError_t hipDeviceSynchronize() { _hipcpu_runtime.streams().for_each([](hipcpu::stream* s){ s->wait(); }); return hipSuccess; } hipError_t hipDeviceGetCacheConfig(hipFuncCache_t* pCacheConfig); const char* hipGetErrorString(hipError_t error); const char* hipGetErrorName(hipError_t error); inline hipError_t hipGetDeviceCount(int* count) { *count = 1; return hipSuccess; } inline hipError_t hipGetDevice(int* device) { *device = 0; return hipSuccess; } /* hipError_t hipIpcCloseMemHandle(void* devPtr); hipError_t hipIpcGetEventHandle(hipIpcEventHandle_t* handle, hipEvent_t event); hipError_t hipIpcGetMemHandle(hipIpcMemHandle_t* handle, void* devPtr); hipError_t hipIpcOpenEventHandle(hipEvent_t* event, hipIpcEventHandle_t handle); hipError_t hipIpcOpenMemHandle(void** devPtr, hipIpcMemHandle_t handle, unsigned int flags); */ inline hipError_t hipMemsetAsync(void* devPtr, int value, size_t count, hipStream_t stream = 0) { if(!_hipcpu_runtime.streams().is_valid(stream)) return hipErrorInvalidValue; _hipcpu_runtime.submit_operation([=](){ memset(devPtr, value, count); }, stream); return hipSuccess; } inline hipError_t hipMemset(void* devPtr, int value, size_t count) { hipError_t err = hipMemsetAsync(devPtr, value, count, 0); if(err != hipSuccess) return err; _hipcpu_runtime.streams().get(0)->wait(); return hipSuccess; } inline hipError_t 
hipMemsetD8(hipDeviceptr_t dest, unsigned char value, size_t sizeBytes) { return hipMemset(dest, value, sizeBytes); } /* hipError_t hipMemset2D(void* dst, size_t pitch, int value, size_t width, size_t height); hipError_t hipMemset2DAsync(void* dst, size_t pitch, int value, size_t width, size_t height, hipStream_t stream = 0); hipError_t hipMemset3D(hipPitchedPtr pitchedDevPtr, int value, hipExtent extent ); hipError_t hipMemset3DAsync(hipPitchedPtr pitchedDevPtr, int value, hipExtent extent, hipStream_t stream = 0); */ inline hipError_t hipGetDeviceProperties(hipDeviceProp_t* p_prop, int device) { if(device != 0) return hipErrorInvalidDevice; std::string device_name = "hipCPU OpenMP host device"; int max_dim = std::numeric_limits<int>::max(); strncpy(p_prop->name, device_name.c_str(), 256); // TODO: Find available memory p_prop->totalGlobalMem = std::numeric_limits<size_t>::max(); p_prop->sharedMemPerBlock = _hipcpu_runtime.dev().get_max_shared_memory(); p_prop->regsPerBlock = std::numeric_limits<int>::max(); p_prop->warpSize = 1; p_prop->maxThreadsPerBlock = _hipcpu_runtime.dev().get_max_threads(); p_prop->maxGridSize[0] = max_dim; p_prop->maxGridSize[1] = max_dim; p_prop->maxGridSize[2] = max_dim; p_prop->maxGridSize[0] = max_dim; p_prop->maxGridSize[1] = max_dim; p_prop->maxGridSize[2] = max_dim; // TODO: Find actual value p_prop->clockRate = 1; p_prop->memoryClockRate = 1; p_prop->memoryBusWidth = 1; p_prop->totalConstMem = std::numeric_limits<std::size_t>::max(); p_prop->major = 1; p_prop->minor = 0; p_prop->multiProcessorCount = _hipcpu_runtime.dev().get_num_compute_units(); // TODO: Find actual value p_prop->l2CacheSize = std::numeric_limits<int>::max(); p_prop->maxThreadsPerMultiProcessor = p_prop->maxThreadsPerBlock; p_prop->computeMode = 0; p_prop->clockInstructionRate = p_prop->clockRate; hipDeviceArch_t arch; arch.hasGlobalInt32Atomics = 1; arch.hasGlobalFloatAtomicExch = 1; arch.hasSharedInt32Atomics = 1; arch.hasSharedFloatAtomicExch = 1; 
arch.hasFloatAtomicAdd = 1; arch.hasGlobalInt64Atomics = 1; arch.hasSharedInt64Atomics = 1; arch.hasDoubles = 1; arch.hasWarpVote = 0; arch.hasWarpBallot = 0; arch.hasWarpShuffle = 0; arch.hasFunnelShift = 0; arch.hasThreadFenceSystem = 1; arch.hasSyncThreadsExt = 1; arch.hasSurfaceFuncs = 0; arch.has3dGrid = 1; arch.hasDynamicParallelism = 0; p_prop->arch = arch; p_prop->concurrentKernels = 1; p_prop->pciBusID = 0; p_prop->pciDeviceID = 0; p_prop->maxSharedMemoryPerMultiProcessor = p_prop->sharedMemPerBlock; p_prop->isMultiGpuBoard = 0; p_prop->canMapHostMemory = 1; p_prop->gcnArch = 0; return hipSuccess; } hipError_t hipDeviceGetAttribute(int* pi, hipDeviceAttribute_t attr, int device); hipError_t hipOccupancyMaxActiveBlocksPerMultiprocessor(int* numBlocks, const void* func, int blockSize, size_t dynamicSMemSize); hipError_t hipPointerGetAttributes(hipPointerAttribute_t* attributes, void* ptr); hipError_t hipMemGetInfo(size_t* free, size_t* total); inline hipError_t hipEventCreate(hipEvent_t* event) { *event = _hipcpu_runtime.create_event(); return hipSuccess; } inline hipError_t hipEventRecord(hipEvent_t event, hipStream_t stream = 0) { if(!_hipcpu_runtime.events().is_valid(event) || !_hipcpu_runtime.streams().is_valid(stream)) return hipErrorInvalidValue; std::shared_ptr<hipcpu::event> evt = _hipcpu_runtime.events().get_shared(event); _hipcpu_runtime.submit_operation([evt](){ evt->mark_as_finished(); }, stream); return hipSuccess; } inline hipError_t hipEventSynchronize(hipEvent_t event) { if(!_hipcpu_runtime.events().is_valid(event)) return hipErrorInvalidValue; hipcpu::event* evt = _hipcpu_runtime.events().get(event); evt->wait(); if(evt->is_complete()) return hipSuccess; return hipErrorUnknown; } hipError_t hipEventElapsedTime(float* ms, hipEvent_t start, hipEvent_t stop); inline hipError_t hipEventDestroy(hipEvent_t event) { if(!_hipcpu_runtime.events().is_valid(event)) return hipErrorInvalidValue; _hipcpu_runtime.destroy_event(event); return hipSuccess; } 
// Version / peer-access / primary-context entry points. Those that are only
// declared (no definition anywhere in this header) will fail at link time if
// used — they exist purely for source compatibility.
hipError_t hipDriverGetVersion(int* driverVersion);

// Reports a very large version number so that "minimum runtime version"
// feature checks in client code succeed.
inline hipError_t hipRuntimeGetVersion(int* runtimeVersion)
{
  *runtimeVersion = 99999;
  return hipSuccess;
}

hipError_t hipDeviceCanAccessPeer(int* canAccessPeer, int device, int peerDevice);
hipError_t hipDeviceDisablePeerAccess(int peerDevice);
hipError_t hipDeviceEnablePeerAccess(int peerDevice, unsigned int flags);
hipError_t hipCtxDisablePeerAccess(hipCtx_t peerCtx);
hipError_t hipCtxEnablePeerAccess(hipCtx_t peerCtx, unsigned int flags);
hipError_t hipDevicePrimaryCtxGetState(hipDevice_t dev, unsigned int* flags, int* active);
hipError_t hipDevicePrimaryCtxRelease(hipDevice_t dev);
hipError_t hipDevicePrimaryCtxRetain(hipCtx_t* pctx, hipDevice_t dev);
hipError_t hipDevicePrimaryCtxReset(hipDevice_t dev);
hipError_t hipDevicePrimaryCtxSetFlags(hipDevice_t dev, unsigned int flags);
hipError_t hipMemGetAddressRange(hipDeviceptr_t* pbase, size_t* psize, hipDeviceptr_t dptr);
hipError_t hipMemcpyPeer(void* dst, int dstDevice, const void* src, int srcDevice, size_t count);
hipError_t hipMemcpyPeerAsync(void* dst, int dstDevice, const void* src, int srcDevice, size_t count, hipStream_t stream = 0);

// Profile APIs:
hipError_t hipProfilerStart();
hipError_t hipProfilerStop();

hipError_t hipSetDeviceFlags(unsigned int flags);
hipError_t hipEventCreateWithFlags(hipEvent_t* event, unsigned int flags);

// Non-blocking completion test: hipErrorNotReady while the event's stream has
// not yet reached the recorded point.
inline hipError_t hipEventQuery(hipEvent_t event)
{
  if(!_hipcpu_runtime.events().is_valid(event))
    return hipErrorInvalidValue;

  bool is_ready = _hipcpu_runtime.events().get(event)->is_complete();

  if(!is_ready)
    return hipErrorNotReady;
  return hipSuccess;
}

/*
hipError_t hipCtxCreate(hipCtx_t* ctx, unsigned int flags, hipDevice_t device);
hipError_t hipCtxDestroy(hipCtx_t ctx);
hipError_t hipCtxPopCurrent(hipCtx_t* ctx);
hipError_t hipCtxPushCurrent(hipCtx_t ctx);
hipError_t hipCtxSetCurrent(hipCtx_t ctx);
hipError_t hipCtxGetCurrent(hipCtx_t* ctx);
hipError_t hipCtxGetDevice(hipDevice_t* device);
hipError_t hipCtxGetApiVersion(hipCtx_t ctx, int* apiVersion);
hipError_t hipCtxGetCacheConfig(hipFuncCache* cacheConfig);
hipError_t hipCtxSetCacheConfig(hipFuncCache cacheConfig);
hipError_t hipCtxSetSharedMemConfig(hipSharedMemConfig config);
hipError_t hipCtxGetSharedMemConfig(hipSharedMemConfig* pConfig);
hipError_t hipCtxSynchronize(void);
hipError_t hipCtxGetFlags(unsigned int* flags);
hipError_t hipCtxDetach(hipCtx_t ctx);
hipError_t hipDeviceGet(hipDevice_t* device, int ordinal);
hipError_t hipDeviceComputeCapability(int* major, int* minor, hipDevice_t device);
hipError_t hipDeviceGetName(char* name, int len, hipDevice_t device);
hipError_t hipDeviceGetPCIBusId(char* pciBusId, int len, hipDevice_t device);
hipError_t hipDeviceGetByPCIBusId(int* device, const char* pciBusId);
hipError_t hipDeviceGetSharedMemConfig(hipSharedMemConfig* config);
hipError_t hipDeviceSetSharedMemConfig(hipSharedMemConfig config);
hipError_t hipDeviceGetLimit(size_t* pValue, hipLimit_t limit);
hipError_t hipDeviceTotalMem(size_t* bytes, hipDevice_t device);
hipError_t hipModuleLoad(hipModule_t* module, const char* fname);
hipError_t hipModuleUnload(hipModule_t hmod);
hipError_t hipModuleGetFunction(hipFunction_t* function, hipModule_t module, const char* kname);
hipError_t hipFuncGetAttributes(hipFuncAttributes* attr, const void* func);
hipError_t hipModuleGetGlobal(hipDeviceptr_t* dptr, size_t* bytes, hipModule_t hmod, const char* name);
hipError_t hipModuleLoadData(hipModule_t* module, const void* image);
hipError_t hipModuleLoadDataEx(hipModule_t* module, const void* image, unsigned int numOptions, hipJitOption* options, void** optionValues);
hipError_t hipModuleLaunchKernel(hipFunction_t f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, hipStream_t stream, void** kernelParams, void** extra);
hipError_t hipFuncSetCacheConfig(const void* func, hipFuncCache_t cacheConfig);
*/

// Declared but not defined — using it will fail at link time.
template <class T>
hipError_t hipOccupancyMaxPotentialBlockSize(int* minGridSize, int* blockSize, T func, size_t dynamicSMemSize = 0, int blockSizeLimit = 0, unsigned int flags = 0);

/*
template <class T, int dim, enum hipTextureReadMode readMode>
hipError_t hipBindTexture(size_t* offset, const struct texture<T, dim, readMode>& tex, const void* devPtr, size_t size = UINT_MAX);
template <class T, int dim, enum hipTextureReadMode readMode>
hipError_t hipBindTexture(size_t* offset, struct texture<T, dim, readMode>& tex, const void* devPtr, const struct hipChannelFormatDesc& desc, size_t size = UINT_MAX);
template <class T, int dim, enum hipTextureReadMode readMode>
hipError_t hipUnbindTexture(struct texture<T, dim, readMode>* tex);
hipError_t hipBindTexture(size_t* offset, textureReference* tex, const void* devPtr, const hipChannelFormatDesc* desc, size_t size = UINT_MAX);
template <class T, int dim, enum hipTextureReadMode readMode>
hipError_t hipBindTextureToArray(struct texture<T, dim, readMode>& tex, hipArray_const_t array, const struct hipChannelFormatDesc& desc);
template <class T, int dim, enum hipTextureReadMode readMode>
hipError_t hipBindTextureToArray(struct texture<T, dim, readMode> *tex, hipArray_const_t array, const struct hipChannelFormatDesc* desc);
template <class T, int dim, enum hipTextureReadMode readMode>
hipError_t hipBindTextureToArray(struct texture<T, dim, readMode>& tex, hipArray_const_t array);
template <class T>
hipChannelFormatDesc hipCreateChannelDesc();
hipChannelFormatDesc hipCreateChannelDesc(int x, int y, int z, int w, hipChannelFormatKind f);
hipError_t hipCreateTextureObject(hipTextureObject_t* pTexObject, const hipResourceDesc* pResDesc, const hipTextureDesc* pTexDesc, const hipResourceViewDesc* pResViewDesc);
hipError_t hipDestroyTextureObject(hipTextureObject_t textureObject);
hipError_t hipCreateSurfaceObject(hipSurfaceObject_t* pSurfObject, const hipResourceDesc* pResDesc);
hipError_t hipDestroySurfaceObject(hipSurfaceObject_t surfaceObject);
hipError_t hipGetTextureObjectResourceDesc(hipResourceDesc* pResDesc, hipTextureObject_t textureObject);
hipError_t hipGetTextureAlignmentOffset(size_t* offset, const textureReference* texref);
hipError_t hipGetChannelDesc(hipChannelFormatDesc* desc, hipArray_const_t array);
*/

// Generators for HIP's short-vector types (char1..double4). Each produces a
// plain aggregate with x/y/z/w members.
#define HIPCPU_MAKE_VECTOR1(T, name) \
struct name {\
  T x; \
};

#define HIPCPU_MAKE_VECTOR2(T, name) \
struct name {\
  T x; \
  T y; \
};

#define HIPCPU_MAKE_VECTOR3(T, name) \
struct name {\
  T x; \
  T y; \
  T z; \
};

#define HIPCPU_MAKE_VECTOR4(T, name) \
struct name {\
  T x; \
  T y; \
  T z; \
  T w; \
};

// Instantiates all four vector widths for a given element type.
#define HIPCPU_MAKE_VECTOR_TYPE(T, prefix) \
HIPCPU_MAKE_VECTOR1(T, prefix##1) \
HIPCPU_MAKE_VECTOR2(T, prefix##2) \
HIPCPU_MAKE_VECTOR3(T, prefix##3) \
HIPCPU_MAKE_VECTOR4(T, prefix##4)

HIPCPU_MAKE_VECTOR_TYPE(signed char, char)
HIPCPU_MAKE_VECTOR_TYPE(unsigned char, uchar)
HIPCPU_MAKE_VECTOR_TYPE(short, short)
HIPCPU_MAKE_VECTOR_TYPE(unsigned short, ushort)
HIPCPU_MAKE_VECTOR_TYPE(int, int)
HIPCPU_MAKE_VECTOR_TYPE(unsigned, uint)
HIPCPU_MAKE_VECTOR_TYPE(long, long)
HIPCPU_MAKE_VECTOR_TYPE(unsigned long, ulong)
HIPCPU_MAKE_VECTOR_TYPE(long long, longlong)
HIPCPU_MAKE_VECTOR_TYPE(unsigned long long, ulonglong)
HIPCPU_MAKE_VECTOR_TYPE(float, float)
HIPCPU_MAKE_VECTOR_TYPE(double, double)

// Block-wide barrier, mapped onto an OpenMP barrier.
// NOTE(review): this presumably requires being called from inside the OpenMP
// parallel region that executes the block — confirm against detail/runtime.hpp.
__device__
inline void __syncthreads()
{
  #pragma omp barrier
}

// Rounding-mode-suffixed device intrinsics (_rd/_rn/_ru/_rz = toward -inf /
// nearest / +inf / zero). The host FPU rounding mode is not changed here, so
// all variants compute with the current (typically round-to-nearest) mode;
// they exist for source compatibility, not bit-exact GPU emulation.
__device__ inline float __fadd_rd(float x, float y) { return x+y; }
__device__ inline float __fadd_rn(float x, float y) { return x+y; }
__device__ inline float __fadd_ru(float x, float y) { return x+y; }
__device__ inline float __fadd_rz(float x, float y) { return x+y; }
__device__ inline float __fdiv_rd(float x, float y) { return x/y; }
__device__ inline float __fdiv_rn(float x, float y) { return x/y; }
__device__ inline float __fdiv_ru(float x, float y) { return x/y; }
__device__ inline float __fdiv_rz(float x, float y) { return x/y; }
__device__ inline float __fdividef(float x, float y) { return x/y; }
__device__ inline float __fmaf_rd(float x, float y, float z) { return std::fma(x,y,z); }
__device__ inline float __fmaf_rn(float x, float y, float z) { return std::fma(x,y,z); }
__device__ inline float __fmaf_ru(float x, float y, float z) { return std::fma(x,y,z); }
__device__ inline float __fmaf_rz(float x, float y, float z) { return std::fma(x,y,z); }
__device__ inline float __fmul_rd(float x, float y) { return x*y; }
__device__ inline float __fmul_rn(float x, float y) { return x*y; }
__device__ inline float __fmul_ru(float x, float y) { return x*y; }
__device__ inline float __fmul_rz(float x, float y) { return x*y; }
__device__ inline float __frcp_rd(float x) { return 1.f/x; }
__device__ inline float __frcp_rn(float x) { return 1.f/x; }
__device__ inline float __frcp_ru(float x) { return 1.f/x; }
__device__ inline float __frcp_rz(float x) { return 1.f/x; }
__device__ inline float __frsqrt_rn(float x) { return 1.f/std::sqrt(x); }
__device__ inline float __fsqrt_rd(float x) { return std::sqrt(x); }
__device__ inline float __fsqrt_rn(float x) { return std::sqrt(x); }
__device__ inline float __fsqrt_ru(float x) { return std::sqrt(x); }
__device__ inline float __fsqrt_rz(float x) { return std::sqrt(x); }
__device__ inline float __fsub_rd(float x, float y) { return x-y; }
__device__ inline float __fsub_rn(float x, float y) { return x-y; }
__device__ inline float __fsub_ru(float x, float y) { return x-y; }
__device__ inline float __fsub_rz(float x, float y) { return x-y; }

// Double-precision counterparts of the intrinsics above.
__device__ inline double __dadd_rd(double x, double y) { return x+y; }
__device__ inline double __dadd_rn(double x, double y) { return x+y; }
__device__ inline double __dadd_ru(double x, double y) { return x+y; }
__device__ inline double __dadd_rz(double x, double y) { return x+y; }
__device__ inline double __ddiv_rd(double x, double y) { return x/y; }
__device__ inline double __ddiv_rn(double x, double y) { return x/y; }
__device__ inline double __ddiv_ru(double x, double y) { return x/y; }
__device__ inline double __ddiv_rz(double x, double y) { return x/y; }
__device__ inline double __dmul_rd(double x, double y) { return x*y; }
__device__ inline double __dmul_rn(double x, double y) { return x*y; }
__device__ inline double __dmul_ru(double x, double y) { return x*y; }
__device__ inline double __dmul_rz(double x, double y) { return x*y; }
__device__ inline double __drcp_rd(double x) { return 1./x; }
__device__ inline double __drcp_rn(double x) { return 1./x; }
__device__ inline double __drcp_ru(double x) { return 1./x; }
__device__ inline double __drcp_rz(double x) { return 1./x; }
__device__ inline double __dsqrt_rd(double x) { return std::sqrt(x); }
__device__ inline double __dsqrt_rn(double x) { return std::sqrt(x); }
__device__ inline double __dsqrt_ru(double x) { return std::sqrt(x); }
__device__ inline double __dsqrt_rz(double x) { return std::sqrt(x); }
__device__ inline double __dsub_rd(double x, double y) { return x - y; }
__device__ inline double __dsub_rn(double x, double y) { return x - y; }
__device__ inline double __dsub_ru(double x, double y) { return x - y; }
__device__ inline double __dsub_rz(double x, double y) { return x - y; }
__device__ inline double __fma_rd(double x, double y, double z) { return std::fma(x,y,z); }
__device__ inline double __fma_rn(double x, double y, double z) { return std::fma(x,y,z); }
__device__ inline double __fma_ru(double x, double y, double z) { return std::fma(x,y,z); }
__device__ inline double __fma_rz(double x, double y, double z) { return std::fma(x,y,z); }

#endif // HIPCPU_RUNTIME_H
linalg.h
/** * @file linalg.h * @author Nader KHAMMASSI - nader.khammassi@gmail.com * @date 02-10-15 * @brief linear algebra utils */ #ifndef QX_LINALG_H #define QX_LINALG_H // #define __BUILTIN_LINALG_ // #ifndef __BUILTIN_LINALG__ // #include <boost/numeric/ublas/vector.hpp> // #include <boost/numeric/ublas/matrix.hpp> // #include <boost/numeric/ublas/io.hpp> // #endif #include "qx/core/matrix.h" #include <iostream> #include <cstdint> #include <vector> #include <bitset> #include "qx/xpu/aligned_memory_allocator.h" #include "qx/xpu/vector.h" #include "qx/xpu/complex.h" #define println(x) std::cout << x << std::endl #define print(x) std::cout << x #define MAX_QB_N 64 // #ifndef __BUILTIN_LINALG__ // using namespace boost::numeric; // #endif namespace qx { namespace linalg { //typedef std::complex<double> complex_t; typedef xpu::complex_d complex_t; // #ifndef __BUILTIN_LINALG__ // typedef ublas::vector<complex_t> cvector_t; // typedef ublas::matrix<complex_t> cmatrix_t; // typedef ublas::identity_matrix<complex_t> cidentity_t; // #else typedef std::vector<complex_t,xpu::aligned_memory_allocator<complex_t,64> > cvector_t; // typedef xpu::vector<complex_t,16> cvector_t; // typedef qx::linalg::matrix<complex_t> cmatrix_t; typedef qx::linalg::tiny_matrix<complex_t,2> cmatrix_t; typedef qx::linalg::identity_matrix<complex_t> cidentity_t; // #endif // __BUILTIN_LINALG__ typedef std::vector<std::pair<uint32_t,uint32_t> > perm_t; #ifdef __SSE__ void cmul(complex_t * x, complex_t * y, complex_t * z) { __m128d mx, my, xr_xi, yr_yr, xi_yi, yi_xr; __m128d a,b; mx = _mm_load_pd((double *)x); my = _mm_load_pd((double *)y); xr_xi = mx; yr_yr = _mm_shuffle_pd(my, my, 0); //_mm_broadcastsd_pd(my); xi_yi = _mm_shuffle_pd(mx, my, 3); // 11b = 3 yi_xr = _mm_shuffle_pd(my, mx, 1); // 01b = 1 _mm_shuffle_pd(mx, mx, 1); // print("(xr,xi) : "); dump_m128d(xr_xi); // print("(yr,yr) : "); dump_m128d(yr_yr); // print("(xi,yi) : "); dump_m128d(xi_yi); // print("(yi,xr) : "); dump_m128d(yi_xr); a = 
_mm_mul_pd(xr_xi,yr_yr); b = _mm_mul_pd(xi_yi,yi_xr); a = _mm_addsub_pd(a,b); // print(" result : "); dump_m128d(a); _mm_store_pd((double *)z,a); } #else void cmul(complex_t * x, complex_t * y, complex_t * z) { *z=(*x)*(*y); } #endif /** * \brief tensor product of vectors */ cvector_t tensor(cvector_t v1, cvector_t v2) { uint32_t n1 = v1.size(); uint32_t n2 = v2.size(); cvector_t res(n1*n2); #pragma omp parallel for for(int64_t n = 0; n < (int64_t)res.size(); ++n) res[n] = 0.0; for (uint32_t i=0; i<n1; ++i) for (uint32_t j=0; j<n2; ++j) res[i*n2+j] = v1[i]+v2[j]; return res; } /** * \brief tensor product of matrices */ qx::linalg::matrix<complex_t> tensor(qx::linalg::matrix<complex_t> m1, qx::linalg::matrix<complex_t> m2) { uint32_t rows_1 = m1.size1(); uint32_t cols_1 = m1.size2(); uint32_t rows_2 = m2.size1(); uint32_t cols_2 = m2.size2(); uint32_t rows = rows_1*rows_2; uint32_t cols = cols_1*cols_2; complex_t z(0.0, 0.0); qx::linalg::matrix<complex_t> m(rows,cols,z); for (uint32_t i=0; i<rows; ++i) for (uint32_t j=0; j<cols; ++j) m(i,j) = m1(i/rows_2, j/cols_2) * m2(i%rows_2, j%cols_2); return m; } /** * \brief tensor product of matrices (result in m) */ uint32_t tensor(qx::linalg::matrix<complex_t>& m1, qx::linalg::matrix<complex_t>& m2, qx::linalg::matrix<complex_t>& m) { uint32_t rows_1 = m1.size1(); uint32_t cols_1 = m1.size2(); uint32_t rows_2 = m2.size1(); uint32_t cols_2 = m2.size2(); uint32_t rows = rows_1*rows_2; uint32_t cols = cols_1*cols_2; // cmatrix_t m(rows,cols); if ((m.size1()!=rows) || (m.size2()==cols)) m.resize(rows, cols, false); for (uint32_t i=0; i<rows; ++i) for (uint32_t j=0; j<cols; ++j) m(i,j) = m1(i/rows_2, j/cols_2) * m2(i%rows_2, j%cols_2); return 0; } /** * \brief nth tensor */ /* cmatrix_t tensor(cmatrix_t m, uint32_t n) { cmatrix_t r(m); while (r.size1() < n) r = tensor(r,m); return r; } */ /** * \brief matrix vector product */ cvector_t mxv(qx::linalg::matrix<complex_t> m, cvector_t v) { // #ifdef __BUILTIN_LINALG__ uint32_t n = 
v.size(); cvector_t r(n); #pragma omp parallel for for (int64_t i=0; i<(int64_t)n; ++i) { complex_t c(0.0, 0.0); for (uint32_t j=0; j<n; ++j) c += m(i,j)*v[j]; r[i] = c; } return r; // #else // return ublas::prec_prod(m,v); // #endif // __BUILTIN_LINALG__ } /** * \brief matrix matrix product */ qx::linalg::matrix<complex_t> mxm(qx::linalg::matrix<complex_t> m1, qx::linalg::matrix<complex_t> m2) { // #ifdef __BUILTIN_LINALG__ complex_t z(0.0, 0.0); qx::linalg::matrix<complex_t> r(m1.size1(), m2.size2(),z); qx::linalg::mul(m1,m2,r); return r; // #else // return ublas::prec_prod(m1,m2); // #endif } /** * \brief matrix matrix product */ cmatrix_t mxm(cmatrix_t m1, cmatrix_t m2) { // #ifdef __BUILTIN_LINALG__ complex_t z(0.0, 0.0); cmatrix_t r; qx::linalg::mul(m1,m2,r); return r; // #else // return ublas::prec_prod(m1,m2); // #endif } /** * \brief verify if the matrices m1 and m2 are equals */ bool equals(qx::linalg::matrix<complex_t>& m1, qx::linalg::matrix<complex_t>& m2, double epsilon=10e-14) { if (m1.size1() != m2.size1()) return false; if (m1.size2() != m2.size2()) return false; for (std::size_t i=0; i<m1.size1(); ++i) for (std::size_t j=0; j<m1.size2(); ++j) { // double d = std::abs(std::norm(m1(i,j))-std::norm(m2(i,j))); double d = m1(i,j).norm()-m2(i,j).norm(); if (d>epsilon) return false; } return true; } inline uint32_t pow2(uint32_t x) { return (1 << x); } /** * inc */ std::bitset<MAX_QB_N> inc(std::bitset<MAX_QB_N> in) { for (size_t i=0; i<MAX_QB_N; ++i) { if ( in[i] == 0 ) { in[i] = 1; break; } in[i] = 0; // this entry was 1; set to zero and carry the 1 } return in; } /** * perms */ // std::vector<std::pair<uint32_t,uint32_t> > perm_t perms(uint32_t n, uint32_t c, uint32_t t) { uint32_t nn = (1 << n); uint32_t p1, p2; std::bitset<MAX_QB_N> b; //std::vector<std::pair<uint32_t,uint32_t> > perms; perm_t res; b.reset(); b.set(c); uint32_t bc = b.to_ulong(); while (bc < nn) { b.set(c); p1 = b.to_ulong(); b.flip(t); p2 = b.to_ulong(); if (p2>bc) 
res.push_back(std::pair<uint32_t,uint32_t>(p1, p2)); b.flip(t); b = inc(b); b.set(c); bc = b.to_ulong(); } //for (int i=0; i<res.size(); ++i) // println("(" << res[i].first << ", " << res[i].second << ")"); return res; } void perms(uint32_t n, uint32_t c, uint32_t t, cvector_t& amp) { uint32_t nn = (1 << n); uint32_t p1, p2; std::bitset<MAX_QB_N> b; // perm_t res; b.reset(); b.set(c); uint32_t bc = b.to_ulong(); while (bc < nn) { b.set(c); p1 = b.to_ulong(); b.flip(t); p2 = b.to_ulong(); if (p2>bc) { std::swap(amp[p1],amp[p2]); // println("__swap(" << std::bitset<16>(p1) << ", " << std::bitset<16>(p2) << ")"); } b.flip(t); b = inc(b); b.set(c); bc = b.to_ulong(); } //return res; } perm_t perms(uint32_t n, uint32_t c1, uint32_t c2, uint32_t t) { uint32_t nn = (1 << n); uint32_t p1, p2; std::bitset<MAX_QB_N> b; //std::vector<std::pair<uint32_t,uint32_t> > perms; perm_t res; b.reset(); b.set(c1); b.set(c2); uint32_t bc = b.to_ulong(); while (bc < nn) { b.set(c1); b.set(c2); p1 = b.to_ulong(); b.flip(t); p2 = b.to_ulong(); res.push_back(std::pair<uint32_t,uint32_t>(p1, p2)); b.flip(t); b = inc(b); b.set(c1); b.set(c2); bc = b.to_ulong(); } //for (int i=0; i<res.size(); ++i) // println("(" << res[i].first << ", " << res[i].second << ")"); return res; } /** * dump matrix */ void dump_matrix(cmatrix_t& m, bool complex_format=false) { for (uint32_t i=0; i<m.size1(); ++i) { println(""); for (uint32_t j=0; j<m.size1(); ++j) { if (complex_format) print(m(i,j) << " "); else print(m(i,j).re << " "); //else print(m(i,j).real() << " "); } } println(""); } } } #endif // QX_LINALG_H
inplace_broadcast_binary_operation.h
/* Copyright 2021 NVIDIA Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#ifndef __NUMPY_INPLACE_BROADCAST_BINARY_OPERATION_H__
#define __NUMPY_INPLACE_BROADCAST_BINARY_OPERATION_H__

#include "point_task.h"

namespace legate {
namespace numpy {

#if defined(LEGATE_USE_CUDA) && defined(__CUDACC__)
// CUDA kernel: one thread per element, inout[i] = func(inout[i], scalar).
// `dense` selects the raw-pointer fast path over the generic accessor path.
template <int DIM, typename BinaryFunction, typename Args>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
  gpu_inplace_broadcast_binary_op(const Args args, const bool dense)
{
  const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= args.volume) return;
  BinaryFunction func;
  if (dense) {
    args.inoutptr[idx] = func(args.inoutptr[idx], args.scalar);
  } else {
    // Non-contiguous layout: translate the flat index back to an N-D point.
    const Legion::Point<DIM> point = args.pitches.unflatten(idx, args.rect.lo);
    args.inout[point] = func(args.inout[point], args.scalar);
  }
}
#endif

// Base class for all Legate's binary operation tasks
// Applies `inout = BinaryFunction(inout, scalar)` over one region, with the
// scalar broadcast from a Legion future.  CPU / OpenMP / GPU variants share
// the same deserialized argument layout.
template <class Derived, class BinaryFunction>
class InplaceBroadcastBinaryOperationTask : public PointTask<Derived> {
 private:
  using first_argument_type  = typename BinaryFunction::first_argument_type;
  using second_argument_type = typename BinaryFunction::second_argument_type;
  using result_type = std::result_of_t<BinaryFunction(first_argument_type, second_argument_type)>;

 public:
  // Task id is derived from the op code plus the involved types.
  static const int TASK_ID = task_id<BinaryFunction::op_code,
                                     NUMPY_INPLACE_BROADCAST_VARIANT_OFFSET,
                                     result_type,
                                     first_argument_type,
                                     second_argument_type>;
  // inout_region = op(inout_region, scalar)
  static const int REGIONS = 1;

  template <int N>
  struct DeserializedArgs {
    Legion::Rect<N> rect;
    AccessorRW<first_argument_type, N> inout;
    Pitches<N - 1> pitches;
    size_t volume;                  // number of elements in rect
    first_argument_type* inoutptr;  // valid only when deserialize() returned true
    second_argument_type scalar;    // broadcast operand, read from futures[0]
    // Unpack task arguments.  Returns true iff the region is dense row-major
    // (then inoutptr points at contiguous storage for the fast path).
    bool deserialize(LegateDeserializer& derez,
                     const Legion::Task* task,
                     const std::vector<Legion::PhysicalRegion>& regions)
    {
      rect   = NumPyProjectionFunctor::unpack_shape<N>(task, derez);
      inout  = derez.unpack_accessor_RW<first_argument_type, N>(regions[0], rect);
      scalar = task->futures[0].get_result<second_argument_type>(true /*silence warnings*/);
      volume = pitches.flatten(rect);
#ifndef LEGION_BOUNDS_CHECKS
      // Check to see if this is dense or not
      return inout.accessor.is_dense_row_major(rect) && (inoutptr = inout.ptr(rect));
#else
      // No dense execution if we're doing bounds checks
      return false;
#endif
    }
  };

  // Serial CPU variant.
  template <int DIM>
  static void dispatch_cpu(const Legion::Task* task,
                           const std::vector<Legion::PhysicalRegion>& regions,
                           LegateDeserializer& derez)
  {
    DeserializedArgs<DIM> args;
    const bool dense = args.deserialize(derez, task, regions);
    if (args.volume == 0) return;  // empty rect: nothing to do
    BinaryFunction func;
    if (dense) {
      for (size_t idx = 0; idx < args.volume; ++idx)
        args.inoutptr[idx] = func(args.inoutptr[idx], args.scalar);
    } else {
      const Scalar<second_argument_type, DIM> scalar(args.scalar);
      CPULoop<DIM>::binary_inplace(func, args.inout, scalar, args.rect);
    }
  }

#ifdef LEGATE_USE_OPENMP
  // OpenMP variant: identical logic, parallel over the dense span.
  template <int DIM>
  static void dispatch_omp(const Legion::Task* task,
                           const std::vector<Legion::PhysicalRegion>& regions,
                           LegateDeserializer& derez)
  {
    DeserializedArgs<DIM> args;
    const bool dense = args.deserialize(derez, task, regions);
    if (args.volume == 0) return;
    BinaryFunction func;
    if (dense) {
#pragma omp parallel for schedule(static)
      for (size_t idx = 0; idx < args.volume; ++idx)
        args.inoutptr[idx] = func(args.inoutptr[idx], args.scalar);
    } else {
      const Scalar<second_argument_type, DIM> scalar(args.scalar);
      OMPLoop<DIM>::binary_inplace(func, args.inout, scalar, args.rect);
    }
  }
#endif

#if defined(LEGATE_USE_CUDA) && defined(__CUDACC__)
  // GPU variant: launch enough blocks to cover `volume` elements.
  template <int DIM>
  static void dispatch_gpu(const Legion::Task* task,
                           const std::vector<Legion::PhysicalRegion>& regions,
                           LegateDeserializer& derez)
  {
    DeserializedArgs<DIM> args;
    const bool dense = args.deserialize(derez, task, regions);
    if (args.volume == 0) return;
    const size_t blocks = (args.volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    gpu_inplace_broadcast_binary_op<DIM, BinaryFunction, DeserializedArgs<DIM>>
      <<<blocks, THREADS_PER_BLOCK>>>(args, dense);
  }
#elif defined(LEGATE_USE_CUDA)
  // Declaration only: defined in the .cu translation unit when this header is
  // included without nvcc.
  template <int DIM>
  static void dispatch_gpu(const Legion::Task* task,
                           const std::vector<Legion::PhysicalRegion>& regions,
                           LegateDeserializer& derez);
#endif
};

} // namespace numpy
} // namespace legate

#endif // __NUMPY_INPLACE_BROADCAST_BINARY_OPERATION_H__
GB_unop__identity_int8_fp64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_int8_fp64)
// op(A') function:  GB (_unop_tran__identity_int8_fp64)

// C type:   int8_t
// A type:   double
// cast:     int8_t cij = GB_cast_to_int8_t ((double) (aij))
// unaryop:  cij = aij

#define GB_ATYPE \
    double

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: output equals the cast input)
#define GB_OP(z, x) \
    z = x ;

// casting (double -> int8 with GraphBLAS saturating/typecast semantics)
#define GB_CAST(z, aij) \
    int8_t z = GB_cast_to_int8_t ((double) (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                                                   \
    /* aij = Ax [pA] */                             \
    double aij = Ax [pA] ;                          \
    /* Cx [pC] = op (cast (aij)) */                 \
    int8_t z = GB_cast_to_int8_t ((double) (aij)) ; \
    Cx [pC] = z ;                                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_int8_fp64)
(
    int8_t *Cx,         // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            int8_t z = GB_cast_to_int8_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries not in the bitmap
            double aij = Ax [p] ;
            int8_t z = GB_cast_to_int8_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_int8_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // generic transpose template, specialized by the macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
distributed_sort.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <sys/time.h>   /* BUG FIX: gettimeofday() and struct timeval live here */

#define AROW 3
#define ACOL 10000
#define MAX_VALUE 10

/* qsort comparator for ints; the (a>b)-(a<b) form cannot overflow,
 * unlike the naive *da - *db. */
int compare_ints (const void *a, const void *b)
{
    const int *da = (const int *) a;
    const int *db = (const int *) b;
    return (*da > *db) - (*da < *db);
}

/* Sort one row of ACOL ints in place; returns the row for chaining. */
int* sort(int* row)
{
    qsort(row, ACOL, sizeof(int), compare_ints);
    return row;
}

/* Merge sorted A[0..m) and sorted B[0..n) into C starting at index k.
 * A, B and C must not overlap. */
void merge(int m, int n, int k, int A[], int B[], int C[])
{
    int i = 0, j = 0, p;
    while (i < m && j < n) {
        if (A[i] <= B[j]) {
            C[k] = A[i];
            i++;
        } else {
            C[k] = B[j];
            j++;
        }
        k++;
    }
    if (i < m) {
        for (p = i; p < m; p++) {
            C[k] = A[p];
            k++;
        }
    } else {
        for (p = j; p < n; p++) {
            C[k] = B[p];
            k++;
        }
    }
}

/* Merge AROW already-sorted rows into one fully sorted array of
 * AROW*ACOL ints (caller frees).  Returns NULL on allocation failure. */
int* sort_full(int** a)
{
    int* C   = (int*) malloc(sizeof(int) * AROW * ACOL);
    int* tmp = (int*) malloc(sizeof(int) * AROW * ACOL);
    int filled = 0;   /* how many elements of C are merged so far */
    int i, j;
    if (C == NULL || tmp == NULL) {
        free(C);
        free(tmp);
        return NULL;
    }
    for (i = 0; i < AROW; i++) {
        /* BUG FIX: the original called merge(i*ACOL, ACOL, i*ACOL, C, a[i], C),
         * reading from and writing to overlapping regions of C (and writing at
         * the wrong offset).  Merge into a scratch buffer, then copy back. */
        merge(filled, ACOL, 0, C, a[i], tmp);
        filled += ACOL;
        for (j = 0; j < filled; j++) {
            C[j] = tmp[j];
        }
    }
    free(tmp);
    return C;
}

int main(int argc, char *argv[])
{
    int a[AROW][ACOL];
    int i, j;
    struct timeval start, end;

    srand(time(NULL));
    for (i = 0; i < AROW; i++) {
        for (j = 0; j < ACOL; j++) {
            a[i][j] = rand() % MAX_VALUE;
        }
    }

    gettimeofday(&start, NULL);
    /* each row is independent, so rows sort in parallel */
#pragma omp parallel for
    for (i = 0; i < AROW; i++) {
        sort(a[i]);
    }
    /* NOTE(review): sort_full takes int**, but `a` is int[AROW][ACOL]; a call
     * here would need an array of row pointers — left disabled as before. */
    /* sort_full(...); */
    gettimeofday(&end, NULL);

    printf("\n");
    long duration_ms = (end.tv_sec - start.tv_sec) * 1000
                     + (end.tv_usec - start.tv_usec) / 1000;
    printf("Time: %ld ms\n", duration_ms);
    return 0;
}
GB_unaryop__ainv_fp32_int8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_fp32_int8
// op(A') function:  GB_tran__ainv_fp32_int8

// C type:   float
// A type:   int8_t
// cast:     float cij = (float) aij
// unaryop:  cij = -aij    (additive inverse)

#define GB_ATYPE \
    int8_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: additive inverse (negation)
#define GB_OP(z, x) \
    z = -x ;

// casting (int8 -> float; exact, no rounding possible)
#define GB_CASTING(z, aij) \
    float z = (float) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                                          \
    /* aij = Ax [pA] */                    \
    GB_GETA (aij, Ax, pA) ;                \
    /* Cx [pC] = op (cast (aij)) */        \
    GB_CASTING (z, aij) ;                  \
    GB_OP (GB_CX (pC), z) ;                \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_FP32 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_fp32_int8
(
    float *Cx,          // Cx and Ax may be aliased
    int8_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // elementwise, independent iterations: safe to parallelize statically
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_fp32_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // generic transpose template, specialized by the macros above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
allCFG.c
/*
 * NOTE(review): this file appears to be a control-flow-graph / OpenMP test
 * fixture (labels im1..im18, deliberately dead branches, gotos, every OpenMP
 * construct) — the "useless" code is presumably intentional input for a CFG
 * analysis tool; confirm before "cleaning up" any of it.
 */
int g1;
int g2;
int g3;   /* thread-private global (see pragma below) */
int g4;
#pragma omp threadprivate(g3)

int foo1(int aFoo, int, int cFoo);  /* declared, never defined here */
int foo(int aFoo, int, int cFoo);

/* exercises labels inside nested ifs, backward and forward gotos */
void testFoo() {
  g4++;
  auto int aFoo;
#pragma omp flush
im1:
  aFoo = 17;
  if (aFoo == 0) {          /* dead branch: aFoo is 17 here */
  l1Foo:
    aFoo = 10;
    if (aFoo != 10) {       /* dead branch */
    im2:
      foo(1, 1, 1);
#pragma omp flush
    im3:
      goto l1Foo;
    }
  }
  if (aFoo < 10) {
  lFoo:
    return;
  }
  aFoo = 20;
  g4 = 1;
  goto lFoo;
}

int my2(int a) {
  return 1; // im4: return a;
}

int my(int a) {
im4:
  return a;
}

int foo(int aFoo, int bFoo, int cFoo) {
  int tempo;
  tempo = 0;
  tempo += 1;
  if (aFoo > 10) {
    testFoo();
  }
  if (bFoo > 20) {
    return aFoo + bFoo + cFoo;
  }
  g1 = 10;
  g2 = 100 + g1;
  int a;
lNotTestFoo:
  return my(18);
}

void newFunc2() {
  int c = 30;
  int d = 40;
  if (d == 40) {            /* always taken: function returns here */
    return;
  } else if (d > 40) {
    // my2(10);
    c = 60;
    newFoo(0, 1, 2);        /* implicit declaration: newFoo defined below */
    d = 50;
  } else {
    my2(10);
    c = 60;
  }
}

void newFunc() {
  int a = 10;
  int b = 20;
testThis:
  a = b + a;
  a = 100;
  newFunc2();
  a = 110;
  return;
}

int newFoo(int a, int b, int c) {
  int x = 0;
  int y1;
  {
    y1 = 11;
  p:
    x = x + 10;
    x = y1 + 11;
  }
  x = x + 33;
  return (a + b + c) * x;
}

/* main exercises: parallel sections, parallel for, atomic, ordered, sections,
 * single, master, task, flush, plus while/do/for/switch with labels */
int main(int argc, char *argv[]) {
  int x = 0;
  int y1;
  {
    y1 = 11;
  p:
    x = x + 10;
    x = y1 + argc;
  }
  x = x + 33;
  int iter;
im81:
  if (1 < 2)
#pragma omp parallel sections
  {
#pragma omp section
    {
    testThisNonLeaf:
#pragma omp critical
      {
        x = x + 6;
      }
    }
  }
im51:
#pragma omp parallel for
  for (iter = 0; iter < 8; iter++) {
    int x1;   /* NOTE: read uninitialized below (x1 += ...) — presumably intentional for the analysis */
    int y1;
    x1 += my(8);
    foo(x1, y1, 1);
  }
#pragma omp parallel if(my(40))
  {
    int z = 0;
    static int i;
    static int y;
    g3 = g3 + 1;
    i++;
  im52:
#pragma omp for
    for (iter = 0; iter < 8; iter++) {
    im6:
#pragma omp atomic update
      z = z + 5;
    im7:
#pragma omp ordered
      {
        {
          {
            int x1;
            int y1;
            x1 = 1;
            foo(x1, y1, 1);
          }
          {
            y = 10;
          }
        }
      }
    }
  im82:
#pragma omp sections
    {
#pragma omp section
    im9:
#pragma omp critical
      {
        x = x + 6;
      }
#pragma omp section
      {
      testThis:
        while (0) {       /* never entered: body holds the task construct */
        im10:
#pragma omp task
          {
            y = y + 7;
          }
        }
        y = y + 5;
        y = y + 10;
        foo(1, 1, 3);
      }
    }
  im11:
#pragma omp single
    {
      x = x + 2;
    }
  im12:
#pragma omp master
    {
      x = x + 1;
    }
    foo(x, y, 3);
    y = 1;
    x = 10;
  im13:
#pragma omp flush
    y = y;
  }
  if (x > 11)
    x = x - 1;
  if (1 > 2) {              /* dead branch */
    x = x * 8;
  } else {
    x = x + 9;
  }
im14:
  while (x != 0) {
    x = x - 1;
  }
im15:
  do {
    x = x + 1;
    if (0) {                /* dead branch containing continue */
    im16:
      continue;
    }
  } while (x == 10);
myfor:
  for (x = 0; x < 10; x++)
    g1 = x;
im17:
  switch (x) {
    case 1:
    testerLabel:
      x = x + 11;
      break;
    case 2:
      x = x + 12;           /* falls through to default */
    default:
      x = x + 13;
    im18:
      break;
  }
  newFoo(1, 1, 3);
}
scimath.c
/****************************************************************************** * Copyright 2019 Kyle Kloberdanz *****************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> #include "scimath.h" #define MAX(A, B) ((A) > (B)) ? (A) : (B) #define PAGE_SIZE 4096 ksm_GENERIC_VECTOR_IMPL(void_ptr) __attribute__((__noreturn__)) void greetings() { printf("hello world!\n"); exit(0); } double ksm_first_deriv(double (*f)(double), double x) { return (f(x + ksm_DERIV_H_CONST) - f(x - ksm_DERIV_H_CONST)) / (2 * ksm_DERIV_H_CONST); } double ksm_second_deriv(double (*f)(double), double x) { return (f(x + ksm_DERIV_H_CONST) - 2 * f(x) + f(x - ksm_DERIV_H_CONST)) / (ksm_DERIV_H_CONST * ksm_DERIV_H_CONST); } void ksm_map( double (*f)(double), double *dst, const double *src, size_t size ) { size_t i; #pragma omp parallel for for (i = 0; i < size; i++) { dst[i] = f(src[i]); } } /* * TODO: optimize with SIMD and intrinsic sqrt */ void ksm_vector_f64_sqrt(double *dst, const double *src, size_t size) { size_t i; #pragma omp parallel for for (i = 0; i < size; i++) { dst[i] = sqrt(src[i]); } } static struct MemoryPoolNode *kk_do_malloc(size_t size) { size_t capacity = MAX(size, PAGE_SIZE); void *memory = malloc(capacity); struct MemoryPoolNode *pool = malloc(sizeof(struct MemoryPoolNode)); pool->memory = memory; pool->next = NULL; pool->index = size; pool->capacity = capacity; return pool; } void kk_arena_init(struct Arena *arena) { arena->_pool = NULL; arena->_full_pool = NULL; } void *kk_arena_alloc(size_t size, struct Arena *arena) { start_alloc: if (arena->_pool == NULL) { /* first allocation */ struct MemoryPoolNode *pool = kk_do_malloc(size); arena->_pool = pool; return pool->memory; } else { struct MemoryPoolNode *pool; struct MemoryPoolNode *prev = NULL; struct MemoryPoolNode *full_pool = NULL; for (pool = arena->_pool; pool != NULL; pool = pool->next) { size_t bytes_left = pool->capacity - 
pool->index; if (bytes_left < 10) { /* remove full pool from active pools list */ if (prev == NULL) { arena->_pool = pool->next; } else { prev->next = pool->next; } /* move full pool to the _full_pool list */ full_pool = arena->_full_pool; if (full_pool == NULL) { arena->_full_pool = pool; } else { arena->_full_pool = pool; pool->next = full_pool; } goto start_alloc; } else if (size <= bytes_left) { /* has available memory in existing pool */ size_t index = pool->index; pool->index += size; return pool->memory + index; } if (pool) { prev = pool; } } /* needs to add new pool */ pool = kk_do_malloc(size); prev->next = pool; return pool->memory; } } static void free_pools(struct MemoryPoolNode *pool) { struct MemoryPoolNode *head; while (pool) { head = pool->next; free(pool->memory); free(pool); pool = head; } } void kk_arena_free_all(struct Arena *arena) { free_pools(arena->_pool); free_pools(arena->_full_pool); } void *kk_track_malloc(size_t size, struct ksm_void_ptr_Vector *vec) { void *ptr = malloc(size); if (ptr != NULL) { ksm_void_ptr_vector_push(vec, ptr); } return ptr; } void kk_track_free(struct ksm_void_ptr_Vector *vec) { size_t i; for (i = 0; i < vec->size; i++) { free(vec->data[i]); } ksm_void_ptr_vector_free(vec); } void kk_btree_init(struct BTree *btree, int (*compare_keys)(void *, void *)) { btree->compare_keys = compare_keys; btree->block = malloc(sizeof(struct BTreeBlock)); btree->size = kk_BTREE_SIZE; } static void kk_btree_do_insertion( struct BTreeBlock *block, char *key, double value, size_t btree_size ) { /* if there is room, just insert it */ if (block->index < btree_size) { struct BTreeNode *new_node; block->index++; new_node = &block->data[block->index]; new_node->key = key; new_node->value = value; /* now sort */ } } void kk_btree_insert(struct BTree *btree, char *key, double value) { kk_btree_do_insertion(btree->block, key, value, btree->size); }
GB_binop__iseq_uint16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): this translation unit specializes the generic binary-operator
// kernels for the ISEQ operator on uint16_t.  All loop bodies come from the
// shared *_template.c files included below; the macros in this file are the
// only operator/type-specific pieces.  Edit the Generator sources, not this
// file.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__iseq_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_08__iseq_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_02__iseq_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_04__iseq_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__iseq_uint16)
// A*D function (colscale):         GB (_AxD__iseq_uint16)
// D*A function (rowscale):         GB (_DxB__iseq_uint16)
// C+=B function (dense accum):     GB (_Cdense_accumB__iseq_uint16)
// C+=b function (dense accum):     GB (_Cdense_accumb__iseq_uint16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__iseq_uint16)
// C=scalar+B                       GB (_bind1st__iseq_uint16)
// C=scalar+B'                      GB (_bind1st_tran__iseq_uint16)
// C=A+scalar                       GB (_bind2nd__iseq_uint16)
// C=A'+scalar                      GB (_bind2nd_tran__iseq_uint16)

// C type:   uint16_t
// A type:   uint16_t
// A pattern?  0
// B type:   uint16_t
// B pattern?  0

// BinaryOp: cij = (aij == bij)

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
// (the trailing line-splice continues the macro onto the following blank
// line, so the definition ends there — do not remove the blank line)
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint16_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x == y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (compile-time opt-out: every kernel below then returns GrB_NO_VALUE)
#define GB_DISABLE \
    (GxB_NO_ISEQ || GxB_NO_UINT16 || GxB_NO_ISEQ_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISEQ is not in that list, so this variant is compiled out for this operator.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__iseq_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__iseq_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__iseq_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        // NOTE(review): generator emits a return both inside and after this
        // block; the second is unreachable but harmless.
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__iseq_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__iseq_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__iseq_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read when is_eWiseUnion is true (GxB_eWiseUnion)
    uint16_t alpha_scalar ;
    uint16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__iseq_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__iseq_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (GB_BINOP_FLIP is 0 for ISEQ, which is commutative.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__iseq_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__iseq_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__iseq_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B (NULL if B is not bitmap)
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t   x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__iseq_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A (NULL if A is not bitmap)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t   y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (x == aij) ;                      \
}

GrB_Info GB (_bind1st_tran__iseq_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (aij == y) ;                      \
}

GrB_Info GB (_bind2nd_tran__iseq_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ===== begin concatenated file: draw.c (ImageMagick MagickCore) ===== */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD RRRR AAA W W % % D D R R A A W W % % D D RRRR AAAAA W W W % % D D R RN A A WW WW % % DDDD R R A A W W % % % % % % MagickCore Image Drawing Methods % % % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon % rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion", % Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent % (www.appligent.com) contributed the dash pattern, linecap stroking % algorithm, and minor rendering improvements. % */ /* Include declarations. 
*/

#include "MagickCore/studio.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"

/*
  Define declarations.
*/
/* subdivision granularity for Bezier curve flattening */
#define BezierQuantum  200
/* slack added when growing the primitive-info array */
#define PrimitiveExtentPad  2053
/* hard cap on Bezier coordinates, guards runaway allocation */
#define MaxBezierCoordinates  67108864

/* report a malformed MVG point token, set status, and abort the switch arm */
#define ThrowPointExpectedException(token,exception) \
{ \
  (void) ThrowMagickException(exception,GetMagickModule(),DrawError, \
    "NonconformingDrawingPrimitiveDefinition","`%s'",token); \
  status=MagickFalse; \
  break; \
}

/*
  Typedef declarations.
*/
/* One monotonic (in y) polygon edge produced by ConvertPathToPolygon(). */
typedef struct _EdgeInfo
{
  SegmentInfo
    bounds;             /* bounding box of the edge's points */

  double
    scanline;           /* last scanline processed (rasterizer state) */

  PointInfo
    *points;            /* edge vertices, sorted ascending in y */

  size_t
    number_points;

  ssize_t
    direction;          /* 1 = down, 0 = up (see LogPolygonInfo) */

  MagickBooleanType
    ghostline;          /* edge closes an open subpath; not stroked */

  size_t
    highwater;          /* rasterizer resume index */
} EdgeInfo;

/* Parameters of an ellipse/arc element. */
typedef struct _ElementInfo
{
  double
    cx,
    cy,
    major,
    minor,
    angle;
} ElementInfo;

/* Mutable state threaded through the MVG tracing helpers. */
typedef struct _MVGInfo
{
  PrimitiveInfo
    **primitive_info;   /* growable array of primitives being built */

  size_t
    *extent;            /* current capacity of *primitive_info */

  ssize_t
    offset;             /* next free slot */

  PointInfo
    point;

  ExceptionInfo
    *exception;
} MVGInfo;

/* A set of sorted edges, the rasterizer's input. */
typedef struct _PolygonInfo
{
  EdgeInfo
    *edges;

  size_t
    number_edges;
} PolygonInfo;

/* Vertex codes of the intermediate vector-path representation. */
typedef enum
{
  MoveToCode,
  OpenCode,           /* moveto that starts an unclosed subpath */
  GhostlineCode,      /* moveto for a synthetic closing segment */
  LineToCode,
  EndCode
} PathInfoCode;

typedef struct _PathInfo
{
  PointInfo
    point;

  PathInfoCode
    code;
} PathInfo;

/*
  Forward declarations.
*/
static Image
  *DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
    ExceptionInfo *);

static MagickBooleanType
  DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
    ExceptionInfo *),
  RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *),
  TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
  TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
    const double,const MagickBooleanType,const MagickBooleanType),
  TraceBezier(MVGInfo *,const size_t),
  TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
  TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
  TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
  TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
  TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
  TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);

static PrimitiveInfo
  *TraceStrokePolygon(const DrawInfo *,const PrimitiveInfo *,ExceptionInfo *);

static ssize_t
  TracePath(MVGInfo *,const char *,ExceptionInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e D r a w I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
%  The format of the AcquireDrawInfo method is:
%
%      DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  DrawInfo
    *draw_info;

  /* AcquireCriticalMemory() never returns NULL (it aborts on failure) */
  draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info));
  GetDrawInfo((ImageInfo *) NULL,draw_info);
  return(draw_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e D r a w I n f o                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneDrawInfo() makes a copy of the given draw_info structure.  If NULL
%  is specified, a new DrawInfo structure is created initialized to default
%  values.
%
%  The format of the CloneDrawInfo method is:
%
%      DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
%        const DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  DrawInfo
    *clone_info;

  ExceptionInfo
    *exception;

  /* start from a default-initialized DrawInfo, then deep-copy every owned
     field (strings, images, dash pattern, gradient stops) */
  clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);
  exception=AcquireExceptionInfo();
  if (draw_info->id != (char *) NULL)
    (void) CloneString(&clone_info->id,draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    (void) CloneString(&clone_info->primitive,draw_info->primitive);
  if (draw_info->geometry != (char *) NULL)
    (void) CloneString(&clone_info->geometry,draw_info->geometry);
  clone_info->compliance=draw_info->compliance;
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  if (draw_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      exception);
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  if (draw_info->text != (char *) NULL)
    (void) CloneString(&clone_info->text,draw_info->text);
  if (draw_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    (void) CloneString(&clone_info->metrics,draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  if (draw_info->encoding != (char *) NULL)
    (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  if (draw_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      ssize_t
        x;

      /* the dash pattern is a 0.0-terminated array; count its entries */
      for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
      /* allocate 2*x+2 (extra zeroed slack), copy the x entries plus the
         terminating 0.0 */
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
        sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
        sizeof(*clone_info->dash_pattern));
      (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
        (x+1)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;

      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        /* NOTE(review): message reuses the dash-pattern tag here (copy/paste
           from the block above); matches upstream, left unchanged */
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
        (size_t) number_stops*sizeof(*clone_info->gradient.stops));
    }
  clone_info->bounds=draw_info->bounds;
  clone_info->fill_alpha=draw_info->fill_alpha;
  clone_info->stroke_alpha=draw_info->stroke_alpha;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->clip_path=draw_info->clip_path;
  clone_info->clip_units=draw_info->clip_units;
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
      MagickTrue,exception);
  if (draw_info->composite_mask != (Image *) NULL)
    clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
      MagickTrue,exception);
  clone_info->render=draw_info->render;
  clone_info->debug=IsEventLogging();
  exception=DestroyExceptionInfo(exception);
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n v e r t P a t h T o P o l y g o n                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConvertPathToPolygon() converts a path to the more efficient sorted
%  rendering form.
%
%  The format of the ConvertPathToPolygon method is:
%
%      PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info,
%        ExceptionInfo *excetion)
%
%  A description of each parameter follows:
%
%    o ConvertPathToPolygon() returns the path in a more efficient sorted
%      rendering form of type PolygonInfo.
%
%    o draw_info: Specifies a pointer to an DrawInfo structure.
%
%    o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/

/* Release a PolygonInfo and every per-edge point buffer it owns.  Safe to
   call on a partially-built polygon; always returns NULL for convenience. */
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  ssize_t
    i;

  if (polygon_info->edges != (EdgeInfo *) NULL)
    {
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
        if (polygon_info->edges[i].points != (PointInfo *) NULL)
          polygon_info->edges[i].points=(PointInfo *)
            RelinquishMagickMemory(polygon_info->edges[i].points);
      polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(
        polygon_info->edges);
    }
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/* qsort() comparator for EdgeInfo: orders edges by start y, then start x,
   then slope (cross product), then end point, for scanline rasterization. */
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
#define DrawCompareEdge(p,q) \
{ \
  if (((p)-(q)) < 0.0) \
    return(-1); \
  if (((p)-(q)) > 0.0) \
    return(1); \
}

  const PointInfo
    *p,
    *q;

  /*
    Edge sorting for right-handed coordinate system.
  */
  p=((const EdgeInfo *) p_edge)->points;
  q=((const EdgeInfo *) q_edge)->points;
  DrawCompareEdge(p[0].y,q[0].y);
  DrawCompareEdge(p[0].x,q[0].x);
  /* compare slopes via cross product to avoid division */
  DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)*
    (q[1].x-q[0].x));
  DrawCompareEdge(p[1].y,q[1].y);
  DrawCompareEdge(p[1].x,q[1].x);
  return(0);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

/* Debug helper: dump every edge of a polygon to the draw event log. */
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
  EdgeInfo
    *p;

  ssize_t
    i,
    j;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
  p=polygon_info->edges;
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
  {
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
      (double) i);
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
      p->direction != MagickFalse ? "down" : "up");
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
      p->ghostline != MagickFalse ? "transparent" : "opaque");
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
      p->bounds.x2,p->bounds.y2);
    for (j=0; j < (ssize_t) p->number_points; j++)
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
        p->points[j].x,p->points[j].y);
    p++;
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}

/* Reverse a point array in place (used to make every edge ascend in y). */
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  PointInfo
    point;

  ssize_t
    i;

  for (i=0; i < (ssize_t) (number_points >> 1); i++)
  {
    point=points[i];
    points[i]=points[number_points-(i+1)];
    points[number_points-(i+1)]=point;
  }
}

static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info,
  ExceptionInfo *exception)
{
  long
    direction,
    next_direction;

  PointInfo
    point,
    *points;

  PolygonInfo
    *polygon_info;

  SegmentInfo
    bounds;

  ssize_t
    i,
    n;

  MagickBooleanType
    ghostline;

  size_t
    edge,
    number_edges,
    number_points;

  /*
    Convert a path to the more efficient sorted rendering form.

    The path is split into y-monotonic edges: whenever the y-direction of
    consecutive points flips, the current point run is flushed into
    polygon_info->edges (reversed if it was descending) and a new run is
    started.  Both the edge array and each per-edge point array grow by
    doubling.  On any allocation failure, everything already handed to
    polygon_info is freed via DestroyPolygonInfo() and NULL is returned.
  */
  polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PolygonInfo *) NULL);
    }
  number_edges=16;
  polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
    sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(DestroyPolygonInfo(polygon_info));
    }
  (void) memset(polygon_info->edges,0,number_edges*
    sizeof(*polygon_info->edges));
  direction=0;
  edge=0;
  ghostline=MagickFalse;
  n=0;
  number_points=0;
  points=(PointInfo *) NULL;
  (void) memset(&point,0,sizeof(point));
  (void) memset(&bounds,0,sizeof(bounds));
  polygon_info->edges[edge].number_points=(size_t) n;
  polygon_info->edges[edge].scanline=0.0;
  polygon_info->edges[edge].highwater=0;
  polygon_info->edges[edge].ghostline=ghostline;
  polygon_info->edges[edge].direction=(ssize_t) direction;
  polygon_info->edges[edge].points=points;
  polygon_info->edges[edge].bounds=bounds;
  polygon_info->number_edges=0;
  for (i=0; path_info[i].code != EndCode; i++)
  {
    if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
        (path_info[i].code == GhostlineCode))
      {
        /*
          Move to.
        */
        /* flush the edge in progress (if it has at least 2 points) before
           starting a new subpath */
        if ((points != (PointInfo *) NULL) && (n >= 2))
          {
            if (edge == number_edges)
              {
                number_edges<<=1;
                polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                  polygon_info->edges,(size_t) number_edges,
                  sizeof(*polygon_info->edges));
                if (polygon_info->edges == (EdgeInfo *) NULL)
                  {
                    (void) ThrowMagickException(exception,GetMagickModule(),
                      ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                    points=(PointInfo *) RelinquishMagickMemory(points);
                    return(DestroyPolygonInfo(polygon_info));
                  }
              }
            polygon_info->edges[edge].number_points=(size_t) n;
            polygon_info->edges[edge].scanline=(-1.0);
            polygon_info->edges[edge].highwater=0;
            polygon_info->edges[edge].ghostline=ghostline;
            polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
            if (direction < 0)
              ReversePoints(points,(size_t) n);
            polygon_info->edges[edge].points=points;
            polygon_info->edges[edge].bounds=bounds;
            polygon_info->edges[edge].bounds.y1=points[0].y;
            polygon_info->edges[edge].bounds.y2=points[n-1].y;
            points=(PointInfo *) NULL;
            ghostline=MagickFalse;
            edge++;
            polygon_info->number_edges=edge;
          }
        if (points == (PointInfo *) NULL)
          {
            number_points=16;
            points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
              sizeof(*points));
            if (points == (PointInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                return(DestroyPolygonInfo(polygon_info));
              }
          }
        ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
        point=path_info[i].point;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        direction=0;
        n=1;
        continue;
      }
    /*
      Line to.
    */
    /* ties in y are broken by x so horizontal runs get a direction too */
    next_direction=((path_info[i].point.y > point.y) ||
      ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
       (path_info[i].point.x > point.x))) ? 1 : -1;
    if ((points != (PointInfo *) NULL) && (direction != 0) &&
        (direction != next_direction))
      {
        /*
          New edge.
        */
        /* y-direction flipped: close the current y-monotonic edge and start
           a fresh point buffer seeded with the shared vertex */
        point=points[n-1];
        if (edge == number_edges)
          {
            number_edges<<=1;
            polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
              polygon_info->edges,(size_t) number_edges,
              sizeof(*polygon_info->edges));
            if (polygon_info->edges == (EdgeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                points=(PointInfo *) RelinquishMagickMemory(points);
                return(DestroyPolygonInfo(polygon_info));
              }
          }
        polygon_info->edges[edge].number_points=(size_t) n;
        polygon_info->edges[edge].scanline=(-1.0);
        polygon_info->edges[edge].highwater=0;
        polygon_info->edges[edge].ghostline=ghostline;
        polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
        if (direction < 0)
          ReversePoints(points,(size_t) n);
        polygon_info->edges[edge].points=points;
        polygon_info->edges[edge].bounds=bounds;
        polygon_info->edges[edge].bounds.y1=points[0].y;
        polygon_info->edges[edge].bounds.y2=points[n-1].y;
        polygon_info->number_edges=edge+1;
        points=(PointInfo *) NULL;
        number_points=16;
        points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          {
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'","");
            return(DestroyPolygonInfo(polygon_info));
          }
        n=1;
        ghostline=MagickFalse;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        edge++;
      }
    direction=next_direction;
    if (points == (PointInfo *) NULL)
      continue;
    if (n == (ssize_t) number_points)
      {
        number_points<<=1;
        points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          {
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'","");
            return(DestroyPolygonInfo(polygon_info));
          }
      }
    point=path_info[i].point;
    points[n]=point;
    if (point.x < bounds.x1)
      bounds.x1=point.x;
    if (point.x > bounds.x2)
      bounds.x2=point.x;
    n++;
  }
  /* flush the final edge, if any */
  if (points != (PointInfo *) NULL)
    {
      if (n < 2)
        points=(PointInfo *) RelinquishMagickMemory(points);
      else
        {
          if (edge == number_edges)
            {
              number_edges<<=1;
              polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                polygon_info->edges,(size_t) number_edges,
                sizeof(*polygon_info->edges));
              if (polygon_info->edges == (EdgeInfo *) NULL)
                {
                  (void) ThrowMagickException(exception,GetMagickModule(),
                    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
                  return(DestroyPolygonInfo(polygon_info));
                }
            }
          polygon_info->edges[edge].number_points=(size_t) n;
          polygon_info->edges[edge].scanline=(-1.0);
          polygon_info->edges[edge].highwater=0;
          polygon_info->edges[edge].ghostline=ghostline;
          polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
          if (direction < 0)
            ReversePoints(points,(size_t) n);
          polygon_info->edges[edge].points=points;
          polygon_info->edges[edge].bounds=bounds;
          polygon_info->edges[edge].bounds.y1=points[0].y;
          polygon_info->edges[edge].bounds.y2=points[n-1].y;
          points=(PointInfo *) NULL;
          ghostline=MagickFalse;
          edge++;
          polygon_info->number_edges=edge;
        }
    }
  /* shrink both arrays to their exact final sizes */
  polygon_info->number_edges=edge;
  polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(polygon_info->edges,
    polygon_info->number_edges,sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(DestroyPolygonInfo(polygon_info));
    }
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
  {
    EdgeInfo
      *edge_info;

    edge_info=polygon_info->edges+i;
    edge_info->points=(PointInfo *) ResizeQuantumMemory(edge_info->points,
      edge_info->number_points,sizeof(*edge_info->points));
    if (edge_info->points == (PointInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'","");
        return(DestroyPolygonInfo(polygon_info));
      }
  }
  qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
    sizeof(*polygon_info->edges),DrawCompareEdges);
  if (IsEventLogging() != MagickFalse)
    LogPolygonInfo(polygon_info);
  return(polygon_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n v e r t P r i m i t i v e T o P a t h                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
%  path structure.
%
%  The format of the ConvertPrimitiveToPath method is:
%
%      PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
%        const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o ConvertPrimitiveToPath() returns a vector path structure of type
%      PathInfo.
%
%    o draw_info: a structure of type DrawInfo.
%
%    o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
*/

/*
  Trace each coded point of a vector path to the drawing log.  Debug aid
  only; emits one DrawEvent line per path point.
*/
static void LogPathInfo(const PathInfo *path_info)
{
  const PathInfo
    *p;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
      "moveto ghostline" : p->code == OpenCode ? "moveto open" :
      p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" :
      "?");
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}

static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    closed_subpath;

  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,      /* first point of the current subpath (for the closing ghostline) */
    q;      /* previous emitted point (for duplicate suppression) */

  ssize_t
    i,
    n;      /* number of path points emitted so far */

  ssize_t
    coordinates,   /* points remaining in the current subpath */
    start;         /* index of the current subpath's first emitted point */

  /*
    Converts a PrimitiveInfo structure into a vector path structure.
  */
  switch (primitive_info->primitive)
  {
    /*
      These primitives carry no stroke outline, so no path is produced.
    */
    case AlphaPrimitive:
    case ColorPrimitive:
    case ImagePrimitive:
    case PointPrimitive:
    case TextPrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  /*
    Worst case: every input point plus, per subpath, an open marker and a
    ghostline back to the start — 3 path entries per point plus terminator.
  */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PathInfo *) NULL);
    }
  coordinates=0;
  closed_subpath=MagickFalse;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /*
          New subpath.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
        closed_subpath=primitive_info[i].closed_subpath;
      }
    coordinates--;
    if ((code == MoveToCode) || (coordinates <= 0) ||
        (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        /*
          Eliminate duplicate points.
        */
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;  /* next point in current subpath */
    if (closed_subpath != MagickFalse)
      {
        closed_subpath=MagickFalse;
        continue;
      }
    /*
      Mark the p point as open if the subpath is not closed.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  /*
    Trim the allocation down to the points actually emitted.
  */
  path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
    sizeof(*path_info));
  return(path_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y D r a w I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyDrawInfo() deallocates memory associated with an DrawInfo structure.
%
%  The format of the DestroyDrawInfo method is:
%
%      DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  assert(draw_info != (DrawInfo *) NULL);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info->signature == MagickCoreSignature);
  /*
    Release every owned string, image, and array member; each destroy
    helper returns NULL so the member is cleared as it is freed.
  */
  if (draw_info->id != (char *) NULL)
    draw_info->id=DestroyString(draw_info->id);
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
  /*
    Invalidate the signature so stale pointers are caught by asserts.
  */
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w A f f i n e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawAffineImage() composites the source over the destination image as
%  dictated by the affine transform.
%
%  The format of the DrawAffineImage method is:
%
%      MagickBooleanType DrawAffineImage(Image *image,const Image *source,
%        const AffineMatrix *affine,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o source: the source image.
%
%    o affine: the affine transform.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Clip the destination-row interval [edge->x1,edge->x2] at scanline y so
  that, mapped through the (inverse) affine, it stays inside the source
  image's columns and rows.  Returns the clipped interval; an empty
  interval is signalled by x2 < x1.
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
  const double y,const SegmentInfo *edge)
{
  double
    intercept,
    z;

  double
    x;

  SegmentInfo
    inverse_edge;

  /*
    Determine left and right edges.
  */
  inverse_edge.x1=edge->x1;
  inverse_edge.y1=edge->y1;
  inverse_edge.x2=edge->x2;
  inverse_edge.y2=edge->y2;
  z=affine->ry*y+affine->tx;
  if (affine->sx >= MagickEpsilon)
    {
      intercept=(-z/affine->sx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->columns)/affine->sx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->sx < -MagickEpsilon)
      {
        /* negative x scale: the two intercepts swap roles */
        intercept=(-z+(double) image->columns)/affine->sx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->sx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
        {
          /* sx ~ 0: the whole row maps outside the source columns */
          inverse_edge.x2=edge->x1;
          return(inverse_edge);
        }
  /*
    Determine top and bottom edges.
  */
  z=affine->sy*y+affine->ty;
  if (affine->rx >= MagickEpsilon)
    {
      intercept=(-z/affine->rx);
      x=intercept;
      if (x > inverse_edge.x1)
        inverse_edge.x1=x;
      intercept=(-z+(double) image->rows)/affine->rx;
      x=intercept;
      if (x < inverse_edge.x2)
        inverse_edge.x2=x;
    }
  else
    if (affine->rx < -MagickEpsilon)
      {
        intercept=(-z+(double) image->rows)/affine->rx;
        x=intercept;
        if (x > inverse_edge.x1)
          inverse_edge.x1=x;
        intercept=(-z/affine->rx);
        x=intercept;
        if (x < inverse_edge.x2)
          inverse_edge.x2=x;
      }
    else
      if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
        {
          inverse_edge.x2=edge->x2;
          return(inverse_edge);
        }
  return(inverse_edge);
}

/*
  Invert a 2x3 affine matrix; PerceptibleReciprocal() guards against a
  (near-)singular determinant.
*/
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
  AffineMatrix
    inverse_affine;

  double
    determinant;

  determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
    affine->ry);
  inverse_affine.sx=determinant*affine->sy;
  inverse_affine.rx=determinant*(-affine->rx);
  inverse_affine.ry=determinant*(-affine->ry);
  inverse_affine.sy=determinant*affine->sx;
  inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
    inverse_affine.ry;
  inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
    inverse_affine.sy;
  return(inverse_affine);
}

MagickExport MagickBooleanType DrawAffineImage(Image *image,
  const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
  AffineMatrix
    inverse_affine;

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    extent[4],
    min,
    max;

  ssize_t
    i;

  SegmentInfo
    edge;

  ssize_t
    start,
    stop,
    y;

  /*
    Determine bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  /*
    Forward-map the four source corners to find the destination extent.
  */
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    PointInfo
      point;

    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetPixelInfo(image,&zero);
  start=CastDoubleToLong(ceil(edge.y1-0.5));
  stop=CastDoubleToLong(floor(edge.y2+0.5));
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    PixelInfo
      composite,
      pixel;

    PointInfo
      point;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    SegmentInfo
      inverse_edge;

    ssize_t
      x_offset;

    if (status == MagickFalse)
      continue;
    /*
      Clip this scanline's span to the part that maps inside the source.
    */
    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,CastDoubleToLong(
      ceil(inverse_edge.x1-0.5)),y,(size_t) CastDoubleToLong(floor(
      inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),1,exception);
    if (q == (Quantum *) NULL)
      continue;
    pixel=zero;
    composite=zero;
    x_offset=0;
    for (x=CastDoubleToLong(ceil(inverse_edge.x1-0.5)); x <=
         CastDoubleToLong(floor(inverse_edge.x2+0.5)); x++)
    {
      /*
        Inverse-map the destination pixel, sample the source there, and
        composite it over the destination.
      */
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
        point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      GetPixelInfoPixel(image,q,&composite);
      CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
        &composite);
      SetPixelViaPixelInfo(image,&composite,q);
      x_offset++;
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w B o u n d i n g R e c t a n g l e s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawBoundingRectangles() draws the bounding rectangles on the image.  This
%  is only useful for developers debugging the rendering algorithm.
%
%  The format of the DrawBoundingRectangles method is:
%
%      MagickBooleanType DrawBoundingRectangles(Image *image,
%        const DrawInfo *draw_info,PolygonInfo *polygon_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawBoundingRectangles(Image *image,
  const DrawInfo *draw_info,const PolygonInfo *polygon_info,
  ExceptionInfo *exception)
{
  double
    mid;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PointInfo
    end,
    resolution,
    start;

  PrimitiveInfo
    primitive_info[6];

  ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    coordinates;

  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  /*
    Default to 96 DPI unless a density has been set on the draw info.
  */
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;
    }
  /*
    Expand every rectangle by half the (scaled) stroke width.
  */
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    clone_info->stroke_width/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /*
        Union of all edge bounds, then clamp to the image extent.
      */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      /*
        Outline each edge: red for one direction, green for the other.
      */
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].direction != 0)
          status=QueryColorCompliance("#f00",AllCompliance,
            &clone_info->stroke,exception);
        else
          status=QueryColorCompliance("#0f0",AllCompliance,
            &clone_info->stroke,exception);
        if (status == MagickFalse)
          break;
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        status&=TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        status=DrawPrimitive(image,clone_info,primitive_info,exception);
        if (status == MagickFalse)
          break;
      }
      if (i < (ssize_t) polygon_info->number_edges)
        {
          /* the per-edge loop broke out early on an error */
          clone_info=DestroyDrawInfo(clone_info);
          return(status == 0 ? MagickFalse : MagickTrue);
        }
    }
  /*
    Outline the overall bounding box in blue.
  */
  status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  status&=TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  status=DrawPrimitive(image,clone_info,primitive_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  return(status == 0 ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w C l i p P a t h                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawClipPath() draws the clip path on the image mask.
%
%  The format of the DrawClipPath method is:
%
%      MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
%        const char *id,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the clip path id.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
  const char
    *clip_path;

  Image
    *clipping_mask;

  MagickBooleanType
    status;

  /*
    The MVG text of the clip path is stored as an image artifact under id.
  */
  clip_path=GetImageArtifact(image,id);
  if (clip_path == (const char *) NULL)
    return(MagickFalse);
  clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,
    clip_path,exception);
  if (clipping_mask == (Image *) NULL)
    return(MagickFalse);
  status=SetImageMask(image,WritePixelMask,clipping_mask,exception);
  clipping_mask=DestroyImage(clipping_mask);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w C l i p p i n g M a s k                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawClippingMask() draws the clip path and returns it as an image clipping
%  mask.
%
%  The format of the DrawClippingMask method is:
%
%      Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
%        const char *id,const char *clip_path,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the clip path id.
%
%    o clip_path: the clip path.
%
%    o exception: return any errors or warnings in this structure.
% */ static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info, const char *id,const char *clip_path,ExceptionInfo *exception) { DrawInfo *clone_info; Image *clip_mask, *separate_mask; MagickStatusType status; /* Draw a clip path. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); clip_mask=AcquireImage((const ImageInfo *) NULL,exception); status=SetImageExtent(clip_mask,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImage(clip_mask)); status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception); status=QueryColorCompliance("#0000",AllCompliance, &clip_mask->background_color,exception); clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha; clip_mask->background_color.alpha_trait=BlendPixelTrait; status=SetImageBackgroundColor(clip_mask,exception); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s", id); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) CloneString(&clone_info->primitive,clip_path); status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill, exception); if (clone_info->clip_mask != (char *) NULL) clone_info->clip_mask=DestroyString(clone_info->clip_mask); status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke, exception); clone_info->stroke_width=0.0; clone_info->alpha=OpaqueAlpha; clone_info->clip_path=MagickTrue; status=RenderMVGContent(clip_mask,clone_info,0,exception); clone_info=DestroyDrawInfo(clone_info); separate_mask=SeparateImage(clip_mask,AlphaChannel,exception); if (separate_mask != (Image *) NULL) { clip_mask=DestroyImage(clip_mask); clip_mask=separate_mask; status=NegateImage(clip_mask,MagickFalse,exception); if (status == MagickFalse) clip_mask=DestroyImage(clip_mask); } if (status == 
MagickFalse) clip_mask=DestroyImage(clip_mask); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path"); return(clip_mask); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w C o m p o s i t e M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawCompositeMask() draws the mask path and returns it as an image mask. % % The format of the DrawCompositeMask method is: % % Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info, % const char *id,const char *mask_path,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o id: the mask path id. % % o mask_path: the mask path. % % o exception: return any errors or warnings in this structure. % */ static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info, const char *id,const char *mask_path,ExceptionInfo *exception) { Image *composite_mask, *separate_mask; DrawInfo *clone_info; MagickStatusType status; /* Draw a mask path. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); composite_mask=AcquireImage((const ImageInfo *) NULL,exception); status=SetImageExtent(composite_mask,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImage(composite_mask)); status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL, exception); status=QueryColorCompliance("#0000",AllCompliance, &composite_mask->background_color,exception); composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha; composite_mask->background_color.alpha_trait=BlendPixelTrait; (void) SetImageBackgroundColor(composite_mask,exception); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s", id); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) CloneString(&clone_info->primitive,mask_path); status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill, exception); status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke, exception); clone_info->stroke_width=0.0; clone_info->alpha=OpaqueAlpha; status=RenderMVGContent(composite_mask,clone_info,0,exception); clone_info=DestroyDrawInfo(clone_info); separate_mask=SeparateImage(composite_mask,AlphaChannel,exception); if (separate_mask != (Image *) NULL) { composite_mask=DestroyImage(composite_mask); composite_mask=separate_mask; status=NegateImage(composite_mask,MagickFalse,exception); if (status == MagickFalse) composite_mask=DestroyImage(composite_mask); } if (status == MagickFalse) composite_mask=DestroyImage(composite_mask); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path"); return(composite_mask); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w D a s h 
P o l y g o n                                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
%  image while respecting the dash offset and dash pattern attributes.
%
%  The format of the DrawDashPolygon method is:
%
%      MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
%        const PrimitiveInfo *primitive_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
  double
    length,           /* remaining length of the current dash-pattern entry */
    maximum_length,   /* length of the current polygon segment */
    offset,           /* dash offset still to be consumed */
    scale,
    total_length;     /* distance consumed along the current segment */

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PrimitiveInfo
    *dash_polygon;    /* scratch vertex list for one dash at a time */

  double
    dx,
    dy;

  ssize_t
    i;

  size_t
    number_vertices;

  ssize_t
    j,   /* next free slot in dash_polygon */
    n;   /* index into draw_info->dash_pattern; odd n = gap, even n = dash */

  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  scale=ExpandAffine(&draw_info->affine);
  length=scale*draw_info->dash_pattern[0];
  offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  j=1;
  /*
    Consume the dash offset: walk the pattern until the offset is used up,
    leaving n at the active pattern entry and length at its remainder.
  */
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*draw_info->dash_pattern[n];
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  /*
    Walk the polygon segment by segment, carving each into dashes/gaps.
  */
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot(dx,dy);
    if (maximum_length > (double) (MaxBezierCoordinates >> 2))
      continue;  /* skip absurdly long segments */
    if (fabs(length) < MagickEpsilon)
      {
        /* current pattern entry exhausted: advance (wrap at terminator) */
        if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
          n++;
        if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
          n=0;
        length=scale*draw_info->dash_pattern[n];
      }
    for (total_length=0.0; (length >= 0.0) && (maximum_length >=
         (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /* gap entry: restart the scratch polygon at the gap's end */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          j=1;
        }
      else
        {
          /* dash entry: close the dash at this point and stroke it */
          if ((j+1) > (ssize_t) number_vertices)
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
          if (status == MagickFalse)
            break;
        }
      if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
        n++;
      if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
        n=0;
      length=scale*draw_info->dash_pattern[n];
    }
    /* carry the unconsumed pattern length into the next segment */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  if ((status != MagickFalse) && (total_length < maximum_length) &&
      ((n & 0x01) == 0) && (j > 1))
    {
      /*
        Flush the final partial dash.
      */
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w G r a d i e n t I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawGradientImage() draws a linear gradient on the image.
%
%  The format of the DrawGradientImage method is:
%
%      MagickBooleanType DrawGradientImage(Image *image,
%        const DrawInfo *draw_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Map pixel (x,y) to a position along the gradient: for linear gradients
  the projection onto the gradient vector (unnormalized; the caller divides
  by the vector length), for radial gradients the (angle-corrected)
  distance from the center.
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  switch (gradient->type)
  {
    case UndefinedGradient:
    case LinearGradient:
    {
      double
        gamma,
        length,
        offset,
        scale;

      PointInfo
        p,
        q;

      const SegmentInfo
        *gradient_vector;

      gradient_vector=(&gradient->gradient_vector);
      p.x=gradient_vector->x2-gradient_vector->x1;
      p.y=gradient_vector->y2-gradient_vector->y1;
      q.x=(double) x-gradient_vector->x1;
      q.y=(double) y-gradient_vector->y1;
      length=sqrt(q.x*q.x+q.y*q.y);
      gamma=sqrt(p.x*p.x+p.y*p.y)*length;
      gamma=PerceptibleReciprocal(gamma);
      scale=p.x*q.x+p.y*q.y;
      offset=gamma*scale*length;
      return(offset);
    }
    case RadialGradient:
    {
      PointInfo
        v;

      if (gradient->spread == RepeatSpread)
        {
          /* repeat spread uses the raw Euclidean distance */
          v.x=(double) x-gradient->center.x;
          v.y=(double) y-gradient->center.y;
          return(sqrt(v.x*v.x+v.y*v.y));
        }
      /* rotate into the ellipse frame and normalize by the radii */
      v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
        gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
      v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
        gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
      return(sqrt(v.x*v.x+v.y*v.y));
    }
  }
  return(0.0);
}

/*
  qsort() comparator ordering gradient stops by ascending offset.
*/
static int StopInfoCompare(const void *x,const void *y)
{
  StopInfo
    *stop_1,
    *stop_2;

  stop_1=(StopInfo *) x;
  stop_2=(StopInfo *) y;
  if (stop_1->offset > stop_2->offset)
    return(1);
  if (fabs(stop_1->offset-stop_2->offset) <= MagickEpsilon)
    return(0);
  return(-1);
}

MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const GradientInfo
    *gradient;

  const SegmentInfo
    *gradient_vector;

  double
    length;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    point;

  RectangleInfo
    bounding_box;

  ssize_t
    y;

  /*
    Draw linear or radial gradient on image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  /*
    Stops must be in ascending offset order for the interpolation below.
  */
  qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
    StopInfoCompare);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    double
      alpha,
      offset;

    PixelInfo
      composite,
      pixel;

    Quantum
      *magick_restrict q;

    ssize_t
      i,
      x;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    composite=zero;
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /*
            Clamp: offsets outside [0,1] take the first/last stop color.
          */
          if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
              (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /*
            Mirror: fold the offset back and forth into [0,1].
          */
          if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
              (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          double
            repeat;

          MagickBooleanType
            antialias;   /* blend across the seam where the tile wraps */

          antialias=MagickFalse;
          repeat=0.0;
          /*
            Tile: wrap the offset modulo the gradient length/radius.
          */
          if ((x != CastDoubleToLong(ceil(gradient_vector->x1-0.5))) ||
              (y != CastDoubleToLong(ceil(gradient_vector->y1-0.5))))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawImage() draws a graphic primitive on your image.  The primitive
%  may be represented as a string or filename.  Precede the filename with an
%  "at" sign (@) and the contents of the file are drawn on the image.  You
%  can affect how text is drawn by setting one or more members of the draw
%  info structure.
%
%  The format of the DrawImage method is:
%
%      MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o exception: return any errors or warnings in this structure.
% */ static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info, const size_t pad) { double extent; size_t quantum; /* Check if there is enough storage for drawing pimitives. */ extent=(double) mvg_info->offset+pad+PrimitiveExtentPad+1; quantum=sizeof(**mvg_info->primitive_info); if (extent <= (double) *mvg_info->extent) return(MagickTrue); *mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory( *mvg_info->primitive_info,(size_t) extent,quantum); if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL) { ssize_t i; *mvg_info->extent=(size_t) extent; for (i=mvg_info->offset+1; i < (ssize_t) extent; i++) (*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive; return(MagickTrue); } /* Reallocation failed, allocate a primitive to facilitate unwinding. */ (void) ThrowMagickException(mvg_info->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL) *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory( *mvg_info->primitive_info); *mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory( PrimitiveExtentPad*quantum); (void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum); *mvg_info->extent=1; return(MagickFalse); } static inline double GetDrawValue(const char *magick_restrict string, char **magick_restrict sentinal) { char **magick_restrict q; double value; q=sentinal; value=InterpretLocaleValue(string,q); if ((IsNaN(value) != 0) || (value < -((double) SSIZE_MAX-512.0)) || (value > ((double) SSIZE_MAX-512.0))) return(0.0); sentinal=q; return(value); } static int MVGMacroCompare(const void *target,const void *source) { const char *p, *q; p=(const char *) target; q=(const char *) source; return(strcmp(p,q)); } static SplayTreeInfo *GetMVGMacros(const char *primitive) { char *macro, *token; const char *q; size_t extent; SplayTreeInfo *macros; /* Scan graphic primitives for definitions and classes. 
*/
  if (primitive == (const char *) NULL)
    return((SplayTreeInfo *) NULL);
  macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
    RelinquishMagickMemory);
  macro=AcquireString(primitive);
  token=AcquireString(primitive);
  extent=strlen(token)+MagickPathExtent;
  /*
    Walk the stream token by token looking for `push <type> "<name>"`.
  */
  for (q=primitive; *q != '\0'; )
  {
    if (GetNextToken(q,&q,extent,token) < 1)
      break;
    if (*token == '\0')
      break;
    if (LocaleCompare("push",token) == 0)
      {
        const char
          *end,
          *start;

        (void) GetNextToken(q,&q,extent,token);
        if (*q == '"')
          {
            char
              name[MagickPathExtent];

            const char
              *p;

            ssize_t
              n;

            /*
              Named macro (e.g. push graphic-context "wheel").
            */
            (void) GetNextToken(q,&q,extent,token);
            start=q;
            end=q;
            (void) CopyMagickString(name,token,MagickPathExtent);
            /*
              n tracks push/pop nesting depth; the macro body ends at the
              matching pop for this push.
            */
            n=1;
            for (p=q; *p != '\0'; )
            {
              if (GetNextToken(p,&p,extent,token) < 1)
                break;
              if (*token == '\0')
                break;
              if (LocaleCompare(token,"pop") == 0)
                {
                  /* Candidate end: just before this "pop" token. */
                  end=p-strlen(token)-1;
                  n--;
                }
              if (LocaleCompare(token,"push") == 0)
                n++;
              if ((n == 0) && (end > start))
                {
                  /*
                    Extract macro: copy the body between the push name and
                    the matching pop, keyed by the macro name.
                  */
                  (void) GetNextToken(p,&p,extent,token);
                  (void) CopyMagickString(macro,start,(size_t) (end-start));
                  (void) AddValueToSplayTree(macros,ConstantString(name),
                    ConstantString(macro));
                  break;
                }
            }
          }
      }
  }
  token=DestroyString(token);
  macro=DestroyString(macro);
  return(macros);
}

/*
  IsPoint() returns MagickTrue if `point` begins with a parsable numeric
  value.  It is MagickFalse only when GetDrawValue() yields ~0 AND consumed
  no characters (p == point), i.e. the text is not a number at all.
*/
static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *p;

  double
    value;

  value=GetDrawValue(point,&p);
  return((fabs(value) < MagickEpsilon) && (p == point) ?
MagickFalse : MagickTrue);
}

/*
  TracePoint() stores a single point as a one-coordinate, open primitive in
  *primitive_info and returns MagickTrue.
*/
static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info,
  const PointInfo point)
{
  primitive_info->coordinates=1;
  primitive_info->closed_subpath=MagickFalse;
  primitive_info->point=point;
  return(MagickTrue);
}

/*
  RenderMVGContent() interprets a string of MVG graphic primitives and draws
  them on the image; `depth` counts recursive invocations and is capped at
  MagickMaxRecursionDepth to reject maliciously nested vector graphics.
  Returns MagickTrue on success, MagickFalse on error (with details in
  `exception`).
*/
static MagickBooleanType RenderMVGContent(Image *image,
  const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception)
{
#define RenderImageTag "Render/Image"

  AffineMatrix
    affine,
    current;

  char
    keyword[MagickPathExtent],
    geometry[MagickPathExtent],
    *next_token,
    pattern[MagickPathExtent],
    *primitive,
    *token;

  const char
    *q;

  double
    angle,
    coordinates,
    cursor,
    factor,
    primitive_extent;

  DrawInfo
    *clone_info,
    **graphic_context;  /* stack of contexts; n indexes the current one */

  MagickBooleanType
    proceed;

  MagickStatusType
    status;

  MVGInfo
    mvg_info;

  PointInfo
    point;

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  const char
    *p;

  ssize_t
    i,
    x;

  SegmentInfo
    bounds;

  size_t
    extent,
    number_points,
    number_stops;

  SplayTreeInfo
    *macros;

  ssize_t
    defsDepth,
    j,
    k,
    n,
    symbolDepth;

  StopInfo
    *stops;

  TypeMetric
    metrics;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /*
    Guard against unbounded recursion from nested use/clip-path/macro MVG.
  */
  if (depth > MagickMaxRecursionDepth)
    ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply",
      image->filename);
  if ((draw_info->primitive == (char *) NULL) ||
      (*draw_info->primitive == '\0'))
    return(MagickFalse);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* Drawing composites with alpha; ensure the image carries a channel. */
  if (image->alpha_trait == UndefinedPixelTrait)
    {
      status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      if (status == MagickFalse)
        return(MagickFalse);
    }
  if
((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) && (*(draw_info->primitive+1) != '-') && (depth == 0)) primitive=FileToString(draw_info->primitive+1,~0UL,exception); else primitive=AcquireString(draw_info->primitive); if (primitive == (char *) NULL) return(MagickFalse); primitive_extent=(double) strlen(primitive); (void) SetImageArtifact(image,"mvg:vector-graphics",primitive); n=0; number_stops=0; stops=(StopInfo *) NULL; /* Allocate primitive info memory. */ graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { primitive=DestroyString(primitive); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } number_points=PrimitiveExtentPad; primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*primitive_info)); if (primitive_info == (PrimitiveInfo *) NULL) { primitive=DestroyString(primitive); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(primitive_info,0,(size_t) number_points* sizeof(*primitive_info)); (void) memset(&mvg_info,0,sizeof(mvg_info)); mvg_info.primitive_info=(&primitive_info); mvg_info.extent=(&number_points); mvg_info.exception=exception; graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info); graphic_context[n]->viewbox=image->page; if ((image->page.width == 0) || (image->page.height == 0)) { graphic_context[n]->viewbox.width=image->columns; graphic_context[n]->viewbox.height=image->rows; } token=AcquireString(primitive); extent=strlen(token)+MagickPathExtent; defsDepth=0; symbolDepth=0; cursor=0.0; macros=GetMVGMacros(primitive); status=MagickTrue; for (q=primitive; *q != '\0'; ) { /* Interpret graphic primitive. 
*/ if (GetNextToken(q,&q,MagickPathExtent,keyword) < 1) break; if (*keyword == '\0') break; if (*keyword == '#') { /* Comment. */ while ((*q != '\n') && (*q != '\0')) q++; continue; } p=q-strlen(keyword)-1; primitive_type=UndefinedPrimitive; current=graphic_context[n]->affine; GetAffineMatrix(&affine); *token='\0'; switch (*keyword) { case ';': break; case 'a': case 'A': { if (LocaleCompare("affine",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.sx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.rx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ry=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.sy=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.tx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ty=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("alpha",keyword) == 0) { primitive_type=AlphaPrimitive; break; } if (LocaleCompare("arc",keyword) == 0) { primitive_type=ArcPrimitive; break; } status=MagickFalse; break; } case 'b': case 'B': { if (LocaleCompare("bezier",keyword) == 0) { primitive_type=BezierPrimitive; break; } if (LocaleCompare("border-color",keyword) == 0) { 
(void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->border_color,exception); break; } status=MagickFalse; break; } case 'c': case 'C': { if (LocaleCompare("class",keyword) == 0) { const char *mvg_class; (void) GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } if (LocaleCompare(token,graphic_context[n]->id) == 0) break; mvg_class=(const char *) GetValueFromSplayTree(macros,token); if ((mvg_class != (const char *) NULL) && (p > primitive)) { char *elements; ssize_t offset; /* Inject class elements in stream. */ offset=(ssize_t) (p-primitive); elements=AcquireString(primitive); elements[offset]='\0'; (void) ConcatenateString(&elements,mvg_class); (void) ConcatenateString(&elements,"\n"); (void) ConcatenateString(&elements,q); primitive=DestroyString(primitive); primitive=elements; q=primitive+offset; } break; } if (LocaleCompare("clip-path",keyword) == 0) { const char *clip_path; /* Take a node from within the MVG document, and duplicate it here. 
*/ (void) GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } (void) CloneString(&graphic_context[n]->clip_mask,token); clip_path=(const char *) GetValueFromSplayTree(macros,token); if (clip_path != (const char *) NULL) { if (graphic_context[n]->clipping_mask != (Image *) NULL) graphic_context[n]->clipping_mask= DestroyImage(graphic_context[n]->clipping_mask); graphic_context[n]->clipping_mask=DrawClippingMask(image, graphic_context[n],token,clip_path,exception); if (graphic_context[n]->compliance != SVGCompliance) { clip_path=(const char *) GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image, graphic_context[n]->clip_mask,clip_path); status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); } } break; } if (LocaleCompare("clip-rule",keyword) == 0) { ssize_t fill_rule; (void) GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("clip-units",keyword) == 0) { ssize_t clip_units; (void) GetNextToken(q,&q,extent,token); clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse, token); if (clip_units == -1) { status=MagickFalse; break; } graphic_context[n]->clip_units=(ClipPathUnits) clip_units; if (clip_units == ObjectBoundingBox) { GetAffineMatrix(&current); affine.sx=draw_info->bounds.x2; affine.sy=draw_info->bounds.y2; affine.tx=draw_info->bounds.x1; affine.ty=draw_info->bounds.y1; break; } break; } if (LocaleCompare("circle",keyword) == 0) { primitive_type=CirclePrimitive; break; } if (LocaleCompare("color",keyword) == 0) { primitive_type=ColorPrimitive; break; } if (LocaleCompare("compliance",keyword) == 0) { /* MVG compliance associates a clipping mask with an image; SVG compliance associates a clipping mask with a graphics context. 
*/ (void) GetNextToken(q,&q,extent,token); graphic_context[n]->compliance=(ComplianceType) ParseCommandOption( MagickComplianceOptions,MagickFalse,token); break; } if (LocaleCompare("currentColor",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } status=MagickFalse; break; } case 'd': case 'D': { if (LocaleCompare("decorate",keyword) == 0) { ssize_t decorate; (void) GetNextToken(q,&q,extent,token); decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse, token); if (decorate == -1) { status=MagickFalse; break; } graphic_context[n]->decorate=(DecorationType) decorate; break; } if (LocaleCompare("density",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->density,token); break; } if (LocaleCompare("direction",keyword) == 0) { ssize_t direction; (void) GetNextToken(q,&q,extent,token); direction=ParseCommandOption(MagickDirectionOptions,MagickFalse, token); if (direction == -1) status=MagickFalse; else graphic_context[n]->direction=(DirectionType) direction; break; } status=MagickFalse; break; } case 'e': case 'E': { if (LocaleCompare("ellipse",keyword) == 0) { primitive_type=EllipsePrimitive; break; } if (LocaleCompare("encoding",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->encoding,token); break; } status=MagickFalse; break; } case 'f': case 'F': { if (LocaleCompare("fill",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->fill_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->fill,exception); if (graphic_context[n]->fill_alpha != OpaqueAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; } break; } if 
(LocaleCompare("fill-opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; opacity=MagickMin(MagickMax(factor* GetDrawValue(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) graphic_context[n]->fill_alpha*=opacity; else graphic_context[n]->fill_alpha=QuantumRange*opacity; if (graphic_context[n]->fill.alpha != TransparentAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; else graphic_context[n]->fill.alpha=(MagickRealType) ClampToQuantum(QuantumRange*(1.0-opacity)); break; } if (LocaleCompare("fill-rule",keyword) == 0) { ssize_t fill_rule; (void) GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("font",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->font,token); if (LocaleCompare("none",token) == 0) graphic_context[n]->font=(char *) RelinquishMagickMemory( graphic_context[n]->font); break; } if (LocaleCompare("font-family",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->family,token); break; } if (LocaleCompare("font-size",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->pointsize=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("font-stretch",keyword) == 0) { ssize_t stretch; (void) GetNextToken(q,&q,extent,token); stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token); if (stretch == -1) { status=MagickFalse; break; } graphic_context[n]->stretch=(StretchType) stretch; break; } if 
(LocaleCompare("font-style",keyword) == 0) { ssize_t style; (void) GetNextToken(q,&q,extent,token); style=ParseCommandOption(MagickStyleOptions,MagickFalse,token); if (style == -1) { status=MagickFalse; break; } graphic_context[n]->style=(StyleType) style; break; } if (LocaleCompare("font-weight",keyword) == 0) { ssize_t weight; (void) GetNextToken(q,&q,extent,token); weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token); if (weight == -1) weight=(ssize_t) StringToUnsignedLong(token); graphic_context[n]->weight=(size_t) weight; break; } status=MagickFalse; break; } case 'g': case 'G': { if (LocaleCompare("gradient-units",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("gravity",keyword) == 0) { ssize_t gravity; (void) GetNextToken(q,&q,extent,token); gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token); if (gravity == -1) { status=MagickFalse; break; } graphic_context[n]->gravity=(GravityType) gravity; break; } status=MagickFalse; break; } case 'i': case 'I': { if (LocaleCompare("image",keyword) == 0) { ssize_t compose; primitive_type=ImagePrimitive; (void) GetNextToken(q,&q,extent,token); compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token); if (compose == -1) { status=MagickFalse; break; } graphic_context[n]->compose=(CompositeOperator) compose; break; } if (LocaleCompare("interline-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interline_spacing=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("interword-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'k': case 'K': { if (LocaleCompare("kerning",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); 
graphic_context[n]->kerning=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'l': case 'L': { if (LocaleCompare("letter-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (IsPoint(token) == MagickFalse) break; clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); clone_info->text=AcquireString(" "); status&=GetTypeMetrics(image,clone_info,&metrics,exception); graphic_context[n]->kerning=metrics.width* GetDrawValue(token,&next_token); clone_info=DestroyDrawInfo(clone_info); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("line",keyword) == 0) { primitive_type=LinePrimitive; break; } status=MagickFalse; break; } case 'm': case 'M': { if (LocaleCompare("mask",keyword) == 0) { const char *mask_path; /* Take a node from within the MVG document, and duplicate it here. */ (void) GetNextToken(q,&q,extent,token); mask_path=(const char *) GetValueFromSplayTree(macros,token); if (mask_path != (const char *) NULL) { if (graphic_context[n]->composite_mask != (Image *) NULL) graphic_context[n]->composite_mask= DestroyImage(graphic_context[n]->composite_mask); graphic_context[n]->composite_mask=DrawCompositeMask(image, graphic_context[n],token,mask_path,exception); if (graphic_context[n]->compliance != SVGCompliance) status=SetImageMask(image,CompositePixelMask, graphic_context[n]->composite_mask,exception); } break; } status=MagickFalse; break; } case 'o': case 'O': { if (LocaleCompare("offset",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 
0.01 : 1.0; opacity=MagickMin(MagickMax(factor* GetDrawValue(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) { graphic_context[n]->fill_alpha*=opacity; graphic_context[n]->stroke_alpha*=opacity; } else { graphic_context[n]->fill_alpha=QuantumRange*opacity; graphic_context[n]->stroke_alpha=QuantumRange*opacity; } break; } status=MagickFalse; break; } case 'p': case 'P': { if (LocaleCompare("path",keyword) == 0) { primitive_type=PathPrimitive; break; } if (LocaleCompare("point",keyword) == 0) { primitive_type=PointPrimitive; break; } if (LocaleCompare("polyline",keyword) == 0) { primitive_type=PolylinePrimitive; break; } if (LocaleCompare("polygon",keyword) == 0) { primitive_type=PolygonPrimitive; break; } if (LocaleCompare("pop",keyword) == 0) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare("class",token) == 0) break; if (LocaleCompare("clip-path",token) == 0) break; if (LocaleCompare("defs",token) == 0) { defsDepth--; graphic_context[n]->render=defsDepth > 0 ? MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) break; if (LocaleCompare("graphic-context",token) == 0) { if (n <= 0) { (void) ThrowMagickException(exception,GetMagickModule(), DrawError,"UnbalancedGraphicContextPushPop","`%s'",token); status=MagickFalse; n=0; break; } if ((graphic_context[n]->clip_mask != (char *) NULL) && (graphic_context[n]->compliance != SVGCompliance)) if (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0) status=SetImageMask(image,WritePixelMask,(Image *) NULL, exception); graphic_context[n]=DestroyDrawInfo(graphic_context[n]); n--; break; } if (LocaleCompare("mask",token) == 0) break; if (LocaleCompare("pattern",token) == 0) break; if (LocaleCompare("symbol",token) == 0) { symbolDepth--; graphic_context[n]->render=symbolDepth > 0 ? 
MagickFalse : MagickTrue; break; } status=MagickFalse; break; } if (LocaleCompare("push",keyword) == 0) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare("class",token) == 0) { /* Class context. */ for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"class") != 0) continue; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("clip-path",token) == 0) { (void) GetNextToken(q,&q,extent,token); for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"clip-path") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("defs",token) == 0) { defsDepth++; graphic_context[n]->render=defsDepth > 0 ? 
MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent], type[MagickPathExtent]; SegmentInfo segment; (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(type,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); segment.x1=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.y1=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.x2=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); segment.y2=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (LocaleCompare(type,"radial") == 0) { (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); } for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"gradient") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); bounds.x1=graphic_context[n]->affine.sx*segment.x1+ graphic_context[n]->affine.ry*segment.y1+ graphic_context[n]->affine.tx; bounds.y1=graphic_context[n]->affine.rx*segment.x1+ graphic_context[n]->affine.sy*segment.y1+ graphic_context[n]->affine.ty; bounds.x2=graphic_context[n]->affine.sx*segment.x2+ 
graphic_context[n]->affine.ry*segment.y2+ graphic_context[n]->affine.tx; bounds.y2=graphic_context[n]->affine.rx*segment.x2+ graphic_context[n]->affine.sy*segment.y2+ graphic_context[n]->affine.ty; (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-type",name); (void) SetImageArtifact(image,key,type); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%gx%g%+.15g%+.15g", MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0), MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0), bounds.x1,bounds.y1); (void) SetImageArtifact(image,key,geometry); (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("graphic-context",token) == 0) { n++; graphic_context=(DrawInfo **) ResizeQuantumMemory( graphic_context,(size_t) (n+1),sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL, graphic_context[n-1]); if (*q == '"') { (void) GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->id,token); } break; } if (LocaleCompare("mask",token) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("pattern",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent]; RectangleInfo bounds; (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); bounds.x=CastDoubleToLong(ceil(GetDrawValue(token, &next_token)-0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); bounds.y=CastDoubleToLong(ceil(GetDrawValue(token, &next_token)-0.5)); if (token == next_token) 
ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); bounds.width=(size_t) CastDoubleToLong(floor(GetDrawValue( token,&next_token)+0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); bounds.height=(size_t) floor(GetDrawValue(token,&next_token)+ 0.5); if (token == next_token) ThrowPointExpectedException(token,exception); for (p=q; *q != '\0'; ) { if (GetNextToken(q,&q,extent,token) < 1) break; if (LocaleCompare(token,"pop") != 0) continue; (void) GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"pattern") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double) bounds.height,(double) bounds.x,(double) bounds.y); (void) SetImageArtifact(image,key,geometry); (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("symbol",token) == 0) { symbolDepth++; graphic_context[n]->render=symbolDepth > 0 ? 
/* NOTE(review): interior of RenderMVGContent()'s MVG keyword switch; the
   enclosing function, its loop, and locals (q, token, keyword, affine, n,
   graphic_context, stops, number_stops, status, ...) are declared above this
   chunk.  Cases below parse drawing keywords and update local affine or
   graphic-context state. */
MagickFalse : MagickTrue; break; } status=MagickFalse; break; } status=MagickFalse; break; } case 'r': case 'R': { if (LocaleCompare("rectangle",keyword) == 0) { primitive_type=RectanglePrimitive; break; } if (LocaleCompare("rotate",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0))); affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0))); affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0)))); affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0))); break; } if (LocaleCompare("roundRectangle",keyword) == 0) { primitive_type=RoundRectanglePrimitive; break; } status=MagickFalse; break; } case 's': case 'S': { if (LocaleCompare("scale",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.sx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.sy=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("skewX",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.ry=sin(DegreesToRadians(angle)); break; } if (LocaleCompare("skewY",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); angle=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.rx=(-tan(DegreesToRadians(angle)/2.0)); break; } if (LocaleCompare("stop-color",keyword) == 0) { PixelInfo stop_color; number_stops++; if (number_stops == 1) stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops)); else if (number_stops > 2) stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops, sizeof(*stops)); if (stops
/* 'stop-color' continues: bail out of this case if the gradient stop array
   could not be (re)allocated. */
== (StopInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } (void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance,&stop_color, exception); stops[number_stops-1].color=stop_color; (void) GetNextToken(q,&q,extent,token); factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; stops[number_stops-1].offset=factor*GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("stroke",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->stroke_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->stroke,exception); if (graphic_context[n]->stroke_alpha != OpaqueAlpha) graphic_context[n]->stroke.alpha= graphic_context[n]->stroke_alpha; } break; } if (LocaleCompare("stroke-antialias",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse; break; } if (LocaleCompare("stroke-dasharray",keyword) == 0) { if (graphic_context[n]->dash_pattern != (double *) NULL) graphic_context[n]->dash_pattern=(double *) RelinquishMagickMemory(graphic_context[n]->dash_pattern); if (IsPoint(q) != MagickFalse) { const char *r; r=q; (void) GetNextToken(r,&r,extent,token); if (*token == ',') (void) GetNextToken(r,&r,extent,token); for (x=0; IsPoint(token) != MagickFalse; x++) { (void) GetNextToken(r,&r,extent,token); if (*token == ',') (void) GetNextToken(r,&r,extent,token); } graphic_context[n]->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2), sizeof(*graphic_context[n]->dash_pattern)); if (graphic_context[n]->dash_pattern == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); status=MagickFalse; break; } (void) memset(graphic_context[n]->dash_pattern,0,(size_t) (2*x+2)*sizeof(*graphic_context[n]->dash_pattern)); for (j=0; j < x; j++) { (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->dash_pattern[j]=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->dash_pattern[j] < 0.0) status=MagickFalse; } if ((x & 0x01) != 0) for ( ; j < (2*x); j++) graphic_context[n]->dash_pattern[j]= graphic_context[n]->dash_pattern[j-x]; graphic_context[n]->dash_pattern[j]=0.0; break; } (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("stroke-dashoffset",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->dash_offset=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("stroke-linecap",keyword) == 0) { ssize_t linecap; (void) GetNextToken(q,&q,extent,token); linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token); if (linecap == -1) {
/* Unknown option value: mark the MVG stream as nonconforming. */
status=MagickFalse; break; } graphic_context[n]->linecap=(LineCap) linecap; break; } if (LocaleCompare("stroke-linejoin",keyword) == 0) { ssize_t linejoin; (void) GetNextToken(q,&q,extent,token); linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse, token); if (linejoin == -1) { status=MagickFalse; break; } graphic_context[n]->linejoin=(LineJoin) linejoin; break; } if (LocaleCompare("stroke-miterlimit",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->miterlimit=StringToUnsignedLong(token); break; } if (LocaleCompare("stroke-opacity",keyword) == 0) { double opacity; (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; opacity=MagickMin(MagickMax(factor* GetDrawValue(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->compliance == SVGCompliance) graphic_context[n]->stroke_alpha*=opacity; else graphic_context[n]->stroke_alpha=QuantumRange*opacity; if (graphic_context[n]->stroke.alpha != TransparentAlpha) graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha; else graphic_context[n]->stroke.alpha=(MagickRealType) ClampToQuantum(QuantumRange*(1.0-opacity)); break; } if (LocaleCompare("stroke-width",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; graphic_context[n]->stroke_width=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 't': case 'T': { if (LocaleCompare("text",keyword) == 0) { primitive_type=TextPrimitive; cursor=0.0; break; } if (LocaleCompare("text-align",keyword) == 0) { ssize_t align; (void) GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align;
break; } if (LocaleCompare("text-anchor",keyword) == 0) { ssize_t align; (void) GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-antialias",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->text_antialias=StringToLong(token) != 0 ? MagickTrue : MagickFalse; break; } if (LocaleCompare("text-undercolor",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->undercolor,exception); break; } if (LocaleCompare("translate",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.tx=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ty=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); cursor=0.0; break; } status=MagickFalse; break; } case 'u': case 'U': { if (LocaleCompare("use",keyword) == 0) { const char *use; /* Get a macro from the MVG document, and "use" it here. 
*/
/* Look up the named macro recorded earlier in the splay tree and render its
   MVG content recursively via a cloned DrawInfo ('use' keyword). */
(void) GetNextToken(q,&q,extent,token); use=(const char *) GetValueFromSplayTree(macros,token); if (use != (const char *) NULL) { clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); (void) CloneString(&clone_info->primitive,use); status=RenderMVGContent(image,clone_info,depth+1,exception); clone_info=DestroyDrawInfo(clone_info); } break; } status=MagickFalse; break; } case 'v': case 'V': { if (LocaleCompare("viewbox",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.x=CastDoubleToLong(ceil( GetDrawValue(token,&next_token)-0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.y=CastDoubleToLong(ceil( GetDrawValue(token,&next_token)-0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.width=(size_t) CastDoubleToLong( floor(GetDrawValue(token,&next_token)+0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.height=(size_t) CastDoubleToLong( floor(GetDrawValue(token,&next_token)+0.5)); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'w': case 'W': { if (LocaleCompare("word-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=GetDrawValue(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } default: { status=MagickFalse; break; } } if (status == MagickFalse) break; if ((fabs(affine.sx-1.0) >= MagickEpsilon) || (fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon)
/* Fold any non-identity affine terms parsed above into the current CTM
   (classic 2x3 matrix composition). */
|| (fabs(affine.sy-1.0) >= MagickEpsilon) || (fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon)) { graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx; graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx; graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy; graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy; graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+ current.tx; graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+ current.ty; } if (primitive_type == UndefinedPrimitive) { if (*q == '\0') { if (number_stops > 1) { GradientType type; type=LinearGradient; if (draw_info->gradient.type == RadialGradient) type=RadialGradient; (void) GradientImage(image,type,PadSpread,stops,number_stops, exception); } if (number_stops > 0) stops=(StopInfo *) RelinquishMagickMemory(stops); } if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),p); continue; } /* Parse the primitive attributes. */ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) if ((primitive_info[i].primitive == TextPrimitive) || (primitive_info[i].primitive == ImagePrimitive)) if (primitive_info[i].text != (char *) NULL) primitive_info[i].text=DestroyString(primitive_info[i].text); i=0; mvg_info.offset=i; j=0; primitive_info[0].point.x=0.0; primitive_info[0].point.y=0.0; primitive_info[0].coordinates=0; primitive_info[0].method=FloodfillMethod; primitive_info[0].closed_subpath=MagickFalse; for (x=0; *q != '\0'; x++) { /* Define points. 
*/ if (IsPoint(q) == MagickFalse) break; (void) GetNextToken(q,&q,extent,token); point.x=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); point.y=GetDrawValue(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); (void) GetNextToken(q,(const char **) NULL,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); primitive_info[i].primitive=primitive_type; primitive_info[i].point=point; primitive_info[i].coordinates=0; primitive_info[i].method=FloodfillMethod; primitive_info[i].closed_subpath=MagickFalse; i++; mvg_info.offset=i; if (i < (ssize_t) number_points) continue; status&=CheckPrimitiveExtent(&mvg_info,number_points); } if (status == MagickFalse) break; if ((primitive_info[j].primitive == TextPrimitive) || (primitive_info[j].primitive == ImagePrimitive)) if (primitive_info[j].text != (char *) NULL) primitive_info[j].text=DestroyString(primitive_info[j].text); primitive_info[j].primitive=primitive_type; primitive_info[j].coordinates=(size_t) x; primitive_info[j].method=FloodfillMethod; primitive_info[j].closed_subpath=MagickFalse; /* Circumscribe primitive within a circle. */ bounds.x1=primitive_info[j].point.x; bounds.y1=primitive_info[j].point.y; bounds.x2=primitive_info[j].point.x; bounds.y2=primitive_info[j].point.y; for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++) { point=primitive_info[j+k].point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.y < bounds.y1) bounds.y1=point.y; if (point.x > bounds.x2) bounds.x2=point.x; if (point.y > bounds.y2) bounds.y2=point.y; } /* Speculate how many points our primitive might consume. 
*/
/* Estimate how many PrimitiveInfo slots this primitive may expand into so the
   primitive_info array can be grown before tracing (rectangles, round
   rectangles, Beziers and SVG paths all generate many interpolated points). */
coordinates=(double) primitive_info[j].coordinates; switch (primitive_type) { case RectanglePrimitive: { coordinates*=5.0; break; } case RoundRectanglePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot(alpha,beta); coordinates*=5.0; coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0* BezierQuantum+360.0; break; } case BezierPrimitive: { coordinates=(BezierQuantum*(double) primitive_info[j].coordinates); if (primitive_info[j].coordinates > (108*BezierQuantum)) { (void) ThrowMagickException(exception,GetMagickModule(),DrawError, "TooManyBezierCoordinates","`%s'",token); status=MagickFalse; break; } break; } case PathPrimitive: { char *s, *t; (void) GetNextToken(q,&q,extent,token); coordinates=1.0; t=token; for (s=token; *s != '\0'; s=t) { double value; value=GetDrawValue(s,&t); (void) value; if (s == t) { t++; continue; } coordinates++; } for (s=token; *s != '\0'; s++) if (strspn(s,"AaCcQqSsTt") != 0) coordinates+=(20.0*BezierQuantum)+360.0; break; } default: break; } if (status == MagickFalse) break; if (((size_t) (i+coordinates)) >= number_points) { /* Resize based on speculative points required by primitive. 
*/ number_points+=coordinates+1; if (number_points < (size_t) coordinates) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } mvg_info.offset=i; status&=CheckPrimitiveExtent(&mvg_info,number_points); } status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad); if (status == MagickFalse) break; mvg_info.offset=j; switch (primitive_type) { case PointPrimitive: default: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } status&=TracePoint(primitive_info+j,primitive_info[j].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case LinePrimitive: { double dx, dy, maximum_length; if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } dx=primitive_info[i].point.x-primitive_info[i-1].point.x; dy=primitive_info[i].point.y-primitive_info[i-1].point.y; maximum_length=hypot(dx,dy); if (maximum_length > (MaxBezierCoordinates/100.0)) ThrowPointExpectedException(keyword,exception); status&=TraceLine(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RectanglePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceRectangle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RoundRectanglePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+2].point.x < 0.0) || (primitive_info[j+2].point.y < 0.0)) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0) { status=MagickFalse; break; } if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0) { status=MagickFalse; break; } status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case
/* Remaining primitive kinds: each validates its expected coordinate count
   before tracing, setting status=MagickFalse on malformed input. */
ArcPrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } status&=TraceArc(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case EllipsePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x < 0.0) || (primitive_info[j+1].point.y < 0.0)) { status=MagickFalse; break; } status&=TraceEllipse(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case CirclePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceCircle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PolylinePrimitive: { if (primitive_info[j].coordinates < 1) { status=MagickFalse; break; } break; } case PolygonPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } primitive_info[i]=primitive_info[j]; primitive_info[i].coordinates=0; primitive_info[j].coordinates++; primitive_info[j].closed_subpath=MagickTrue; i++; break; } case BezierPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } status&=TraceBezier(&mvg_info,primitive_info[j].coordinates); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PathPrimitive: { coordinates=(double) TracePath(&mvg_info,token,exception); if (coordinates < 0.0) { status=MagickFalse; break; } i=(ssize_t) (j+coordinates); break; } case AlphaPrimitive: case ColorPrimitive: { ssize_t method; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); method=ParseCommandOption(MagickMethodOptions,MagickFalse,token); if (method == -1) { status=MagickFalse; break; } primitive_info[j].method=(PaintMethod) method; break; } case TextPrimitive: { char geometry[MagickPathExtent]; 
/* TextPrimitive continues: capture the string, advance the text cursor using
   the rendered glyph metrics, then fall through to the shared transform and
   render steps below. */
if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } if (*token != ',') (void) GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); /* Compute text cursor offset. */ clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) && (fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon)) { mvg_info.point=primitive_info->point; primitive_info->point.x+=cursor; } else { mvg_info.point=primitive_info->point; cursor=0.0; } (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f", primitive_info->point.x,primitive_info->point.y); clone_info->render=MagickFalse; clone_info->text=AcquireString(token); status&=GetTypeMetrics(image,clone_info,&metrics,exception); clone_info=DestroyDrawInfo(clone_info); cursor+=metrics.width; if (graphic_context[n]->compliance != SVGCompliance) cursor=0.0; break; } case ImagePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); break; } } mvg_info.offset=i; if (status == 0) break; primitive_info[i].primitive=UndefinedPrimitive; if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1), p); /* Sanity check. */ status&=CheckPrimitiveExtent(&mvg_info,(size_t) ExpandAffine(&graphic_context[n]->affine)); if (status == 0) break; status&=CheckPrimitiveExtent(&mvg_info,(size_t) graphic_context[n]->stroke_width); if (status == 0) break; if (i == 0) continue; /* Transform points. 
*/ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+ graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx; primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+ graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty; point=primitive_info[i].point; if (point.x < graphic_context[n]->bounds.x1) graphic_context[n]->bounds.x1=point.x; if (point.y < graphic_context[n]->bounds.y1) graphic_context[n]->bounds.y1=point.y; if (point.x > graphic_context[n]->bounds.x2) graphic_context[n]->bounds.x2=point.x; if (point.y > graphic_context[n]->bounds.y2) graphic_context[n]->bounds.y2=point.y; if (primitive_info[i].primitive == ImagePrimitive) break; if (i >= (ssize_t) number_points) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); } if (graphic_context[n]->render != MagickFalse) { if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) && (graphic_context[n]->clip_mask != (char *) NULL) && (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0)) { const char *clip_path; clip_path=(const char *) GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image,graphic_context[n]->clip_mask, clip_path); status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); } status&=DrawPrimitive(image,graphic_context[n],primitive_info, exception); } proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType) primitive_extent); if (proceed == MagickFalse) break; if (status == 0) break; } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image"); /* Relinquish resources. 
*/
/* Cleanup: release the macro table, token buffer, primitive buffers, gradient
   stops, and the graphic-context stack; raise DrawError if parsing failed. */
macros=DestroySplayTree(macros); token=DestroyString(token); if (primitive_info != (PrimitiveInfo *) NULL) { for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) if ((primitive_info[i].primitive == TextPrimitive) || (primitive_info[i].primitive == ImagePrimitive)) if (primitive_info[i].text != (char *) NULL) primitive_info[i].text=DestroyString(primitive_info[i].text); primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info); } primitive=DestroyString(primitive); if (stops != (StopInfo *) NULL) stops=(StopInfo *) RelinquishMagickMemory(stops); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); if (status == MagickFalse) ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition", keyword); return(status != 0 ? MagickTrue : MagickFalse); }
/* DrawImage(): public entry point — renders draw_info->primitive at recursion
   depth 0. */
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info, ExceptionInfo *exception) { return(RenderMVGContent(image,draw_info,0,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P a t t e r n P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPatternPath() draws a pattern. % % The format of the DrawPatternPath method is: % % MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info, % const char *name,Image **pattern,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o name: the pattern name. % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */
/* DrawPatternPath(): render the MVG stored in image artifacts "NAME" and
   "NAME-geometry" into *pattern (a fresh image with a fully transparent
   background); returns MagickFalse if either artifact is missing. */
MagickExport MagickBooleanType DrawPatternPath(Image *image, const DrawInfo *draw_info,const char *name,Image **pattern, ExceptionInfo *exception) { char property[MagickPathExtent]; const char *geometry, *path, *type; DrawInfo *clone_info; ImageInfo *image_info; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); assert(name != (const char *) NULL); (void) FormatLocaleString(property,MagickPathExtent,"%s",name); path=GetImageArtifact(image,property); if (path == (const char *) NULL) return(MagickFalse); (void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name); geometry=GetImageArtifact(image,property); if (geometry == (const char *) NULL) return(MagickFalse); if ((*pattern) != (Image *) NULL) *pattern=DestroyImage(*pattern); image_info=AcquireImageInfo(); image_info->size=AcquireString(geometry); *pattern=AcquireImage(image_info,exception); image_info=DestroyImageInfo(image_info); (void) QueryColorCompliance("#00000000",AllCompliance, &(*pattern)->background_color,exception); (void) SetImageBackgroundColor(*pattern,exception); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), "begin pattern-path %s %s",name,geometry); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); if (clone_info->fill_pattern != (Image *) NULL) clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern); if (clone_info->stroke_pattern != (Image *) NULL) clone_info->stroke_pattern=DestroyImage(clone_info->stroke_pattern); (void) FormatLocaleString(property,MagickPathExtent,"%s-type",name); type=GetImageArtifact(image,property); if (type != (const char *) NULL) clone_info->gradient.type=(GradientType) ParseCommandOption( MagickGradientOptions,MagickFalse,type); (void) CloneString(&clone_info->primitive,path);
/* Render the MVG path into the freshly created pattern image. */
status=RenderMVGContent(*pattern,clone_info,0,exception); clone_info=DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w P o l y g o n P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPolygonPrimitive() draws a polygon on the image. % % The format of the DrawPolygonPrimitive method is: % % MagickBooleanType DrawPolygonPrimitive(Image *image, % const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % o exception: return any errors or warnings in this structure. % */
/* DestroyPolygonThreadSet(): free each per-thread PolygonInfo and the array
   itself; tolerates NULL slots so it can clean up partial allocations. */
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info) { ssize_t i; assert(polygon_info != (PolygonInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (polygon_info[i] != (PolygonInfo *) NULL) polygon_info[i]=DestroyPolygonInfo(polygon_info[i]); polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info); return(polygon_info); }
/* AcquirePolygonThreadSet(): build one PolygonInfo per worker thread; slot 0
   is converted from the primitive path, the rest are deep copies (continues
   below). */
static PolygonInfo **AcquirePolygonThreadSet( const PrimitiveInfo *primitive_info,ExceptionInfo *exception) { PathInfo *magick_restrict path_info; PolygonInfo **polygon_info; ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads, sizeof(*polygon_info)); if (polygon_info == (PolygonInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return((PolygonInfo **) NULL); } (void) memset(polygon_info,0,number_threads*sizeof(*polygon_info)); 
/* Convert the primitive path once (thread 0 owns the result), then give each
   remaining worker thread a deep copy of the edge table and every edge's point
   array so threads can prune edges independently.  Any allocation failure
   tears down the whole set via DestroyPolygonThreadSet(). */
path_info=ConvertPrimitiveToPath(primitive_info,exception); if (path_info == (PathInfo *) NULL) return(DestroyPolygonThreadSet(polygon_info)); polygon_info[0]=ConvertPathToPolygon(path_info,exception); if (polygon_info[0] == (PolygonInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonThreadSet(polygon_info)); } for (i=1; i < (ssize_t) number_threads; i++) { EdgeInfo *edge_info; ssize_t j; polygon_info[i]=(PolygonInfo *) AcquireMagickMemory( sizeof(*polygon_info[i])); if (polygon_info[i] == (PolygonInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonThreadSet(polygon_info)); } polygon_info[i]->number_edges=0; edge_info=polygon_info[0]->edges; polygon_info[i]->edges=(EdgeInfo *) AcquireQuantumMemory( polygon_info[0]->number_edges,sizeof(*edge_info)); if (polygon_info[i]->edges == (EdgeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonThreadSet(polygon_info)); } (void) memcpy(polygon_info[i]->edges,edge_info, polygon_info[0]->number_edges*sizeof(*edge_info)); for (j=0; j < (ssize_t) polygon_info[i]->number_edges; j++) polygon_info[i]->edges[j].points=(PointInfo *) NULL; polygon_info[i]->number_edges=polygon_info[0]->number_edges; for (j=0; j < (ssize_t) polygon_info[i]->number_edges; j++) { edge_info=polygon_info[0]->edges+j; polygon_info[i]->edges[j].points=(PointInfo *) AcquireQuantumMemory( edge_info->number_points,sizeof(*edge_info->points)); /* FIX: was sizeof(*edge_info), which over-allocated EdgeInfo-sized slots for PointInfo elements; the memcpy below copies number_points*sizeof(PointInfo) */ if (polygon_info[i]->edges[j].points == (PointInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(DestroyPolygonThreadSet(polygon_info)); } (void) memcpy(polygon_info[i]->edges[j].points,edge_info->points, edge_info->number_points*sizeof(*edge_info->points)); } 
} path_info=(PathInfo *) RelinquishMagickMemory(path_info); return(polygon_info); }
/* DestroyEdge(): free one edge's point array and close the gap in the edge
   table; returns the new edge count. */
static size_t DestroyEdge(PolygonInfo *polygon_info,const ssize_t edge) { assert(edge < (ssize_t) polygon_info->number_edges); polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory( polygon_info->edges[edge].points); polygon_info->number_edges--; if (edge < (ssize_t) polygon_info->number_edges) (void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1, (size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges)); return(polygon_info->number_edges); }
/* GetFillAlpha(): per-pixel fill and stroke coverage for the scanline
   rasterizer; also retires edges wholly above the current scanline. */
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid, const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x, const ssize_t y,double *stroke_alpha) { double alpha, beta, distance, subpath_alpha; PointInfo delta; const PointInfo *q; EdgeInfo *p; ssize_t i; ssize_t j, winding_number; /* Compute fill & stroke opacity for this (x,y) point. */ *stroke_alpha=0.0; subpath_alpha=0.0; p=polygon_info->edges; for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double) y <= (p->bounds.y1-mid-0.5)) break; if ((double) y > (p->bounds.y2+mid+0.5)) { (void) DestroyEdge(polygon_info,j); continue; } if (((double) x <= (p->bounds.x1-mid-0.5)) || ((double) x > (p->bounds.x2+mid+0.5))) continue; i=(ssize_t) MagickMax((double) p->highwater,1.0); for ( ; i < (ssize_t) p->number_points; i++) { if ((double) y <= (p->points[i-1].y-mid-0.5)) break; if ((double) y > (p->points[i].y+mid+0.5)) continue; if (p->scanline != (double) y) { p->scanline=(double) y; p->highwater=(size_t) i; } /* Compute distance between a point and an edge. 
*/
/* Point-to-segment squared distance: beta<=0 means q is nearest; beta>=alpha
   means q+1 is nearest; otherwise use the perpendicular distance to the
   segment. */
q=p->points+i-1; delta.x=(q+1)->x-q->x; delta.y=(q+1)->y-q->y; beta=delta.x*(x-q->x)+delta.y*(y-q->y); if (beta <= 0.0) { delta.x=(double) x-q->x; delta.y=(double) y-q->y; distance=delta.x*delta.x+delta.y*delta.y; } else { alpha=delta.x*delta.x+delta.y*delta.y; if (beta >= alpha) { delta.x=(double) x-(q+1)->x; delta.y=(double) y-(q+1)->y; distance=delta.x*delta.x+delta.y*delta.y; } else { alpha=PerceptibleReciprocal(alpha); beta=delta.x*(y-q->y)-delta.y*(x-q->x)+MagickEpsilon; distance=alpha*beta*beta; } } /* Compute stroke & subpath opacity. */ beta=0.0; if (p->ghostline == MagickFalse) { alpha=mid+0.5; if ((*stroke_alpha < 1.0) && (distance <= ((alpha+0.25)*(alpha+0.25)))) { alpha=mid-0.5; if (distance <= ((alpha+0.25)*(alpha+0.25))) *stroke_alpha=1.0; else { beta=1.0; if (fabs(distance-1.0) >= MagickEpsilon) beta=sqrt((double) distance); alpha=beta-mid-0.5; if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25))) *stroke_alpha=(alpha-0.25)*(alpha-0.25); } } } if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0)) continue; if (distance <= 0.0) { subpath_alpha=1.0; continue; } if (distance > 1.0) continue; if (fabs(beta) < MagickEpsilon) { beta=1.0; if (fabs(distance-1.0) >= MagickEpsilon) beta=sqrt(distance); } alpha=beta-1.0; if (subpath_alpha < (alpha*alpha)) subpath_alpha=alpha*alpha; } } /* Compute fill opacity. */ if (fill == MagickFalse) return(0.0); if (subpath_alpha >= 1.0) return(1.0); /* Determine winding number. */ winding_number=0; p=polygon_info->edges; for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double) y <= p->bounds.y1) break; if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1)) continue; if ((double) x > p->bounds.x2) { winding_number+=p->direction ?
1 : -1; continue; } i=(ssize_t) MagickMax((double) p->highwater,1.0); for ( ; i < (ssize_t) (p->number_points-1); i++) if ((double) y <= p->points[i].y) break; q=p->points+i-1; if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x))) winding_number+=p->direction ? 1 : -1; } if (fill_rule != NonZeroRule) { if ((MagickAbsoluteValue(winding_number) & 0x01) != 0) return(1.0); } else if (MagickAbsoluteValue(winding_number) != 0) return(1.0); return(subpath_alpha); }
/* DrawPolygonPrimitive(): scanline-render a traced polygon/polyline onto the
   image (body continues beyond this chunk). */
static MagickBooleanType DrawPolygonPrimitive(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { CacheView *image_view; const char *artifact; MagickBooleanType fill, status; double mid; PolygonInfo **magick_restrict polygon_info; EdgeInfo *p; ssize_t i; SegmentInfo bounds; ssize_t start_y, stop_y, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); assert(primitive_info != (PrimitiveInfo *) NULL); if (primitive_info->coordinates <= 1) return(MagickTrue); /* Compute bounding box. */ polygon_info=AcquirePolygonThreadSet(primitive_info,exception); if (polygon_info == (PolygonInfo **) NULL) return(MagickFalse); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon"); fill=(primitive_info->method == FillToBorderMethod) || (primitive_info->method == FloodfillMethod) ? 
MagickTrue : MagickFalse; mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0; bounds=polygon_info[0]->edges[0].bounds; artifact=GetImageArtifact(image,"draw:render-bounding-rectangles"); if (IsStringTrue(artifact) != MagickFalse) (void) DrawBoundingRectangles(image,draw_info,polygon_info[0],exception); for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++) { p=polygon_info[0]->edges+i; if (p->bounds.x1 < bounds.x1) bounds.x1=p->bounds.x1; if (p->bounds.y1 < bounds.y1) bounds.y1=p->bounds.y1; if (p->bounds.x2 > bounds.x2) bounds.x2=p->bounds.x2; if (p->bounds.y2 > bounds.y2) bounds.y2=p->bounds.y2; } bounds.x1-=(mid+1.0); bounds.y1-=(mid+1.0); bounds.x2+=(mid+1.0); bounds.y2+=(mid+1.0); if ((bounds.x1 >= (double) image->columns) || (bounds.y1 >= (double) image->rows) || (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0)) { polygon_info=DestroyPolygonThreadSet(polygon_info); return(MagickTrue); /* virtual polygon */ } bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ? (double) image->columns-1.0 : bounds.x1; bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ? (double) image->rows-1.0 : bounds.y1; bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ? (double) image->columns-1.0 : bounds.x2; bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ? (double) image->rows-1.0 : bounds.y2; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); if ((primitive_info->coordinates == 1) || (polygon_info[0]->number_edges == 0)) { /* Draw point. 
*/ start_y=CastDoubleToLong(ceil(bounds.y1-0.5)); stop_y=CastDoubleToLong(floor(bounds.y2+0.5)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,stop_y-start_y+1,1) #endif for (y=start_y; y <= stop_y; y++) { MagickBooleanType sync; PixelInfo pixel; ssize_t x; Quantum *magick_restrict q; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x=CastDoubleToLong(ceil(bounds.x1-0.5)); stop_x=CastDoubleToLong(floor(bounds.x2+0.5)); x=start_x; q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } GetPixelInfo(image,&pixel); for ( ; x <= stop_x; x++) { if ((x == CastDoubleToLong(ceil(primitive_info->point.x-0.5))) && (y == CastDoubleToLong(ceil(primitive_info->point.y-0.5)))) { GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); } q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); polygon_info=DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " end draw-polygon"); return(status); } /* Draw polygon or line. 
*/ start_y=CastDoubleToLong(ceil(bounds.y1-0.5)); stop_y=CastDoubleToLong(floor(bounds.y2+0.5)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,stop_y-start_y+1,1) #endif for (y=start_y; y <= stop_y; y++) { const int id = GetOpenMPThreadId(); Quantum *magick_restrict q; ssize_t x; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x=CastDoubleToLong(ceil(bounds.x1-0.5)); stop_x=CastDoubleToLong(floor(bounds.x2+0.5)); q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+ 1),1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=start_x; x <= stop_x; x++) { double fill_alpha, stroke_alpha; PixelInfo fill_color, stroke_color; /* Fill and/or stroke. */ fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule, x,y,&stroke_alpha); if (draw_info->stroke_antialias == MagickFalse) { fill_alpha=fill_alpha > 0.5 ? 1.0 : 0.0; stroke_alpha=stroke_alpha > 0.5 ? 
1.0 : 0.0; } GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception); CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q, (double) GetPixelAlpha(image,q),q); GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception); CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q, (double) GetPixelAlpha(image,q),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); polygon_info=DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image. % % The format of the DrawPrimitive method is: % % MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info, % PrimitiveInfo *primitive_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % o exception: return any errors or warnings in this structure. % */ static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info) { const char *methods[] = { "point", "replace", "floodfill", "filltoborder", "reset", "?" 
    };

  PointInfo
    p,
    point,
    q;

  ssize_t
    i,
    x;

  ssize_t
    coordinates,
    y;

  x=CastDoubleToLong(ceil(primitive_info->point.x-0.5));
  y=CastDoubleToLong(ceil(primitive_info->point.y-0.5));
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ColorPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ImagePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ImagePrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    case PointPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case TextPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "TextPrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    default:
      break;
  }
  /*
    Walk the coordinate list, logging each subpath as it opens and closes.
  */
  coordinates=0;
  p=primitive_info[0].point;
  q.x=(-1.0);
  q.y=(-1.0);
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    point=primitive_info[i].point;
    if (coordinates <= 0)
      {
        coordinates=(ssize_t) primitive_info[i].coordinates;
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          "    begin open (%.20g)",(double) coordinates);
        p=point;
      }
    point=primitive_info[i].point;
    if ((fabs(q.x-point.x) >= MagickEpsilon) ||
        (fabs(q.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "      %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "      %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
    q=point;
    coordinates--;
    if (coordinates > 0)
      continue;
    if ((fabs(p.x-point.x) >= MagickEpsilon) ||
        (fabs(p.y-point.y) >= MagickEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end last (%.20g)",
        (double) coordinates);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end open (%.20g)",
        (double) coordinates);
  }
}

/*
  DrawPrimitive() dispatches on the primitive type: point/color/alpha edits are
  applied directly through the pixel cache, image primitives are composited,
  text is annotated, and everything else is rasterized as a (possibly dashed
  or stroked) polygon.
*/
MagickExport MagickBooleanType DrawPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickStatusType
    status;

  ssize_t
    i,
    x;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "  begin draw-primitive");
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "    affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
        draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
        draw_info->affine.tx,draw_info->affine.ty);
    }
  status=MagickTrue;
  /*
    A non-gray fill or stroke on a grayscale image forces sRGB.
  */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
       (IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
    status&=SetImageColorspace(image,sRGBColorspace,exception);
  if (draw_info->compliance == SVGCompliance)
    {
      status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask,
        exception);
      status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask,
        exception);
    }
  x=CastDoubleToLong(ceil(primitive_info->point.x-0.5));
  y=CastDoubleToLong(ceil(primitive_info->point.y-0.5));
  image_view=AcquireAuthenticCacheView(image,exception);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        status&=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
          status&=SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          PixelInfo
            pixel,
            target;

          /*
            Replace the alpha of every pixel fuzzily matching the target.
          */
          status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          ChannelType
            channel_mask;

          PixelInfo
            target;

          status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              /*
                Fill until the border color is reached.
              */
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          /*
            Restrict the flood fill to the alpha channel only.
          */
          channel_mask=SetImageChannelMask(image,AlphaChannel);
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          (void) SetImageChannelMask(image,channel_mask);
          break;
        }
        case ResetMethod:
        {
          PixelInfo
            pixel;

          for (y=0; y < (ssize_t) image->rows; y++)
          {
            Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ColorPrimitive:
    {
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetPixelInfo(image,&pixel);
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelViaPixelInfo(image,&pixel,q);
          status&=SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          PixelInfo
            pixel,
            target;

          status&=GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          PixelInfo
            target;

          status&=GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          break;
        }
        case ResetMethod:
        {
          PixelInfo
            pixel;

          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            status&=SyncCacheViewAuthenticPixels(image_view,exception);
            if (status == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ImagePrimitive:
    {
      AffineMatrix
        affine;

      char
        composite_geometry[MagickPathExtent];

      Image
        *composite_image,
        *composite_images;

      ImageInfo
        *clone_info;

      RectangleInfo
        geometry;

      ssize_t
        x1,
        y1;

      if (primitive_info->text == (char *) NULL)
        break;
      /*
        Read the image to composite: inline data URI or a filename/URL.
      */
      clone_info=AcquireImageInfo();
      composite_images=(Image *) NULL;
      if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
        composite_images=ReadInlineImage(clone_info,primitive_info->text,
          exception);
      else
        if (*primitive_info->text != '\0')
          {
            (void) CopyMagickString(clone_info->filename,primitive_info->text,
              MagickPathExtent);
            status&=SetImageInfo(clone_info,0,exception);
            if ((LocaleNCompare(clone_info->magick,"http",4) == 0) ||
                (LocaleCompare(clone_info->magick,"mpri") == 0))
              (void) CopyMagickString(clone_info->filename,
                primitive_info->text,MagickPathExtent);
            if (clone_info->size != (char *) NULL)
              clone_info->size=DestroyString(clone_info->size);
            if (clone_info->extract != (char *) NULL)
              clone_info->extract=DestroyString(clone_info->extract);
            if (*clone_info->filename != '\0')
              composite_images=ReadImage(clone_info,exception);
          }
      clone_info=DestroyImageInfo(clone_info);
      if (composite_images == (Image *) NULL)
        {
          status=MagickFalse;
          break;
        }
      composite_image=RemoveFirstImageFromList(&composite_images);
      composite_images=DestroyImageList(composite_images);
      (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
        NULL,(void *) NULL);
      x1=CastDoubleToLong(ceil(primitive_info[1].point.x-0.5));
      y1=CastDoubleToLong(ceil(primitive_info[1].point.y-0.5));
      if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
          ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
        {
          /*
            Resize image.
          */
          (void) FormatLocaleString(composite_geometry,MagickPathExtent,
            "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
          composite_image->filter=image->filter;
          status&=TransformImage(&composite_image,(char *) NULL,
            composite_geometry,exception);
        }
      if (composite_image->alpha_trait == UndefinedPixelTrait)
        status&=SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
          exception);
      if (draw_info->alpha != OpaqueAlpha)
        status&=SetImageAlpha(composite_image,draw_info->alpha,exception);
      SetGeometry(image,&geometry);
      image->gravity=draw_info->gravity;
      geometry.x=x;
      geometry.y=y;
      (void) FormatLocaleString(composite_geometry,MagickPathExtent,
        "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
        composite_image->rows,(double) geometry.x,(double) geometry.y);
      (void) ParseGravityGeometry(image,composite_geometry,&geometry,
        exception);
      affine=draw_info->affine;
      affine.tx=(double) geometry.x;
      affine.ty=(double) geometry.y;
      composite_image->interpolate=image->interpolate;
      /*
        Over composites go through the affine renderer; everything else uses
        the generic compositor.
      */
      if ((draw_info->compose == OverCompositeOp) ||
          (draw_info->compose == SrcOverCompositeOp))
        status&=DrawAffineImage(image,composite_image,&affine,exception);
      else
        status&=CompositeImage(image,composite_image,draw_info->compose,
          MagickTrue,geometry.x,geometry.y,exception);
      composite_image=DestroyImage(composite_image);
      break;
    }
    case PointPrimitive:
    {
      PixelInfo
        fill_color;

      Quantum
        *q;

      if ((y < 0) || (y >= (ssize_t) image->rows))
        break;
      if ((x < 0) || (x >= (ssize_t) image->columns))
        break;
      q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
      if (q == (Quantum *) NULL)
        break;
      GetFillColor(draw_info,x,y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,(double)
        GetPixelAlpha(image,q),q);
      status&=SyncCacheViewAuthenticPixels(image_view,exception);
      break;
    }
    case TextPrimitive:
    {
      char
        geometry[MagickPathExtent];

      DrawInfo
        *clone_info;

      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
      (void) CloneString(&clone_info->text,primitive_info->text);
      (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
        primitive_info->point.x,primitive_info->point.y);
      (void) CloneString(&clone_info->geometry,geometry);
      status&=AnnotateImage(image,clone_info,exception);
      clone_info=DestroyDrawInfo(clone_info);
      break;
    }
    default:
    {
      double
        mid,
        scale;

      DrawInfo
        *clone_info;

      if (IsEventLogging() != MagickFalse)
        LogPrimitiveInfo(primitive_info);
      scale=ExpandAffine(&draw_info->affine);
      if ((draw_info->dash_pattern != (double *) NULL) &&
          (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
          (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
          (draw_info->stroke.alpha != (Quantum) TransparentAlpha))
        {
          /*
            Draw dash polygon.
          */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          if (status != MagickFalse)
            status&=DrawDashPolygon(draw_info,primitive_info,image,exception);
          break;
        }
      mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
      if ((mid > 1.0) &&
          ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
           (draw_info->stroke_pattern != (Image *) NULL)))
        {
          double
            x,
            y;

          MagickBooleanType
            closed_path;

          /*
            Draw strokes while respecting line cap/join attributes.
          */
          closed_path=primitive_info[0].closed_subpath;
          i=(ssize_t) primitive_info[0].coordinates;
          x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
          y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
          if ((x < MagickEpsilon) && (y < MagickEpsilon))
            closed_path=MagickTrue;
          if ((((draw_info->linecap == RoundCap) ||
                (closed_path != MagickFalse)) &&
               (draw_info->linejoin == RoundJoin)) ||
              (primitive_info[i].primitive != UndefinedPrimitive))
            {
              status&=DrawPolygonPrimitive(image,draw_info,primitive_info,
                exception);
              break;
            }
          /*
            Fill first with a transparent stroke, then overlay the stroke
            outline as its own polygon.
          */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          if (status != MagickFalse)
            status&=DrawStrokePolygon(image,draw_info,primitive_info,
              exception);
          break;
        }
      status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
      break;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (draw_info->compliance == SVGCompliance)
    {
      status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
      status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"  end draw-primitive");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w S t r o k e P o l y g o n                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
%  the image while respecting the line cap and join attributes.
%
%  The format of the DrawStrokePolygon method is:
%
%      MagickBooleanType DrawStrokePolygon(Image *image,
%        const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/

/*
  DrawRoundLinecap() paints a tiny 4-point polygon at the given endpoint so an
  open stroke terminates with a round cap.
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  PrimitiveInfo
    linecap[5];

  ssize_t
    i;

  /*
    Duplicate the endpoint 4 times, nudged by epsilon, to form a degenerate
    polygon that rasterizes as a dot of the stroke width.
  */
  for (i=0; i < 4; i++)
    linecap[i]=(*primitive_info);
  linecap[0].coordinates=4;
  linecap[1].point.x+=2.0*MagickEpsilon;
  linecap[2].point.x+=2.0*MagickEpsilon;
  linecap[2].point.y+=2.0*MagickEpsilon;
  linecap[3].point.y+=2.0*MagickEpsilon;
  linecap[4].primitive=UndefinedPrimitive;
  return(DrawPolygonPrimitive(image,draw_info,linecap,exception));
}

static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon:  render each subpath's stroke outline as a filled
    polygon, using the stroke color/pattern as the fill.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "    begin draw-stroke-polygon");
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  for (p=primitive_info; p->primitive != UndefinedPrimitive;
       p+=p->coordinates)
  {
    if (p->coordinates == 1)
      continue;
    stroke_polygon=TraceStrokePolygon(draw_info,p,exception);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        status=0;
        break;
      }
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == 0)
      break;
    q=p+p->coordinates-1;
    closed_path=p->closed_subpath;
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        /*
          Open subpath with round caps: cap both endpoints.
        */
        status&=DrawRoundLinecap(image,draw_info,p,exception);
        status&=DrawRoundLinecap(image,draw_info,q,exception);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "    end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A f f i n e M a t r i x                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAffineMatrix() returns an AffineMatrix initialized to the identity
%  matrix.
%
%  The format of the GetAffineMatrix method is:
%
%      void GetAffineMatrix(AffineMatrix *affine_matrix)
%
%  A description of each parameter follows:
%
%    o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  (void) memset(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sx=1.0;
  affine_matrix->sy=1.0;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t D r a w I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetDrawInfo() initializes draw_info to default values from image_info.
%
%  The format of the GetDrawInfo method is:
%
%      void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info..
%
%    o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) memset(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /*
    Defaults: opaque black fill, transparent white stroke.
  */
  (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke,
    exception);
  draw_info->stroke_antialias=clone_info->antialias;
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->alpha=OpaqueAlpha;
  draw_info->fill_alpha=OpaqueAlpha;
  draw_info->stroke_alpha=OpaqueAlpha;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  draw_info->pointsize=12.0;
  draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
  draw_info->compose=OverCompositeOp;
  draw_info->render=MagickTrue;
  draw_info->clip_path=MagickFalse;
  draw_info->debug=IsEventLogging();
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  if (fabs(clone_info->pointsize) >= MagickEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->border_color=clone_info->border_color;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  /*
    Override defaults from image options, when present.
  */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
      exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
      exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=GetDrawValue(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
      exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /*
        Accept either a symbolic weight or a numeric one.
      */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P e r m u t a t e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Permutate() returns the
permutation weight for (n,k).  (As implemented, the value returned is the
%  binomial coefficient n!/(k!*(n-k)!), computed in floating point; it is
%  used as the Bernstein basis weight in TraceBezier().)
%
%  The format of the Permutate method is:
%
%      void Permutate(ssize_t n,ssize_t k)
%
%  A description of each parameter follows:
%
%    o n: the total number of items.
%
%    o k: the number of items selected.
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    r;

  ssize_t
    i;

  /*
    r = ((k+1)*(k+2)*...*n)/(n-k)! = n!/(k!*(n-k)!); accumulated as a
    double so large n does not overflow an integer type.
  */
  r=1.0;
  for (i=k+1; i <= n; i++)
    r*=i;
  for (i=1; i <= (n-k); i++)
    r/=i;
  return(r);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   T r a c e P r i m i t i v e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TracePrimitive is a collection of methods for generating graphic
%  primitives such as arcs, ellipses, paths, etc.
%
*/

/*
  TraceArc renders an arc given two corner points and rotation angles as an
  ellipse trace centered on the midpoint of the corners.
*/
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    center,
    radius;

  center.x=0.5*(end.x+start.x);
  center.y=0.5*(end.y+start.y);
  radius.x=fabs(center.x-start.x);
  radius.y=fabs(center.y-start.y);
  return(TraceEllipse(mvg_info,center,radius,degrees));
}

/*
  TraceArcPath converts an endpoint-parameterized elliptical arc (radii,
  x-axis rotation, large-arc and sweep flags -- the SVG 'A' form) into a
  sequence of cubic Bezier segments appended at mvg_info->offset.
*/
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  MagickStatusType
    status;

  PointInfo
    center,
    points[3],
    radii;

  double
    cosine,
    sine;

  PrimitiveInfo
    *primitive_info;

  PrimitiveInfo
    *p;

  ssize_t
    i;

  size_t
    arc_segments;

  ssize_t
    offset;

  offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /*
    Degenerate cases: coincident endpoints collapse to a point; a zero
    radius collapses to a straight line.
  */
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    return(TracePoint(primitive_info,end));
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon))
    return(TraceLine(primitive_info,start,end));
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  /*
    Transform the midpoint into the rotated ellipse frame.
  */
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  if (delta > 1.0)
    {
      /*
        Radii too small to span the endpoints: scale them up uniformly.
      */
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  if (fabs(alpha*alpha+beta*beta) < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=2.0*MagickPI;
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=2.0*MagickPI;
  /*
    Split the sweep into segments of at most ~90 degrees; each segment is
    emitted as one cubic Bezier (4 control points).
  */
  arc_segments=(size_t) CastDoubleToLong(ceil(fabs((double) (theta/(0.5*
    MagickPI+MagickEpsilon)))));
  status=MagickTrue;
  p=primitive_info;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    /*
      Control point 0 continues from the previous segment's endpoint; the
      remaining three are rotated back out of the ellipse frame.
    */
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;  /* pin the last segment exactly on the endpoint */
    status&=TraceBezier(mvg_info,4);
    if (status == 0)
      break;
    p=(*mvg_info->primitive_info)+mvg_info->offset;
    mvg_info->offset+=p->coordinates;
    p+=p->coordinates;
  }
  if (status == 0)
    return(MagickFalse);
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /*
    Walk backwards tagging every generated point with the primitive type.
  */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

/*
  TraceBezier approximates a Bezier curve of arbitrary order (given by
  number_coordinates control points at mvg_info->offset) with a dense
  polyline of sample points.
*/
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
  const size_t number_coordinates)
{
  double
    alpha,
    *coefficients,
    weight;

  PointInfo
    end,
point,
    *points;

  PrimitiveInfo
    *primitive_info;

  PrimitiveInfo
    *p;

  ssize_t
    i,
    j;

  size_t
    control_points,
    quantum;

  /*
    Allocate coefficients: scale the sampling density (quantum) by the
    largest coordinate spread so long curves are sampled finely enough;
    spreads beyond SSIZE_MAX are rejected as a resource-limit error.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=number_coordinates;
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (double) SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) SSIZE_MAX)
        {
          (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'","");
          return(MagickFalse);
        }
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=MagickMin(quantum/number_coordinates,BezierQuantum);
  coefficients=(double *) AcquireQuantumMemory(number_coordinates,
    sizeof(*coefficients));
  points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates*
    sizeof(*points));
  if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
    {
      if (points != (PointInfo *) NULL)
        points=(PointInfo *) RelinquishMagickMemory(points);
      if (coefficients != (double *) NULL)
        coefficients=(double *) RelinquishMagickMemory(coefficients);
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  control_points=quantum*number_coordinates;
  if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  /*
    Compute bezier points using the Bernstein form: coefficients[] holds the
    binomial weights and alpha accumulates weight^j*(1-weight)^(n-1-j).
  */
  end=primitive_info[number_coordinates-1].point;
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  weight=0.0;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      alpha*=weight/(1.0-weight);
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys.
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    if (TracePoint(p,points[i]) == MagickFalse)
      {
        points=(PointInfo *) RelinquishMagickMemory(points);
        coefficients=(double *) RelinquishMagickMemory(coefficients);
        return(MagickFalse);
      }
    p+=p->coordinates;
  }
  if (TracePoint(p,end) == MagickFalse)
    {
      points=(PointInfo *) RelinquishMagickMemory(points);
      coefficients=(double *) RelinquishMagickMemory(coefficients);
      return(MagickFalse);
    }
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /*
    Walk backwards tagging every generated point with the primitive type.
  */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
  return(MagickTrue);
}

/*
  TraceCircle renders the circle whose center is start and whose radius
  reaches end, as a full 0..360 degree ellipse trace.
*/
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end)
{
  double
    alpha,
    beta,
    radius;

  PointInfo
    offset,
    degrees;

  alpha=end.x-start.x;
  beta=end.y-start.y;
  radius=hypot((double) alpha,(double) beta);
  offset.x=(double) radius;
  offset.y=(double) radius;
  degrees.x=0.0;
  degrees.y=360.0;
  return(TraceEllipse(mvg_info,start,offset,degrees));
}

/*
  TraceEllipse samples the elliptical arc (center, radii, start/end angles
  in degrees) into a polyline appended at mvg_info->offset.
*/
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
  const PointInfo radii,const PointInfo arc)
{
  double
    coordinates,
    delta,
    step,
    x,
    y;

  PointInfo
    angle,
    point;

  PrimitiveInfo
    *primitive_info;

  PrimitiveInfo
    *p;

  ssize_t
    i;

  /*
    Ellipses are just short segmented polys; the angular step shrinks as
    the radii grow so large ellipses remain smooth.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return(MagickTrue);
  delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
  step=MagickPI/8.0;
  if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
    step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
  angle.x=DegreesToRadians(arc.x);
  y=arc.y;
  while (y < arc.x)
    y+=360.0;  /* normalize so the end angle is not before the start */
  angle.y=DegreesToRadians(y);
  coordinates=ceil((angle.y-angle.x)/step+1.0);
  if (coordinates > (108.0*BezierQuantum))
    {
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse)
    return(MagickFalse);
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
    if (TracePoint(p,point) == MagickFalse)
      return(MagickFalse);
    p+=p->coordinates;
  }
  /*
    Emit the exact endpoint so the arc terminates precisely at arc.y.
  */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /*
    Mark the subpath closed when the first and last points coincide.
  */
  x=fabs(primitive_info[0].point.x-
    primitive_info[primitive_info->coordinates-1].point.x);
  y=fabs(primitive_info[0].point.y-
    primitive_info[primitive_info->coordinates-1].point.y);
  if ((x < MagickEpsilon) && (y < MagickEpsilon))
    primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

/*
  TraceLine traces start..end as a two-point line primitive; coincident
  endpoints degenerate to a single point primitive.
*/
static MagickBooleanType
TraceLine(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  if (TracePoint(primitive_info,start) == MagickFalse)
    return(MagickFalse);
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    {
      /*
        Degenerate line: emit a single point primitive.
      */
      primitive_info->primitive=PointPrimitive;
      primitive_info->coordinates=1;
      return(MagickTrue);
    }
  if (TracePoint(primitive_info+1,end) == MagickFalse)
    return(MagickFalse);
  (primitive_info+1)->primitive=primitive_info->primitive;
  primitive_info->coordinates=2;
  primitive_info->closed_subpath=MagickFalse;
  return(MagickTrue);
}

/*
  TracePath parses an SVG-style path string (M/L/H/V/C/S/Q/T/A/Z commands;
  lowercase variants are relative) and appends the traced points to
  mvg_info.  It returns the number of coordinates traced, or -1 on error.
*/
static ssize_t TracePath(MVGInfo *mvg_info,const char *path,
  ExceptionInfo *exception)
{
  char
    *next_token,
    token[MagickPathExtent];

  const char
    *p;

  double
    x,
    y;

  int
    attribute,
    last_attribute;

  MagickBooleanType
    status;

  PointInfo
    end = {0.0, 0.0},
    points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
    point = {0.0, 0.0},
    start = {0.0, 0.0};

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  PrimitiveInfo
    *q;

  ssize_t
    i;

  size_t
    number_coordinates,
    z_count;

  ssize_t
    subpath_offset;

  subpath_offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  status=MagickTrue;
  attribute=0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    if (status == MagickFalse)
      break;
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        double
          angle = 0.0;

        MagickBooleanType
          large_arc = MagickFalse,
          sweep = MagickFalse;

        PointInfo
          arc = {0.0, 0.0};

        /*
          Elliptical arc: rx,ry rotation large-arc-flag sweep-flag x,y.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          arc.x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          arc.y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          angle=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          /* 'A' is absolute, 'a' is relative to the current point */
          end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
          end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
          if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'c':
      case 'C':
      {
        /*
          Cubic Bézier curve.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'H':
      case 'h':
      {
        /*
          Horizontal line-to.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'H' ?
x: point.x+x);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'l':
      case 'L':
      {
        /*
          Line to.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'M':
      case 'm':
      {
        /*
          Move to: flush the pending subpath, then start a new subpath at
          the given point; additional coordinate pairs trace as points.
        */
        if (mvg_info->offset != subpath_offset)
          {
            primitive_info=(*mvg_info->primitive_info)+subpath_offset;
            primitive_info->coordinates=(size_t) (q-primitive_info);
            number_coordinates+=primitive_info->coordinates;
            primitive_info=q;
            subpath_offset=mvg_info->offset;
          }
        i=0;
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          x=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
          if (i == 0)
            start=point;  /* remember the subpath start for 'Z' */
          i++;
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'q':
      case 'Q':
      {
        /*
          Quadratic Bézier curve.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 's':
      case 'S':
      {
        /*
          Smooth cubic Bézier curve: the first control point is the
          reflection of the previous segment's second control point
          (falls back to the current point when the previous command was
          not C/c/S/s).
        */
        do
        {
          points[0]=points[3];
          points[1].x=2.0*points[3].x-points[2].x;
          points[1].y=2.0*points[3].y-points[2].y;
          for (i=2; i < 4; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
            points[i]=end;
          }
          if (strchr("CcSs",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 't':
      case 'T':
      {
        /*
          Smooth quadratic Bézier curve.
*/
        do
        {
          points[0]=points[2];
          points[1].x=2.0*points[2].x-points[1].x;
          points[1].y=2.0*points[2].y-points[1].y;
          for (i=2; i < 3; i++)
          {
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            x=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            (void) GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              (void) GetNextToken(p,&p,MagickPathExtent,token);
            y=GetDrawValue(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
            points[i]=end;
          }
          if (status == MagickFalse)
            break;
          /* reflect the previous control point only after a Q/q/T/t */
          if (strchr("QqTt",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'v':
      case 'V':
      {
        /*
          Vertical line-to.
        */
        do
        {
          (void) GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            (void) GetNextToken(p,&p,MagickPathExtent,token);
          y=GetDrawValue(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(-1);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(-1);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'z':
      case 'Z':
      {
        /*
          Close path: return to the subpath start and mark it closed.
        */
        point=start;
        if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
          return(-1);
        q=(*mvg_info->primitive_info)+mvg_info->offset;
        if (TracePoint(q,point) == MagickFalse)
          return(-1);
        mvg_info->offset+=q->coordinates;
        q+=q->coordinates;
        primitive_info=(*mvg_info->primitive_info)+subpath_offset;
        primitive_info->coordinates=(size_t) (q-primitive_info);
        primitive_info->closed_subpath=MagickTrue;
        number_coordinates+=primitive_info->coordinates;
        primitive_info=q;
        subpath_offset=mvg_info->offset;
        z_count++;
        break;
      }
      default:
      {
        ThrowPointExpectedException(token,exception);
        break;
      }
    }
  }
  if (status == MagickFalse)
    return(-1);
  /*
    Flush the final subpath, then walk backwards tagging every traced point
    with the primitive type; multiple closed subpaths use fill-to-border.
  */
  primitive_info=(*mvg_info->primitive_info)+subpath_offset;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  number_coordinates+=primitive_info->coordinates;
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    q--;
    q->primitive=primitive_type;
    if (z_count > 1)
      q->method=FillToBorderMethod;
  }
  q=primitive_info;  /* NOTE(review): dead store; q goes out of scope here */
  return((ssize_t) number_coordinates);
}

/*
  TraceRectangle traces the axis-aligned rectangle with opposite corners
  start and end as a closed five-point subpath.
*/
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  PointInfo
    point;

  PrimitiveInfo
    *p;

  ssize_t
    i;

  p=primitive_info;
  if (TracePoint(p,start) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  point.x=start.x;
  point.y=end.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  if (TracePoint(p,end) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  point.x=end.x;
  point.y=start.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  if (TracePoint(p,start) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

/*
  TraceRoundRectangle traces a rectangle with elliptically-rounded corners
  (radii given by arc, passed by value and clamped locally) as one closed
  subpath.
*/
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
  const PointInfo start,const PointInfo end,PointInfo arc)
{
  PointInfo
degrees,
    point,
    segment;

  PrimitiveInfo
    *primitive_info;

  PrimitiveInfo
    *p;

  ssize_t
    i;

  ssize_t
    offset;

  offset=mvg_info->offset;
  segment.x=fabs(end.x-start.x);
  segment.y=fabs(end.y-start.y);
  if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
    {
      /*
        Degenerate rectangle: emit nothing.
      */
      (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
      return(MagickTrue);
    }
  /*
    Clamp the corner radii to half the rectangle's extent.
  */
  if (arc.x > (0.5*segment.x))
    arc.x=0.5*segment.x;
  if (arc.y > (0.5*segment.y))
    arc.y=0.5*segment.y;
  /*
    Trace the four 90-degree corner arcs in turn, then close back to the
    first traced point.
  */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+arc.y;
  degrees.x=270.0;
  degrees.y=360.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=0.0;
  degrees.y=90.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  point.x=start.x+arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=90.0;
  degrees.y=180.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  point.x=start.x+arc.x;
  point.y=start.y+arc.y;
  degrees.x=180.0;
  degrees.y=270.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

/*
  TraceSquareLinecap extends both end points of an open stroke outward by
  'offset' along the direction of the terminal segments (square line cap).
  NOTE(review): if the first/last distinct vertex search finds no motion,
  distance can be ~0 and the divisions below approach divide-by-zero --
  confirm callers never pass an all-coincident vertex list.
*/
static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
  const size_t number_vertices,const double offset)
{
  double
    distance;

  double
    dx,
    dy;

  ssize_t
    i;

  ssize_t
    j;

  dx=0.0;
  dy=0.0;
  /*
    Find the first vertex that differs from the start point.
  */
  for (i=1; i < (ssize_t) number_vertices; i++)
  {
    dx=primitive_info[0].point.x-primitive_info[i].point.x;
    dy=primitive_info[0].point.y-primitive_info[i].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  if (i == (ssize_t) number_vertices)
    i=(ssize_t) number_vertices-1L;
  distance=hypot((double) dx,(double) dy);
  primitive_info[0].point.x=(double) (primitive_info[i].point.x+
    dx*(distance+offset)/distance);
  primitive_info[0].point.y=(double) (primitive_info[i].point.y+
    dy*(distance+offset)/distance);
  /*
    Likewise extend the last point away from the last distinct vertex.
  */
  for (j=(ssize_t) number_vertices-2; j >= 0; j--)
  {
    dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
    dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  distance=hypot((double) dx,(double) dy);
  primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+
    dx*(distance+offset)/distance);
  primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+
    dy*(distance+offset)/distance);
  return(MagickTrue);
}

/*
  TraceStrokePolygon builds, from a polyline and the stroke attributes in
  draw_info (width, cap, join, miter limit), a new polygon that outlines
  the stroked path; it returns the allocated polygon or NULL on failure.
*/
static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,ExceptionInfo *exception)
{
#define MaxStrokePad  (6*BezierQuantum+360)
#define CheckPathExtent(pad_p,pad_q) \
{ \
  if ((pad_p) > MaxBezierCoordinates) \
    stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
  else \
    if ((ssize_t) (p+(pad_p)) >= (ssize_t) extent_p) \
      { \
        if (~extent_p < (pad_p)) \
          stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
        else \
          { \
            extent_p+=(pad_p); \
            stroke_p=(PointInfo *) ResizeQuantumMemory(stroke_p,extent_p+ \
              MaxStrokePad,sizeof(*stroke_p)); \
          } \
      } \
  if ((pad_q) > MaxBezierCoordinates) \
    stroke_q=(PointInfo \
*) RelinquishMagickMemory(stroke_q); \
  else \
    if ((ssize_t) (q+(pad_q)) >= (ssize_t) extent_q) \
      { \
        if (~extent_q < (pad_q)) \
          stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
        else \
          { \
            extent_q+=(pad_q); \
            stroke_q=(PointInfo *) ResizeQuantumMemory(stroke_q,extent_q+ \
              MaxStrokePad,sizeof(*stroke_q)); \
          } \
      } \
  if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL)) \
    { \
      if (stroke_p != (PointInfo *) NULL) \
        stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); \
      if (stroke_q != (PointInfo *) NULL) \
        stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); \
      polygon_primitive=(PrimitiveInfo *) \
        RelinquishMagickMemory(polygon_primitive); \
      (void) ThrowMagickException(exception,GetMagickModule(), \
        ResourceLimitError,"MemoryAllocationFailed","`%s'",""); \
      return((PrimitiveInfo *) NULL); \
    } \
}

  typedef struct _StrokeSegment
  {
    double
      p,
      q;
  } StrokeSegment;

  double
    delta_theta,
    dot_product,
    mid,
    miterlimit;

  MagickBooleanType
    closed_path;

  PointInfo
    box_p[5],
    box_q[5],
    center,
    offset,
    *stroke_p,
    *stroke_q;

  PrimitiveInfo
    *polygon_primitive,
    *stroke_polygon;

  ssize_t
    i;

  size_t
    arc_segments,
    extent_p,
    extent_q,
    number_vertices;

  ssize_t
    j,
    n,
    p,
    q;

  StrokeSegment
    dx = {0.0, 0.0},
    dy = {0.0, 0.0},
    inverse_slope = {0.0, 0.0},
    slope = {0.0, 0.0},
    theta = {0.0, 0.0};

  /*
    Allocate paths.
  */
  number_vertices=primitive_info->coordinates;
  polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    number_vertices+2UL,sizeof(*polygon_primitive));
  if (polygon_primitive == (PrimitiveInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PrimitiveInfo *) NULL);
    }
  (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
    sizeof(*polygon_primitive));
  /*
    A path is closed when its first and last points coincide; closed paths
    with round/miter joins wrap the second vertex so the seam gets a join.
  */
  offset.x=primitive_info[number_vertices-1].point.x-primitive_info[0].point.x;
  offset.y=primitive_info[number_vertices-1].point.y-primitive_info[0].point.y;
  closed_path=(fabs(offset.x) < MagickEpsilon) &&
    (fabs(offset.y) < MagickEpsilon) ? MagickTrue : MagickFalse;
  if (((draw_info->linejoin == RoundJoin) ||
       (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
    {
      polygon_primitive[number_vertices]=primitive_info[1];
      number_vertices++;
    }
  polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
  /*
    Compute the slope for the first line segment, p.
  */
  dx.p=0.0;
  dy.p=0.0;
  for (n=1; n < (ssize_t) number_vertices; n++)
  {
    dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
    dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
    if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
      break;
  }
  if (n == (ssize_t) number_vertices)
    {
      if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
        {
          /*
            Zero length subpath.
          */
          stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
            sizeof(*stroke_polygon));
          stroke_polygon[0]=polygon_primitive[0];
          stroke_polygon[0].coordinates=0;
          polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
            polygon_primitive);
          return(stroke_polygon);
        }
      n=(ssize_t) number_vertices-1L;
    }
  extent_p=2*number_vertices;
  extent_q=2*number_vertices;
  stroke_p=(PointInfo *) AcquireQuantumMemory((size_t) extent_p+MaxStrokePad,
    sizeof(*stroke_p));
  stroke_q=(PointInfo *) AcquireQuantumMemory((size_t) extent_q+MaxStrokePad,
    sizeof(*stroke_q));
  if ((stroke_p == (PointInfo *) NULL) || (stroke_q == (PointInfo *) NULL))
    {
      if (stroke_p != (PointInfo *) NULL)
        stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p);
      if (stroke_q != (PointInfo *) NULL)
        stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q);
      polygon_primitive=(PrimitiveInfo *)
        RelinquishMagickMemory(polygon_primitive);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return((PrimitiveInfo *) NULL);
    }
  /*
    Near-vertical/near-horizontal segments get a clamped +/-1/MagickEpsilon
    slope so later intersection math stays finite.
  */
  slope.p=0.0;
  inverse_slope.p=0.0;
  if (fabs(dx.p) < MagickEpsilon)
    {
      if (dx.p >= 0.0)
        slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
      else
        slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
    }
  else
    if (fabs(dy.p) < MagickEpsilon)
      {
        if (dy.p >= 0.0)
          inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      {
        slope.p=dy.p/dx.p;
        inverse_slope.p=(-1.0/slope.p);
      }
  /*
    mid is half the stroke width in device space; miterlimit is compared
    against squared distances below.
  */
  mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
  miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
  if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
    (void) TraceSquareLinecap(polygon_primitive,number_vertices,mid);
  offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
  offset.y=(double) (offset.x*inverse_slope.p);
  if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
    {
      box_p[0].x=polygon_primitive[0].point.x-offset.x;
      box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
      box_p[1].x=polygon_primitive[n].point.x-offset.x;
      box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
      box_q[0].x=polygon_primitive[0].point.x+offset.x;
      box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
      box_q[1].x=polygon_primitive[n].point.x+offset.x;
      box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
    }
  else
    {
      box_p[0].x=polygon_primitive[0].point.x+offset.x;
      box_p[0].y=polygon_primitive[0].point.y+offset.y;
      box_p[1].x=polygon_primitive[n].point.x+offset.x;
      box_p[1].y=polygon_primitive[n].point.y+offset.y;
      box_q[0].x=polygon_primitive[0].point.x-offset.x;
      box_q[0].y=polygon_primitive[0].point.y-offset.y;
      box_q[1].x=polygon_primitive[n].point.x-offset.x;
      box_q[1].y=polygon_primitive[n].point.y-offset.y;
    }
  /*
    Create strokes for the line join attribute: bevel, miter, round.
    NOTE(review): stroke_q is indexed with p and stroke_p with q on the
    next two lines; both counters are still zero so the effect is
    stroke_q[0]/stroke_p[0], but confirm the swap is intentional.
  */
  p=0;
  q=0;
  stroke_q[p++]=box_q[0];
  stroke_p[q++]=box_p[0];
  for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
  {
    /*
      Compute the slope for this line segment, q.
    */
    dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
    dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
    dot_product=dx.q*dx.q+dy.q*dy.q;
    if (dot_product < 0.25)
      continue;  /* skip sub-half-pixel segments */
    slope.q=0.0;
    inverse_slope.q=0.0;
    if (fabs(dx.q) < MagickEpsilon)
      {
        if (dx.q >= 0.0)
          slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      if (fabs(dy.q) < MagickEpsilon)
        {
          if (dy.q >= 0.0)
            inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
          else
            inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
        }
      else
        {
          slope.q=dy.q/dx.q;
          inverse_slope.q=(-1.0/slope.q);
        }
    offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
    offset.y=(double) (offset.x*inverse_slope.q);
    dot_product=dy.q*offset.x-dx.q*offset.y;
    if (dot_product > 0.0)
      {
        box_p[2].x=polygon_primitive[n].point.x-offset.x;
        box_p[2].y=polygon_primitive[n].point.y-offset.y;
        box_p[3].x=polygon_primitive[i].point.x-offset.x;
        box_p[3].y=polygon_primitive[i].point.y-offset.y;
        box_q[2].x=polygon_primitive[n].point.x+offset.x;
        box_q[2].y=polygon_primitive[n].point.y+offset.y;
        box_q[3].x=polygon_primitive[i].point.x+offset.x;
        box_q[3].y=polygon_primitive[i].point.y+offset.y;
      }
    else
      {
        box_p[2].x=polygon_primitive[n].point.x+offset.x;
        box_p[2].y=polygon_primitive[n].point.y+offset.y;
        box_p[3].x=polygon_primitive[i].point.x+offset.x;
        box_p[3].y=polygon_primitive[i].point.y+offset.y;
        box_q[2].x=polygon_primitive[n].point.x-offset.x;
        box_q[2].y=polygon_primitive[n].point.y-offset.y;
        box_q[3].x=polygon_primitive[i].point.x-offset.x;
        box_q[3].y=polygon_primitive[i].point.y-offset.y;
      }
    /*
      box_*[4] is the intersection of the two offset edges (the miter
      point); parallel edges fall back to the shared edge endpoint.
    */
    if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
      {
        box_p[4]=box_p[1];
        box_q[4]=box_q[1];
      }
    else
      {
        box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
          box_p[3].y)/(slope.p-slope.q));
        box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
        box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
          box_q[3].y)/(slope.p-slope.q));
        box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
      }
    CheckPathExtent(MaxStrokePad,MaxStrokePad);
    dot_product=dx.q*dy.p-dx.p*dy.q;
    if (dot_product <= 0.0)
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          stroke_q[q++]=box_q[1];
          stroke_q[q++]=box_q[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            stroke_p[p++]=box_p[4];
          else
            {
              stroke_p[p++]=box_p[1];
              stroke_p[p++]=box_p[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              stroke_q[q++]=box_q[4];
              stroke_p[p++]=box_p[4];
            }
          else
            {
              stroke_q[q++]=box_q[1];
              stroke_q[q++]=box_q[2];
              stroke_p[p++]=box_p[1];
              stroke_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            stroke_p[p++]=box_p[4];
          else
            {
              stroke_p[p++]=box_p[1];
              stroke_p[p++]=box_p[2];
            }
          /*
            Fill the outside of the joint with a fan of arc samples.
          */
          center=polygon_primitive[n].point;
          theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
          theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
          if (theta.q < theta.p)
            theta.q+=2.0*MagickPI;
          arc_segments=(size_t) CastDoubleToLong(ceil((double) ((theta.q-
            theta.p)/(2.0*sqrt((double) (1.0/mid))))));
          CheckPathExtent(MaxStrokePad,arc_segments+MaxStrokePad);
          stroke_q[q].x=box_q[1].x;
          stroke_q[q].y=box_q[1].y;
          q++;
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            stroke_q[q].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            stroke_q[q].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            q++;
          }
          stroke_q[q++]=box_q[2];
          break;
        }
        default:
          break;
      }
    else
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          stroke_p[p++]=box_p[1];
          stroke_p[p++]=box_p[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            stroke_q[q++]=box_q[4];
          else
            {
              stroke_q[q++]=box_q[1];
              stroke_q[q++]=box_q[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              stroke_q[q++]=box_q[4];
              stroke_p[p++]=box_p[4];
            }
          else
            {
              stroke_q[q++]=box_q[1];
              stroke_q[q++]=box_q[2];
              stroke_p[p++]=box_p[1];
              stroke_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            stroke_q[q++]=box_q[4];
          else
            {
              stroke_q[q++]=box_q[1];
              stroke_q[q++]=box_q[2];
            }
          center=polygon_primitive[n].point;
          theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
          theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
          if (theta.p < theta.q)
            theta.p+=2.0*MagickPI;
          arc_segments=(size_t) CastDoubleToLong(ceil((double) ((theta.p-
            theta.q)/(2.0*sqrt((double) (1.0/mid))))));
          CheckPathExtent(arc_segments+MaxStrokePad,MaxStrokePad);
          stroke_p[p++]=box_p[1];
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            stroke_p[p].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0)))); stroke_p[p].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); p++; } stroke_p[p++]=box_p[2]; break; } default: break; } slope.p=slope.q; inverse_slope.p=inverse_slope.q; box_p[0]=box_p[2]; box_p[1]=box_p[3]; box_q[0]=box_q[2]; box_q[1]=box_q[3]; dx.p=dx.q; dy.p=dy.q; n=i; } stroke_p[p++]=box_p[1]; stroke_q[q++]=box_q[1]; /* Trace stroked polygon. */ stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon)); if (stroke_polygon == (PrimitiveInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory( polygon_primitive); return(stroke_polygon); } for (i=0; i < (ssize_t) p; i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_p[i]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; } for ( ; i < (ssize_t) (p+q+closed_path); i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_q[p+q+closed_path-(i+1)]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[p+closed_path].point; i++; } stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; stroke_polygon[i].primitive=UndefinedPrimitive; stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1); stroke_p=(PointInfo *) RelinquishMagickMemory(stroke_p); stroke_q=(PointInfo *) RelinquishMagickMemory(stroke_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return(stroke_polygon); }
LinkedCellParallelBuffer.h
#pragma once #include "physics/Physics.h" #include "physics/variants/LennardJones.h" #include "container/LinkedCell/LinkedCellContainer.h" #include "LinkedCell.h" /** * This class implements the linked cell algorithm in the form of a parallel algorithm that works with buffer and without locks. * @tparam T The physics to be used * @tparam dim The dimension of our simulation */ template<typename T, size_t dim, typename std::enable_if<std::is_base_of<PhysicsType, T>::value, bool>::type = true> class LinkedCellParallelBuffer : LinkedCell<T, dim> { public: LinkedCellParallelBuffer() = default; //----------------------------------------Methods---------------------------------------- /** * This method calculates the forces between the different particles in the different cells. * @param particleContainer that provides possible required values and functionalities */ void performUpdate(ParticleContainer<dim> &particleContainer) const override; /** * This method calculates the force, position and velocity of the particles in the container. * In addition, the structure is updated appropriately and renewed if needed. * Particles that leave the structure are deleted. * @param particleContainer The ParticleContainer, for whose contents the positions should be calculated * @param deltaT time step of our simulation * @param gravitation additional vector of gravitational force applied on all particles * @param current_time current time of this iteration */ void calculateNextStep(ParticleContainer<dim> &particleContainer, double deltaT, double &gravitation, double current_time) const override { LinkedCell<T, dim>::calculateNextStep(particleContainer, deltaT, gravitation, current_time); } }; /** * This class implements the linked cell algorithm in the form of a parallel algorithm that works with buffer and without locks. 
 * @tparam dim The dimension of our simulation
 */
template<size_t dim>
class LinkedCellParallelBuffer<LennardJones, dim> : public LinkedCell<LennardJones, dim> {
 public:
  LinkedCellParallelBuffer() = default;

  //----------------------------------------Methods----------------------------------------

  /**
   * This method calculates the forces between the different particles in the different cells.
   * Parallelization strategy: cells are distributed over OpenMP threads. Force updates on
   * particles of the cell a thread currently owns are applied in place, while the reaction
   * forces on particles of neighbour cells (which may be processed by other threads) are
   * buffered in `updates` and replayed sequentially afterwards — hence no locks are needed.
   * @param particleContainer that provides possible required values and functionalities
   */
  void performUpdate(ParticleContainer<dim> &particleContainer) const override {
    auto &cellContainer = static_cast<LinkedCellContainer<dim> &>(particleContainer);

    // Custom OpenMP reduction that concatenates the per-thread update buffers.
#pragma omp declare reduction (merge : std::vector<std::pair<Particle<dim> *, Vector<dim>>> : omp_out.insert(omp_out.end(), omp_in.begin(), omp_in.end()))

    // particle and value
    std::vector<std::pair<Particle<dim> *, Vector<dim>>> updates;

    // Apply the boundary cells' properties in parallel (no synchronization here,
    // so the per-cell operations are assumed independent).
#pragma omp parallel for shared(cellContainer) default(none)
    for (size_t i = 0; i < cellContainer.getBoundaryCells().size(); ++i) {
      Boundary<dim> &b = cellContainer.getBoundaryCells()[i];
      b.applyCellProperties();
    }

#pragma omp parallel for shared(cellContainer) default(none) reduction(merge: updates)
    for (size_t c = 0; c < cellContainer.getBoundaryAndInnerCells().size(); ++c) {
      Cell<dim> *cell = cellContainer.getBoundaryAndInnerCells()[c];
      std::vector<Cell<dim> *> &neighbours = cell->getNeighbours();
      std::vector<Particle<dim> *> &cellParticles = cell->getParticles();

      if (!cellParticles.empty()) {
        // Calc between particles in cells and relevant neighbours.
        // The force on this cell's particle is applied directly; the opposite
        // force (-force) on the neighbour's particle is only buffered, because
        // the neighbour cell may be owned by another thread.
        for (auto n = neighbours.begin(); n != neighbours.end(); ++n) {
          for (auto i = cellParticles.begin(); i != cellParticles.end(); ++i) {
            for (auto j = (*n)->getParticles().begin(); j != (*n)->getParticles().end(); ++j) {
              Vector<dim> force = LinkedCell<LennardJones, dim>::calculateLennardJones(*(*i), *(*j), cellContainer);
              if (!isNull(force)) {
                (*i)->updateForce(force);
                updates.emplace_back(*j, -force);
              }
            }
          }
        }

        // Calc in the cells (each unordered pair handled exactly once via j = i + 1).
        for (auto i = cellParticles.begin(); i != cellParticles.end(); ++i) {
          for (auto j = i + 1; j != cellParticles.end(); ++j) {
            Vector<dim> force = LinkedCell<LennardJones, dim>::calculateLennardJones(*(*i), *(*j), cellContainer);
            if (!isNull(force)) {
              (*i)->updateForce(force);
              updates.emplace_back(*j, -force);
            }
          }
        }
      }
    }

    // Replay the buffered cross-cell reaction forces sequentially (single thread,
    // so no races on the target particles).
    for (auto &u: updates) {
      u.first->updateForce(u.second);
    }

    // Periodic neighbours sequentially
    for (size_t c = 0; c < cellContainer.getBoundaryCells().size(); ++c) {
      Boundary<dim> &cell = cellContainer.getBoundaryCells()[c];
      std::vector<Particle<dim> *> &cellParticles = cell.getParticles();
      LinkedCell<LennardJones, dim>::calcPeriodic(cellParticles, cellContainer, cell);
    }

    LinkedCell<LennardJones, dim>::calculateMolecules(cellContainer);
  }

  /**
   * This method calculates the force, position and velocity of the particles in the container.
   * In addition, the structure is updated appropriately and renewed if needed.
   * Particles that leave the structure are deleted.
   * @param particleContainer The ParticleContainer, for whose contents the positions should be calculated
   * @param deltaT time step of our simulation
   * @param gravitation additional vector of gravitational force applied on all particles
   * @param current_time current time of this iteration
   */
  void calculateNextStep(ParticleContainer<dim> &particleContainer, double deltaT, Vector<dim> &gravitation,
                         double current_time) const override {
    LinkedCell<LennardJones, dim>::calculateNextStep(particleContainer, deltaT, gravitation, current_time);
  }
};
directive.h
// Stand-alone OpenMP directive: block the encountering task until all child
// tasks it has generated so far have completed.
// NOTE(review): this file consists of only the directive — presumably a minimal
// input for a directive parser / pretty-printer test; confirm intended use.
#pragma omp taskwait
utils.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * Copyright (c) 2015 by Contributors
 * \file utils.h
 * \brief Basic utility functions.
 */
#ifndef MXNET_COMMON_UTILS_H_
#define MXNET_COMMON_UTILS_H_

#include <dmlc/logging.h>
#include <dmlc/omp.h>
#include <nnvm/graph.h>
#include <mxnet/engine.h>
#include <mxnet/ndarray.h>
#include <mxnet/op_attr_types.h>
#include <mxnet/graph_attr_types.h>
#include <nnvm/graph_attr_types.h>

#include <memory>
#include <vector>
#include <type_traits>
#include <utility>
#include <random>
#include <string>
#include <thread>
#include <algorithm>
#include <functional>
#include <limits>

#include "../operator/mxnet_op.h"
#if MXNET_USE_MKLDNN == 1
#include "../operator/nn/mkldnn/mkldnn_base-inl.h"
#endif

namespace mxnet {
namespace common {

/*!
 * \brief IndPtr should be non-negative, in non-decreasing order, start with 0
 * and end with value equal with size of indices.
 */
struct csr_indptr_check {
  // Kernel body: element i validates the indptr[i] -> indptr[i+1] transition.
  // Writes kCSRIndPtrErr to *out on any violated invariant; leaves *out
  // untouched for valid input.
  template<typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* indptr,
                                  const nnvm::dim_t end, const nnvm::dim_t idx_size) {
    if (indptr[i+1] < 0 ||                          // negative pointer value
        indptr[i+1] < indptr[i] ||                  // not non-decreasing
        (i == 0 && indptr[i] != 0) ||               // must start at 0
        (i == end - 1 && indptr[end] != idx_size))  // must end at nnz
      *out = kCSRIndPtrErr;
  }
};

/*!
 * \brief Indices should be non-negative, less than the number of columns
 * and in ascending order per row.
 */
struct csr_idx_check {
  // Kernel body: element i scans row i's column indices; flags kCSRIdxErr on an
  // out-of-range or non-strictly-ascending index and stops at the first error.
  template<typename DType, typename IType, typename RType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
                                  const RType* indptr, const nnvm::dim_t ncols) {
    for (RType j = indptr[i]; j < indptr[i+1]; j++) {
      if (idx[j] >= ncols || idx[j] < 0 ||
          (j < indptr[i+1] - 1 && idx[j] >= idx[j+1])) {
        *out = kCSRIdxErr;
        break;
      }
    }
  }
};

/*!
 * \brief Indices of RSPNDArray should be non-negative,
 * less than the size of first dimension and in ascending order
 */
struct rsp_idx_check {
  // Kernel body: element i checks idx[i] is in [0, nrows) and, while i < end,
  // that consecutive row indices are strictly ascending.
  template<typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
                                  const nnvm::dim_t end, const nnvm::dim_t nrows) {
    if ((i < end && idx[i+1] <= idx[i]) || idx[i] < 0 || idx[i] >= nrows)
      *out = kRSPIdxErr;
  }
};

// Declaration only — presumably defined per device type elsewhere; dispatches
// format validation for an NDArray (TODO confirm definition site).
template<typename xpu>
void CheckFormatWrapper(const RunContext &rctx, const NDArray &input,
                        const TBlob &err_cpu, const bool full_check);

/*!
 * \brief Check the validity of CSRNDArray.
 * \param rctx Execution context.
 * \param input Input NDArray of CSRStorage.
 * \param err_cpu Error number on cpu.
 * \param full_check If true, rigorous check, O(N) operations,
 * otherwise basic check, O(1) operations.
 */
template<typename xpu>
void CheckFormatCSRImpl(const RunContext &rctx, const NDArray &input,
                        const TBlob &err_cpu, const bool full_check) {
  using namespace op::mxnet_op;
  CHECK_EQ(input.storage_type(), kCSRStorage)
    << "CheckFormatCSRImpl is for CSRNDArray";
  const mxnet::TShape shape = input.shape();
  const mxnet::TShape idx_shape = input.aux_shape(csr::kIdx);
  const mxnet::TShape indptr_shape = input.aux_shape(csr::kIndPtr);
  const mxnet::TShape storage_shape = input.storage_shape();
  // O(1) structural checks: 2-D matrix, 1-D aux/data arrays,
  // |indptr| == rows + 1, and |idx| == |data| (nnz).
  if ((shape.ndim() != 2) ||
      (idx_shape.ndim() != 1 || indptr_shape.ndim() != 1 || storage_shape.ndim() != 1) ||
      (indptr_shape[0] != shape[0] + 1) ||
      (idx_shape[0] != storage_shape[0])) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      DType* err = err_cpu.dptr<DType>();
      *err = kCSRShapeErr;
    });
    return;
  }
  if (full_check) {
    // O(N) content checks, run on the array's own device: a 1-element result
    // buffer is initialized to kNormalErr, the validation kernels overwrite it
    // on failure, and it is finally copied back into err_cpu.
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIndPtr), RType, {
        MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIdx), IType, {
          mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
          NDArray ret_xpu = NDArray(mshadow::Shape1(1),
                                    rctx.get_ctx(), false, err_cpu.type_flag_);
          TBlob val_xpu = ret_xpu.data();
          Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
          Kernel<csr_indptr_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
            input.aux_data(csr::kIndPtr).dptr<RType>(),
            indptr_shape[0] - 1, idx_shape[0]);
          // no need to check indices if indices are empty
          if (idx_shape[0] != 0) {
            Kernel<csr_idx_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
              input.aux_data(csr::kIdx).dptr<IType>(),
              input.aux_data(csr::kIndPtr).dptr<RType>(), shape[1]);
          }
          mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
                        val_xpu.get<xpu, 1, DType>(s), s);
        });
      });
    });
  }
}

/*!
 * \brief Check the validity of RowSparseNDArray.
 * \param rctx Execution context.
 * \param input Input NDArray of RowSparseStorage.
 * \param err_cpu Error number on cpu.
 * \param full_check If true, rigorous check, O(N) operations,
 * otherwise basic check, O(1) operations.
 */
template<typename xpu>
void CheckFormatRSPImpl(const RunContext &rctx, const NDArray &input,
                        const TBlob &err_cpu, const bool full_check) {
  using namespace op::mxnet_op;
  CHECK_EQ(input.storage_type(), kRowSparseStorage)
    << "CheckFormatRSPImpl is for RSPNDArray";
  // O(1) check: number of stored row indices must match number of stored rows.
  const mxnet::TShape idx_shape = input.aux_shape(rowsparse::kIdx);
  if (idx_shape[0] != input.storage_shape()[0]) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      DType* err = err_cpu.dptr<DType>();
      *err = kRSPShapeErr;
    });
    return;
  }
  // An array with zero stored rows is trivially valid.
  if (idx_shape[0] == 0) {
    return;
  }
  if (full_check) {
    // O(N) content check on the array's device; result is copied into err_cpu.
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(input.aux_type(rowsparse::kIdx), IType, {
        mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
        NDArray ret_xpu = NDArray(mshadow::Shape1(1),
                                  rctx.get_ctx(), false, err_cpu.type_flag_);
        TBlob val_xpu = ret_xpu.data();
        Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());

        Kernel<rsp_idx_check, xpu>::Launch(s, idx_shape[0],
          val_xpu.dptr<DType>(), input.aux_data(rowsparse::kIdx).dptr<IType>(),
          idx_shape[0] - 1, input.shape()[0]);
        mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
                      val_xpu.get<xpu, 1, DType>(s), s);
      });
    });
  }
}

// Dispatches the format check by storage type; default storage is a no-op and
// an unknown storage type is a fatal error.
template<typename xpu>
void CheckFormatImpl(const RunContext &rctx, const NDArray &input,
                     const TBlob &err_cpu, const bool full_check) {
  int stype = input.storage_type();
  if (stype == kCSRStorage) {
    CheckFormatCSRImpl<xpu>(rctx, input, err_cpu, full_check);
  } else if (stype == kRowSparseStorage) {
    CheckFormatRSPImpl<xpu>(rctx, input, err_cpu, full_check);
  } else if (stype == kDefaultStorage) {
    // no-op for default storage
  } else {
    LOG(FATAL) << "Unknown storage type " << stype;
  }
}

/*! \brief Pick rows specified by user input index array from a row sparse ndarray
 * and save them in the output sparse ndarray.
*/ template<typename xpu> void SparseRetainOpForwardRspWrapper(mshadow::Stream<xpu> *s, const NDArray& input_nd, const TBlob& idx_data, const OpReqType req, NDArray* output_nd); /* \brief Casts tensor storage type to the new type. */ template<typename xpu> void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output); /*! \brief returns true if all storage types in `vstorage` are the same as target `stype`. * false is returned for empty inputs. */ inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage, const NDArrayStorageType stype) { if (!vstorage.empty()) { for (const auto& i : vstorage) { if (i != stype) return false; } return true; } return false; } /*! \brief returns true if all storage types in `vstorage` are the same as target `stype1` * or `stype2'. Sets boolean if both found. * false is returned for empty inputs. */ inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage, const NDArrayStorageType stype1, const NDArrayStorageType stype2, bool *has_both) { if (has_both) { *has_both = false; } if (!vstorage.empty()) { uint8_t has = 0; for (const auto i : vstorage) { if (i == stype1) { has |= 1; } else if (i == stype2) { has |= 2; } else { return false; } } if (has_both) { *has_both = has == 3; } return true; } return false; } /*! \brief returns true if the storage types of arrays in `ndarrays` * are the same as target `stype`. false is returned for empty inputs. */ inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays, const NDArrayStorageType stype) { if (!ndarrays.empty()) { for (const auto& nd : ndarrays) { if (nd.storage_type() != stype) { return false; } } return true; } return false; } /*! \brief returns true if the storage types of arrays in `ndarrays` * are the same as targets `stype1` or `stype2`. false is returned for empty inputs. 
*/ inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays, const NDArrayStorageType stype1, const NDArrayStorageType stype2, bool *has_both) { if (has_both) { *has_both = false; } if (!ndarrays.empty()) { uint8_t has = 0; for (const auto& nd : ndarrays) { const NDArrayStorageType stype = nd.storage_type(); if (stype == stype1) { has |= 1; } else if (stype == stype2) { has |= 2; } else { return false; } } if (has_both) { *has_both = has == 3; } return true; } return false; } /*! \brief returns true if storage type of any array in `ndarrays` * is the same as the target `stype`. false is returned for empty inputs. */ inline bool ContainsStorageType(const std::vector<NDArray>& ndarrays, const NDArrayStorageType stype) { if (!ndarrays.empty()) { for (const auto& nd : ndarrays) { if (nd.storage_type() == stype) { return true; } } } return false; } /*! \brief returns true if any storage type `ndstype` in `ndstypes` * is the same as the target `stype`. false is returned for empty inputs. */ inline bool ContainsStorageType(const std::vector<int>& ndstypes, const NDArrayStorageType stype) { if (!ndstypes.empty()) { for (const auto& ndstype : ndstypes) { if (ndstype == stype) { return true; } } } return false; } /*! \brief get string representation of dispatch_mode */ inline std::string dispatch_mode_string(const DispatchMode x) { switch (x) { case DispatchMode::kFCompute: return "fcompute"; case DispatchMode::kFComputeEx: return "fcompute_ex"; case DispatchMode::kFComputeFallback: return "fcompute_fallback"; case DispatchMode::kVariable: return "variable"; case DispatchMode::kUndefined: return "undefined"; } return "unknown"; } /*! \brief get string representation of storage_type */ inline std::string stype_string(const int x) { switch (x) { case kDefaultStorage: return "default"; case kCSRStorage: return "csr"; case kRowSparseStorage: return "row_sparse"; } return "unknown"; } /*! 
\brief get string representation of device type */ inline std::string dev_type_string(const int dev_type) { switch (dev_type) { case Context::kCPU: return "cpu"; case Context::kGPU: return "gpu"; case Context::kCPUPinned: return "cpu_pinned"; case Context::kCPUShared: return "cpu_shared"; } return "unknown"; } /*! \brief get string representation of the operator stypes */ inline std::string operator_stype_string(const nnvm::NodeAttrs& attrs, const int dev_mask, const std::vector<int>& in_attrs, const std::vector<int>& out_attrs) { std::ostringstream os; os << "operator = " << attrs.op->name << "\ninput storage types = ["; for (const int attr : in_attrs) { os << stype_string(attr) << ", "; } os << "]\n" << "output storage types = ["; for (const int attr : out_attrs) { os << stype_string(attr) << ", "; } os << "]\n" << "params = {"; for (auto kv : attrs.dict) { os << "\"" << kv.first << "\" : " << kv.second << ", "; } os << "}\n" << "context.dev_mask = " << dev_type_string(dev_mask); return os.str(); } /*! \brief get string representation of the operator */ inline std::string operator_string(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<NDArray>& inputs, const std::vector<OpReqType>& req, const std::vector<NDArray>& outputs) { std::string result = ""; std::vector<int> in_stypes; std::vector<int> out_stypes; in_stypes.reserve(inputs.size()); out_stypes.reserve(outputs.size()); auto xform = [](const NDArray arr) -> int { return arr.storage_type(); }; std::transform(inputs.begin(), inputs.end(), std::back_inserter(in_stypes), xform); std::transform(outputs.begin(), outputs.end(), std::back_inserter(out_stypes), xform); result += operator_stype_string(attrs, ctx.run_ctx.ctx.dev_mask(), in_stypes, out_stypes); return result; } /*! \brief log message once. Intended for storage fallback warning messages. 
 */
inline void LogOnce(const std::string& message) {
  // Per-thread (dmlc::ThreadLocalStore) set of already-emitted messages;
  // duplicates are therefore suppressed per thread, not globally.
  typedef dmlc::ThreadLocalStore<std::unordered_set<std::string>> LogStore;
  auto log_store = LogStore::Get();
  if (log_store->find(message) == log_store->end()) {
    LOG(INFO) << message;
    log_store->insert(message);
  }
}

/*! \brief log storage fallback event
 */
inline void LogStorageFallback(const nnvm::NodeAttrs& attrs,
                               const int dev_mask,
                               const std::vector<int>* in_attrs,
                               const std::vector<int>* out_attrs) {
  // Env var is read once (function-local static) — toggling it later has no effect.
  static bool log = dmlc::GetEnv("MXNET_STORAGE_FALLBACK_LOG_VERBOSE", true);
  if (!log) return;
  const std::string op_str = operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
  std::ostringstream os;
  const char* warning = "\nThe operator with default storage type will be dispatched "
    "for execution. You're seeing this warning message because the operator above is unable "
    "to process the given ndarrays with specified storage types, context and parameter. "
    "Temporary dense ndarrays are generated in order to execute the operator. "
    "This does not affect the correctness of the programme. "
    "You can set environment variable MXNET_STORAGE_FALLBACK_LOG_VERBOSE to "
    "0 to suppress this warning.";
  os << "\nStorage type fallback detected:\n" << op_str << warning;
  LogOnce(os.str());
#if MXNET_USE_MKLDNN == 1
  if (!MKLDNNEnvSet()) common::LogOnce("MXNET_MKLDNN_ENABLED flag is off. "
                                       "You can re-enable by setting MXNET_MKLDNN_ENABLED=1");
  // NOTE(review): the concatenated literal below emits "...is set.Should only be
  // set..." — a space is missing after "set." in the user-visible message.
  if (GetMKLDNNCacheSize() != -1) common::LogOnce("MXNET_MKLDNN_CACHE_NUM is set."
                                                  "Should only be set if "
                                                  "your model has variable input shapes, "
                                                  "as cache size may grow unbounded");
#endif
}

// heuristic to determine number of threads per GPU
inline int GetNumThreadsPerGPU() {
  // This is resource efficient option.
  return dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2);
}

// heuristic to get number of matching colors.
// this decides how much parallelism we can get in each GPU.
inline int GetExecNumMatchColor() {
  // This is resource efficient option.
  int num_match_color = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1);
  return std::min(num_match_color, GetNumThreadsPerGPU());
}

// OpenMP-parallel reduction: returns start + sum of a[0..n-1].
template<typename T, typename V>
V ParallelAccumulate(const T* a, const int n, V start) {
  V sum = start;
#pragma omp parallel for reduction(+:sum)
  for (int i = 0; i < n; ++i) {
    sum += a[i];
  }
  return sum;
}

/*!
 * \brief
 * Helper function for ParallelSort.
 * DO NOT call this function directly.
 * Use the interface ParallelSort instead.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt, typename Compare>
void ParallelSortHelper(RandomIt first, size_t len,
                        size_t grainsize, const Compare& comp) {
  if (len < grainsize) {
    std::sort(first, first+len, comp);
  } else {
    // Sort the left half on a spawned thread and the right half on this one,
    // then merge the two sorted halves in place.
    std::thread thr(ParallelSortHelper<RandomIt, Compare>, first, len/2, grainsize, comp);
    ParallelSortHelper(first+len/2, len - len/2, grainsize, comp);
    thr.join();
    std::inplace_merge(first, first+len/2, first+len, comp);
  }
}

/*!
 * \brief
 * Sort the elements in the range [first, last) into the ascending order defined by
 * the comparator comp.
 * If the length of the range [first, last) is greater than a certain threshold,
 * the range will be recursively divided into two and assign two threads
 * to sort each half range.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt, typename Compare>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) {
  const auto num = std::distance(first, last);
  // Grain size: roughly num/num_threads, but never below 16K elements, so small
  // ranges are sorted on the calling thread without spawning.
  size_t grainsize = std::max(num / num_threads + 5, static_cast<size_t>(1024*16));
  ParallelSortHelper(first, num, grainsize, comp);
}

/*!
 * \brief
 * Sort the elements in the range [first, last) into ascending order.
 * The elements are compared using the default < operator.
 * If the length of the range [first, last) is greater than a certain threshold,
 * the range will be recursively divided into two and assign two threads
 * to sort each half range.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h */ template<typename RandomIt> void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) { ParallelSort(first, last, num_threads, std::less<typename std::iterator_traits<RandomIt>::value_type>()); } /*! * \brief Random Engine */ typedef std::mt19937 RANDOM_ENGINE; /*! * \brief Helper functions. */ namespace helper { /*! * \brief Helper for non-array type `T`. */ template <class T> struct UniqueIf { /*! * \brief Type of `T`. */ using SingleObject = std::unique_ptr<T>; }; /*! * \brief Helper for an array of unknown bound `T`. */ template <class T> struct UniqueIf<T[]> { /*! * \brief Type of `T`. */ using UnknownBound = std::unique_ptr<T[]>; }; /*! * \brief Helper for an array of known bound `T`. */ template <class T, size_t kSize> struct UniqueIf<T[kSize]> { /*! * \brief Type of `T`. */ using KnownBound = void; }; } // namespace helper /*! * \brief Constructs an object of type `T` and wraps it in a * `std``::``unique_ptr`. * \param args List of arguments with which an instance of `T` will be * constructed. * \return `std``::``unique_ptr` of an instance of type `T`. * * Constructs a non-array type `T`. The arguments `args` are passed to the * constructor of `T`. The function does not participate in the overload * resolution if `T` is an array type. */ template <class T, class... Args> typename helper::UniqueIf<T>::SingleObject MakeUnique(Args&&... args) { return std::unique_ptr<T>(new T(std::forward<Args>(args)...)); } /*! * \brief Constructs an object of type `T` and wraps it in a * `std``::``unique_ptr`. * \param n The size of the array to construct. * \return `std``::``unique_ptr` of an instance of type `T`. * * Constructs an array of unknown bound `T`. The function does not participate * in the overload resolution unless `T` is an array of unknown bound. 
 */
template <class T>
typename helper::UniqueIf<T>::UnknownBound MakeUnique(size_t n) {
  using U = typename std::remove_extent<T>::type;
  // `new U[n]{}` value-initializes (zeroes) all n elements.
  return std::unique_ptr<T>(new U[n]{});
}

/*!
 * \brief Constructs an object of type `T` and wraps it in a
 * `std``::``unique_ptr`.
 * \param args List of arguments with which an instance of `T` will be
 * constructed.
 *
 * Constructs an arrays of known bound is disallowed.
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::KnownBound MakeUnique(Args&&... args) = delete;

// Looks up the registered attribute `name` for `op` on the device class of
// `ctx` (cpu or gpu); falls back to nullptr when not registered.
// NOTE(review): the registry references are function-local statics, so
// `name + "<cpu>"` is evaluated only on the first call per FCompType
// instantiation — a later call with a different `name` but the same FCompType
// would silently reuse the first registry. Confirm call sites use one name
// per attribute type.
template<typename FCompType>
FCompType GetFCompute(const nnvm::Op* op, const std::string& name,
                      const Context& ctx) {
  static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>");
  static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>");

  if (ctx.dev_mask() == cpu::kDevMask) {
    return fcompute_cpu.get(op, nullptr);
  } else if (ctx.dev_mask() == gpu::kDevMask) {
    return fcompute_gpu.get(op, nullptr);
  } else {
    LOG(FATAL) << "Unknown device mask";
    return nullptr;
  }
}

/*!
 * \brief Return the max integer value representable in the type `T` without loss of precision.
 */
template <typename T>
constexpr size_t MaxIntegerValue() {
  return std::is_integral<T>::value ?
    std::numeric_limits<T>::max():
    size_t(2) << (std::numeric_limits<T>::digits - 1);
}

// Specialization for fp16: 2 << 10 == 2^11 == 2048.
template <>
constexpr size_t MaxIntegerValue<mshadow::half::half_t>() {
  return size_t(2) << 10;
}

// Returns the bit width of `a`: floor(log2(a)) + 1 for a > 0
// (ilog2ul(1) == 1, ilog2ul(4) == 3), and 1 for a == 0.
// NOTE(review): despite the name this is log2 + 1, not floor(log2).
MSHADOW_XINLINE int ilog2ul(size_t a) {
  int k = 1;
  while (a >>= 1) ++k;
  return k;
}

// Unsigned-int variant of ilog2ul; same bit-width semantics.
MSHADOW_XINLINE int ilog2ui(unsigned int a) {
  int k = 1;
  while (a >>= 1) ++k;
  return k;
}

/*!
 * \brief Return an NDArray of all zeros.
 */
inline NDArray InitZeros(const NDArrayStorageType stype, const mxnet::TShape &shape,
                         const Context &ctx, const int dtype) {
  // NDArray with default storage
  if (stype == kDefaultStorage) {
    NDArray ret(shape, ctx, false, dtype);
    ret = 0;
    return ret;
  }
  // NDArray with non-default storage. Storage allocation is always delayed.
  return NDArray(stype, shape, ctx, true, dtype);
}

/*!
* \brief Helper to add a NDArray of zeros to a std::vector. */ inline void EmplaceBackZeros(const NDArrayStorageType stype, const mxnet::TShape &shape, const Context &ctx, const int dtype, std::vector<NDArray> *vec) { // NDArray with default storage if (stype == kDefaultStorage) { vec->emplace_back(shape, ctx, false, dtype); vec->back() = 0; } else { // NDArray with non-default storage. Storage allocation is always delayed. vec->emplace_back(stype, shape, ctx, true, dtype); } } /*! * \brief parallelize copy by OpenMP. */ template<typename DType> inline void ParallelCopy(DType* dst, const DType* src, index_t size) { static index_t copy_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_COPY_SIZE", 200000); if (size >= copy_block_size) { #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (index_t i = 0; i < size; ++i) { dst[i] = src[i]; } } else { std::memcpy(dst, src, sizeof(DType) * size); } } } // namespace common } // namespace mxnet #endif // MXNET_COMMON_UTILS_H_
fci_rdm.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

 *
 * Author: Qiming Sun <osirpt.sun@gmail.com>
 */

#include <stdlib.h>
#include <string.h>
//#include <omp.h>
#include "config.h"
#include "vhf/fblas.h"
#include "fci.h"

#define MIN(X,Y)        ((X)<(Y)?(X):(Y))
// Skip a contribution when the accumulated |ci|^2 is below this threshold
#define CSUMTHR         1e-28
// Number of beta strings processed per kernel invocation (blocking factor)
#define BUFBASE         96
#define SQRT2           1.4142135623730950488
// symm flags understood by FCIrdm12_drv and the kernels below
#define BRAKETSYM       1
#define PARTICLESYM     2

/*
 * i is the index of the annihilation operator, a is the index of
 * creation operator. t1[I,i*norb+a] because it represents that
 * starting from the intermediate I, removing i and creating a leads to
 * determinant of str1
 */
// Accumulate the alpha-excitation intermediate t1 += a^+_a a_i |ci0> for a
// block of bcount beta strings starting at strb_id; returns sum of |ci|^2
// touched (used by callers to skip negligible blocks).
double FCIrdm2_a_t1ci(double *ci0, double *t1, int bcount,
                      int stra_id, int strb_id,
                      int norb, int nstrb, int nlinka, _LinkT *clink_indexa)
{
        ci0 += strb_id;
        const int nnorb = norb * norb;
        int i, j, k, a, sign;
        size_t str1;
        const _LinkT *tab = clink_indexa + stra_id * nlinka;
        double *pt1, *pci;
        double csum = 0;

        for (j = 0; j < nlinka; j++) {
                a    = EXTRACT_CRE (tab[j]);
                i    = EXTRACT_DES (tab[j]);
                str1 = EXTRACT_ADDR(tab[j]);
                sign = EXTRACT_SIGN(tab[j]);
                pci = ci0 + str1*nstrb;
                pt1 = t1 + i*norb+a;
                if (sign == 0) {
                        // sign == 0 terminates the (compressed) link table row
                        break;
                } else if (sign > 0) {
                        for (k = 0; k < bcount; k++) {
                                pt1[k*nnorb] += pci[k];
                                csum += pci[k] * pci[k];
                        }
                } else {
                        for (k = 0; k < bcount; k++) {
                                pt1[k*nnorb] -= pci[k];
                                csum += pci[k] * pci[k];
                        }
                }
        }
        return csum;
}

// Accumulate (+=) the beta-excitation intermediate for bcount beta strings;
// t1 advances by nnorb per beta string.  Returns sum of |ci|^2 touched.
double FCIrdm2_b_t1ci(double *ci0, double *t1, int bcount,
                      int stra_id, int strb_id,
                      int norb, int nstrb, int nlinkb, _LinkT *clink_indexb)
{
        const int nnorb = norb * norb;
        int i, j, a, str0, str1, sign;
        const _LinkT *tab = clink_indexb + strb_id * nlinkb;
        double *pci = ci0 + stra_id*(size_t)nstrb;
        double csum = 0;

        for (str0 = 0; str0 < bcount; str0++) {
                for (j = 0; j < nlinkb; j++) {
                        a    = EXTRACT_CRE (tab[j]);
                        i    = EXTRACT_DES (tab[j]);
                        str1 = EXTRACT_ADDR(tab[j]);
                        sign = EXTRACT_SIGN(tab[j]);
                        if (sign == 0) {
                                break;
                        } else {
                                t1[i*norb+a] += sign * pci[str1];
                                csum += pci[str1] * pci[str1];
                        }
                }
                t1 += nnorb;
                tab += nlinkb;
        }
        return csum;
}

// Like FCIrdm2_b_t1ci but zeroes each t1 slice first (the "0" variant),
// so it must run before FCIrdm2_a_t1ci when both contribute to the same t1.
// NOTE(review): this variant has no sign==0 early break — presumably relies
// on zero-sign entries contributing 0; confirm against FCIcompress_link.
double FCIrdm2_0b_t1ci(double *ci0, double *t1, int bcount,
                       int stra_id, int strb_id,
                       int norb, int nstrb, int nlinkb, _LinkT *clink_indexb)
{
        const int nnorb = norb * norb;
        int i, j, a, str0, str1, sign;
        const _LinkT *tab = clink_indexb + strb_id * nlinkb;
        double *pci = ci0 + stra_id*(size_t)nstrb;
        double csum = 0;

        for (str0 = 0; str0 < bcount; str0++) {
                memset(t1, 0, sizeof(double) * nnorb);
                for (j = 0; j < nlinkb; j++) {
                        a    = EXTRACT_CRE (tab[j]);
                        i    = EXTRACT_DES (tab[j]);
                        str1 = EXTRACT_ADDR(tab[j]);
                        sign = EXTRACT_SIGN(tab[j]);
                        t1[i*norb+a] += sign * pci[str1];
                        csum += pci[str1] * pci[str1];
                }
                t1 += nnorb;
                tab += nlinkb;
        }
        return csum;
}

/* spin free E^i_j | ci0 > */
// Spin-free intermediate: beta part initializes t1, alpha part accumulates.
double FCI_t1ci_sf(double *ci0, double *t1, int bcount,
                   int stra_id, int strb_id,
                   int norb, int na, int nb, int nlinka, int nlinkb,
                   _LinkT *clink_indexa, _LinkT *clink_indexb)
{
        double csum;
        csum = FCIrdm2_0b_t1ci(ci0, t1, bcount, stra_id, strb_id,
                               norb, nb, nlinkb, clink_indexb)
             + FCIrdm2_a_t1ci (ci0, t1, bcount, stra_id, strb_id,
                               norb, nb, nlinka, clink_indexa);
        return csum;
}

// rdm2 += alpha * tbra^T(transposed within each nnorb slice) . tket + beta*rdm2,
// computed blockwise on the lower-triangle-relevant part only (particle
// permutation symmetry).  tbra slices are transposed (ij -> ji) into buf first.
static void tril_particle_symm(double *rdm2, double *tbra, double *tket,
                               int bcount, int norb, double alpha, double beta)
{
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        int nnorb = norb * norb;
        int i, j, k, m, n;
        // block size: a multiple of norb, capped at nnorb
        int blk = MIN(((int)(48/norb))*norb, nnorb);
        double *buf = malloc(sizeof(double) * nnorb*bcount);
        double *p1;

        // transpose each nnorb slice of tbra: buf[k](i,j) = tbra[k](j,i)
        for (n = 0, k = 0; k < bcount; k++) {
                p1 = tbra + k * nnorb;
                for (i = 0; i < norb; i++) {
                for (j = 0; j < norb; j++, n++) {
                        buf[n] = p1[j*norb+i];
                } }
        }

//        dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nnorb, &bcount,
//               &alpha, tket, &nnorb, buf, &nnorb, &beta, rdm2, &nnorb);
        // blocked GEMM covering only the trailing sub-blocks rdm2[m:,m:]
        for (m = 0; m < nnorb-blk; m+=blk) {
                n = nnorb - m;
                dgemm_(&TRANS_N, &TRANS_T, &blk, &n, &bcount,
                       &alpha, tket+m, &nnorb, buf+m, &nnorb,
                       &beta, rdm2+m*nnorb+m, &nnorb);
        }
        n = nnorb - m;
        dgemm_(&TRANS_N, &TRANS_T, &n, &n, &bcount,
               &alpha, tket+m, &nnorb, buf+m, &nnorb,
               &beta, rdm2+m*nnorb+m, &nnorb);
        free(buf);
}

// In-place relabel dm2(i,j,...) -> dm2(j,i,...) over the first two indices.
static void _transpose_jikl(double *dm2, int norb)
{
        int nnorb = norb * norb;
        int i, j;
        double *p0, *p1;
        double *tmp = malloc(sizeof(double)*nnorb*nnorb);
        memcpy(tmp, dm2, sizeof(double)*nnorb*nnorb);
        for (i = 0; i < norb; i++) {
                for (j = 0; j < norb; j++) {
                        p0 = tmp + (j*norb+i) * nnorb;
                        p1 = dm2 + (i*norb+j) * nnorb;
                        memcpy(p1, p0, sizeof(double)*nnorb);
                }
        }
        free(tmp);
}

/*
 * Note! The returned rdm2 from FCI*kern* function corresponds to
 * [(p^+ q on <bra|) r^+ s] = [p q^+ r^+ s]
 * in FCIrdm12kern_sf, FCIrdm12kern_spin0, FCIrdm12kern_a, ...
 * t1 is calculated as |K> = i^+ j|0>. by doing dot(t1.T,t1) to get "rdm2",
 * The ket part (k^+ l|0>) will generate the correct order for the last
 * two indices kl of rdm2(i,j,k,l), But the bra part (i^+ j|0>)^dagger
 * will generate an order of (i,j), which is identical to call a bra of
 * (<0|i j^+). The so-obtained rdm2(i,j,k,l) corresponds to the
 * operator sequence i j^+ k^+ l.
 *
 * symm = 1: symmetrizes the 1pdm, and 2pdm. This is true only if bra == ket,
 * and the operators on bra are equivalent to those on ket, like
 * FCIrdm12kern_a, FCIrdm12kern_b, FCIrdm12kern_sf, FCIrdm12kern_spin0
 * sym = 2: consider the particle permutation symmetry:
 *      E^j_l E^i_k = E^i_k E^j_l - \delta_{il}E^j_k + \delta_{jk}E^i_l
 */
// Driver: zeroes rdm1/rdm2, compresses link tables, loops over alpha strings
// in OpenMP threads (each with private accumulators, merged under a critical
// section), then applies the requested symmetrization.
void FCIrdm12_drv(void (*dm12kernel)(),
                  double *rdm1, double *rdm2, double *bra, double *ket,
                  int norb, int na, int nb, int nlinka, int nlinkb,
                  int *link_indexa, int *link_indexb, int symm)
{
        const int nnorb = norb * norb;
        int strk, i, j, k, l, ib, blen;
        double *pdm1, *pdm2;
        memset(rdm1, 0, sizeof(double) * nnorb);
        memset(rdm2, 0, sizeof(double) * nnorb*nnorb);

        _LinkT *clinka = malloc(sizeof(_LinkT) * nlinka * na);
        _LinkT *clinkb = malloc(sizeof(_LinkT) * nlinkb * nb);
        FCIcompress_link(clinka, link_indexa, norb, na, nlinka);
        FCIcompress_link(clinkb, link_indexb, norb, nb, nlinkb);

#pragma omp parallel private(strk, i, ib, blen, pdm1, pdm2)
{
        // thread-private accumulators (+2 slack words kept as in original)
        pdm1 = calloc(nnorb+2, sizeof(double));
        pdm2 = calloc(nnorb*nnorb+2, sizeof(double));
#pragma omp for schedule(dynamic, 40)
        for (strk = 0; strk < na; strk++) {
                for (ib = 0; ib < nb; ib += BUFBASE) {
                        blen = MIN(BUFBASE, nb-ib);
                        (*dm12kernel)(pdm1, pdm2, bra, ket, blen, strk, ib,
                                      norb, na, nb, nlinka, nlinkb,
                                      clinka, clinkb, symm);
                }
        }
#pragma omp critical
{
        // merge thread-private accumulators into the shared output
        for (i = 0; i < nnorb; i++) {
                rdm1[i] += pdm1[i];
        }
        for (i = 0; i < nnorb*nnorb; i++) {
                rdm2[i] += pdm2[i];
        }
}
        free(pdm1);
        free(pdm2);
}
        free(clinka);
        free(clinkb);

        switch (symm) {
        case BRAKETSYM:
                // hermitize 1pdm and 2pdm (only lower triangle was built)
                for (i = 0; i < norb; i++) {
                for (j = 0; j < i; j++) {
                        rdm1[j*norb+i] = rdm1[i*norb+j];
                } }
                for (i = 0; i < nnorb; i++) {
                for (j = 0; j < i; j++) {
                        rdm2[j*nnorb+i] = rdm2[i*nnorb+j];
                } }
                _transpose_jikl(rdm2, norb);
                break;
        case PARTICLESYM:
// right 2pdm order is required here, which transposes the cre/des on bra
                for (i = 0; i < norb; i++) {
                for (j = 0; j < i; j++) {
                        // reuse pdm1/pdm2 as plain pointers into rdm2 here
                        pdm1 = rdm2 + (i*nnorb+j)*norb;
                        pdm2 = rdm2 + (j*nnorb+i)*norb;
                        for (k = 0; k < norb; k++) {
                        for (l = 0; l < norb; l++) {
                                pdm2[l*nnorb+k] = pdm1[k*nnorb+l];
                        } }
// E^j_lE^i_k = E^i_kE^j_l + \delta_{il}E^j_k - \delta_{jk}E^i_l
                        for (k = 0; k < norb; k++) {
                                pdm2[i*nnorb+k] += rdm1[j*norb+k];
                                pdm2[k*nnorb+j] -= rdm1[i*norb+k];
                        }
                } }
                break;
        default:
                _transpose_jikl(rdm2, norb);
        }
}

// Spin-free 1- and 2-pdm kernel: rdm1 += t1 . ci, rdm2 += t1 . t1^T
// (dsyrk for bra==ket symmetry, tril_particle_symm for particle symmetry).
void FCIrdm12kern_sf(double *rdm1, double *rdm2, double *bra, double *ket,
                     int bcount, int stra_id, int strb_id,
                     int norb, int na, int nb, int nlinka, int nlinkb,
                     _LinkT *clink_indexa, _LinkT *clink_indexb, int symm)
{
        const int INC1 = 1;
        const char UP = 'U';
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        const int nnorb = norb * norb;
        double csum;
        double *buf = malloc(sizeof(double) * nnorb * bcount);

        csum = FCI_t1ci_sf(ket, buf, bcount, stra_id, strb_id,
                           norb, na, nb, nlinka, nlinkb,
                           clink_indexa, clink_indexb);
        if (csum > CSUMTHR) {
                dgemv_(&TRANS_N, &nnorb, &bcount, &D1, buf, &nnorb,
                       ket+stra_id*nb+strb_id, &INC1, &D1, rdm1, &INC1);
                switch (symm) {
                case BRAKETSYM:
                        dsyrk_(&UP, &TRANS_N, &nnorb, &bcount,
                               &D1, buf, &nnorb, &D1, rdm2, &nnorb);
                        break;
                case PARTICLESYM:
                        tril_particle_symm(rdm2, buf, buf, bcount, norb, 1, 1);
                        break;
                default:
                        dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nnorb, &bcount,
                               &D1, buf, &nnorb, buf, &nnorb,
                               &D1, rdm2, &nnorb);
                }
        }
        free(buf);
}

/*
 * _spin0 assumes the strict symmetry on alpha and beta electrons
 */
// Only the stra_id >= strb_id triangle is computed; off-diagonal blocks are
// double counted (D2) and the diagonal beta-string slice is scaled by SQRT2.
void FCIrdm12kern_spin0(double *rdm1, double *rdm2, double *bra, double *ket,
                        int bcount, int stra_id, int strb_id,
                        int norb, int na, int nb, int nlinka, int nlinkb,
                        _LinkT *clink_indexa, _LinkT *clink_indexb, int symm)
{
        if (stra_id < strb_id) {
                return;
        }
        const int INC1 = 1;
        const char UP = 'U';
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        const double D2 = 2;
        const int nnorb = norb * norb;
        int fill0, fill1, i;
        double csum = 0;
        double *buf = calloc(nnorb * na, sizeof(double));

        if (strb_id+bcount <= stra_id) {
                // whole block strictly below the diagonal
                fill0 = bcount;
                fill1 = bcount;
                csum = FCIrdm2_b_t1ci(ket, buf, fill0, stra_id, strb_id,
                                      norb, na, nlinka, clink_indexa)
                     + FCIrdm2_a_t1ci(ket, buf, fill1, stra_id, strb_id,
                                      norb, na, nlinka, clink_indexa);
        } else if (stra_id >= strb_id) {
                // block crosses the diagonal: beta part up to it, alpha part
                // includes the diagonal element (fill1 = fill0 + 1)
                fill0 = stra_id - strb_id;
                fill1 = stra_id - strb_id + 1;
                csum = FCIrdm2_b_t1ci(ket, buf, fill0, stra_id, strb_id,
                                      norb, na, nlinka, clink_indexa)
                     + FCIrdm2_a_t1ci(ket, buf, fill1, stra_id, strb_id,
                                      norb, na, nlinka, clink_indexa);
        }
        if (csum > CSUMTHR) {
                // na == nb for spin0, so stra_id*na indexes the CI vector
                dgemv_(&TRANS_N, &nnorb, &fill1, &D2, buf, &nnorb,
                       ket+stra_id*na+strb_id, &INC1, &D1, rdm1, &INC1);
                for (i = fill0*nnorb; i < fill1*nnorb; i++) {
                        buf[i] *= SQRT2;
                }
                switch (symm) {
                case BRAKETSYM:
                        dsyrk_(&UP, &TRANS_N, &nnorb, &fill1,
                               &D2, buf, &nnorb, &D1, rdm2, &nnorb);
                        break;
                case PARTICLESYM:
                        tril_particle_symm(rdm2, buf, buf, fill1, norb, D2, D1);
                        break;
                default:
                        dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nnorb, &fill1,
                               &D2, buf, &nnorb, buf, &nnorb,
                               &D1, rdm2, &nnorb);
                }
        }
        free(buf);
}

/*
 * ***********************************************
 * transition density matrix, spin free
 */
// Same contraction as FCIrdm12kern_sf but with distinct bra/ket vectors.
void FCItdm12kern_sf(double *tdm1, double *tdm2, double *bra, double *ket,
                     int bcount, int stra_id, int strb_id,
                     int norb, int na, int nb, int nlinka, int nlinkb,
                     _LinkT *clink_indexa, _LinkT *clink_indexb, int symm)
{
        const int INC1 = 1;
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        const int nnorb = norb * norb;
        double csum;
        double *buf0 = malloc(sizeof(double) * nnorb*bcount);
        double *buf1 = malloc(sizeof(double) * nnorb*bcount);

        csum = FCI_t1ci_sf(bra, buf1, bcount, stra_id, strb_id,
                           norb, na, nb, nlinka, nlinkb,
                           clink_indexa, clink_indexb);
        if (csum < CSUMTHR) { goto _normal_end; }
        csum = FCI_t1ci_sf(ket, buf0, bcount, stra_id, strb_id,
                           norb, na, nb, nlinka, nlinkb,
                           clink_indexa, clink_indexb);
        if (csum < CSUMTHR) { goto _normal_end; }
        dgemv_(&TRANS_N, &nnorb, &bcount, &D1, buf0, &nnorb,
               bra+stra_id*nb+strb_id, &INC1, &D1, tdm1, &INC1);
        switch (symm) {
        case PARTICLESYM:
                tril_particle_symm(tdm2, buf1, buf0, bcount, norb, D1, D1);
                break;
        default:
                dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nnorb, &bcount,
                       &D1, buf0, &nnorb, buf1, &nnorb,
                       &D1, tdm2, &nnorb);
        }
_normal_end:
        free(buf0);
        free(buf1);
}

/*
 * ***********************************************
 * 2pdm kernel for  alpha^i alpha_j | ci0 >
 * ***********************************************
 */
void FCIrdm12kern_a(double *rdm1, double *rdm2, double *bra, double *ket,
                    int bcount, int stra_id, int strb_id,
                    int norb, int na, int nb, int nlinka, int nlinkb,
                    _LinkT *clink_indexa, _LinkT *clink_indexb, int symm)
{
        const int INC1 = 1;
        const char UP = 'U';
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        const int nnorb = norb * norb;
        double csum;
        // calloc: FCIrdm2_a_t1ci only accumulates, it does not zero t1
        double *buf = calloc(nnorb*bcount, sizeof(double));

        csum = FCIrdm2_a_t1ci(ket, buf, bcount, stra_id, strb_id,
                              norb, nb, nlinka, clink_indexa);
        if (csum > CSUMTHR) {
                dgemv_(&TRANS_N, &nnorb, &bcount, &D1, buf, &nnorb,
                       ket+stra_id*nb+strb_id, &INC1, &D1, rdm1, &INC1);
                switch (symm) {
                case BRAKETSYM:
                        dsyrk_(&UP, &TRANS_N, &nnorb, &bcount,
                               &D1, buf, &nnorb, &D1, rdm2, &nnorb);
                        break;
                case PARTICLESYM:
                        tril_particle_symm(rdm2, buf, buf, bcount, norb, 1, 1);
                        break;
                default:
                        dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nnorb, &bcount,
                               &D1, buf, &nnorb, buf, &nnorb,
                               &D1, rdm2, &nnorb);
                }
        }
        free(buf);
}

/*
 * 2pdm kernel for  beta^i beta_j | ci0 >
 */
void FCIrdm12kern_b(double *rdm1, double *rdm2, double *bra, double *ket,
                    int bcount, int stra_id, int strb_id,
                    int norb, int na, int nb, int nlinka, int nlinkb,
                    _LinkT *clink_indexa, _LinkT *clink_indexb, int symm)
{
        const int INC1 = 1;
        const char UP = 'U';
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        const int nnorb = norb * norb;
        double csum;
        double *buf = calloc(nnorb*bcount, sizeof(double));

        csum = FCIrdm2_b_t1ci(ket, buf, bcount, stra_id, strb_id,
                              norb, nb, nlinkb, clink_indexb);
        if (csum > CSUMTHR) {
                dgemv_(&TRANS_N, &nnorb, &bcount, &D1, buf, &nnorb,
                       ket+stra_id*nb+strb_id, &INC1, &D1, rdm1, &INC1);
                switch (symm) {
                case BRAKETSYM:
                        dsyrk_(&UP, &TRANS_N, &nnorb, &bcount,
                               &D1, buf, &nnorb,
                               &D1, rdm2, &nnorb);
                        break;
                case PARTICLESYM:
                        tril_particle_symm(rdm2, buf, buf, bcount, norb, 1, 1);
                        break;
                default:
                        dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nnorb, &bcount,
                               &D1, buf, &nnorb, buf, &nnorb,
                               &D1, rdm2, &nnorb);
                }
        }
        free(buf);
}

// Alpha transition-density kernel (distinct bra/ket).
void FCItdm12kern_a(double *tdm1, double *tdm2, double *bra, double *ket,
                    int bcount, int stra_id, int strb_id,
                    int norb, int na, int nb, int nlinka, int nlinkb,
                    _LinkT *clink_indexa, _LinkT *clink_indexb, int symm)
{
        const int INC1 = 1;
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        const int nnorb = norb * norb;
        double csum;
        double *buf0 = calloc(nnorb*bcount, sizeof(double));
        double *buf1 = calloc(nnorb*bcount, sizeof(double));

        csum = FCIrdm2_a_t1ci(bra, buf1, bcount, stra_id, strb_id,
                              norb, nb, nlinka, clink_indexa);
        if (csum < CSUMTHR) { goto _normal_end; }
        csum = FCIrdm2_a_t1ci(ket, buf0, bcount, stra_id, strb_id,
                              norb, nb, nlinka, clink_indexa);
        if (csum < CSUMTHR) { goto _normal_end; }
        dgemv_(&TRANS_N, &nnorb, &bcount, &D1, buf0, &nnorb,
               bra+stra_id*nb+strb_id, &INC1, &D1, tdm1, &INC1);
        switch (symm) {
        case PARTICLESYM:
                tril_particle_symm(tdm2, buf1, buf0, bcount, norb, D1, D1);
                break;
        default:
                dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nnorb, &bcount,
                       &D1, buf0, &nnorb, buf1, &nnorb,
                       &D1, tdm2, &nnorb);
        }
_normal_end:
        free(buf0);
        free(buf1);
}

// Beta transition-density kernel (distinct bra/ket).
void FCItdm12kern_b(double *tdm1, double *tdm2, double *bra, double *ket,
                    int bcount, int stra_id, int strb_id,
                    int norb, int na, int nb, int nlinka, int nlinkb,
                    _LinkT *clink_indexa, _LinkT *clink_indexb, int symm)
{
        const int INC1 = 1;
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        const int nnorb = norb * norb;
        double csum;
        double *buf0 = calloc(nnorb*bcount, sizeof(double));
        double *buf1 = calloc(nnorb*bcount, sizeof(double));

        csum = FCIrdm2_b_t1ci(bra, buf1, bcount, stra_id, strb_id,
                              norb, nb, nlinkb, clink_indexb);
        if (csum < CSUMTHR) { goto _normal_end; }
        csum = FCIrdm2_b_t1ci(ket, buf0, bcount, stra_id, strb_id,
                              norb, nb, nlinkb, clink_indexb);
        if (csum < CSUMTHR) { goto _normal_end; }
        dgemv_(&TRANS_N, &nnorb, &bcount, &D1, buf0, &nnorb,
               bra+stra_id*nb+strb_id, &INC1, &D1, tdm1, &INC1);
        switch (symm) {
        case PARTICLESYM:
                tril_particle_symm(tdm2, buf1, buf0, bcount, norb, D1, D1);
                break;
        default:
                dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nnorb, &bcount,
                       &D1, buf0, &nnorb, buf1, &nnorb,
                       &D1, tdm2, &nnorb);
        }
_normal_end:
        free(buf0);
        free(buf1);
}

// Mixed alpha(bra)-beta(ket) transition 2pdm; no 1pdm contribution here.
void FCItdm12kern_ab(double *tdm1, double *tdm2, double *bra, double *ket,
                     int bcount, int stra_id, int strb_id,
                     int norb, int na, int nb, int nlinka, int nlinkb,
                     _LinkT *clink_indexa, _LinkT *clink_indexb, int symm)
{
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        const int nnorb = norb * norb;
        double csum;
        double *bufb = calloc(nnorb*bcount, sizeof(double));
        double *bufa = calloc(nnorb*bcount, sizeof(double));

        csum = FCIrdm2_a_t1ci(bra, bufa, bcount, stra_id, strb_id,
                              norb, nb, nlinka, clink_indexa);
        if (csum < CSUMTHR) { goto _normal_end; }
        csum = FCIrdm2_b_t1ci(ket, bufb, bcount, stra_id, strb_id,
                              norb, nb, nlinkb, clink_indexb);
        if (csum < CSUMTHR) { goto _normal_end; }
// no particle symmetry between alpha-alpha-beta-beta 2pdm
        dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nnorb, &bcount,
               &D1, bufb, &nnorb, bufa, &nnorb,
               &D1, tdm2, &nnorb);
_normal_end:
        free(bufb);
        free(bufa);
}

/*
 * ***********************************************
 * 1-pdm
 * ***********************************************
 */
// Alpha transition 1pdm <bra| a^+_a a_i |ket> over alpha strings.
void FCItrans_rdm1a(double *rdm1, double *bra, double *ket,
                    int norb, int na, int nb, int nlinka, int nlinkb,
                    int *link_indexa, int *link_indexb)
{
        int i, a, j, k, str0, str1, sign;
        double *pket, *pbra;
        _LinkT *tab;
        _LinkT *clink = malloc(sizeof(_LinkT) * nlinka * na);
        FCIcompress_link(clink, link_indexa, norb, na, nlinka);

        memset(rdm1, 0, sizeof(double) * norb*norb);

        for (str0 = 0; str0 < na; str0++) {
                tab = clink + str0 * nlinka;
                pket = ket + str0 * nb;
                for (j = 0; j < nlinka; j++) {
                        a    = EXTRACT_CRE (tab[j]);
                        i    = EXTRACT_DES (tab[j]);
                        str1 = EXTRACT_ADDR(tab[j]);
                        sign = EXTRACT_SIGN(tab[j]);
                        pbra = bra + str1 * nb;
                        if (sign == 0) {
                                break;
                        } else if (sign > 0) {
                                for (k = 0; k < nb; k++) {
                                        rdm1[a*norb+i] += pbra[k]*pket[k];
                                }
                        } else {
                                for (k = 0; k < nb; k++) {
                                        rdm1[a*norb+i] -= pbra[k]*pket[k];
                                }
                        }
                }
        }
        free(clink);
}

// Beta transition 1pdm over beta strings.
void FCItrans_rdm1b(double *rdm1, double *bra, double *ket,
                    int norb, int na, int nb, int nlinka, int nlinkb,
                    int *link_indexa, int *link_indexb)
{
        int i, a, j, k, str0, str1, sign;
        double *pket, *pbra;
        double tmp;
        _LinkT *tab;
        _LinkT *clink = malloc(sizeof(_LinkT) * nlinkb * nb);
        FCIcompress_link(clink, link_indexb, norb, nb, nlinkb);

        memset(rdm1, 0, sizeof(double) * norb*norb);

        for (str0 = 0; str0 < na; str0++) {
                pbra = bra + str0 * nb;
                pket = ket + str0 * nb;
                for (k = 0; k < nb; k++) {
                        tab = clink + k * nlinkb;
                        tmp = pket[k];
                        for (j = 0; j < nlinkb; j++) {
                                a    = EXTRACT_CRE (tab[j]);
                                i    = EXTRACT_DES (tab[j]);
                                str1 = EXTRACT_ADDR(tab[j]);
                                sign = EXTRACT_SIGN(tab[j]);
                                if (sign == 0) {
                                        break;
                                } else {
                                        rdm1[a*norb+i] += sign*pbra[str1]*tmp;
                                }
                        }
                }
        }
        free(clink);
}

/*
 * make_rdm1 assumed the hermitian of density matrix
 */
// Alpha 1pdm; builds the a >= i triangle only, then mirrors it.
// NOTE(review): cibra is unused — assumes bra == ket (hermitian case).
void FCImake_rdm1a(double *rdm1, double *cibra, double *ciket,
                   int norb, int na, int nb, int nlinka, int nlinkb,
                   int *link_indexa, int *link_indexb)
{
        int i, a, j, k, str0, str1, sign;
        double *pci0, *pci1;
        double *ci0 = ciket;
        _LinkT *tab;
        _LinkT *clink = malloc(sizeof(_LinkT) * nlinka * na);
        FCIcompress_link(clink, link_indexa, norb, na, nlinka);

        memset(rdm1, 0, sizeof(double) * norb*norb);

        for (str0 = 0; str0 < na; str0++) {
                tab = clink + str0 * nlinka;
                pci0 = ci0 + str0 * nb;
                for (j = 0; j < nlinka; j++) {
                        a    = EXTRACT_CRE (tab[j]);
                        i    = EXTRACT_DES (tab[j]);
                        str1 = EXTRACT_ADDR(tab[j]);
                        sign = EXTRACT_SIGN(tab[j]);
                        pci1 = ci0 + str1 * nb;
                        if (a >= i) {
                                if (sign == 0) {
                                        break;
                                } else if (sign > 0) {
                                        for (k = 0; k < nb; k++) {
                                                rdm1[a*norb+i] += pci0[k]*pci1[k];
                                        }
                                } else {
                                        for (k = 0; k < nb; k++) {
                                                rdm1[a*norb+i] -= pci0[k]*pci1[k];
                                        }
                                }
                        }
                }
        }
        // mirror lower triangle into upper (hermitian assumption)
        for (j = 0; j < norb; j++) {
                for (k = 0; k < j; k++) {
                        rdm1[k*norb+j] = rdm1[j*norb+k];
                }
        }
        free(clink);
}

// Beta 1pdm; same triangle-then-mirror scheme as FCImake_rdm1a.
void FCImake_rdm1b(double *rdm1, double *cibra, double *ciket,
                   int norb, int na, int nb, int nlinka, int nlinkb,
                   int *link_indexa, int *link_indexb)
{
        int i, a, j, k, str0, str1, sign;
        double *pci0;
        double *ci0 = ciket;
        double tmp;
        _LinkT *tab;
        _LinkT *clink = malloc(sizeof(_LinkT) * nlinkb * nb);
        FCIcompress_link(clink, link_indexb, norb, nb, nlinkb);

        memset(rdm1, 0, sizeof(double) * norb*norb);

        for (str0 = 0; str0 < na; str0++) {
                pci0 = ci0 + str0 * nb;
                for (k = 0; k < nb; k++) {
                        tab = clink + k * nlinkb;
                        tmp = pci0[k];
                        for (j = 0; j < nlinkb; j++) {
                                a    = EXTRACT_CRE (tab[j]);
                                i    = EXTRACT_DES (tab[j]);
                                str1 = EXTRACT_ADDR(tab[j]);
                                sign = EXTRACT_SIGN(tab[j]);
                                if (a >= i) {
                                        if (sign == 0) {
                                                break;
                                        } else if (sign > 0) {
                                                rdm1[a*norb+i] += pci0[str1]*tmp;
                                        } else {
                                                rdm1[a*norb+i] -= pci0[str1]*tmp;
                                        }
                                }
                        }
                }
        }
        for (j = 0; j < norb; j++) {
                for (k = 0; k < j; k++) {
                        rdm1[k*norb+j] = rdm1[j*norb+k];
                }
        }
        free(clink);
}
parallel_random_walk.c
#include <stdio.h> #include <stdlib.h> #include <omp.h> #include <math.h> #include <time.h> #include <limits.h> #include <errno.h> #include "pcg.h" #define GRID_SIZE 10240 #define NSTEPS 320000 #define D_NSTEPS 0 #define NMARKERS 1000000 #define TOLERANCE 0.01 struct mrk{ int id; int thread; int team; int stopAt; pcg32_random_t rng; int location; float integral; }; typedef struct mrk mrk_t; #pragma omp declare target int stepMarker(mrk_t *, float *, int); #pragma omp end declare target int stepMarker(mrk_t *marker, float *grid, int gridsize){ marker->location = (int) pcg32_boundedrand_r( &(marker->rng), gridsize); marker->integral += grid[marker->location]; return 0; } int initField(int gridSize, float *grid){ int i; /* Init the field s.t. the average = 1.0 */ for(i=0;i<gridSize;i++){ grid[i] = 2.0 * ( (float) i ) / ( (float) gridSize ) ; } return 0; } int initMarkers(mrk_t *markers, int nMarks, int nsteps, int d_nsteps){ int i; for(i=0;i<nMarks;i++){ pcg32_srandom_r( &(markers[i].rng), 0x853c49e6748fea9bULL, i); markers[i].id=i; markers[i].integral=0; markers[i].stopAt = nsteps + rand() / (RAND_MAX / (2*d_nsteps + 1) + 1) - d_nsteps; } return 0; } int printMarker(mrk_t marker){ if ( fabs(marker.integral - 1.000 ) < TOLERANCE ) { printf("M%05d: @%06d by tm%04d:th%04d s%d integral %f\n", marker.id, marker.location, marker.team, marker.thread, marker.stopAt, marker.integral); } else { printf("M%05d: @%06d by tm%04d:th%04d s%d integral %f !!!!!!!!!!!!\n", marker.id, marker.location, marker.team, marker.thread, marker.stopAt, marker.integral); } return 0; } int parse_arguments( int argc, char *argv[], int *gridsize, int *nsteps, int *d_nsteps){ char *endptr; /* Get the gridsize as input */ if(argc >= 2 ){ *gridsize = strtol(argv[1],&endptr,10); if(errno == EINVAL || *gridsize <= 0 ){ printf("Interpreting '%s' as %d, which is not reasonable gridsize.\n", argv[1], *gridsize); return 1; } } else { *gridsize = GRID_SIZE; } /* Get the number of steps and its variability as 
input */ if(argc >= 3 ){ *nsteps = strtol(argv[2],&endptr,10); if(errno == EINVAL || *nsteps <= 0 ){ printf("Interpreting '%s' as %d, which is not reasonable gridsize.\n", argv[2], *nsteps); return 2; } } else { *nsteps = NSTEPS; } if(argc >= 4 ){ *d_nsteps = strtol(argv[3],&endptr,10); if(errno == EINVAL || *d_nsteps < 0 ){ printf("Interpreting '%s' as %d, which is not reasonable variability of steps.\n", argv[3], *nsteps); return 3; } } else { *d_nsteps = D_NSTEPS; } return 0; } int main( int argc, char *argv[] ){ int i; int resval; mrk_t *markers; const int nMarks = NMARKERS; float *grid; int gridsize; int nsteps, d_nsteps; int maxTeam,maxThread; struct timespec wc_begin,wc_end,cpu_begin,cpu_end; int *nFinished; /* This variable is here to test atomic/critical pragmas */ int N; resval = parse_arguments( argc, argv, &gridsize, &nsteps, &d_nsteps); if(resval != 0) { return resval; } printf("Markers %d; Steps %d pm %d; Gridsize %d.\n", nMarks, nsteps, d_nsteps, gridsize ); markers = (mrk_t *) malloc( nMarks * sizeof(mrk_t)); grid = (float *) malloc( gridsize * sizeof(float)); initMarkers(markers,nMarks,nsteps,d_nsteps); initField(gridsize,grid); nFinished = &N; *nFinished = -1; /* Just mark this with something non-default*/ printf("Finished %d/%d markers.\n",*nFinished,nMarks); clock_gettime( CLOCK_REALTIME, &wc_begin ); clock_gettime( CLOCK_PROCESS_CPUTIME_ID, &cpu_begin ); /*Move all the data to the target on one go*/ #pragma omp target data map(tofrom:markers[0:nMarks]) map(to:grid[0:gridsize]) map(tofrom:nFinished[0:1]) { /* if ( omp_is_initial_device() ) { printf("Running on host (target data map).\n"); } else { printf("Running on target (target data map)\n"); } */ #pragma omp target { #pragma omp atomic write (*nFinished) = 0; /* Test the atomic save.*/ } #pragma omp target teams distribute parallel for shared(nFinished) for(i=0;i<nMarks;i++){ if(i==0){ if ( omp_is_initial_device() ) { printf("Running on host (ottdpf)\n"); } else { printf("Running on target 
(ottdpf)\n"); } } markers[i].thread = omp_get_thread_num(); markers[i].team = omp_get_team_num(); int j; for(j=0;j<markers[i].stopAt;j++){ stepMarker( &(markers[i]), grid, gridsize ); } /* Scale the integral by number of steps */ markers[i].integral /= (float) markers[i].stopAt; #pragma omp atomic update (*nFinished)++; /* printf("i=%5d ready=%5d\n",i,*nFinished); */ } /* printf("** i=----- ready=%5d **\n",*nFinished); */ } clock_gettime( CLOCK_REALTIME, &wc_end ); clock_gettime( CLOCK_PROCESS_CPUTIME_ID, &cpu_end ); printf("Finished %d/%d markers.\n",*nFinished,nMarks); printf("Wall clock time: %lf s\n", (double) ( wc_end.tv_sec- wc_begin.tv_sec) + ((double) ( wc_end.tv_nsec- wc_begin.tv_nsec ))*1.0e-9 ); printf(" CPU time: %lf s\n", (double) (cpu_end.tv_sec-cpu_begin.tv_sec) + ((double) (cpu_end.tv_nsec-cpu_begin.tv_nsec ))*1.0e-9 ); for (i=0; i<nMarks; i+=19331){ printMarker(markers[i]); } maxThread = -1; maxTeam = -1; for (i=0; i<nMarks; i++){ if ( markers[i].team > maxTeam ) maxTeam = markers[i].team; if ( markers[i].thread > maxThread) maxThread = markers[i].thread; } printf("Teams %d Threads %d\n", maxTeam+1, maxThread+1); return 0; }
SwathFileConsumer.h
// -------------------------------------------------------------------------- // OpenMS -- Open-Source Mass Spectrometry // -------------------------------------------------------------------------- // Copyright The OpenMS Team -- Eberhard Karls University Tuebingen, // ETH Zurich, and Freie Universitaet Berlin 2002-2020. // // This software is released under a three-clause BSD license: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of any author or any participating institution // may be used to endorse or promote products derived from this software // without specific prior written permission. // For a full list of authors, refer to the file AUTHORS. // -------------------------------------------------------------------------- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING // INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// 
// --------------------------------------------------------------------------
// $Maintainer: Hannes Roest $
// $Authors: Hannes Roest $
// --------------------------------------------------------------------------

#pragma once

#include <boost/cast.hpp>

// Datastructures
#include <OpenMS/OPENSWATHALGO/DATAACCESS/DataStructures.h>
#include <OpenMS/OPENSWATHALGO/DATAACCESS/SwathMap.h>

// Consumers
#include <OpenMS/FORMAT/DATAACCESS/MSDataCachedConsumer.h>
#include <OpenMS/FORMAT/DATAACCESS/MSDataWritingConsumer.h>
#include <OpenMS/FORMAT/DATAACCESS/MSDataTransformingConsumer.h>

// Helpers
#include <OpenMS/ANALYSIS/OPENSWATH/OpenSwathHelper.h>
#include <OpenMS/ANALYSIS/OPENSWATH/DATAACCESS/SimpleOpenMSSpectraAccessFactory.h>

#include <OpenMS/INTERFACES/IMSDataConsumer.h>
#include <OpenMS/FORMAT/HANDLERS/CachedMzMLHandler.h>
#include <OpenMS/KERNEL/StandardTypes.h>

#ifdef _OPENMP
#include <omp.h>
#endif

namespace OpenMS
{

  /**
   * @brief Abstract base class which can consume spectra coming from SWATH experiment stored in a single file.
   *
   * The class consumes spectra which are coming from a complete SWATH
   * experiment. It will group MS2 spectra by their precursor m/z, assuming
   * that they correspond to the same SWATH window. For example, the spectra
   * could be arranged in the following fashion:
   *
   * - MS1 Spectrum (no precursor)
   * - MS2 Spectrum (precursor = [400,425])
   * - MS2 Spectrum (precursor = [425,450])
   * - [...]
   * - MS2 Spectrum (precursor = [1175,1200])
   * - MS1 Spectrum (no precursor)
   * - MS2 Spectrum (precursor = [400,425])
   * - MS2 Spectrum (precursor = [425,450])
   * - [...]
   *
   * Base classes are expected to implement functions consuming a spectrum coming
   * from a specific SWATH or an MS1 spectrum and a final function
   * ensureMapsAreFilled_ after which the swath_maps_ vector needs to contain
   * valid pointers to MSExperiment.
   *
   * In addition it is possible to provide the swath boundaries and the read in
   * spectra will be matched by their precursor m/z to the "center" attribute
   * of the provided Swath maps.
   *
   * Usage:
   *
   * @code
   * FullSwathFileConsumer * dataConsumer;
   * // assign dataConsumer to an implementation of FullSwathFileConsumer
   * MzMLFile().transform(file, dataConsumer);
   * dataConsumer->retrieveSwathMaps(maps);
   * @endcode
   *
   */
  class OPENMS_DLLAPI FullSwathFileConsumer :
    public Interfaces::IMSDataConsumer
  {

public:
    typedef PeakMap MapType;
    typedef MapType::SpectrumType SpectrumType;
    typedef MapType::ChromatogramType ChromatogramType;

    // Default constructor: SWATH windows are discovered on the fly from the
    // precursor isolation information of the incoming spectra.
    FullSwathFileConsumer() :
      ms1_map_(), // initialize to null
      consuming_possible_(true),
      use_external_boundaries_(false),
      correct_window_counter_(0)
    {
      use_external_boundaries_ = !swath_map_boundaries_.empty();
    }

    /**
     * @brief Constructor
     *
     * @param swath_boundaries A vector of SwathMaps of which only the center,
     * lower and upper attributes will be used to infer the expected Swath maps.
     *
     */
    FullSwathFileConsumer(std::vector<OpenSwath::SwathMap> swath_boundaries) :
      swath_map_boundaries_(swath_boundaries),
      ms1_map_(), // initialize to null
      consuming_possible_(true),
      use_external_boundaries_(false),
      correct_window_counter_(0)
    {
      use_external_boundaries_ = !swath_map_boundaries_.empty();
    }

    ~FullSwathFileConsumer() override {}

    void setExpectedSize(Size, Size) override {}
    void setExperimentalSettings(const ExperimentalSettings& exp) override {settings_ = exp; }

    /**
     * @brief Populate the vector of swath maps after consuming all spectra.
     *
     * Will populate the input vector with SwathMap objects which correspond to
     * the MS1 map (if present) and the MS2 maps (SWATH maps). This should be
     * called after all spectra are consumed.
     *
     * @note It is not possible to consume any more spectra after calling this
     * function (it contains finalization code and may close file streams).
     *
     */
    void retrieveSwathMaps(std::vector<OpenSwath::SwathMap>& maps)
    {
      consuming_possible_ = false; // make consumption of further spectra / chromatograms impossible
      ensureMapsAreFilled_();
      if (ms1_map_)
      {
        // MS1 map is flagged with sentinel window boundaries of -1
        OpenSwath::SwathMap map;
        map.sptr = SimpleOpenMSSpectraFactory::getSpectrumAccessOpenMSPtr(ms1_map_);
        map.lower = -1;
        map.upper = -1;
        map.center = -1;
        map.ms1 = true;
        maps.push_back(map);
      }

      // Print warning if the lower/upper window could not be determined and we
      // required manual determination of the boundaries.
      if (!use_external_boundaries_ && correct_window_counter_ != swath_maps_.size())
      {
        std::cout << "WARNING: Could not correctly read the upper/lower limits of the SWATH windows from your input file. Read " <<
          correct_window_counter_ << " correct (non-zero) window limits (expected " << swath_maps_.size() << " windows)." << std::endl;
      }

      size_t nonempty_maps = 0;
      for (Size i = 0; i < swath_maps_.size(); i++)
      {
        OpenSwath::SwathMap map;
        map.sptr = SimpleOpenMSSpectraFactory::getSpectrumAccessOpenMSPtr(swath_maps_[i]);
        map.lower = swath_map_boundaries_[i].lower;
        map.upper = swath_map_boundaries_[i].upper;
        map.center = swath_map_boundaries_[i].center;
        map.ms1 = false;
        maps.push_back(map);
        if (map.sptr->getNrSpectra() > 0) {nonempty_maps++;}
      }

      if (nonempty_maps != swath_map_boundaries_.size())
      {
        std::cout << "WARNING: The number nonempty maps found in the input file (" << nonempty_maps <<
          ") is not equal to the number of provided swath window boundaries (" << swath_map_boundaries_.size() <<
          "). Please check your input." << std::endl;
      }
    }

    /// Consume a chromatogram -> should not happen when dealing with SWATH maps
    void consumeChromatogram(MapType::ChromatogramType&) override
    {
      std::cerr << "Read chromatogram while reading SWATH files, did not expect that!" << std::endl;
    }

    /**
     * @brief * Consume a spectrum which may belong either to an MS1 scan or
     * one of n MS2 (SWATH) scans
     *
     */
    void consumeSpectrum(MapType::SpectrumType& s) override
    {
      if (!consuming_possible_)
      {
        throw Exception::IllegalArgument(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
          "FullSwathFileConsumer cannot consume any more spectra after retrieveSwathMaps has been called already");
      }

      if (s.getMSLevel() == 1)
      {
        consumeMS1Spectrum_(s);
      }
      else
      {
        if (s.getPrecursors().empty())
        {
          throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
            "Swath scan does not provide a precursor.");
        }
        // only the first precursor is considered for window assignment
        const std::vector<Precursor> prec = s.getPrecursors();
        double center = prec[0].getMZ();
        double lower = prec[0].getMZ() - prec[0].getIsolationWindowLowerOffset();
        double upper = prec[0].getMZ() + prec[0].getIsolationWindowUpperOffset();
        bool found = false;

        // Check if enough information is present to infer the swath
        if (center <= 0.0)
        {
          throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
            "Swath scan does not provide any precursor isolation information.");
        }

        // try to match the current scan to one of the already known windows
        for (Size i = 0; i < swath_map_boundaries_.size(); i++)
        {
          // We group by the precursor mz (center of the window) since this
          // should be present in all SWATH scans.
          if (std::fabs(center - swath_map_boundaries_[i].center) < 1e-6)
          {
            found = true;
            consumeSwathSpectrum_(s, i);
            break;
          }
        }
        if (!found)
        {
          if (use_external_boundaries_)
          {
            // with user-provided boundaries an unknown center is an error
            throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
              String("Encountered SWATH scan with boundary ") + center + " m/z which was not present in the provided windows.");
          }
          else
          {
            consumeSwathSpectrum_(s, swath_map_boundaries_.size());

            // we found a new SWATH window
            if (lower > 0.0 && upper > 0.0)
            {correct_window_counter_++;}

            OpenSwath::SwathMap boundary;
            boundary.lower = lower;
            boundary.upper = upper;
            boundary.center = center;
            swath_map_boundaries_.push_back(boundary);

            OPENMS_LOG_DEBUG << "Adding Swath centered at " << center
              << " m/z with an isolation window of " << lower << " to " << upper
              << " m/z." << std::endl;
          }
        }
      }
    }

protected:

    /**
     * @brief Consume an MS2 spectrum belonging to SWATH "swath_nr"
     *
     * This function should handle a spectrum belonging to a specific SWATH
     * (indicated by swath_nr).
     *
     */
    virtual void consumeSwathSpectrum_(MapType::SpectrumType& s, size_t swath_nr) = 0;

    /**
     * @brief Consume an MS1 spectrum
     *
     * This function should handle an MS1 spectrum.
     *
     */
    virtual void consumeMS1Spectrum_(MapType::SpectrumType& s) = 0;

    /**
     * @brief Callback function after the reading is complete
     *
     * Has to ensure that swath_maps_ and ms1_map_ are correctly populated.
     */
    virtual void ensureMapsAreFilled_() = 0;

    /// A list of Swath map identifiers (lower/upper boundary and center)
    std::vector<OpenSwath::SwathMap> swath_map_boundaries_;

    /// A list of SWATH maps and the MS1 map
    std::vector<boost::shared_ptr<PeakMap > > swath_maps_;
    boost::shared_ptr<PeakMap > ms1_map_;

    /// The Experimental settings
    // (MSExperiment has no constructor using ExperimentalSettings)
    PeakMap settings_;

    /// Whether further spectra can still be consumed
    bool consuming_possible_;

    /// Whether to use external input for SWATH boundaries
    bool use_external_boundaries_;

    /// How many windows were correctly annotated (non-zero window limits)
    size_t correct_window_counter_;

  };

  /**
   * @brief In-memory implementation of FullSwathFileConsumer
   *
   * Keeps all the spectra in memory by just appending them to an MSExperiment.
   *
   */
  class OPENMS_DLLAPI RegularSwathFileConsumer :
    public FullSwathFileConsumer
  {

public:
    typedef PeakMap MapType;
    typedef MapType::SpectrumType SpectrumType;
    typedef MapType::ChromatogramType ChromatogramType;

    RegularSwathFileConsumer() {}

    RegularSwathFileConsumer(std::vector<OpenSwath::SwathMap> known_window_boundaries) :
      FullSwathFileConsumer(known_window_boundaries) {}

protected:
    // Append a new in-memory experiment carrying the experimental settings.
    void addNewSwathMap_()
    {
      boost::shared_ptr<PeakMap > exp(new PeakMap(settings_));
      swath_maps_.push_back(exp);
    }

    void consumeSwathSpectrum_(MapType::SpectrumType& s, size_t swath_nr) override
    {
      // grow the map list on demand until swath_nr is a valid index
      while (swath_maps_.size() <= swath_nr)
      {
        addNewSwathMap_();
      }

      swath_maps_[swath_nr]->addSpectrum(s);
    }

    void addMS1Map_()
    {
      boost::shared_ptr<PeakMap > exp(new PeakMap(settings_));
      ms1_map_ = exp;
    }

    void consumeMS1Spectrum_(MapType::SpectrumType& s) override
    {
      if (!ms1_map_)
      {
        addMS1Map_();
      }
      ms1_map_->addSpectrum(s);
    }

    // Nothing to do: maps were filled incrementally while consuming.
    void ensureMapsAreFilled_() override {}
  };

  /**
   * @brief On-disk cached implementation of FullSwathFileConsumer
   *
   * Writes all spectra immediately to disk in a user-specified caching
   * location using the MSDataCachedConsumer.
* Internally, it handles n+1 (n SWATH + 1 MS1 map) objects of
   * MSDataCachedConsumer which can consume the spectra and write them to disk
   * immediately.
   */
  class OPENMS_DLLAPI CachedSwathFileConsumer :
    public FullSwathFileConsumer
  {
public:
    typedef PeakMap MapType;
    typedef MapType::SpectrumType SpectrumType;
    typedef MapType::ChromatogramType ChromatogramType;

    /// Constructor: cache files are written to @p cachedir with prefix @p basename
    CachedSwathFileConsumer(String cachedir, String basename, Size nr_ms1_spectra, std::vector<int> nr_ms2_spectra) :
      ms1_consumer_(nullptr),
      swath_consumers_(),
      cachedir_(cachedir),
      basename_(basename),
      nr_ms1_spectra_(nr_ms1_spectra),
      nr_ms2_spectra_(nr_ms2_spectra)
    {}

    /// Constructor with externally provided SWATH window boundaries
    CachedSwathFileConsumer(std::vector<OpenSwath::SwathMap> known_window_boundaries,
                            String cachedir, String basename, Size nr_ms1_spectra, std::vector<int> nr_ms2_spectra) :
      FullSwathFileConsumer(known_window_boundaries),
      ms1_consumer_(nullptr),
      swath_consumers_(),
      cachedir_(cachedir),
      basename_(basename),
      nr_ms1_spectra_(nr_ms1_spectra),
      nr_ms2_spectra_(nr_ms2_spectra)
    {}

    ~CachedSwathFileConsumer() override
    {
      // Properly delete the MSDataCachedConsumer -> free memory and _close_ file stream
      // NOTE(review): duplicated in ensureMapsAreFilled_ -- see the TODO there
      while (!swath_consumers_.empty())
      {
        delete swath_consumers_.back();
        swath_consumers_.pop_back();
      }
      if (ms1_consumer_ != nullptr)
      {
        delete ms1_consumer_;
        ms1_consumer_ = nullptr;
      }
    }

protected:
    /// Open a new cached-file consumer (and matching metadata experiment) for
    /// the next SWATH window
    void addNewSwathMap_()
    {
      String meta_file = cachedir_ + basename_ + "_" + String(swath_consumers_.size()) + ".mzML";
      String cached_file = meta_file + ".cached";
      MSDataCachedConsumer* consumer = new MSDataCachedConsumer(cached_file, true);
      consumer->setExpectedSize(nr_ms2_spectra_[swath_consumers_.size()], 0);
      swath_consumers_.push_back(consumer);

      // maps for meta data
      boost::shared_ptr<PeakMap > exp(new PeakMap(settings_));
      swath_maps_.push_back(exp);
    }

    /// Write the MS2 spectrum to the cache file for window swath_nr and keep
    /// only its metadata in memory
    void consumeSwathSpectrum_(MapType::SpectrumType& s, size_t swath_nr) override
    {
      while (swath_maps_.size() <= swath_nr)
      {
        addNewSwathMap_();
      }
      swath_consumers_[swath_nr]->consumeSpectrum(s); // write data to cached file; clear data from spectrum s
      swath_maps_[swath_nr]->addSpectrum(s); // append for the metadata (actual data was deleted)
    }

    /// Open the cached-file consumer (and metadata experiment) for the MS1 map
    void addMS1Map_()
    {
      String meta_file = cachedir_ + basename_ + "_ms1.mzML";
      String cached_file = meta_file + ".cached";
      ms1_consumer_ = new MSDataCachedConsumer(cached_file, true);
      ms1_consumer_->setExpectedSize(nr_ms1_spectra_, 0);
      boost::shared_ptr<PeakMap > exp(new PeakMap(settings_));
      ms1_map_ = exp;
    }

    /// Write the MS1 spectrum to the cache file, keeping only its metadata
    void consumeMS1Spectrum_(MapType::SpectrumType& s) override
    {
      if (ms1_consumer_ == nullptr)
      {
        addMS1Map_();
      }
      ms1_consumer_->consumeSpectrum(s);
      ms1_map_->addSpectrum(s); // append for the metadata (actual data is deleted)
    }

    /// Close all cache file streams, write the metadata mzML files and reload
    /// them so swath_maps_ / ms1_map_ carry the final metadata
    void ensureMapsAreFilled_() override
    {
      size_t swath_consumers_size = swath_consumers_.size();
      bool have_ms1 = (ms1_consumer_ != nullptr);

      // Properly delete the MSDataCachedConsumer -> free memory and _close_ file stream
      // The file streams to the cached data on disc can and should be closed
      // here safely. Since ensureMapsAreFilled_ is called after consuming all
      // the spectra, there will be no more spectra to append but the client
      // might already want to read after this call, so all data needs to be
      // present on disc and the file streams closed.
      //
      // TODO merge with destructor code into own function!
      while (!swath_consumers_.empty())
      {
        delete swath_consumers_.back();
        swath_consumers_.pop_back();
      }
      if (ms1_consumer_ != nullptr)
      {
        delete ms1_consumer_;
        ms1_consumer_ = nullptr;
      }

      if (have_ms1)
      {
        boost::shared_ptr<PeakMap > exp(new PeakMap);
        String meta_file = cachedir_ + basename_ + "_ms1.mzML";
        // write metadata to disk and store the correct data processing tag
        Internal::CachedMzMLHandler().writeMetadata(*ms1_map_, meta_file, true);
        MzMLFile().load(meta_file, *exp.get());
        ms1_map_ = exp;
      }

#ifdef _OPENMP
#pragma omp parallel for
#endif
      for (SignedSize i = 0; i < boost::numeric_cast<SignedSize>(swath_consumers_size); i++)
      {
        boost::shared_ptr<PeakMap > exp(new PeakMap);
        String meta_file = cachedir_ + basename_ + "_" + String(i) + ".mzML";
        // write metadata to disk and store the correct data processing tag
        Internal::CachedMzMLHandler().writeMetadata(*swath_maps_[i], meta_file, true);
        MzMLFile().load(meta_file, *exp.get());
        swath_maps_[i] = exp;
      }
    }

    /// Owning pointer to the MS1 cache writer (raw pointer, freed in dtor)
    MSDataCachedConsumer* ms1_consumer_;
    /// Owning pointers to the per-window cache writers (raw, freed in dtor)
    std::vector<MSDataCachedConsumer*> swath_consumers_;

    String cachedir_;
    String basename_;
    // NOTE(review): int member initialized from a Size argument -- narrowing;
    // confirm spectrum counts always fit in int
    int nr_ms1_spectra_;
    std::vector<int> nr_ms2_spectra_;
  };

  /**
   * @brief On-disk mzML implementation of FullSwathFileConsumer
   *
   * Writes all spectra immediately to disk to an mzML file location using the
   * PlainMSDataWritingConsumer. Internally, it handles n+1 (n SWATH + 1 MS1
   * map) objects of MSDataCachedConsumer which can consume the spectra and
   * write them to disk immediately.
   *
   * Warning: no swathmaps (MS1 nor MS2) will be available when calling
   * retrieveSwathMaps() for downstream use.
* */
  class OPENMS_DLLAPI MzMLSwathFileConsumer :
    public FullSwathFileConsumer
  {
public:
    typedef PeakMap MapType;
    typedef MapType::SpectrumType SpectrumType;
    typedef MapType::ChromatogramType ChromatogramType;

    /// Constructor: mzML files are written to @p cachedir with prefix @p basename
    MzMLSwathFileConsumer(const String& cachedir, const String& basename, Size nr_ms1_spectra, const std::vector<int>& nr_ms2_spectra) :
      ms1_consumer_(nullptr),
      swath_consumers_(),
      cachedir_(cachedir),
      basename_(basename),
      nr_ms1_spectra_(nr_ms1_spectra),
      nr_ms2_spectra_(nr_ms2_spectra)
    {}

    /// Constructor with externally provided SWATH window boundaries
    MzMLSwathFileConsumer(std::vector<OpenSwath::SwathMap> known_window_boundaries,
                          const String& cachedir, const String& basename, Size nr_ms1_spectra, const std::vector<int>& nr_ms2_spectra) :
      FullSwathFileConsumer(known_window_boundaries),
      ms1_consumer_(nullptr),
      swath_consumers_(),
      cachedir_(cachedir),
      basename_(basename),
      nr_ms1_spectra_(nr_ms1_spectra),
      nr_ms2_spectra_(nr_ms2_spectra)
    {}

    ~MzMLSwathFileConsumer() override
    {
      deleteSetNull_();
    }

protected:

    /// Delete all writers (closes their file streams) and reset the pointers
    void deleteSetNull_()
    {
      // Properly delete the MSDataCachedConsumer -> free memory and _close_ file stream
      while (!swath_consumers_.empty())
      {
        delete swath_consumers_.back();
        swath_consumers_.pop_back();
      }
      if (ms1_consumer_ != nullptr)
      {
        delete ms1_consumer_;
        ms1_consumer_ = nullptr;
      }
    }

    /// Open a compressed mzML writer for the next SWATH window
    void addNewSwathMap_()
    {
      String mzml_file = cachedir_ + basename_ + "_" + String(swath_consumers_.size()) + ".mzML";
      PlainMSDataWritingConsumer* consumer = new PlainMSDataWritingConsumer(mzml_file);
      consumer->getOptions().setCompression(true);
      consumer->setExpectedSize(nr_ms2_spectra_[swath_consumers_.size()], 0);
      swath_consumers_.push_back(consumer);
    }

    /// Write the MS2 spectrum to the mzML file for window swath_nr and drop
    /// its peak data from memory
    void consumeSwathSpectrum_(MapType::SpectrumType& s, size_t swath_nr) override
    {
      // only use swath_consumers_ to count how many we have already added
      while (swath_consumers_.size() <= swath_nr)
      {
        addNewSwathMap_();
      }
      swath_consumers_[swath_nr]->consumeSpectrum(s);
      s.clear(false);
    }

    /// Open the compressed mzML writer for the MS1 map
    void addMS1Map_()
    {
      String mzml_file = cachedir_ + basename_ + "_ms1.mzML";
      ms1_consumer_ = new PlainMSDataWritingConsumer(mzml_file);
      ms1_consumer_->setExpectedSize(nr_ms1_spectra_, 0);
      ms1_consumer_->getOptions().setCompression(true);
    }

    /// Write the MS1 spectrum to the mzML file
    void consumeMS1Spectrum_(MapType::SpectrumType& s) override
    {
      if (ms1_consumer_ == nullptr)
      {
        addMS1Map_();
      }
      ms1_consumer_->consumeSpectrum(s);
    }

    /// Only closes the writers; no maps are populated for downstream use
    /// (see the class-level warning)
    void ensureMapsAreFilled_() override
    {
      deleteSetNull_();
    }

    /// Owning pointer to the MS1 mzML writer (raw, freed in deleteSetNull_)
    PlainMSDataWritingConsumer* ms1_consumer_;
    /// Owning pointers to the per-window mzML writers (raw, freed in deleteSetNull_)
    std::vector<PlainMSDataWritingConsumer*> swath_consumers_;

    String cachedir_;
    String basename_;
    // NOTE(review): int member initialized from a Size argument -- narrowing;
    // confirm spectrum counts always fit in int
    int nr_ms1_spectra_;
    std::vector<int> nr_ms2_spectra_;
  };
}
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 24; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
pr29955.c
/* PR c/29955 */
/* { dg-do compile } */
/* { dg-options "-O2 -fopenmp -fexceptions" } */

/* Compile-only regression test for PR c/29955: exercises an OpenMP
 * parallel-for loop under -fexceptions at -O2.  The dg- directives above
 * drive the GCC DejaGnu test harness and must not be altered. */

extern void bar (int);

void
foo (int n)
{
  int i;

#pragma omp parallel for schedule(dynamic)
  for (i = 0; i < n; i++)
    bar (0);
}
time_multi.c
/////////////////////////////////////////////////////////////////////////////
// einspline:  a library for creating and evaluating B-splines            //
//                                                                         //
// Copyright (C) 2007 Kenneth P. Esler, Jr.                                //
//                                                                         //
// This program is free software; you can redistribute it and/or modify    //
// it under the terms of the GNU General Public License as published by    //
// the Free Software Foundation; either version 2 of the License, or       //
// (at your option) any later version.                                     //
//                                                                         //
// This program is distributed in the hope that it will be useful,         //
// but WITHOUT ANY WARRANTY; without even the implied warranty of          //
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the           //
// GNU General Public License for more details.                            //
//                                                                         //
// You should have received a copy of the GNU General Public License       //
// along with this program; if not, write to the Free Software             //
// Foundation, Inc., 51 Franklin Street, Fifth Floor,                      //
// Boston, MA  02110-1301  USA                                             //
/////////////////////////////////////////////////////////////////////////////

#include "multi_bspline.h"
#include "bspline.h"
#include "multi_nubspline.h"
#include "nubspline.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>

double drand48();

// Returns 1 if |a-b| exceeds tol, else 0 (used as a pass/fail predicate
// throughout the tests below).
inline double diff (double a, double b, double tol)
{
  if (fabs(a-b) > tol)
    return 1;
  else
    return 0;
}

//////////////////////////////////////////
// Single-precision real test functions //
//////////////////////////////////////////

// Cross-checks the multi-spline 1-D single-precision evaluators against the
// corresponding single-spline evaluators at 100 random points.
// Returns 0 on success, a negative code identifying the failing check.
int test_1d_float_all()
{
  int Nx=73;
  int num_splines = 21;

  Ugrid x_grid;
  x_grid.start = 3.1;
  x_grid.end = 9.1;
  x_grid.num = Nx;

  BCtype_s xBC;
  xBC.lCode = xBC.rCode = PERIODIC;

  // First, create splines the normal way
  UBspline_1d_s* norm_splines[num_splines];
  multi_UBspline_1d_s *multi_spline;

  // First, create multispline
  multi_spline = create_multi_UBspline_1d_s (x_grid, xBC, num_splines);

  float data[Nx];
  // Now, create normal splines and set multispline data
  for (int i=0; i<num_splines; i++) {
    for (int j=0; j<Nx; j++)
      data[j] = (drand48()-0.5);
    norm_splines[i] = create_UBspline_1d_s (x_grid, xBC, data);
    set_multi_UBspline_1d_s (multi_spline, i, data);
  }

  // (debug prints of individual coefficients were left commented out here)

  // Now, test random values
  int num_vals = 100;
  float multi_vals[num_splines], norm_vals [num_splines];
  float multi_grads[num_splines], norm_grads[num_splines];
  float multi_lapl[num_splines], norm_lapl [num_splines];
  for (int i=0; i<num_vals; i++) {
    // random point inside the grid
    double rx = drand48();
    double x = rx*x_grid.start + (1.0-rx)*x_grid.end;

    //////////////////////////
    // Check value routine  //
    //////////////////////////
    eval_multi_UBspline_1d_s (multi_spline, x, multi_vals);
    for (int j=0; j<num_splines; j++)
      eval_UBspline_1d_s (norm_splines[j], x, &(norm_vals[j]));
    for (int j=0; j<num_splines; j++) {
      // Check value
      if (diff(norm_vals[j], multi_vals[j], 1.0e-6)) {
        fprintf (stderr, " norm_vals[j] = %1.8e\n", norm_vals[j]);
        fprintf (stderr, "multi_vals[j] = %1.8e\n", multi_vals[j]);
        return -1;
      }
    }

    ///////////////////////
    // Check VG routine  //
    ///////////////////////
    eval_multi_UBspline_1d_s_vg (multi_spline, x, multi_vals, multi_grads);
    for (int j=0; j<num_splines; j++)
      eval_UBspline_1d_s_vg (norm_splines[j], x, &(norm_vals[j]), &(norm_grads[j]));
    for (int j=0; j<num_splines; j++) {
      // Check value
      if (diff(norm_vals[j], multi_vals[j], 1.0e-6))
        return -2;
      // Check gradients
      if (diff (norm_grads[j], multi_grads[j], 1.0e-5))
        return -3;
    }

    ///////////////////////
    // Check VGL routine //
    ///////////////////////
    eval_multi_UBspline_1d_s_vgl (multi_spline, x, multi_vals, multi_grads, multi_lapl);
    for (int j=0; j<num_splines; j++)
      eval_UBspline_1d_s_vgl (norm_splines[j], x, &(norm_vals[j]), &(norm_grads[j]), &(norm_lapl[j]));
    for (int j=0; j<num_splines; j++) {
      // Check value
      if (diff(norm_vals[j], multi_vals[j], 1.0e-6))
        return -4;
      // Check gradients
      if (diff (norm_grads[j], multi_grads[j], 1.0e-5))
        return -5;
      // Check laplacian
      if (diff (norm_lapl[j], multi_lapl[j], 1.0e-3))
        return -6;
    }
  }
  return 0;
}

// 2-D single-precision cross-check (value / VG / VGL / VGH evaluators).
// Returns 0 on success, a negative code for the failing check; note that the
// VGH value/hessian mismatches only print diagnostics (returns commented out).
int test_2d_float_all()
{
  int Nx=73; int Ny=91;
  int num_splines = 21;

  Ugrid x_grid, y_grid;
  x_grid.start = 3.1;  x_grid.end = 9.1;  x_grid.num = Nx;
  y_grid.start = 8.7;  y_grid.end = 12.7; y_grid.num = Ny;

  BCtype_s xBC, yBC;
  xBC.lCode = xBC.rCode = PERIODIC;
  yBC.lCode = yBC.rCode = PERIODIC;

  // First, create splines the normal way
  UBspline_2d_s* norm_splines[num_splines];
  multi_UBspline_2d_s *multi_spline;

  // First, create multispline
  multi_spline = create_multi_UBspline_2d_s (x_grid, y_grid, xBC, yBC, num_splines);

  float data[Nx*Ny];
  // Now, create normal splines and set multispline data
  for (int i=0; i<num_splines; i++) {
    for (int j=0; j<Nx*Ny; j++)
      data[j] = (drand48()-0.5);
    norm_splines[i] = create_UBspline_2d_s (x_grid, y_grid, xBC, yBC, data);
    set_multi_UBspline_2d_s (multi_spline, i, data);
  }

  // (debug prints of individual coefficients were left commented out here)

  // Now, test random values
  int num_vals = 100;
  float multi_vals[num_splines], norm_vals[num_splines];
  float multi_grads[2*num_splines], norm_grads[2*num_splines];
  float multi_lapl[num_splines], norm_lapl[num_splines];
  float multi_hess[4*num_splines], norm_hess[4*num_splines];
  for (int i=0; i<num_vals; i++) {
    double rx = drand48();
    double x = rx*x_grid.start + (1.0-rx)*x_grid.end;
    double ry = drand48();
    double y = ry*y_grid.start + (1.0-ry)*y_grid.end;

    //////////////////////////
    // Check value routine  //
    //////////////////////////
    eval_multi_UBspline_2d_s (multi_spline, x, y, multi_vals);
    for (int j=0; j<num_splines; j++)
      eval_UBspline_2d_s (norm_splines[j], x, y, &(norm_vals[j]));
    for (int j=0; j<num_splines; j++) {
      // Check value
      if (diff(norm_vals[j], multi_vals[j], 1.0e-6))
        return -1;
    }

    ///////////////////////
    // Check VG routine  //
    ///////////////////////
    eval_multi_UBspline_2d_s_vg (multi_spline, x, y, multi_vals, multi_grads);
    for (int j=0; j<num_splines; j++)
      eval_UBspline_2d_s_vg (norm_splines[j], x, y, &(norm_vals[j]), &(norm_grads[2*j]));
    for (int j=0; j<num_splines; j++) {
      // Check value
      if (diff(norm_vals[j], multi_vals[j], 1.0e-6))
        return -1;
      // Check gradients
      for (int n=0; n<2; n++)
        if (diff (norm_grads[2*j+n], multi_grads[2*j+n], 1.0e-5))
          return -2;
    }

    ///////////////////////
    // Check VGL routine //
    ///////////////////////
    eval_multi_UBspline_2d_s_vgl (multi_spline, x, y, multi_vals, multi_grads, multi_lapl);
    for (int j=0; j<num_splines; j++)
      eval_UBspline_2d_s_vgl (norm_splines[j], x, y, &(norm_vals[j]), &(norm_grads[2*j]), &(norm_lapl[j]));
    for (int j=0; j<num_splines; j++) {
      // Check value
      if (diff(norm_vals[j], multi_vals[j], 1.0e-6))
        return -3;
      // Check gradients
      for (int n=0; n<2; n++)
        if (diff (norm_grads[2*j+n], multi_grads[2*j+n], 1.0e-5))
          return -4;
      // Check laplacian
      if (diff (norm_lapl[j], multi_lapl[j], 1.0e-3))
        return -5;
    }

    ///////////////////////
    // Check VGH routine //
    ///////////////////////
    eval_multi_UBspline_2d_s_vgh (multi_spline, x, y, multi_vals, multi_grads, multi_hess);
    for (int j=0; j<num_splines; j++)
      eval_UBspline_2d_s_vgh (norm_splines[j], x, y, &(norm_vals[j]), &(norm_grads[2*j]), &(norm_hess[4*j]));
    for (int j=0; j<num_splines; j++) {
      // Check value (diagnostic only -- hard failure was disabled)
      if (diff(norm_vals[j], multi_vals[j], 1.0e-6)) {
        fprintf (stderr, "j = %d\n", j);
        fprintf (stderr, "norm_vals[j] = %1.14e\n", norm_vals[j]);
        fprintf (stderr, "multi_vals[j] = %1.14e\n", multi_vals[j]);
        //return -6;
      }
      // Check gradients
      for (int n=0; n<2; n++)
        if (diff (norm_grads[2*j+n], multi_grads[2*j+n], 1.0e-5))
          return -7;
      // Check hessian (diagnostic only -- hard failure was disabled)
      for (int n=0; n<4; n++)
        if (diff (norm_hess[4*j+n], multi_hess[4*j+n], 1.0e-3)) {
          fprintf (stderr, "j = %d n = %d \n", j, n);
          fprintf (stderr, "norm_hess[j] = %1.14e\n", norm_hess[4*j+n]);
          fprintf (stderr, "multi_hess[j] = %1.14e\n", multi_hess[4*j+n]);
          //return -8;
        }
    }
  }
  return 0;
}

// 3-D single-precision cross-check (value / VG / VGL / VGH evaluators).
// Returns 0 on success, a negative code for the failing check.
int test_3d_float_all()
{
  int Nx=73; int Ny=91; int Nz = 29;
  int num_splines = 23;

  Ugrid x_grid, y_grid, z_grid;
  x_grid.start = 3.1;  x_grid.end = 9.1;  x_grid.num = Nx;
  y_grid.start = 8.7;  y_grid.end = 12.7; y_grid.num = Ny;
  z_grid.start = 4.5;  z_grid.end = 9.3;  z_grid.num = Nz;

  BCtype_s xBC, yBC, zBC;
  xBC.lCode = xBC.rCode = PERIODIC;
  yBC.lCode = yBC.rCode = PERIODIC;
  zBC.lCode = zBC.rCode = PERIODIC;

  // First, create splines the normal way
  UBspline_3d_s* norm_splines[num_splines];
  multi_UBspline_3d_s *multi_spline;

  // First, create multispline
  multi_spline = create_multi_UBspline_3d_s (x_grid, y_grid, z_grid, xBC, yBC, zBC, num_splines);

  float data[Nx*Ny*Nz];
  // Now, create normal splines and set multispline data
  for (int i=0; i<num_splines; i++) {
    for (int j=0; j<Nx*Ny*Nz; j++)
      // NOTE(review): `data` is a *real* float array but this expression adds
      // an imaginary term (*1.0i) that is discarded on conversion to float --
      // looks like a copy-paste from the complex-valued tests; its only effect
      // is the extra drand48() call. Confirm intent before changing.
      data[j] = (drand48()-0.5) + (drand48()-0.5)*1.0i;
    norm_splines[i] = create_UBspline_3d_s (x_grid, y_grid, z_grid, xBC, yBC, zBC, data);
    set_multi_UBspline_3d_s (multi_spline, i, data);
  }

  // (debug prints of individual coefficients were left commented out here)

  // Now, test random values
  int num_vals = 100;
  float multi_vals[num_splines], norm_vals[num_splines];
  float multi_grads[3*num_splines], norm_grads[3*num_splines];
  float multi_lapl[num_splines], norm_lapl[num_splines];
  float multi_hess[9*num_splines], norm_hess[9*num_splines];
  for (int i=0; i<num_vals; i++) {
    double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end;
    double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end;
    double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end;

    //////////////////////////
    // Check value routine  //
    //////////////////////////
    eval_multi_UBspline_3d_s (multi_spline, x, y, z, multi_vals);
    for (int j=0; j<num_splines; j++)
      eval_UBspline_3d_s (norm_splines[j], x, y, z, &(norm_vals[j]));
    for (int j=0; j<num_splines; j++) {
      // Check value
      if (diff(norm_vals[j], multi_vals[j], 1.0e-6))
        return -1;
    }

    ///////////////////////
    // Check VG routine  //
    ///////////////////////
    eval_multi_UBspline_3d_s_vg (multi_spline, x, y, z, multi_vals, multi_grads);
    for (int j=0; j<num_splines; j++)
      eval_UBspline_3d_s_vg (norm_splines[j], x, y, z, &(norm_vals[j]), &(norm_grads[3*j]));
    for (int j=0; j<num_splines; j++) {
      // Check value
      if (diff(norm_vals[j], multi_vals[j], 1.0e-6))
        return -1;
      // Check gradients
      for (int n=0; n<3; n++)
        if (diff (norm_grads[3*j+n], multi_grads[3*j+n], 1.0e-4))
          return -2;
    }

    ///////////////////////
    // Check VGL routine //
    ///////////////////////
    eval_multi_UBspline_3d_s_vgl (multi_spline, x, y, z, multi_vals, multi_grads, multi_lapl);
    for (int j=0; j<num_splines; j++)
      eval_UBspline_3d_s_vgl (norm_splines[j], x, y, z, &(norm_vals[j]), &(norm_grads[3*j]), &(norm_lapl[j]));
    for (int j=0; j<num_splines; j++) {
      // Check value
      if (diff(norm_vals[j], multi_vals[j], 1.0e-6))
        return -3;
      // Check gradients
      for (int n=0; n<3; n++)
        if (diff (norm_grads[3*j+n], multi_grads[3*j+n], 1.0e-4))
          return -4;
      // Check laplacian
      if (diff (norm_lapl[j], multi_lapl[j], 1.0e-3))
        return -5;
    }

    ///////////////////////
    // Check VGH routine //
    ///////////////////////
    eval_multi_UBspline_3d_s_vgh (multi_spline, x, y, z, multi_vals, multi_grads, multi_hess);
    for (int j=0; j<num_splines; j++)
      eval_UBspline_3d_s_vgh (norm_splines[j], x, y, z, &(norm_vals[j]), &(norm_grads[3*j]), &(norm_hess[9*j]));
    for (int j=0; j<num_splines; j++) {
      // Check value
      if (diff(norm_vals[j], multi_vals[j], 1.0e-6))
        return -6;
      // Check gradients (diagnostic only -- hard failure was disabled)
      for (int n=0; n<3; n++)
        if (diff (norm_grads[3*j+n], multi_grads[3*j+n], 1.0e-4)) {
          fprintf (stderr, "n=%d j=%d\n", n, j);
          fprintf (stderr, " norm_grads[3*j+n] = %1.8e\n", norm_grads[3*j+n]);
          fprintf (stderr, "multi_grads[3*j+n] = %1.8e\n", multi_grads[3*j+n]);
          //return -7;
        }
      // Check hessian
      for (int n=0; n<9; n++)
        if (diff (norm_hess[9*j+n], multi_hess[9*j+n], 1.0e-3))
          return -8;
    }
  }

  // (A commented-out wall-clock timing comparison between the normal and the
  // multi-spline VGH evaluators was kept here in the original source; it
  // measured num_vals = 100000 evaluations with clock() and subtracted the
  // cost of the drand48() point generation.)

  return 0;
}

//////////////////////////////////////////
// Double-precision real test functions //
//////////////////////////////////////////

// 1-D double-precision cross-check (value / VG / VGL evaluators) with the
// correspondingly tighter tolerances. Returns 0 on success.
int test_1d_double_all()
{
  int Nx=73;
  int num_splines = 21;

  Ugrid x_grid;
  x_grid.start = 3.1;
  x_grid.end = 9.1;
  x_grid.num = Nx;

  BCtype_d xBC;
  xBC.lCode = xBC.rCode = PERIODIC;

  // First, create splines the normal way
  UBspline_1d_d* norm_splines[num_splines];
  multi_UBspline_1d_d *multi_spline;

  // First, create multispline
  multi_spline = create_multi_UBspline_1d_d (x_grid, xBC, num_splines);

  double data[Nx];
  // Now, create normal splines and set multispline data
  for (int i=0; i<num_splines; i++) {
    for (int j=0; j<Nx; j++)
      data[j] = (drand48()-0.5);
    norm_splines[i] = create_UBspline_1d_d (x_grid, xBC, data);
    set_multi_UBspline_1d_d (multi_spline, i, data);
  }

  // Now, test random values
  int num_vals = 100;
  double multi_vals[num_splines], norm_vals [num_splines];
  double multi_grads[num_splines], norm_grads[num_splines];
  double multi_lapl[num_splines], norm_lapl [num_splines];
  for (int i=0; i<num_vals; i++) {
    double rx = drand48();
    double x = rx*x_grid.start + (1.0-rx)*x_grid.end;

    //////////////////////////
    // Check value routine  //
    //////////////////////////
    eval_multi_UBspline_1d_d (multi_spline, x, multi_vals);
    for (int j=0; j<num_splines; j++)
      eval_UBspline_1d_d (norm_splines[j], x, &(norm_vals[j]));
    for (int j=0; j<num_splines; j++) {
      // Check value
      if (diff(norm_vals[j], multi_vals[j], 1.0e-12))
        return -1;
    }

    ///////////////////////
    // Check VG routine  //
    ///////////////////////
    eval_multi_UBspline_1d_d_vg (multi_spline, x, multi_vals, multi_grads);
    for (int j=0; j<num_splines; j++)
      eval_UBspline_1d_d_vg (norm_splines[j], x, &(norm_vals[j]), &(norm_grads[j]));
    for (int j=0; j<num_splines; j++) {
      // Check value
      if (diff(norm_vals[j], multi_vals[j], 1.0e-12))
        return -1;
      // Check gradients
      if (diff (norm_grads[j], multi_grads[j], 1.0e-12))
        return -2;
    }

    ///////////////////////
    // Check VGL routine //
    ///////////////////////
    eval_multi_UBspline_1d_d_vgl (multi_spline, x, multi_vals, multi_grads, multi_lapl);
    for (int j=0; j<num_splines; j++)
      eval_UBspline_1d_d_vgl (norm_splines[j], x, &(norm_vals[j]), &(norm_grads[j]), &(norm_lapl[j]));
    for (int j=0; j<num_splines; j++) {
      // Check value
      if (diff(norm_vals[j], multi_vals[j], 1.0e-12))
        return -3;
      // Check gradients
      if (diff (norm_grads[j], multi_grads[j], 1.0e-10))
        return -4;
      // Check laplacian
      if (diff (norm_lapl[j], multi_lapl[j], 1.0e-10))
        return -5;
    }
  }
  return 0;
}

// 2-D double-precision cross-check (value / VG / VGL / VGH evaluators).
// Returns 0 on success, a negative code for the failing check; the VGH
// value/hessian mismatches only print diagnostics (returns commented out).
int test_2d_double_all()
{
  int Nx=73; int Ny=91;
  int num_splines = 21;

  Ugrid x_grid, y_grid;
  x_grid.start = 3.1;  x_grid.end = 9.1;  x_grid.num = Nx;
  y_grid.start = 8.7;  y_grid.end = 12.7; y_grid.num = Ny;

  BCtype_d xBC, yBC;
  xBC.lCode = xBC.rCode = PERIODIC;
  yBC.lCode = yBC.rCode = PERIODIC;

  // First, create splines the normal way
  UBspline_2d_d* norm_splines[num_splines];
  multi_UBspline_2d_d *multi_spline;

  // First, create multispline
  multi_spline = create_multi_UBspline_2d_d (x_grid, y_grid, xBC, yBC, num_splines);

  double data[Nx*Ny];
  // Now, create normal splines and set multispline data
  for (int i=0; i<num_splines; i++) {
    for (int j=0; j<Nx*Ny; j++)
      data[j] = (drand48()-0.5);
    norm_splines[i] = create_UBspline_2d_d (x_grid, y_grid, xBC, yBC, data);
    set_multi_UBspline_2d_d (multi_spline, i, data);
  }

  // (debug prints of individual coefficients were left commented out here)

  // Now, test random values
  int num_vals = 100;
  double multi_vals[num_splines], norm_vals[num_splines];
  double multi_grads[2*num_splines], norm_grads[2*num_splines];
  double multi_lapl[num_splines], norm_lapl[num_splines];
  double multi_hess[4*num_splines], norm_hess[4*num_splines];
  for (int i=0; i<num_vals; i++) {
    double rx = drand48();
    double x = rx*x_grid.start + (1.0-rx)*x_grid.end;
    double ry = drand48();
    double y = ry*y_grid.start + (1.0-ry)*y_grid.end;

    //////////////////////////
    // Check value routine  //
    //////////////////////////
    eval_multi_UBspline_2d_d (multi_spline, x, y, multi_vals);
    for (int j=0; j<num_splines; j++)
      eval_UBspline_2d_d (norm_splines[j], x, y, &(norm_vals[j]));
    for (int j=0; j<num_splines; j++) {
      // Check value
      if (diff(norm_vals[j], multi_vals[j], 1.0e-12))
        return -1;
    }

    ///////////////////////
    // Check VG routine  //
    ///////////////////////
    eval_multi_UBspline_2d_d_vg (multi_spline, x, y, multi_vals, multi_grads);
    for (int j=0; j<num_splines; j++)
      eval_UBspline_2d_d_vg (norm_splines[j], x, y, &(norm_vals[j]), &(norm_grads[2*j]));
    for (int j=0; j<num_splines; j++) {
      // Check value
      if (diff(norm_vals[j], multi_vals[j], 1.0e-12))
        return -1;
      // Check gradients
      for (int n=0; n<2; n++)
        if (diff (norm_grads[2*j+n], multi_grads[2*j+n], 1.0e-12))
          return -2;
    }

    ///////////////////////
    // Check VGL routine //
    ///////////////////////
    eval_multi_UBspline_2d_d_vgl (multi_spline, x, y, multi_vals, multi_grads, multi_lapl);
    for (int j=0; j<num_splines; j++)
      eval_UBspline_2d_d_vgl (norm_splines[j], x, y, &(norm_vals[j]), &(norm_grads[2*j]), &(norm_lapl[j]));
    for (int j=0; j<num_splines; j++) {
      // Check value
      if (diff(norm_vals[j], multi_vals[j], 1.0e-12))
        return -3;
      // Check gradients
      for (int n=0; n<2; n++)
        if (diff (norm_grads[2*j+n], multi_grads[2*j+n], 1.0e-10))
          return -4;
      // Check laplacian
      if (diff (norm_lapl[j], multi_lapl[j], 1.0e-10))
        return -5;
    }

    ///////////////////////
    // Check VGH routine //
    ///////////////////////
    eval_multi_UBspline_2d_d_vgh (multi_spline, x, y, multi_vals, multi_grads, multi_hess);
    for (int j=0; j<num_splines; j++)
      eval_UBspline_2d_d_vgh (norm_splines[j], x, y, &(norm_vals[j]), &(norm_grads[2*j]), &(norm_hess[4*j]));
    for (int j=0; j<num_splines; j++) {
      // Check value (diagnostic only -- hard failure was disabled)
      if (diff(norm_vals[j], multi_vals[j], 1.0e-12)) {
        fprintf (stderr, "j = %d\n", j);
        fprintf (stderr, "norm_vals[j] = %1.14e\n", norm_vals[j]);
        fprintf (stderr, "multi_vals[j] = %1.14e\n", multi_vals[j]);
        //return -6;
      }
      // Check gradients
      for (int n=0; n<2; n++)
        if (diff (norm_grads[2*j+n], multi_grads[2*j+n], 1.0e-12))
          return -7;
      // Check hessian (diagnostic only -- hard failure was disabled)
      for (int n=0; n<4; n++)
        if (diff (norm_hess[4*j+n], multi_hess[4*j+n], 1.0e-10)) {
          fprintf (stderr, "j = %d n = %d \n", j, n);
          fprintf (stderr, "norm_hess[j] = %1.14e\n", norm_hess[4*j+n]);
          fprintf (stderr, "multi_hess[j] = %1.14e\n", multi_hess[4*j+n]);
          //return -8;
        }
    }
  }
  return 0;
}

// 3-D double-precision cross-check. (This function continues beyond the end
// of this chunk.)
int test_3d_double_all()
{
  int Nx=73; int Ny=91; int Nz = 29;
  int num_splines = 21;

  Ugrid x_grid, y_grid, z_grid;
  x_grid.start = 3.1;  x_grid.end = 9.1;  x_grid.num = Nx;
  y_grid.start = 8.7;  y_grid.end = 12.7; y_grid.num = Ny;
  z_grid.start = 4.5;  z_grid.end = 9.3;  z_grid.num = Nz;

  BCtype_d xBC, yBC, zBC;
  xBC.lCode = xBC.rCode = PERIODIC;
  yBC.lCode = yBC.rCode = PERIODIC;
  zBC.lCode = zBC.rCode = PERIODIC;

  // First, create splines the normal way
  UBspline_3d_d* norm_splines[num_splines];
  multi_UBspline_3d_d *multi_spline;

  // First, create multispline
  multi_spline = create_multi_UBspline_3d_d (x_grid, y_grid, z_grid, xBC, yBC, zBC, num_splines);

  double data[Nx*Ny*Nz];
  // Now, create normal splines and set multispline data
  for (int i=0; i<num_splines; i++) {
    for (int j=0; j<Nx*Ny*Nz; j++)
      // NOTE(review): same discarded *1.0i imaginary term as in
      // test_3d_float_all -- confirm intent.
      data[j] = (drand48()-0.5) + (drand48()-0.5)*1.0i;
    norm_splines[i] = create_UBspline_3d_d (x_grid, y_grid, z_grid, xBC, yBC, zBC, data);
    set_multi_UBspline_3d_d (multi_spline, i, data);
  }

  // (debug prints of individual coefficients were left commented out here)

  // Now, test random values
  int num_vals = 100;
  double multi_vals[num_splines], norm_vals[num_splines];
  double multi_grads[3*num_splines], norm_grads[3*num_splines];
  double multi_lapl[num_splines], norm_lapl[num_splines];
  double multi_hess[9*num_splines], norm_hess[9*num_splines];
  for (int i=0; i<num_vals; i++) {
    double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end;
    double ry = drand48();
double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; /////////////////////// // Check VG routine // /////////////////////// eval_multi_UBspline_3d_d_vg (multi_spline, x, y, z, multi_vals, multi_grads); for (int j=0; j<num_splines; j++) eval_UBspline_3d_d_vg (norm_splines[j], x, y, z, &(norm_vals[j]), &(norm_grads[3*j])); for (int j=0; j<num_splines; j++) { // Check value if (diff(norm_vals[j], multi_vals[j], 1.0e-12)) return -1; // Check gradients for (int n=0; n<3; n++) if (diff (norm_grads[3*j+n], multi_grads[3*j+n], 1.0e-12)) return -2; } /////////////////////// // Check VGL routine // /////////////////////// eval_multi_UBspline_3d_d_vgl (multi_spline, x, y, z, multi_vals, multi_grads, multi_lapl); for (int j=0; j<num_splines; j++) eval_UBspline_3d_d_vgl (norm_splines[j], x, y, z, &(norm_vals[j]), &(norm_grads[3*j]), &(norm_lapl[j])); for (int j=0; j<num_splines; j++) { // Check value if (diff(norm_vals[j], multi_vals[j], 1.0e-12)) return -3; // Check gradients for (int n=0; n<3; n++) if (diff (norm_grads[3*j+n], multi_grads[3*j+n], 1.0e-10)) return -4; // Check laplacian if (diff (norm_lapl[j], multi_lapl[j], 1.0e-10)) return -5; } /////////////////////// // Check VGH routine // /////////////////////// eval_multi_UBspline_3d_d_vgh (multi_spline, x, y, z, multi_vals, multi_grads, multi_hess); for (int j=0; j<num_splines; j++) eval_UBspline_3d_d_vgh (norm_splines[j], x, y, z, &(norm_vals[j]), &(norm_grads[3*j]), &(norm_hess[9*j])); for (int j=0; j<num_splines; j++) { // Check value if (diff(norm_vals[j], multi_vals[j], 1.0e-12)) return -6; // Check gradients for (int n=0; n<3; n++) if (diff (norm_grads[3*j+n], multi_grads[3*j+n], 1.0e-12)) return -7; // Check hessian for (int n=0; n<9; n++) if (diff (norm_hess[9*j+n], multi_hess[9*j+n], 1.0e-10)) return -8; } } return 0; } ///////////////////////////////////////////// // Single-precision complex test functions // 
///////////////////////////////////////////// inline int cdiff (complex_float a, complex_float b, double tol) { double rdiff = fabs(creal(a) - creal(b)); double idiff = fabs(cimag(a) - cimag(b)); if (rdiff > tol || idiff > tol) return 1; else return 0; } int test_1d_complex_float_all() { int Nx=73; int num_splines = 21; Ugrid x_grid; x_grid.start = 3.1; x_grid.end = 9.1; x_grid.num = Nx; BCtype_c xBC; xBC.lCode = xBC.rCode = PERIODIC; // First, create splines the normal way UBspline_1d_c* norm_splines[num_splines]; multi_UBspline_1d_c *multi_spline; // First, create multispline multi_spline = create_multi_UBspline_1d_c (x_grid, xBC, num_splines); complex_float data[Nx]; // Now, create normal splines and set multispline data for (int i=0; i<num_splines; i++) { for (int j=0; j<Nx; j++) data[j] = (drand48()-0.5) + (drand48()-0.5)*1.0i; norm_splines[i] = create_UBspline_1d_c (x_grid, xBC, data); set_multi_UBspline_1d_c (multi_spline, i, data); } // fprintf (stderr, "\nnorm coef = %1.14e + %1.14ei\n", // crealf(norm_splines[19]->coefs[27]), // cimagf(norm_splines[19]->coefs[27])); // fprintf (stderr, "multi coef = %1.14e + %1.14ei\n", // crealf(multi_spline->coefs[19+27*multi_spline->x_stride]), // cimagf(multi_spline->coefs[19+27*multi_spline->x_stride])); // Now, test random values int num_vals = 100; complex_float multi_vals[num_splines], norm_vals [num_splines]; complex_float multi_grads[num_splines], norm_grads[num_splines]; complex_float multi_lapl[num_splines], norm_lapl [num_splines]; for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; ////////////////////////// // Check value routine // ////////////////////////// eval_multi_UBspline_1d_c (multi_spline, x, multi_vals); for (int j=0; j<num_splines; j++) eval_UBspline_1d_c (norm_splines[j], x, &(norm_vals[j])); for (int j=0; j<num_splines; j++) { // Check value if (cdiff(norm_vals[j], multi_vals[j], 1.0e-6)) { fprintf (stderr, " j = %d\n", j); fprintf (stderr, 
" norm_vals[j] = %1.14e + %1.14ei\n", creal (norm_vals[j]), cimag(norm_vals[j])); fprintf (stderr, "multi_vals[j] = %1.14e + %1.14ei\n", creal (multi_vals[j]), cimag(multi_vals[j])); return -1; } } /////////////////////// // Check VG routine // /////////////////////// eval_multi_UBspline_1d_c_vg (multi_spline, x, multi_vals, multi_grads); for (int j=0; j<num_splines; j++) eval_UBspline_1d_c_vg (norm_splines[j], x, &(norm_vals[j]), &(norm_grads[j])); for (int j=0; j<num_splines; j++) { // Check value if (cdiff(norm_vals[j], multi_vals[j], 1.0e-6)) return -1; // Check gradients if (cdiff (norm_grads[j], multi_grads[j], 1.0e-5)) return -2; } /////////////////////// // Check VGL routine // /////////////////////// eval_multi_UBspline_1d_c_vgl (multi_spline, x, multi_vals, multi_grads, multi_lapl); for (int j=0; j<num_splines; j++) eval_UBspline_1d_c_vgl (norm_splines[j], x, &(norm_vals[j]), &(norm_grads[j]), &(norm_lapl[j])); for (int j=0; j<num_splines; j++) { // Check value if (cdiff(norm_vals[j], multi_vals[j], 1.0e-6)) return -3; // Check gradients if (cdiff (norm_grads[j], multi_grads[j], 1.0e-5)) return -4; // Check laplacian if (cdiff (norm_lapl[j], multi_lapl[j], 1.0e-3)) return -5; } } return 0; } int test_2d_complex_float_all() { int Nx=73; int Ny=91; int num_splines = 20; Ugrid x_grid, y_grid; x_grid.start = 3.1; x_grid.end = 9.1; x_grid.num = Nx; y_grid.start = 8.7; y_grid.end = 12.7; y_grid.num = Ny; BCtype_c xBC, yBC; xBC.lCode = xBC.rCode = PERIODIC; yBC.lCode = yBC.rCode = PERIODIC; // First, create splines the normal way UBspline_2d_c* norm_splines[num_splines]; multi_UBspline_2d_c *multi_spline; // First, create multispline multi_spline = create_multi_UBspline_2d_c (x_grid, y_grid, xBC, yBC, num_splines); complex_float data[Nx*Ny]; // Now, create normal splines and set multispline data for (int i=0; i<num_splines; i++) { for (int j=0; j<Nx*Ny; j++) data[j] = (drand48()-0.5) + (drand48()-0.5)*1.0i; norm_splines[i] = create_UBspline_2d_c (x_grid, y_grid, 
xBC, yBC, data); set_multi_UBspline_2d_c (multi_spline, i, data); } // fprintf (stderr, "norm coef = %1.14e + %1.14ei\n", // creal(norm_splines[19]->coefs[2127]), // cimag(norm_splines[19]->coefs[2127])); // fprintf (stderr, "multi coef = %1.14e + %1.14ei\n", // creal(multi_spline->coefs[19+2127*multi_spline->y_stride]), // cimag(multi_spline->coefs[19+2127*multi_spline->y_stride])); // Now, test random values int num_vals = 100; complex_float multi_vals[num_splines], norm_vals[num_splines]; complex_float multi_grads[2*num_splines], norm_grads[2*num_splines]; complex_float multi_lapl[num_splines], norm_lapl[num_splines]; complex_float multi_hess[4*num_splines], norm_hess[4*num_splines]; for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; ////////////////////////// // Check value routine // ////////////////////////// eval_multi_UBspline_2d_c (multi_spline, x, y, multi_vals); for (int j=0; j<num_splines; j++) eval_UBspline_2d_c (norm_splines[j], x, y, &(norm_vals[j])); for (int j=0; j<num_splines; j++) { // Check value if (cdiff(norm_vals[j], multi_vals[j], 1.0e-5)) return -1; } /////////////////////// // Check VG routine // /////////////////////// eval_multi_UBspline_2d_c_vg (multi_spline, x, y, multi_vals, multi_grads); for (int j=0; j<num_splines; j++) eval_UBspline_2d_c_vg (norm_splines[j], x, y, &(norm_vals[j]), &(norm_grads[2*j])); for (int j=0; j<num_splines; j++) { // Check value if (cdiff(norm_vals[j], multi_vals[j], 1.0e-5)) { fprintf (stderr, " norm_vals[j] = %1.8f + %1.8fi\n", crealf(norm_vals[j]), cimagf(norm_vals[j])); fprintf (stderr, "multi_vals[j] = %1.8f + %1.8fi\n", crealf(multi_vals[j]), cimagf(multi_vals[j])); return -2; } // Check gradients for (int n=0; n<2; n++) if (cdiff (norm_grads[2*j+n], multi_grads[2*j+n], 1.0e-3)) { fprintf (stderr, "norm_grads[j] = %1.14e + %1.14ei\n", creal(norm_grads[2*j+n]), 
cimag(norm_grads[2*j+n])); fprintf (stderr, "multi_grads[j] = %1.14e + %1.14ei\n", creal(multi_grads[2*j+n]), cimag(multi_grads[2*j+n])); return -3; } } /////////////////////// // Check VGL routine // /////////////////////// eval_multi_UBspline_2d_c_vgl (multi_spline, x, y, multi_vals, multi_grads, multi_lapl); for (int j=0; j<num_splines; j++) eval_UBspline_2d_c_vgl (norm_splines[j], x, y, &(norm_vals[j]), &(norm_grads[2*j]), &(norm_lapl[j])); for (int j=0; j<num_splines; j++) { // Check value if (cdiff(norm_vals[j], multi_vals[j], 1.0e-5)) return -4; // Check gradients for (int n=0; n<2; n++) if (cdiff (norm_grads[2*j+n], multi_grads[2*j+n], 1.0e-3)) return -5; // Check laplacian if (cdiff (norm_lapl[j], multi_lapl[j], 1.0e-2)) { fprintf (stderr, "norm_lapl[j] = %1.6f + %1.6fi\n", creal(norm_lapl[j]), cimag(norm_lapl[j])); fprintf (stderr, "multi_lapl[j] = %1.6f + %1.6fi\n", creal(multi_lapl[j]), cimag(multi_lapl[j])); return -6; } } /////////////////////// // Check VGH routine // /////////////////////// eval_multi_UBspline_2d_c_vgh (multi_spline, x, y, multi_vals, multi_grads, multi_hess); for (int j=0; j<num_splines; j++) eval_UBspline_2d_c_vgh (norm_splines[j], x, y, &(norm_vals[j]), &(norm_grads[2*j]), &(norm_hess[4*j])); for (int j=0; j<num_splines; j++) { // Check value if (cdiff(norm_vals[j], multi_vals[j], 1.0e-5)) { fprintf (stderr, "j = %d\n", j); fprintf (stderr, "norm_vals[j] = %1.14e + %1.14ei\n", creal(norm_vals[j]), cimag(norm_vals[j])); fprintf (stderr, "multi_vals[j] = %1.14e + %1.14ei\n", creal(multi_vals[j]), cimag(multi_vals[j])); return -7; } // Check gradients for (int n=0; n<2; n++) if (cdiff (norm_grads[2*j+n], multi_grads[2*j+n], 1.0e-3)) { fprintf (stderr, "j = %d\n", j); fprintf (stderr, "norm_grads[j] = %1.14e + %1.14ei\n", creal(norm_grads[2*j+n]), cimag(norm_grads[2*j+n])); fprintf (stderr, "multi_grads[j] = %1.14e + %1.14ei\n", creal(multi_grads[2*j+n]), cimag(multi_grads[2*j+n])); return -8; } // Check hessian for (int n=0; n<4; 
n++) if (cdiff (norm_hess[4*j+n], multi_hess[4*j+n], 1.0e-2)) { fprintf (stderr, "\nj = %d n = %d \n", j, n); fprintf (stderr, "norm_hess[j] = %1.6f + %1.6fi\n", creal(norm_hess[4*j+n]), cimag(norm_hess[4*j+n])); fprintf (stderr, "multi_hess[j] = %1.6f + %1.6fi\n", creal(multi_hess[4*j+n]), cimag(multi_hess[4*j+n])); return -9; } } } return 0; } int test_3d_real_float_all() { int Nx=33; int Ny=21; int Nz = 29; int num_splines = 128; Ugrid x_grid, y_grid, z_grid; x_grid.start = 3.1; x_grid.end = 9.1; x_grid.num = Nx; y_grid.start = 8.7; y_grid.end = 12.7; y_grid.num = Ny; z_grid.start = 4.5; z_grid.end = 9.3; z_grid.num = Nz; BCtype_s xBC, yBC, zBC; xBC.lCode = xBC.rCode = PERIODIC; yBC.lCode = yBC.rCode = PERIODIC; zBC.lCode = zBC.rCode = PERIODIC; // First, create splines the normal way UBspline_3d_s* norm_splines[num_splines]; multi_UBspline_3d_s *multi_spline; // First, create multispline multi_spline = create_multi_UBspline_3d_s (x_grid, y_grid, z_grid, xBC, yBC, zBC, num_splines); float data[Nx*Ny*Nz]; // Now, create normal splines and set multispline data for (int i=0; i<num_splines; i++) { for (int j=0; j<Nx*Ny*Nz; j++) data[j] = (drand48()-0.5); norm_splines[i] = create_UBspline_3d_s (x_grid, y_grid, z_grid, xBC, yBC, zBC, data); set_multi_UBspline_3d_s (multi_spline, i, data); } // Now, test random values int num_vals = 100; complex_float multi_vals[num_splines], norm_vals[num_splines]; complex_float multi_grads[3*num_splines], norm_grads[3*num_splines]; complex_float multi_lapl[num_splines], norm_lapl[num_splines]; complex_float multi_hess[9*num_splines], norm_hess[9*num_splines]; for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; ///////////////////////// // Check value routine // ///////////////////////// eval_multi_UBspline_3d_s (multi_spline, x, y, z, 
multi_vals); for (int j=0; j<num_splines; j++) eval_UBspline_3d_s (norm_splines[j], x, y, z, &(norm_vals[j])); for (int j=0; j<num_splines; j++) { // Check value if (cdiff(norm_vals[j], multi_vals[j], 1.0e-6)) return -1; } /////////////////////// // Check VG routine // /////////////////////// eval_multi_UBspline_3d_s_vg (multi_spline, x, y, z, multi_vals, multi_grads); for (int j=0; j<num_splines; j++) eval_UBspline_3d_s_vg (norm_splines[j], x, y, z, &(norm_vals[j]), &(norm_grads[3*j])); for (int j=0; j<num_splines; j++) { // Check value if (cdiff(norm_vals[j], multi_vals[j], 1.0e-6)) return -2; // Check gradients for (int n=0; n<3; n++) if (cdiff (norm_grads[3*j+n], multi_grads[3*j+n], 1.0e-4)) return -3; } /////////////////////// // Check VGL routine // /////////////////////// eval_multi_UBspline_3d_s_vgl (multi_spline, x, y, z, multi_vals, multi_grads, multi_lapl); for (int j=0; j<num_splines; j++) eval_UBspline_3d_s_vgl (norm_splines[j], x, y, z, &(norm_vals[j]), &(norm_grads[3*j]), &(norm_lapl[j])); for (int j=0; j<num_splines; j++) { // Check value if (cdiff(norm_vals[j], multi_vals[j], 1.0e-6)) return -4; // Check gradients for (int n=0; n<3; n++) if (cdiff (norm_grads[3*j+n], multi_grads[3*j+n], 1.0e-4)) return -5; // Check laplacian if (cdiff (norm_lapl[j], multi_lapl[j], 1.0e-2)) return -6; } /////////////////////// // Check VGH routine // /////////////////////// eval_multi_UBspline_3d_s_vgh (multi_spline, x, y, z, multi_vals, multi_grads, multi_hess); for (int j=0; j<num_splines; j++) eval_UBspline_3d_s_vgh (norm_splines[j], x, y, z, &(norm_vals[j]), &(norm_grads[3*j]), &(norm_hess[9*j])); for (int j=0; j<num_splines; j++) { // Check value if (cdiff(norm_vals[j], multi_vals[j], 1.0e-6)) return -7; // Check gradients for (int n=0; n<3; n++) if (cdiff (norm_grads[3*j+n], multi_grads[3*j+n], 1.0e-4)) return -8; // Check hessian for (int n=0; n<9; n++) if (cdiff (norm_hess[9*j+n], multi_hess[9*j+n], 1.0e-2)) return -9; } } return 0; } int 
test_3d_complex_float_all() { int Nx=33; int Ny=21; int Nz = 29; int num_splines = 131; Ugrid x_grid, y_grid, z_grid; x_grid.start = 3.1; x_grid.end = 9.1; x_grid.num = Nx; y_grid.start = 8.7; y_grid.end = 12.7; y_grid.num = Ny; z_grid.start = 4.5; z_grid.end = 9.3; z_grid.num = Nz; BCtype_c xBC, yBC, zBC; xBC.lCode = xBC.rCode = PERIODIC; yBC.lCode = yBC.rCode = PERIODIC; zBC.lCode = zBC.rCode = PERIODIC; // First, create splines the normal way UBspline_3d_c* norm_splines[num_splines]; multi_UBspline_3d_c *multi_spline; // First, create multispline multi_spline = create_multi_UBspline_3d_c (x_grid, y_grid, z_grid, xBC, yBC, zBC, num_splines); complex_float data[Nx*Ny*Nz]; // Now, create normal splines and set multispline data for (int i=0; i<num_splines; i++) { for (int j=0; j<Nx*Ny*Nz; j++) data[j] = (drand48()-0.5) + (drand48()-0.5)*1.0i; norm_splines[i] = create_UBspline_3d_c (x_grid, y_grid, z_grid, xBC, yBC, zBC, data); set_multi_UBspline_3d_c (multi_spline, i, data); } // fprintf (stderr, "norm coef = %1.14e + %1.14ei\n", // creal(norm_splines[19]->coefs[227]), // cimag(norm_splines[19]->coefs[227])); // fprintf (stderr, "multi coef = %1.14e + %1.14ei\n", // creal(multi_spline->coefs[19+227*multi_spline->z_stride]), // cimag(multi_spline->coefs[19+227*multi_spline->z_stride])); // Now, test random values int num_vals = 100; complex_float multi_vals[num_splines], norm_vals[num_splines]; complex_float multi_grads[3*num_splines], norm_grads[3*num_splines]; complex_float multi_lapl[num_splines], norm_lapl[num_splines]; complex_float multi_hess[9*num_splines], norm_hess[9*num_splines]; for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; ///////////////////////// // Check value routine // ///////////////////////// eval_multi_UBspline_3d_c (multi_spline, x, y, z, 
multi_vals); for (int j=0; j<num_splines; j++) eval_UBspline_3d_c (norm_splines[j], x, y, z, &(norm_vals[j])); for (int j=0; j<num_splines; j++) { // Check value if (cdiff(norm_vals[j], multi_vals[j], 1.0e-6)) return -1; } /////////////////////// // Check VG routine // /////////////////////// eval_multi_UBspline_3d_c_vg (multi_spline, x, y, z, multi_vals, multi_grads); for (int j=0; j<num_splines; j++) eval_UBspline_3d_c_vg (norm_splines[j], x, y, z, &(norm_vals[j]), &(norm_grads[3*j])); for (int j=0; j<num_splines; j++) { // Check value if (cdiff(norm_vals[j], multi_vals[j], 1.0e-6)) return -2; // Check gradients for (int n=0; n<3; n++) if (cdiff (norm_grads[3*j+n], multi_grads[3*j+n], 1.0e-4)) return -3; } /////////////////////// // Check VGL routine // /////////////////////// eval_multi_UBspline_3d_c_vgl (multi_spline, x, y, z, multi_vals, multi_grads, multi_lapl); for (int j=0; j<num_splines; j++) eval_UBspline_3d_c_vgl (norm_splines[j], x, y, z, &(norm_vals[j]), &(norm_grads[3*j]), &(norm_lapl[j])); for (int j=0; j<num_splines; j++) { // Check value if (cdiff(norm_vals[j], multi_vals[j], 1.0e-6)) return -4; // Check gradients for (int n=0; n<3; n++) if (cdiff (norm_grads[3*j+n], multi_grads[3*j+n], 1.0e-4)) return -5; // Check laplacian if (cdiff (norm_lapl[j], multi_lapl[j], 1.0e-2)) return -6; } /////////////////////// // Check VGH routine // /////////////////////// eval_multi_UBspline_3d_c_vgh (multi_spline, x, y, z, multi_vals, multi_grads, multi_hess); for (int j=0; j<num_splines; j++) eval_UBspline_3d_c_vgh (norm_splines[j], x, y, z, &(norm_vals[j]), &(norm_grads[3*j]), &(norm_hess[9*j])); for (int j=0; j<num_splines; j++) { // Check value if (cdiff(norm_vals[j], multi_vals[j], 1.0e-6)) return -7; // Check gradients for (int n=0; n<3; n++) if (cdiff (norm_grads[3*j+n], multi_grads[3*j+n], 1.0e-4)) return -8; // Check hessian for (int n=0; n<9; n++) if (cdiff (norm_hess[9*j+n], multi_hess[9*j+n], 1.0e-2)) return -9; } } return 0; } 
///////////////////////////////////////////// // Double-precision complex test functions // ///////////////////////////////////////////// void test_complex_double() { int Nx=73; int Ny=91; int Nz = 29; int num_splines = 129; Ugrid x_grid, y_grid, z_grid; x_grid.start = 3.1; x_grid.end = 9.1; x_grid.num = Nx; y_grid.start = 8.7; y_grid.end = 12.7; y_grid.num = Ny; z_grid.start = 4.5; z_grid.end = 9.3; z_grid.num = Nz; BCtype_z xBC, yBC, zBC; xBC.lCode = xBC.rCode = PERIODIC; yBC.lCode = yBC.rCode = PERIODIC; zBC.lCode = zBC.rCode = PERIODIC; // First, create splines the normal way UBspline_3d_z* norm_splines[num_splines]; multi_UBspline_3d_z *multi_spline; // First, create multispline multi_spline = create_multi_UBspline_3d_z (x_grid, y_grid, z_grid, xBC, yBC, zBC, num_splines); complex_double data[Nx*Ny*Nz]; // Now, create normal splines and set multispline data for (int i=0; i<num_splines; i++) { for (int j=0; j<Nx*Ny*Nz; j++) data[j] = (drand48()-0.5) + (drand48()-0.5)*1.0i; norm_splines[i] = create_UBspline_3d_z (x_grid, y_grid, z_grid, xBC, yBC, zBC, data); set_multi_UBspline_3d_z (multi_spline, i, data); } fprintf (stderr, "norm coef = %1.14e + %1.14ei\n", creal(norm_splines[19]->coefs[227]), cimag(norm_splines[19]->coefs[227])); fprintf (stderr, "multi coef = %1.14e + %1.14ei\n", creal(multi_spline->coefs[19+227*multi_spline->z_stride]), cimag(multi_spline->coefs[19+227*multi_spline->z_stride])); //return; // Now, test random values int num_vals = 100; complex_double multi_vals[num_splines], norm_vals[num_splines]; for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; eval_multi_UBspline_3d_z (multi_spline, x, y, z, multi_vals); for (int j=0; j<num_splines; j++) eval_UBspline_3d_z (norm_splines[j], x, y, z, &(norm_vals[j])); for (int j=0; j<num_splines; j++) { 
double rdiff = creal(norm_vals[j]) - creal(multi_vals[j]); double idiff = cimag(norm_vals[j]) - cimag(multi_vals[j]); if (fabs(rdiff) > 1.0e-12 || fabs(idiff) > 1.0e-12) { fprintf (stderr, "Error! norm_vals[j] = %1.14e + %1.14ei\n", creal(norm_vals[j]), cimag(norm_vals[j])); fprintf (stderr, " multi_vals[j] = %1.14e + %1.14ei\n", creal(multi_vals[j]), cimag(multi_vals[j])); } } } num_vals = 100000; // Now do timing clock_t norm_start, norm_end, multi_start, multi_end, rand_start, rand_end; rand_start = clock(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; } rand_end = clock(); norm_start = clock(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; for (int j=0; j<num_splines; j++) eval_UBspline_3d_z (norm_splines[j], x, y, z, &(norm_vals[j])); } norm_end = clock(); multi_start = clock(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; eval_multi_UBspline_3d_z (multi_spline, x, y, z, multi_vals); } multi_end = clock(); fprintf (stderr, "Normal spline time = %1.5f\n", (double)(norm_end-norm_start+rand_start-rand_end)/CLOCKS_PER_SEC); fprintf (stderr, "Multi spline time = %1.5f\n", (double)(multi_end-multi_start+rand_start-rand_end)/CLOCKS_PER_SEC); } inline int zdiff (complex_double a, complex_double b, double tol) { double rdiff = fabs(creal(a) - creal(b)); double idiff = fabs(cimag(a) - cimag(b)); if (rdiff > tol || idiff > tol) return 1; else return 0; } int 
test_1d_complex_double_all() { int Nx=73; int num_splines = 21; Ugrid x_grid; x_grid.start = 3.1; x_grid.end = 9.1; x_grid.num = Nx; BCtype_z xBC; xBC.lCode = xBC.rCode = PERIODIC; // First, create splines the normal way UBspline_1d_z* norm_splines[num_splines]; multi_UBspline_1d_z *multi_spline; // First, create multispline multi_spline = create_multi_UBspline_1d_z (x_grid, xBC, num_splines); complex_double data[Nx]; // Now, create normal splines and set multispline data for (int i=0; i<num_splines; i++) { for (int j=0; j<Nx; j++) data[j] = (drand48()-0.5) + (drand48()-0.5)*1.0i; norm_splines[i] = create_UBspline_1d_z (x_grid, xBC, data); set_multi_UBspline_1d_z (multi_spline, i, data); } // fprintf (stderr, "\nnorm coef = %1.14e + %1.14ei\n", // creal(norm_splines[19]->coefs[27]), // cimag(norm_splines[19]->coefs[27])); // fprintf (stderr, "multi coef = %1.14e + %1.14ei\n", // creal(multi_spline->coefs[19+27*multi_spline->x_stride]), // cimag(multi_spline->coefs[19+27*multi_spline->x_stride])); // Now, test random values int num_vals = 100; complex_double multi_vals[num_splines], norm_vals [num_splines]; complex_double multi_grads[num_splines], norm_grads[num_splines]; complex_double multi_lapl[num_splines], norm_lapl [num_splines]; for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; ////////////////////////// // Check value routine // ////////////////////////// eval_multi_UBspline_1d_z (multi_spline, x, multi_vals); for (int j=0; j<num_splines; j++) eval_UBspline_1d_z (norm_splines[j], x, &(norm_vals[j])); for (int j=0; j<num_splines; j++) { // Check value if (zdiff(norm_vals[j], multi_vals[j], 1.0e-12)) { fprintf (stderr, " norm_vals[j] = %1.14e + %1.14ei\n", creal (norm_vals[j]), cimag(norm_vals[j])); fprintf (stderr, "multi_vals[j] = %1.14e + %1.14ei\n", creal (multi_vals[j]), cimag(multi_vals[j])); return -1; } } /////////////////////// // Check VG routine // /////////////////////// 
eval_multi_UBspline_1d_z_vg (multi_spline, x, multi_vals, multi_grads); for (int j=0; j<num_splines; j++) eval_UBspline_1d_z_vg (norm_splines[j], x, &(norm_vals[j]), &(norm_grads[j])); for (int j=0; j<num_splines; j++) { // Check value if (zdiff(norm_vals[j], multi_vals[j], 1.0e-12)) return -1; // Check gradients if (zdiff (norm_grads[j], multi_grads[j], 1.0e-12)) return -2; } /////////////////////// // Check VGL routine // /////////////////////// eval_multi_UBspline_1d_z_vgl (multi_spline, x, multi_vals, multi_grads, multi_lapl); for (int j=0; j<num_splines; j++) eval_UBspline_1d_z_vgl (norm_splines[j], x, &(norm_vals[j]), &(norm_grads[j]), &(norm_lapl[j])); for (int j=0; j<num_splines; j++) { // Check value if (zdiff(norm_vals[j], multi_vals[j], 1.0e-12)) return -3; // Check gradients if (zdiff (norm_grads[j], multi_grads[j], 1.0e-10)) return -4; // Check laplacian if (zdiff (norm_lapl[j], multi_lapl[j], 1.0e-10)) return -5; } } return 0; } int test_2d_complex_double_all() { int Nx=73; int Ny=91; int num_splines = 21; Ugrid x_grid, y_grid; x_grid.start = 3.1; x_grid.end = 9.1; x_grid.num = Nx; y_grid.start = 8.7; y_grid.end = 12.7; y_grid.num = Ny; BCtype_z xBC, yBC; xBC.lCode = xBC.rCode = PERIODIC; yBC.lCode = yBC.rCode = PERIODIC; // First, create splines the normal way UBspline_2d_z* norm_splines[num_splines]; multi_UBspline_2d_z *multi_spline; // First, create multispline multi_spline = create_multi_UBspline_2d_z (x_grid, y_grid, xBC, yBC, num_splines); complex_double data[Nx*Ny]; // Now, create normal splines and set multispline data for (int i=0; i<num_splines; i++) { for (int j=0; j<Nx*Ny; j++) data[j] = (drand48()-0.5) + (drand48()-0.5)*1.0i; norm_splines[i] = create_UBspline_2d_z (x_grid, y_grid, xBC, yBC, data); set_multi_UBspline_2d_z (multi_spline, i, data); } // fprintf (stderr, "norm coef = %1.14e + %1.14ei\n", // creal(norm_splines[19]->coefs[227]), // cimag(norm_splines[19]->coefs[227])); // fprintf (stderr, "multi coef = %1.14e + %1.14ei\n", // 
creal(multi_spline->coefs[19+227*multi_spline->y_stride]), // cimag(multi_spline->coefs[19+227*multi_spline->y_stride])); // Now, test random values int num_vals = 100; complex_double multi_vals[num_splines], norm_vals[num_splines]; complex_double multi_grads[2*num_splines], norm_grads[2*num_splines]; complex_double multi_lapl[num_splines], norm_lapl[num_splines]; complex_double multi_hess[4*num_splines], norm_hess[4*num_splines]; for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; ////////////////////////// // Check value routine // ////////////////////////// eval_multi_UBspline_2d_z (multi_spline, x, y, multi_vals); for (int j=0; j<num_splines; j++) eval_UBspline_2d_z (norm_splines[j], x, y, &(norm_vals[j])); for (int j=0; j<num_splines; j++) { // Check value if (zdiff(norm_vals[j], multi_vals[j], 1.0e-12)) return -1; } /////////////////////// // Check VG routine // /////////////////////// eval_multi_UBspline_2d_z_vg (multi_spline, x, y, multi_vals, multi_grads); for (int j=0; j<num_splines; j++) eval_UBspline_2d_z_vg (norm_splines[j], x, y, &(norm_vals[j]), &(norm_grads[2*j])); for (int j=0; j<num_splines; j++) { // Check value if (zdiff(norm_vals[j], multi_vals[j], 1.0e-12)) return -1; // Check gradients for (int n=0; n<2; n++) if (zdiff (norm_grads[2*j+n], multi_grads[2*j+n], 1.0e-12)) return -2; } /////////////////////// // Check VGL routine // /////////////////////// eval_multi_UBspline_2d_z_vgl (multi_spline, x, y, multi_vals, multi_grads, multi_lapl); for (int j=0; j<num_splines; j++) eval_UBspline_2d_z_vgl (norm_splines[j], x, y, &(norm_vals[j]), &(norm_grads[2*j]), &(norm_lapl[j])); for (int j=0; j<num_splines; j++) { // Check value if (zdiff(norm_vals[j], multi_vals[j], 1.0e-12)) return -3; // Check gradients for (int n=0; n<2; n++) if (zdiff (norm_grads[2*j+n], multi_grads[2*j+n], 1.0e-10)) return -4; // Check laplacian if 
(zdiff (norm_lapl[j], multi_lapl[j], 1.0e-9)) { fprintf (stderr, "norm_lapl[j] = %1.14e + %1.14ei\n", creal(norm_lapl[j]), cimag(norm_lapl[j])); fprintf (stderr, "multi_lapl[j] = %1.14e + %1.14ei\n", creal(multi_lapl[j]), cimag(multi_lapl[j])); return -5; } } /////////////////////// // Check VGH routine // /////////////////////// eval_multi_UBspline_2d_z_vgh (multi_spline, x, y, multi_vals, multi_grads, multi_hess); for (int j=0; j<num_splines; j++) eval_UBspline_2d_z_vgh (norm_splines[j], x, y, &(norm_vals[j]), &(norm_grads[2*j]), &(norm_hess[4*j])); for (int j=0; j<num_splines; j++) { // Check value if (zdiff(norm_vals[j], multi_vals[j], 1.0e-12)) { fprintf (stderr, "j = %d\n", j); fprintf (stderr, "norm_vals[j] = %1.14e + %1.14ei\n", creal(norm_vals[j]), cimag(norm_vals[j])); fprintf (stderr, "multi_vals[j] = %1.14e + %1.14ei\n", creal(multi_vals[j]), cimag(multi_vals[j])); return -6; } // Check gradients for (int n=0; n<2; n++) if (zdiff (norm_grads[2*j+n], multi_grads[2*j+n], 1.0e-12)) return -7; // Check hessian for (int n=0; n<4; n++) if (zdiff (norm_hess[4*j+n], multi_hess[4*j+n], 1.0e-10)) { fprintf (stderr, "j = %d n = %d \n", j, n); fprintf (stderr, "norm_hess[j] = %1.14e + %1.14ei\n", creal(norm_hess[4*j+n]), cimag(norm_hess[4*j+n])); fprintf (stderr, "multi_hess[j] = %1.14e + %1.15ei\n", creal(multi_hess[4*j+n]), cimag(multi_hess[4*j+n])); return -8; } } } return 0; } void time_3d_complex_float_all() { int Nx=23; int Ny=21; int Nz = 29; int num_splines = 128; Ugrid x_grid, y_grid, z_grid; x_grid.start = 3.1; x_grid.end = 9.1; x_grid.num = Nx; y_grid.start = 8.7; y_grid.end = 12.7; y_grid.num = Ny; z_grid.start = 4.5; z_grid.end = 9.3; z_grid.num = Nz; BCtype_c xBC, yBC, zBC; xBC.lCode = xBC.rCode = PERIODIC; yBC.lCode = yBC.rCode = PERIODIC; zBC.lCode = zBC.rCode = PERIODIC; // First, create splines the normal way UBspline_3d_c* norm_splines[num_splines]; multi_UBspline_3d_c *multi_spline; // First, create multispline multi_spline = 
create_multi_UBspline_3d_c (x_grid, y_grid, z_grid, xBC, yBC, zBC, num_splines); complex_float data[Nx*Ny*Nz]; // Now, create normal splines and set multispline data for (int i=0; i<num_splines; i++) { for (int j=0; j<Nx*Ny*Nz; j++) data[j] = (drand48()-0.5) + (drand48()-0.5)*1.0i; norm_splines[i] = create_UBspline_3d_c (x_grid, y_grid, z_grid, xBC, yBC, zBC, data); set_multi_UBspline_3d_c (multi_spline, i, data); } // Now, test random values int num_vals = 100000; complex_float multi_vals[num_splines], norm_vals[num_splines]; complex_float multi_grads[3*num_splines], norm_grads[3*num_splines]; complex_float multi_lapl[num_splines], norm_lapl[num_splines]; complex_float multi_hess[9*num_splines], norm_hess[9*num_splines]; clock_t rand_start, rand_end, norm_start, norm_end, multi_start, multi_end; rand_start = clock(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; } rand_end = clock(); /////////////////////// // Check value routine // /////////////////////// multi_start = clock(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; eval_multi_UBspline_3d_c (multi_spline, x, y, z, multi_vals); } multi_end = clock(); norm_start = clock(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; for (int j=0; j<num_splines; j++) eval_UBspline_3d_c (norm_splines[j], x, y, z, &(norm_vals[j])); } norm_end = clock(); double norm_time = (double)(norm_end - norm_start + rand_start - 
rand_end) / (double)CLOCKS_PER_SEC; double multi_time = (double)(multi_end - multi_start + rand_start - rand_end) / (double)CLOCKS_PER_SEC; double norm_speed = (double) num_vals*num_splines / norm_time; double multi_speed = (double) num_vals*num_splines / multi_time; fprintf (stderr, "Normal value speed = %13.3f evaluations per second.\n", norm_speed); fprintf (stderr, "Multi value speed = %13.3f evaluations per second.\n", multi_speed); /////////////////////// // Check VGL routine // /////////////////////// // eval_multi_UBspline_3d_c_vgl (multi_spline, x, y, z, // multi_vals, multi_grads, multi_lapl); // for (int j=0; j<num_splines; j++) // eval_UBspline_3d_c_vgl (norm_splines[j], x, y, z, &(norm_vals[j]), // &(norm_grads[3*j]), &(norm_lapl[j])); // for (int j=0; j<num_splines; j++) { // // Check value // if (zdiff(norm_vals[j], multi_vals[j], 1.0e-12)) // return -3; // // Check gradients // for (int n=0; n<3; n++) // if (zdiff (norm_grads[3*j+n], multi_grads[3*j+n], 1.0e-10)) // return -4; // // Check laplacian // if (zdiff (norm_lapl[j], multi_lapl[j], 1.0e-10)) // return -5; // } /////////////////////// // Check VGH routine // /////////////////////// multi_start = clock(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; eval_multi_UBspline_3d_c_vgh (multi_spline, x, y, z, multi_vals, multi_grads, multi_hess); } multi_end = clock(); norm_start = clock(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; for (int j=0; j<num_splines; j++) eval_UBspline_3d_c_vgh (norm_splines[j], x, y, z, &(norm_vals[j]), &(norm_grads[3*j]), &(norm_hess[9*j])); } norm_end = clock(); 
norm_time = (double)(norm_end - norm_start + rand_start - rand_end) / (double)CLOCKS_PER_SEC; multi_time = (double)(multi_end - multi_start + rand_start - rand_end) / (double)CLOCKS_PER_SEC; norm_speed = (double) num_vals*num_splines / norm_time; multi_speed = (double) num_vals*num_splines / multi_time; fprintf (stderr, "Normal VGH speed = %13.3f evaluations per second.\n", norm_speed); fprintf (stderr, "Multi VGH speed = %13.3f evaluations per second.\n", multi_speed); destroy_Bspline (multi_spline); for (int i=0; i<num_splines; i++) destroy_Bspline(norm_splines[i]); } void time_3d_real_float_all() { int Nx=23; int Ny=21; int Nz = 29; int num_splines = 128; Ugrid x_grid, y_grid, z_grid; x_grid.start = 3.1; x_grid.end = 9.1; x_grid.num = Nx; y_grid.start = 8.7; y_grid.end = 12.7; y_grid.num = Ny; z_grid.start = 4.5; z_grid.end = 9.3; z_grid.num = Nz; BCtype_s xBC, yBC, zBC; xBC.lCode = xBC.rCode = PERIODIC; yBC.lCode = yBC.rCode = PERIODIC; zBC.lCode = zBC.rCode = PERIODIC; // First, create splines the normal way UBspline_3d_s* norm_splines[num_splines]; multi_UBspline_3d_s *multi_spline; // First, create multispline multi_spline = create_multi_UBspline_3d_s (x_grid, y_grid, z_grid, xBC, yBC, zBC, num_splines); float data[Nx*Ny*Nz]; // Now, create normal splines and set multispline data for (int i=0; i<num_splines; i++) { for (int j=0; j<Nx*Ny*Nz; j++) data[j] = (drand48()-0.5); norm_splines[i] = create_UBspline_3d_s (x_grid, y_grid, z_grid, xBC, yBC, zBC, data); set_multi_UBspline_3d_s (multi_spline, i, data); } // Now, test random values int num_vals = 100000; float multi_vals[num_splines], norm_vals[num_splines]; float multi_grads[3*num_splines], norm_grads[3*num_splines]; float multi_lapl[num_splines], norm_lapl[num_splines]; float multi_hess[9*num_splines], norm_hess[9*num_splines]; clock_t rand_start, rand_end, norm_start, norm_end, multi_start, multi_end; rand_start = clock(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start 
+ (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; } rand_end = clock(); /////////////////////// // Check value routine // /////////////////////// multi_start = clock(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; eval_multi_UBspline_3d_s (multi_spline, x, y, z, multi_vals); } multi_end = clock(); norm_start = clock(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; for (int j=0; j<num_splines; j++) eval_UBspline_3d_s (norm_splines[j], x, y, z, &(norm_vals[j])); } norm_end = clock(); double norm_time = (double)(norm_end - norm_start + rand_start - rand_end) / (double)CLOCKS_PER_SEC; double multi_time = (double)(multi_end - multi_start + rand_start - rand_end) / (double)CLOCKS_PER_SEC; double norm_speed = (double) num_vals*num_splines / norm_time; double multi_speed = (double) num_vals*num_splines / multi_time; fprintf (stderr, "Normal value speed = %13.3f evaluations per second.\n", norm_speed); fprintf (stderr, "Multi value speed = %13.3f evaluations per second.\n", multi_speed); /////////////////////// // Check VGH routine // /////////////////////// multi_start = clock(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; eval_multi_UBspline_3d_s_vgh (multi_spline, x, y, z, multi_vals, multi_grads, multi_hess); } multi_end = clock(); norm_start = 
clock(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; for (int j=0; j<num_splines; j++) eval_UBspline_3d_s_vgh (norm_splines[j], x, y, z, &(norm_vals[j]), &(norm_grads[3*j]), &(norm_hess[9*j])); } norm_end = clock(); norm_time = (double)(norm_end - norm_start + rand_start - rand_end) / (double)CLOCKS_PER_SEC; multi_time = (double)(multi_end - multi_start + rand_start - rand_end) / (double)CLOCKS_PER_SEC; norm_speed = (double) num_vals*num_splines / norm_time; multi_speed = (double) num_vals*num_splines / multi_time; fprintf (stderr, "Normal VGH speed = %13.3f evaluations per second.\n", norm_speed); fprintf (stderr, "Multi VGH speed = %13.3f evaluations per second.\n", multi_speed); destroy_Bspline (multi_spline); for (int i=0; i<num_splines; i++) destroy_Bspline(norm_splines[i]); } #ifdef _OPENMP #include <omp.h> #include <numa.h> void time_3d_real_double_omp() { int avail = numa_available(); int nthr = omp_get_max_threads(); int nnodes = numa_max_node(); fprintf (stderr, "Performing test with %d NUMA nodes.\n", avail, nnodes); if (!nnodes) nnodes++; int Nx=63; int Ny=61; int Nz = 69; int num_splines = 128; Ugrid x_grid, y_grid, z_grid; x_grid.start = 3.1; x_grid.end = 9.1; x_grid.num = Nx; y_grid.start = 8.7; y_grid.end = 12.7; y_grid.num = Ny; z_grid.start = 4.5; z_grid.end = 9.3; z_grid.num = Nz; BCtype_d xBC, yBC, zBC; xBC.lCode = xBC.rCode = PERIODIC; yBC.lCode = yBC.rCode = PERIODIC; zBC.lCode = zBC.rCode = PERIODIC; // First, create splines the normal way UBspline_3d_d* norm_splines[num_splines]; multi_UBspline_3d_d *multi_spline[nnodes]; // First, create multispline for (int node=0; node<nnodes; node++) { nodemask_t mask; nodemask_zero(&mask); nodemask_set (&mask, node); numa_set_membind (&mask); multi_spline[node] = create_multi_UBspline_3d_d (x_grid, 
y_grid, z_grid, xBC, yBC, zBC, num_splines); } double data[Nx*Ny*Nz]; // Now, create normal splines and set multispline data for (int i=0; i<num_splines; i++) { for (int j=0; j<Nx*Ny*Nz; j++) data[j] = (drand48()-0.5); norm_splines[i] = create_UBspline_3d_d (x_grid, y_grid, z_grid, xBC, yBC, zBC, data); for (int node=0; node<nnodes; node++) { nodemask_t mask; nodemask_zero(&mask); nodemask_set (&mask, node); numa_set_membind (&mask); set_multi_UBspline_3d_d (multi_spline[node], i, data); } } // Now, test random values double rand_start, rand_end, norm_start[nthr], norm_end[nthr], multi_start[nthr], multi_end[nthr]; int num_vals = 100000; double multi_vals[nthr][num_splines], norm_vals[nthr][num_splines]; double multi_grads[nthr][3*num_splines], norm_grads[nthr][3*num_splines]; double multi_lapl[nthr][num_splines], norm_lapl[nthr][num_splines]; double multi_hess[nthr][9*num_splines], norm_hess[nthr][9*num_splines]; rand_start = omp_get_wtime(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; } rand_end = omp_get_wtime(); /////////////////////// // Check value routine // /////////////////////// double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; int thr_per_node = nthr/nnodes; #pragma omp parallel for for (int thr=0; thr<nthr; thr++) { int node = thr/thr_per_node; multi_start[thr] = omp_get_wtime(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; eval_multi_UBspline_3d_d (multi_spline[node], x, y, z, 
multi_vals[thr]); } multi_end[thr] = omp_get_wtime(); } #pragma omp parallel for for (int thr=0; thr<nthr; thr++) { norm_start[thr] = omp_get_wtime(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; for (int j=0; j<num_splines; j++) eval_UBspline_3d_d (norm_splines[j], x, y, z, &(norm_vals[thr][j])); } norm_end[thr] = omp_get_wtime(); } double norm_avg=0.0, multi_avg=0.0; for (int thr=0; thr<nthr; thr++) { double norm_time = (double)(norm_end[thr] - norm_start[thr] + rand_start - rand_end); double multi_time = (double)(multi_end[thr] - multi_start[thr] + rand_start - rand_end); norm_avg += norm_time; multi_avg += multi_time; } norm_avg /= nthr; multi_avg /= nthr; double norm_speed = (double) num_vals*num_splines / norm_avg; double multi_speed = (double) num_vals*num_splines / multi_avg; fprintf (stderr, "Normal value speed = %13.3f evaluations per second.\n", norm_speed); fprintf (stderr, "Multi value speed = %13.3f evaluations per second.\n", multi_speed); /////////////////////// // Check VGH routine // /////////////////////// #pragma omp parallel for for (int thr=0; thr<nthr; thr++) { int node = thr/thr_per_node; multi_start[thr] = omp_get_wtime(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; eval_multi_UBspline_3d_d_vgh (multi_spline[node], x, y, z, multi_vals[thr], multi_grads[thr], multi_hess[thr]); } multi_end[thr] = omp_get_wtime(); } #pragma omp parallel for for (int thr=0; thr<nthr; thr++) { norm_start[thr] = omp_get_wtime(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); 
double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; for (int j=0; j<num_splines; j++) eval_UBspline_3d_d_vgh (norm_splines[j], x, y, z, &(norm_vals[thr][j]), &(norm_grads[thr][3*j]), &(norm_hess[thr][9*j])); } norm_end[thr] = omp_get_wtime(); } norm_avg = multi_avg = 0.0; for (int thr=0; thr<nthr; thr++) { double norm_time = (double)(norm_end[thr] - norm_start[thr] + rand_start - rand_end); double multi_time = (double)(multi_end[thr] - multi_start[thr] + rand_start - rand_end); norm_avg += norm_time; multi_avg += multi_time; } norm_avg /= nthr; multi_avg /= nthr; norm_speed = (double) num_vals*num_splines / norm_avg; multi_speed = (double) num_vals*num_splines / multi_avg; fprintf (stderr, "Normal VGH speed = %13.3f evaluations per second.\n", norm_speed); fprintf (stderr, "Multi VGH speed = %13.3f evaluations per second.\n", multi_speed); destroy_Bspline (multi_spline); for (int i=0; i<num_splines; i++) destroy_Bspline(norm_splines[i]); } #endif void time_3d_real_double_all() { int Nx=63; int Ny=61; int Nz = 69; int num_splines = 128; Ugrid x_grid, y_grid, z_grid; x_grid.start = 3.1; x_grid.end = 9.1; x_grid.num = Nx; y_grid.start = 8.7; y_grid.end = 12.7; y_grid.num = Ny; z_grid.start = 4.5; z_grid.end = 9.3; z_grid.num = Nz; BCtype_d xBC, yBC, zBC; xBC.lCode = xBC.rCode = PERIODIC; yBC.lCode = yBC.rCode = PERIODIC; zBC.lCode = zBC.rCode = PERIODIC; // First, create splines the normal way UBspline_3d_d* norm_splines[num_splines]; multi_UBspline_3d_d *multi_spline; // First, create multispline multi_spline = create_multi_UBspline_3d_d (x_grid, y_grid, z_grid, xBC, yBC, zBC, num_splines); double data[Nx*Ny*Nz]; // Now, create normal splines and set multispline data for (int i=0; i<num_splines; i++) { for (int j=0; j<Nx*Ny*Nz; j++) data[j] = (drand48()-0.5); norm_splines[i] = create_UBspline_3d_d (x_grid, y_grid, z_grid, xBC, yBC, zBC, data); set_multi_UBspline_3d_d (multi_spline, i, data); } // 
Now, test random values int num_vals = 100000; double multi_vals[num_splines], norm_vals[num_splines]; double multi_grads[3*num_splines], norm_grads[3*num_splines]; double multi_lapl[num_splines], norm_lapl[num_splines]; double multi_hess[9*num_splines], norm_hess[9*num_splines]; clock_t rand_start, rand_end, norm_start, norm_end, multi_start, multi_end; rand_start = clock(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; } rand_end = clock(); /////////////////////// // Check value routine // /////////////////////// multi_start = clock(); double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; eval_multi_UBspline_3d_d (multi_spline, x, y, z, multi_vals); } multi_end = clock(); norm_start = clock(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; for (int j=0; j<num_splines; j++) eval_UBspline_3d_d (norm_splines[j], x, y, z, &(norm_vals[j])); } norm_end = clock(); double norm_time = (double)(norm_end - norm_start + rand_start - rand_end) / (double)CLOCKS_PER_SEC; double multi_time = (double)(multi_end - multi_start + rand_start - rand_end) / (double)CLOCKS_PER_SEC; double norm_speed = (double) num_vals*num_splines / norm_time; double multi_speed = (double) 
num_vals*num_splines / multi_time; fprintf (stderr, "Normal value speed = %13.3f evaluations per second.\n", norm_speed); fprintf (stderr, "Multi value speed = %13.3f evaluations per second.\n", multi_speed); /////////////////////// // Check VGH routine // /////////////////////// multi_start = clock(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; eval_multi_UBspline_3d_d_vgh (multi_spline, x, y, z, multi_vals, multi_grads, multi_hess); } multi_end = clock(); norm_start = clock(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; for (int j=0; j<num_splines; j++) eval_UBspline_3d_d_vgh (norm_splines[j], x, y, z, &(norm_vals[j]), &(norm_grads[3*j]), &(norm_hess[9*j])); } norm_end = clock(); norm_time = (double)(norm_end - norm_start + rand_start - rand_end) / (double)CLOCKS_PER_SEC; multi_time = (double)(multi_end - multi_start + rand_start - rand_end) / (double)CLOCKS_PER_SEC; norm_speed = (double) num_vals*num_splines / norm_time; multi_speed = (double) num_vals*num_splines / multi_time; fprintf (stderr, "Normal VGH speed = %13.3f evaluations per second.\n", norm_speed); fprintf (stderr, "Multi VGH speed = %13.3f evaluations per second.\n", multi_speed); destroy_Bspline (multi_spline); for (int i=0; i<num_splines; i++) destroy_Bspline(norm_splines[i]); } void time_3d_complex_double_all() { int Nx=37; int Ny=37; int Nz = 37; int num_splines = 56; Ugrid x_grid, y_grid, z_grid; x_grid.start = 3.1; x_grid.end = 9.1; x_grid.num = Nx; y_grid.start = 8.7; y_grid.end = 12.7; y_grid.num = Ny; z_grid.start = 4.5; z_grid.end = 9.3; z_grid.num = Nz; BCtype_z xBC, yBC, zBC; 
xBC.lCode = xBC.rCode = PERIODIC; yBC.lCode = yBC.rCode = PERIODIC; zBC.lCode = zBC.rCode = PERIODIC; // First, create splines the normal way UBspline_3d_z* norm_splines[num_splines]; multi_UBspline_3d_z *multi_spline; // First, create multispline multi_spline = create_multi_UBspline_3d_z (x_grid, y_grid, z_grid, xBC, yBC, zBC, num_splines); complex_double data[Nx*Ny*Nz]; // Now, create normal splines and set multispline data for (int i=0; i<num_splines; i++) { for (int j=0; j<Nx*Ny*Nz; j++) data[j] = (drand48()-0.5) + (drand48()-0.5)*1.0i; norm_splines[i] = create_UBspline_3d_z (x_grid, y_grid, z_grid, xBC, yBC, zBC, data); set_multi_UBspline_3d_z (multi_spline, i, data); } // Now, test random values int num_vals = 100000; complex_double multi_vals[num_splines], norm_vals[num_splines]; complex_double multi_grads[3*num_splines], norm_grads[3*num_splines]; complex_double multi_lapl[num_splines], norm_lapl[num_splines]; complex_double multi_hess[9*num_splines], norm_hess[9*num_splines]; clock_t rand_start, rand_end, norm_start, norm_end, multi_start, multi_end; rand_start = clock(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; } rand_end = clock(); /////////////////////// // Check value routine // /////////////////////// multi_start = clock(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; eval_multi_UBspline_3d_z (multi_spline, x, y, z, multi_vals); } multi_end = clock(); norm_start = clock(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + 
(1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; for (int j=0; j<num_splines; j++) eval_UBspline_3d_z (norm_splines[j], x, y, z, &(norm_vals[j])); } norm_end = clock(); double norm_time = (double)(norm_end - norm_start + rand_start - rand_end) / (double)CLOCKS_PER_SEC; double multi_time = (double)(multi_end - multi_start + rand_start - rand_end) / (double)CLOCKS_PER_SEC; double norm_speed = (double) num_vals*num_splines / norm_time; double multi_speed = (double) num_vals*num_splines / multi_time; fprintf (stderr, "Normal value speed = %13.3f evaluations per second.\n", norm_speed); fprintf (stderr, "Multi value speed = %13.3f evaluations per second.\n", multi_speed); /////////////////////// // Check VGL routine // /////////////////////// // eval_multi_UBspline_3d_z_vgl (multi_spline, x, y, z, // multi_vals, multi_grads, multi_lapl); // for (int j=0; j<num_splines; j++) // eval_UBspline_3d_z_vgl (norm_splines[j], x, y, z, &(norm_vals[j]), // &(norm_grads[3*j]), &(norm_lapl[j])); // for (int j=0; j<num_splines; j++) { // // Check value // if (zdiff(norm_vals[j], multi_vals[j], 1.0e-12)) // return -3; // // Check gradients // for (int n=0; n<3; n++) // if (zdiff (norm_grads[3*j+n], multi_grads[3*j+n], 1.0e-10)) // return -4; // // Check laplacian // if (zdiff (norm_lapl[j], multi_lapl[j], 1.0e-10)) // return -5; // } /////////////////////// // Check VGH routine // /////////////////////// multi_start = clock(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; eval_multi_UBspline_3d_z_vgh (multi_spline, x, y, z, multi_vals, multi_grads, multi_hess); } multi_end = clock(); norm_start = clock(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double 
y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; for (int j=0; j<num_splines; j++) eval_UBspline_3d_z_vgh (norm_splines[j], x, y, z, &(norm_vals[j]), &(norm_grads[3*j]), &(norm_hess[9*j])); } norm_end = clock(); norm_time = (double)(norm_end - norm_start + rand_start - rand_end) / (double)CLOCKS_PER_SEC; multi_time = (double)(multi_end - multi_start + rand_start - rand_end) / (double)CLOCKS_PER_SEC; norm_speed = (double) num_vals*num_splines / norm_time; multi_speed = (double) num_vals*num_splines / multi_time; fprintf (stderr, "Normal VGH speed = %13.3f evaluations per second.\n", norm_speed); fprintf (stderr, "Multi VGH speed = %13.3f evaluations per second.\n", multi_speed); destroy_Bspline (multi_spline); for (int i=0; i<num_splines; i++) destroy_Bspline(norm_splines[i]); } void test_complex_double_vgh() { int Nx=73; int Ny=91; int Nz = 29; int num_splines = 128; Ugrid x_grid, y_grid, z_grid; x_grid.start = 3.1; x_grid.end = 9.1; x_grid.num = Nx; y_grid.start = 8.7; y_grid.end = 12.7; y_grid.num = Ny; z_grid.start = 4.5; z_grid.end = 9.3; z_grid.num = Nz; BCtype_z xBC, yBC, zBC; xBC.lCode = xBC.rCode = PERIODIC; yBC.lCode = yBC.rCode = PERIODIC; zBC.lCode = zBC.rCode = PERIODIC; // First, create splines the normal way UBspline_3d_z* norm_splines[num_splines]; multi_UBspline_3d_z *multi_spline; // First, create multispline multi_spline = create_multi_UBspline_3d_z (x_grid, y_grid, z_grid, xBC, yBC, zBC, num_splines); complex_double data[Nx*Ny*Nz]; // Now, create normal splines and set multispline data for (int i=0; i<num_splines; i++) { for (int j=0; j<Nx*Ny*Nz; j++) data[j] = (drand48()-0.5) + (drand48()-0.5)*1.0i; norm_splines[i] = create_UBspline_3d_z (x_grid, y_grid, z_grid, xBC, yBC, zBC, data); set_multi_UBspline_3d_z (multi_spline, i, data); } fprintf (stderr, "norm coef = %1.14e + %1.14ei\n", creal(norm_splines[19]->coefs[227]), cimag(norm_splines[19]->coefs[227])); fprintf (stderr, 
"multi coef = %1.14e + %1.14ei\n", creal(multi_spline->coefs[19+227*multi_spline->z_stride]), cimag(multi_spline->coefs[19+227*multi_spline->z_stride])); // Now, test random values int num_vals = 100; complex_double multi_vals[num_splines], norm_vals[num_splines]; complex_double multi_grads[3*num_splines], norm_grads[3*num_splines]; complex_double multi_lapl[num_splines], norm_lapl[num_splines]; complex_double multi_hess[9*num_splines], norm_hess[9*num_splines]; for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; /////////////////////// // Check VGH routine // /////////////////////// eval_multi_UBspline_3d_z_vgh (multi_spline, x, y, z, multi_vals, multi_grads, multi_hess); for (int j=0; j<num_splines; j++) eval_UBspline_3d_z_vgh (norm_splines[j], x, y, z, &(norm_vals[j]), &(norm_grads[3*j]), &(norm_hess[9*j])); for (int j=0; j<num_splines; j++) { // Check value if (zdiff(norm_vals[j], multi_vals[j], 1.0e-12)) { fprintf (stderr, "Error! norm_vals[j] = %1.14e + %1.14ei\n", creal(norm_vals[j]), cimag(norm_vals[j])); fprintf (stderr, " multi_vals[j] = %1.14e + %1.14ei\n", creal(multi_vals[j]), cimag(multi_vals[j])); } // Check gradients for (int n=0; n<3; n++) { if (zdiff (norm_grads[3*j+n], multi_grads[3*j+n], 1.0e-12)) { fprintf (stderr, "n=%d\n", n); fprintf (stderr, "Error! norm_grads[j] = %1.14e + %1.14ei\n", creal(norm_grads[3*j+n]), cimag(norm_grads[3*j+n])); fprintf (stderr, " multi_grads[j] = %1.14e + %1.14ei\n", creal(multi_grads[3*j+n]), cimag(multi_grads[3*j+n])); } } // Check hessian for (int n=0; n<9; n++) { if (zdiff (norm_hess[9*j+n], multi_hess[9*j+n], 1.0e-10)) { fprintf (stderr, "Error! 
norm_hess[j] = %1.14e + %1.14ei\n", creal(norm_hess[9*j+n]), cimag(norm_hess[9*j+n])); fprintf (stderr, " multi_hess[j] = %1.14e + %1.14ei\n", creal(multi_hess[9*j+n]), cimag(multi_hess[9*j+n])); } } } } num_vals = 100000; // Now do timing clock_t norm_start, norm_end, multi_start, multi_end, rand_start, rand_end; rand_start = clock(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; } rand_end = clock(); norm_start = clock(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; for (int j=0; j<num_splines; j++) eval_UBspline_3d_z_vgh (norm_splines[j], x, y, z, &(norm_vals[j]), &(norm_grads[3*j]), &(norm_hess[9*j])); } norm_end = clock(); multi_start = clock(); for (int i=0; i<num_vals; i++) { double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end; double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end; double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end; eval_multi_UBspline_3d_z_vgh (multi_spline, x, y, z, multi_vals, multi_grads, multi_hess); } multi_end = clock(); fprintf (stderr, "Normal spline time = %1.5f\n", (double)(norm_end-norm_start+rand_start-rand_end)/CLOCKS_PER_SEC); fprintf (stderr, "Multi spline time = %1.5f\n", (double)(multi_end-multi_start+rand_start-rand_end)/CLOCKS_PER_SEC); } void test_double() { int Nx=73; int Ny=91; int Nz = 29; int num_splines = 201; Ugrid x_grid, y_grid, z_grid; x_grid.start = 3.1; x_grid.end = 9.1; x_grid.num = Nx; y_grid.start = 8.7; y_grid.end = 12.7; y_grid.num = Ny; z_grid.start = 4.5; z_grid.end = 9.3; z_grid.num = Nz; BCtype_d xBC, yBC, zBC; xBC.lCode = xBC.rCode = 
PERIODIC; yBC.lCode = yBC.rCode = PERIODIC; zBC.lCode = zBC.rCode = PERIODIC;
  // First, create splines the normal way
  UBspline_3d_d* norm_splines[num_splines];
  multi_UBspline_3d_d *multi_spline;
  // First, create multispline
  multi_spline = create_multi_UBspline_3d_d (x_grid, y_grid, z_grid, xBC, yBC, zBC, num_splines);
  double data[Nx*Ny*Nz];
  // Now, create normal splines and set multispline data
  for (int i=0; i<num_splines; i++) {
    for (int j=0; j<Nx*Ny*Nz; j++)
      // NOTE(review): assigning a complex expression to double keeps only the
      // real part (C11 6.3.1.7); the second drand48() call still advances the
      // RNG stream, so removing it would change all downstream random values.
      data[j] = (drand48()-0.5) + (drand48()-0.5)*1.0i;
    norm_splines[i] = create_UBspline_3d_d (x_grid, y_grid, z_grid, xBC, yBC, zBC, data);
    set_multi_UBspline_3d_d (multi_spline, i, data);
  }
  // Spot-check that one coefficient agrees between the two storage layouts
  // (multi-spline interleaves splines with stride z_stride).
  fprintf (stderr, "norm coef = %1.14e\n", norm_splines[19]->coefs[227]);
  fprintf (stderr, "multi coef = %1.14e\n", multi_spline->coefs[19+227*multi_spline->z_stride]);
  // Now, test random values
  int num_vals = 100;
  double multi_vals[num_splines], norm_vals[num_splines];
  for (int i=0; i<num_vals; i++) {
    // Random point inside the grid domain (convex combination of start/end).
    double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end;
    double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end;
    double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end;
    eval_multi_UBspline_3d_d (multi_spline, x, y, z, multi_vals);
    for (int j=0; j<num_splines; j++)
      eval_UBspline_3d_d (norm_splines[j], x, y, z, &(norm_vals[j]));
    for (int j=0; j<num_splines; j++) {
      // Check value
      double diff = norm_vals[j] - multi_vals[j];
      if (fabs(diff) > 1.0e-12) {
        fprintf (stderr, "Error! norm_vals[j] = %1.14e\n", norm_vals[j]);
        fprintf (stderr, " multi_vals[j] = %1.14e\n", multi_vals[j]);
      }
    }
  }
  num_vals = 100000;
  // Now do timing
  clock_t norm_start, norm_end, multi_start, multi_end, rand_start, rand_end;
  // Baseline loop: measures the cost of random-point generation alone so it
  // can be subtracted from both timings below.
  rand_start = clock();
  for (int i=0; i<num_vals; i++) {
    double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end;
    double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end;
    double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end;
  }
  rand_end = clock();
  norm_start = clock();
  for (int i=0; i<num_vals; i++) {
    double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end;
    double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end;
    double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end;
    for (int j=0; j<num_splines; j++)
      eval_UBspline_3d_d (norm_splines[j], x, y, z, &(norm_vals[j]));
  }
  norm_end = clock();
  multi_start = clock();
  for (int i=0; i<num_vals; i++) {
    double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end;
    double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end;
    double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end;
    eval_multi_UBspline_3d_d (multi_spline, x, y, z, multi_vals);
  }
  multi_end = clock();
  // (rand_start - rand_end) subtracts the random-generation baseline.
  fprintf (stderr, "Normal spline time = %1.5f\n", (double)(norm_end-norm_start+rand_start-rand_end)/CLOCKS_PER_SEC);
  fprintf (stderr, "Multi spline time = %1.5f\n", (double)(multi_end-multi_start+rand_start-rand_end)/CLOCKS_PER_SEC);
}

/* Compare multi-spline value/gradient/Hessian (VGH) evaluation of 3D uniform
   double-precision splines against per-spline evaluation at random points,
   then time both paths over 100000 evaluations. Errors are reported to
   stderr; nothing is returned. */
void test_double_vgh()
{
  int Nx=73; int Ny=91; int Nz = 29;
  int num_splines = 128;
  Ugrid x_grid, y_grid, z_grid;
  x_grid.start = 3.1; x_grid.end = 9.1; x_grid.num = Nx;
  y_grid.start = 8.7; y_grid.end = 12.7; y_grid.num = Ny;
  z_grid.start = 4.5; z_grid.end = 9.3; z_grid.num = Nz;
  BCtype_d xBC, yBC, zBC;
  xBC.lCode = xBC.rCode = PERIODIC;
  yBC.lCode = yBC.rCode = PERIODIC;
  zBC.lCode = zBC.rCode = PERIODIC;
  // First, create splines the normal way
  UBspline_3d_d* norm_splines[num_splines];
  multi_UBspline_3d_d *multi_spline;
  // First, create multispline
  multi_spline = create_multi_UBspline_3d_d (x_grid, y_grid, z_grid, xBC, yBC, zBC, num_splines);
  double data[Nx*Ny*Nz];
  // Now, create normal splines and set multispline data
  for (int i=0; i<num_splines; i++) {
    for (int j=0; j<Nx*Ny*Nz; j++)
      // NOTE(review): imaginary term is discarded on assignment to double
      // (copied from the complex variant); the extra drand48() call still
      // advances the RNG sequence.
      data[j] = (drand48()-0.5) + (drand48()-0.5)*1.0i;
    norm_splines[i] = create_UBspline_3d_d (x_grid, y_grid, z_grid, xBC, yBC, zBC, data);
    set_multi_UBspline_3d_d (multi_spline, i, data);
  }
  fprintf (stderr, "norm coef = %1.14e\n", norm_splines[19]->coefs[227]);
  fprintf (stderr, "multi coef = %1.14e\n", multi_spline->coefs[19+227*multi_spline->z_stride]);
  // Now, test random values
  int num_vals = 100;
  double multi_vals[num_splines], norm_vals[num_splines];
  double multi_grads[3*num_splines], norm_grads[3*num_splines];
  double multi_hess[9*num_splines], norm_hess[9*num_splines];
  for (int i=0; i<num_vals; i++) {
    double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end;
    double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end;
    double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end;
    eval_multi_UBspline_3d_d_vgh (multi_spline, x, y, z, multi_vals, multi_grads, multi_hess);
    for (int j=0; j<num_splines; j++)
      eval_UBspline_3d_d_vgh (norm_splines[j], x, y, z, &(norm_vals[j]), &(norm_grads[3*j]), &(norm_hess[9*j]));
    for (int j=0; j<num_splines; j++) {
      // Check value
      double diff = norm_vals[j] - multi_vals[j];
      if (fabs(diff) > 1.0e-12) {
        fprintf (stderr, "j = %d\n", j);
        fprintf (stderr, "Error! norm_vals[j] = %1.14e\n", norm_vals[j]);
        fprintf (stderr, " multi_vals[j] = %1.14e\n", multi_vals[j]);
      }
      // Check gradients
      for (int n=0; n<3; n++) {
        diff = norm_grads[3*j+n] - multi_grads[3*j+n];
        if (fabs(diff) > 1.0e-12) {
          fprintf (stderr, "n=%d\n", n);
          fprintf (stderr, "Error! norm_grads[j] = %1.14e\n", norm_grads[3*j+n]);
          fprintf (stderr, " multi_grads[j] = %1.14e\n", multi_grads[3*j+n]);
        }
      }
      // Check hessian (looser tolerance: second derivatives amplify rounding)
      for (int n=0; n<9; n++) {
        diff = norm_hess[9*j+n] - multi_hess[9*j+n];
        if (fabs(diff) > 1.0e-10) {
          fprintf (stderr, "Error! norm_hess[j] = %1.14e\n", norm_hess[9*j+n]);
          fprintf (stderr, " multi_hess[j] = %1.14e\n", multi_hess[9*j+n]);
        }
      }
    }
  }
  num_vals = 100000;
  // Now do timing
  clock_t norm_start, norm_end, multi_start, multi_end, rand_start, rand_end;
  // Baseline: cost of generating the random evaluation points.
  rand_start = clock();
  for (int i=0; i<num_vals; i++) {
    double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end;
    double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end;
    double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end;
  }
  rand_end = clock();
  norm_start = clock();
  for (int i=0; i<num_vals; i++) {
    double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end;
    double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end;
    double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end;
    for (int j=0; j<num_splines; j++)
      eval_UBspline_3d_d_vgh (norm_splines[j], x, y, z, &(norm_vals[j]), &(norm_grads[3*j]), &(norm_hess[9*j]));
  }
  norm_end = clock();
  multi_start = clock();
  for (int i=0; i<num_vals; i++) {
    double rx = drand48(); double x = rx*x_grid.start + (1.0-rx)*x_grid.end;
    double ry = drand48(); double y = ry*y_grid.start + (1.0-ry)*y_grid.end;
    double rz = drand48(); double z = rz*z_grid.start + (1.0-rz)*z_grid.end;
    eval_multi_UBspline_3d_d_vgh (multi_spline, x, y, z, multi_vals, multi_grads, multi_hess);
  }
  multi_end = clock();
  fprintf (stderr, "Normal spline time = %1.5f\n", (double)(norm_end-norm_start+rand_start-rand_end)/CLOCKS_PER_SEC);
  fprintf (stderr, "Multi spline time = %1.5f\n", (double)(multi_end-multi_start+rand_start-rand_end)/CLOCKS_PER_SEC);
}

/* Time 1D nonuniform (log-grid) complex-double multi-spline evaluation:
   value routine vs. per-spline evaluation, then the VGL (value/gradient/
   Laplacian) routine. Reports splines-evaluated-per-second to stderr.
   Always returns 0. */
int time_1d_NUB_complex_double_all()
{
  int Nx=100;
  int num_splines = 128*36;
  NUgrid *x_grid = create_log_grid (1.0e-4, 3.0, Nx);
  // for (int i=0; i<Nx; i++)
  //   fprintf (stderr, "%1.8e\n", x_grid->points[i]);
  BCtype_z xBC;
  // xBC.lCode = xBC.rCode = NATURAL;
  xBC.lCode = DERIV1;
  xBC.lVal_r = 2.3; xBC.lVal_i = 1.1;
  xBC.rCode = DERIV1;
  xBC.rVal_r = -2.3; xBC.rVal_i = -1.1;
  // First, create splines the normal way
  NUBspline_1d_z* norm_splines[num_splines];
  multi_NUBspline_1d_z *multi_spline;
  // First, create multispline
  multi_spline = create_multi_NUBspline_1d_z (x_grid, xBC, num_splines);
  complex_double data[Nx];
  // Now, create normal splines and set multispline data
  for (int i=0; i<num_splines; i++) {
    for (int j=0; j<Nx; j++)
      data[j] = (drand48()-0.5) + (drand48()-0.5)*1.0i;
    // Per-spline boundary derivatives, so the _BC setter is exercised.
    xBC.lVal_r = drand48(); xBC.lVal_i = drand48();
    xBC.rVal_r = drand48(); xBC.rVal_i = drand48();
    norm_splines[i] = create_NUBspline_1d_z (x_grid, xBC, data);
    //set_multi_NUBspline_1d_z (multi_spline, i, data);
    set_multi_NUBspline_1d_z_BC (multi_spline, i, data, xBC);
  }
  // Now, test random values
  int num_vals = 100000;
  complex_double multi_vals[num_splines], norm_vals [num_splines];
  complex_double multi_grads[num_splines], norm_grads[num_splines];
  complex_double multi_lapl[num_splines], norm_lapl [num_splines];
  clock_t multi_start, multi_end, norm_start, norm_end;
  //////////////////////////
  // Time value routine   //
  //////////////////////////
  multi_start = clock();
  for (int i=0; i<num_vals; i++) {
    double rx = drand48();
    double x = rx*x_grid->start + (1.0-rx)*x_grid->end;
    eval_multi_NUBspline_1d_z (multi_spline, x, multi_vals);
  }
  multi_end = clock();
  // NOTE(review): this per-spline loop is timed but its result (norm_*) is
  // never reported below -- only the multi-spline speed is printed.
  norm_start = clock();
  for (int i=0; i<num_vals; i++) {
    double rx = drand48();
    double x = rx*x_grid->start + (1.0-rx)*x_grid->end;
    for (int j=0; j<num_splines; j++)
      eval_NUBspline_1d_z (norm_splines[j], x, &(norm_vals[j]));
  }
  norm_end = clock();
  double dt = (double)(multi_end - multi_start) / (double)CLOCKS_PER_SEC;
  double multi_speed = (double)num_vals * (double)num_splines/ dt;
  fprintf (stderr, "1D complex nonuniform multi-spline speed = %9.2f\n", multi_speed);
  //////////////////////////
  // Time VGL routine     //
  //////////////////////////
  multi_start = clock();
  for (int i=0; i<num_vals; i++) {
    double rx = drand48();
    double x = rx*x_grid->start + (1.0-rx)*x_grid->end;
    eval_multi_NUBspline_1d_z_vgl (multi_spline, x, multi_vals, multi_grads, multi_lapl);
  }
  multi_end = clock();
  /* norm_start = clock(); */
  /* for (int i=0; i<num_vals; i++) { */
  /*   double rx = drand48(); */
  /*   double x = rx*x_grid->start + (1.0-rx)*x_grid->end; */
  /*   for (int j=0; j<num_splines; j++) */
  /*     eval_NUBspline_1d_z (norm_splines[j], x, &(norm_vals[j])); */
  /* } */
  /* norm_end = clock(); */
  dt = (double)(multi_end - multi_start) / (double)CLOCKS_PER_SEC;
  multi_speed = (double)num_vals * (double)num_splines/ dt;
  fprintf (stderr, "1D complex nonuniform multi-spline speed = %9.2f\n", multi_speed);
  return 0;
}

/* Print PASSED/FAILED for a test return code (0 == pass).
   NOTE(review): the green/normal/red ANSI escape strings are built but never
   used -- the colored output they were presumably intended for was dropped;
   dead code, candidate for removal. */
void PrintPassFail (int code)
{
  char green[100], normal[100], red[100];
  snprintf (green, 100, "%c[0;32;47m", 0x1b);
  snprintf (normal, 100, "%c[0;30;47m", 0x1b);
  snprintf (red, 100, "%c[0;31;47m", 0x1b);
  if (code == 0)
    fprintf (stderr, "PASSED\n");
  else
    fprintf (stderr, "FAILED: code = %d\n", code);
}

/* Driver: runs the 3D timing benchmarks (OpenMP variant first when built
   with _OPENMP). NOTE(review): implicit-int `main()` is pre-C99 style and
   rejected by C99+ compilers in strict mode; should be `int main()`. */
main()
{
  // time_1d_NUB_complex_double_all();
#ifdef _OPENMP
  fprintf (stderr, "Timing 3D double-precision evaluation speed with OpenMP:\n");
  time_3d_real_double_omp();
#endif
  fprintf (stderr, "Timing 3D complex single-precision evaluation speed:\n");
  time_3d_complex_float_all();
  fprintf (stderr, "Timing 3D single-precision evaluation speed:\n");
  time_3d_real_float_all();
  fprintf (stderr, "Timing 3D double-precision evaluation speed:\n");
  time_3d_real_double_all();
  fprintf (stderr, "Timing 3D complex double-precision evaluation speed:\n");
  time_3d_complex_double_all();
}
GB_unaryop__minv_int8_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__minv_int8_uint64
// op(A') function: GB_tran__minv_int8_uint64

// C type: int8_t
// A type: uint64_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 8)

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
// NOTE(review): GB_IMINV_SIGNED is defined in GB.h -- presumably the
// signed-integer multiplicative-inverse with special-casing for 0/1/-1;
// confirm there.
#define GB_OP(z, x) \
    z = GB_IMINV_SIGNED (x, 8) ;

// casting
#define GB_CASTING(z, x) \
    int8_t z = (int8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_INT8 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = minv((int8_t) aij) entrywise over anz entries, parallelized
// with a static OpenMP schedule. Returns GrB_NO_VALUE when the operator is
// compiled out via GB_DISABLE, GrB_SUCCESS otherwise.
GrB_Info GB_unop__minv_int8_uint64
(
    int8_t *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual loop body lives in the shared template GB_unaryop_transpose.c,
// specialized through the GB_* macros defined above (phase 2 of 2).
GrB_Info GB_tran__minv_int8_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
datatypes.h
#ifndef DATATYPES_H_
#define DATATYPES_H_

#include <stdbool.h>

#include "../tools.h"
#include "PlyDict.h"
#include "ObjDict.h"

#define MSG_HEAD_SEP "YGG_MSG_HEAD"
/*! @brief Size of COMM buffer. */
#define COMMBUFFSIZ 2000
#define FMT_LEN 100

#ifdef __cplusplus /* If this is a C++ compiler, use C linkage */
extern "C" {
#endif

/* NOTE(review): `static` in a header gives every translation unit its own
   copy of prefix_char; if this is meant to be a single shared flag, it
   belongs in one .c file with an extern declaration here. */
static char prefix_char = '#';
#ifdef _OPENMP
#pragma omp threadprivate(prefix_char)
#endif

/*! @brief Bit flags. */
#define HEAD_FLAG_VALID 0x00000001 //!< Set if the header is valid.
#define HEAD_FLAG_MULTIPART 0x00000002 //!< Set if the header is for a multipart message
#define HEAD_TYPE_IN_DATA 0x00000004 //!< Set if the type is stored with the data during serialization
#define HEAD_AS_ARRAY 0x00000008 //!< Set if messages will be serialized arrays

/*! @brief C-friendly definition of MetaschemaType. */
typedef struct dtype_t {
  char type[COMMBUFFSIZ]; //!< Type name
  bool use_generic; //!< Flag for empty dtypes to specify generic in/out
  void *obj; //!< MetaschemaType Pointer
} dtype_t;

/*! @brief C-friendly definition of YggGeneric. */
typedef struct generic_t {
  char prefix; //!< Prefix character for limited verification.
  void *obj; //!< Pointer to YggGeneric class.
} generic_t;

/*! @brief C-friendly definition of vector object. */
typedef generic_t json_array_t;

/*! @brief C-friendly definition of map object. */
typedef generic_t json_object_t;

/*! @brief C-friendly definition of schema object. */
typedef generic_t schema_t;

/*! @brief C-friendly definition of Python class object. */
typedef python_t python_class_t;

/*! @brief C-friendly definition of Python function object. */
typedef python_t python_function_t;

/*! @brief C-friendly definition of Python instance object. */
typedef generic_t python_instance_t;

/*! @brief Macro wrapping call to PyObject_CallFunction. */
#define call_python(x, format, ...) PyObject_CallFunction(x.obj, format, __VA_ARGS__)

/*! @brief Aliases to allow differentiation in parsing model definition.
 */
typedef char* unicode_t;
typedef char* string_t;
typedef char* bytes_t;

/*! @brief Header information passed by comms for multipart messages. */
typedef struct comm_head_t {
  size_t bodysiz; //!< Size of body.
  size_t bodybeg; //!< Start of body in header.
  int flags; //!< Bit flags (HEAD_FLAG_*) encoding the status of the header.
  int nargs_populated; //!< Number of arguments populated during deserialization.
  // size_t size; //!< Size of incoming message.
  char address[COMMBUFFSIZ]; //!< Address that message will comm in on.
  char id[COMMBUFFSIZ]; //!< Unique ID associated with this message.
  char response_address[COMMBUFFSIZ]; //!< Response address.
  char request_id[COMMBUFFSIZ]; //!< Request id.
  char zmq_reply[COMMBUFFSIZ]; //!< Reply address for ZMQ sockets.
  char zmq_reply_worker[COMMBUFFSIZ]; //!< Reply address for worker socket.
  char model[COMMBUFFSIZ]; //!< Name of model that sent the header.
  // These should be removed once JSON fully implemented
  int serializer_type; //!< Code indicating the type of serializer.
  char format_str[COMMBUFFSIZ]; //!< Format string for serializer.
  char field_names[COMMBUFFSIZ]; //!< String containing field names.
  char field_units[COMMBUFFSIZ]; //!< String containing field units.
  // dtype_t* dtype; //!< Type structure.
} comm_head_t;

/*!
  @brief C wrapper for the C++ type_from_doc function.
  @param type_doc void* Pointer to const rapidjson::Value type doc.
  @returns void* Pointer to MetaschemaType class.
 */
void* type_from_doc_c(const void* type_doc, const bool use_generic);

/*!
  @brief C wrapper for the C++ type_from_pyobj function.
  @param type_doc void* Pointer to const rapidjson::Value type doc.
  @returns void* Pointer to MetaschemaType class.
 */
void* type_from_pyobj_c(PyObject* pyobj, const bool use_generic);

/*!
  @brief Determine if a datatype was created from a format.
  @param[in] type_struct dtype_t* Datatype structure.
  @returns int 1 if the datatype was created from a format, 0 if it was not, -1 if there is an error.
 */
int is_dtype_format_array(dtype_t* type_struct);

/*!
  @brief Initialize an empty generic object.
  @returns generic_t New generic object structure.
 */
generic_t init_generic();

/*!
  @brief Initialize an empty array of mixed types with generic wrappers.
  @returns generic_t New generic object structure containing an empty array.
 */
generic_t init_generic_array();

/*!
  @brief Initialize an empty map (JSON object) of mixed types with generic wrappers.
  @returns generic_t New generic object structure containing an empty map (JSON object).
 */
generic_t init_generic_map();

/*!
  @brief Determine if the provided character matches the required generic prefix char.
  @param[in] x char Character to check.
  @returns int 1 if the character is the correct prefix, 0 otherwise.
 */
int is_generic_flag(char x);

/*!
  @brief Determine if a generic structure is initialized.
  @param[in] x generic_t Generic structure to test.
  @returns int 1 if the structure is initialized, 0 otherwise.
 */
int is_generic_init(generic_t x);

/*!
  @brief Create a generic object from the provided information.
  @param[in] type_class dtype_t* Type structure/class.
  @param[in] data void* Pointer to data.
  @param[in] nbytes size_t Size of data.
  @returns generic_t Pointer to new generic object structure.
 */
generic_t create_generic(dtype_t* type_class, void* data, size_t nbytes);

/*!
  @brief Destroy a generic object.
  @param[in] x generic_t* Pointer to generic object structure to destroy.
  @returns int -1 if unsuccessful, 0 otherwise.
 */
int destroy_generic(generic_t* x);

/*!
  @brief Copy data from one generic object to the other.
  @param[in] src generic_t Generic structure that data should be copied from.
  @returns generic_t Copied structure.
 */
generic_t copy_generic(generic_t src);

/*!
  @brief Display information about the generic type.
  @param[in] x generic_t* Wrapper for generic object.
 */
void display_generic(generic_t x);

/*!
  @brief Return the recovered generic structure if one is present in the variable argument list.
  @param[in] nargs size_t Number of arguments present in ap.
  @param[in] ap va_list_t Variable argument list.
  @returns generic_t Generic structure if one is present.
 */
generic_t get_generic_va(size_t nargs, va_list_t ap);

/*!
  @brief Return the recovered generic structure if one is present in the variable argument list.
  @param[in] nargs size_t Number of arguments present in ap.
  @param[in] ap va_list_t Variable argument list.
  @returns generic_t* Generic structure if one is present, NULL otherwise.
 */
generic_t* get_generic_va_ptr(size_t nargs, va_list_t ap);

/*!
  @brief Return the recovered generic structure if one is present in the variable argument list by removing it.
  @param[in] nargs size_t* Pointer to number of arguments present in ap that will be decremented by 1.
  @param[in] ap va_list_t* Pointer to variable argument list.
  @returns generic_t Generic structure if one is present.
 */
generic_t pop_generic_va(size_t* nargs, va_list_t* ap);

/*!
  @brief Return the recovered generic structure if one is present in the variable argument list by removing it.
  @param[in] nargs size_t* Pointer to number of arguments present in ap that will be decremented by 1.
  @param[in] ap va_list_t* Pointer to variable argument list.
  @returns generic_t* Generic structure if one is present, NULL otherwise.
 */
generic_t* pop_generic_va_ptr(size_t* nargs, va_list_t* ap);

/*!
  @brief Add an element to the end of an array of generic elements.
  @param[in] arr generic_t Array to add element to.
  @param[in] x generic_t Element to add.
  @returns int Flag that is 1 if there is an error and 0 otherwise.
 */
int add_generic_array(generic_t arr, generic_t x);

/*!
  @brief Set an element in the array at a given index to a new value.
  @param[in] arr generic_t Array to add element to.
  @param[in] i size_t Index where element should be added.
  @param[in] x generic_t Element to add.
  @returns int Flag that is 1 if there is an error and 0 otherwise.
 */
int set_generic_array(generic_t arr, size_t i, generic_t x);

/*!
  @brief Get an element from an array.
  @param[in] arr generic_t Array to get element from.
  @param[in] i size_t Index of element to get.
  @param[out] x generic_t* Pointer to address where element should be stored.
  @returns int Flag that is 1 if there is an error and 0 otherwise.
 */
int get_generic_array(generic_t arr, size_t i, generic_t *x);

/*!
  @brief Set an element in the object for a given key to a new value.
  @param[in] arr generic_t Object to add element to.
  @param[in] k const char* Key where element should be added.
  @param[in] x generic_t Element to add.
  @returns int Flag that is 1 if there is an error and 0 otherwise.
 */
int set_generic_object(generic_t arr, const char* k, generic_t x);

/*!
  @brief Get an element from an object.
  @param[in] arr generic_t Object to get element from.
  @param[in] k const char* Key of element to return.
  @param[out] x generic_t* Pointer to address where element should be stored.
  @returns int Flag that is 1 if there is an error and 0 otherwise.
 */
int get_generic_object(generic_t arr, const char* k, generic_t *x);

/*!
  @brief Get the number of elements in an array object.
  @param[in] x generic_t Generic object that is presumed to contain an array.
  @returns size_t Number of elements in array.
 */
size_t generic_array_get_size(generic_t x);

/*!
  @brief Get an item from an array for types that don't require additional parameters.
  @param[in] x generic_t Generic object that is presumed to contain an array.
  @param[in] index size_t Index for value that should be returned.
  @param[in] type const char* Type of value expected.
  @returns void* Pointer to data for array item.
 */
void* generic_array_get_item(generic_t x, const size_t index, const char *type);
int generic_array_get_item_nbytes(generic_t x, const size_t index);
/* Typed convenience getters -- NOTE(review): presumably thin wrappers around
   generic_array_get_item with the matching type string; confirm in the
   implementation file. */
bool generic_array_get_bool(generic_t x, const size_t index);
int generic_array_get_integer(generic_t x, const size_t index);
void* generic_array_get_null(generic_t x, const size_t index);
double generic_array_get_number(generic_t x, const size_t index);
char* generic_array_get_string(generic_t x, const size_t index);
generic_t generic_array_get_object(generic_t x, const size_t index);
generic_t generic_array_get_array(generic_t x, const size_t index);
char* generic_array_get_direct(generic_t x, const size_t index);
ply_t generic_array_get_ply(generic_t x, const size_t index);
obj_t generic_array_get_obj(generic_t x, const size_t index);
python_t generic_array_get_python_class(generic_t x, const size_t index);
python_t generic_array_get_python_function(generic_t x, const size_t index);
schema_t generic_array_get_schema(generic_t x, const size_t index);
generic_t generic_array_get_any(generic_t x, const size_t index);

/*!
  @brief Get a scalar value from an array.
  @param[in] x generic_t Generic object that is presumed to contain an array.
  @param[in] index size_t Index for value that should be returned.
  @param[in] subtype const char* Subtype of scalar expected.
  @param[in] precision const int Precision of scalar that is expected.
  @returns void* Pointer to scalar data.
 */
void* generic_array_get_scalar(generic_t x, const size_t index, const char *subtype, const size_t precision);
/* Typed scalar getters for each supported subtype/precision. */
int8_t generic_array_get_int8(generic_t x, const size_t index);
int16_t generic_array_get_int16(generic_t x, const size_t index);
int32_t generic_array_get_int32(generic_t x, const size_t index);
int64_t generic_array_get_int64(generic_t x, const size_t index);
uint8_t generic_array_get_uint8(generic_t x, const size_t index);
uint16_t generic_array_get_uint16(generic_t x, const size_t index);
uint32_t generic_array_get_uint32(generic_t x, const size_t index);
uint64_t generic_array_get_uint64(generic_t x, const size_t index);
float generic_array_get_float(generic_t x, const size_t index);
double generic_array_get_double(generic_t x, const size_t index);
long double generic_array_get_long_double(generic_t x, const size_t index);
complex_float_t generic_array_get_complex_float(generic_t x, const size_t index);
complex_double_t generic_array_get_complex_double(generic_t x, const size_t index);
complex_long_double_t generic_array_get_complex_long_double(generic_t x, const size_t index);
char* generic_array_get_bytes(generic_t x, const size_t index);
char* generic_array_get_unicode(generic_t x, const size_t index);

/*!
  @brief Get a 1d array value from an array.
  @param[in] x generic_t Generic object that is presumed to contain an array.
  @param[in] index size_t Index for value that should be returned.
  @param[in] subtype const char* Subtype of array expected.
  @param[in] precision const size_t Precision of array that is expected.
  @param[out] data void** Pointer to pointer that should be reallocated to store the data.
  @returns size_t Number of elements in the data.
 */
size_t generic_array_get_1darray(generic_t x, const size_t index, const char *subtype, const size_t precision, void** data);
size_t generic_array_get_1darray_int8(generic_t x, const size_t index, int8_t** data);
size_t generic_array_get_1darray_int16(generic_t x, const size_t index, int16_t** data);
size_t generic_array_get_1darray_int32(generic_t x, const size_t index, int32_t** data);
size_t generic_array_get_1darray_int64(generic_t x, const size_t index, int64_t** data);
size_t generic_array_get_1darray_uint8(generic_t x, const size_t index, uint8_t** data);
size_t generic_array_get_1darray_uint16(generic_t x, const size_t index, uint16_t** data);
size_t generic_array_get_1darray_uint32(generic_t x, const size_t index, uint32_t** data);
size_t generic_array_get_1darray_uint64(generic_t x, const size_t index, uint64_t** data);
size_t generic_array_get_1darray_float(generic_t x, const size_t index, float** data);
size_t generic_array_get_1darray_double(generic_t x, const size_t index, double** data);
size_t generic_array_get_1darray_long_double(generic_t x, const size_t index, long double** data);
size_t generic_array_get_1darray_complex_float(generic_t x, const size_t index, complex_float_t** data);
size_t generic_array_get_1darray_complex_double(generic_t x, const size_t index, complex_double_t** data);
size_t generic_array_get_1darray_complex_long_double(generic_t x, const size_t index, complex_long_double_t** data);
size_t generic_array_get_1darray_bytes(generic_t x, const size_t index, char** data);
size_t generic_array_get_1darray_unicode(generic_t x, const size_t index, char** data);

/*!
  @brief Get a nd array value from an array.
  @param[in] x generic_t Generic object that is presumed to contain an array.
  @param[in] index size_t Index for value that should be returned.
  @param[in] subtype const char* Subtype of array expected.
  @param[in] precision const size_t Precision of array that is expected.
  @param[out] data void** Pointer to array that should be reallocated to store the data.
  @param[out] shape size_t** Pointer to array that should be reallocated to store the array shape in each dimension.
  @returns size_t Number of dimensions in the array.
 */
size_t generic_array_get_ndarray(generic_t x, const size_t index, const char *subtype, const size_t precision, void** data, size_t** shape);
size_t generic_array_get_ndarray_int8(generic_t x, const size_t index, int8_t** data, size_t** shape);
size_t generic_array_get_ndarray_int16(generic_t x, const size_t index, int16_t** data, size_t** shape);
size_t generic_array_get_ndarray_int32(generic_t x, const size_t index, int32_t** data, size_t** shape);
size_t generic_array_get_ndarray_int64(generic_t x, const size_t index, int64_t** data, size_t** shape);
size_t generic_array_get_ndarray_uint8(generic_t x, const size_t index, uint8_t** data, size_t** shape);
size_t generic_array_get_ndarray_uint16(generic_t x, const size_t index, uint16_t** data, size_t** shape);
size_t generic_array_get_ndarray_uint32(generic_t x, const size_t index, uint32_t** data, size_t** shape);
size_t generic_array_get_ndarray_uint64(generic_t x, const size_t index, uint64_t** data, size_t** shape);
size_t generic_array_get_ndarray_float(generic_t x, const size_t index, float** data, size_t** shape);
size_t generic_array_get_ndarray_double(generic_t x, const size_t index, double** data, size_t** shape);
size_t generic_array_get_ndarray_long_double(generic_t x, const size_t index, long double** data, size_t** shape);
size_t generic_array_get_ndarray_complex_float(generic_t x, const size_t index, complex_float_t** data, size_t** shape);
size_t generic_array_get_ndarray_complex_double(generic_t x, const size_t index, complex_double_t** data, size_t** shape);
size_t generic_array_get_ndarray_complex_long_double(generic_t x, const size_t index, complex_long_double_t** data, size_t** shape);
size_t generic_array_get_ndarray_bytes(generic_t x, const
size_t index, char** data, size_t** shape);
size_t generic_array_get_ndarray_unicode(generic_t x, const size_t index, char** data, size_t** shape);

/*!
  @brief Get the number of elements in an map object.
  @param[in] x generic_t Generic object that is presumed to contain a map.
  @returns size_t Number of elements in map.
 */
size_t generic_map_get_size(generic_t x);

/*!
  @brief Determine if a map object has a certain key.
  @param[in] x generic_t Generic object that is presumed to contain a map.
  @param[in] key char* Key to check for.
  @returns int 1 if the key is present, 0 otherwise.
 */
int generic_map_has_key(generic_t x, char* key);

/*!
  @brief Get the keys in a map object.
  @param[in] x generic_t Generic object that is presumed to contain a map.
  @param[out] keys char*** Pointer to memory where array of keys should be stored.
  @returns size_t Number of keys in map.
 */
size_t generic_map_get_keys(generic_t x, char*** keys);

/*!
  @brief Get an item from a map for types that don't require additional parameters.
  @param[in] x generic_t Generic object that is presumed to contain a map.
  @param[in] key const char* Key string for value that should be returned.
  @param[in] type const char* Type of value expected.
  @returns void* Pointer to data for map item.
 */
void* generic_map_get_item(generic_t x, const char* key, const char *type);
int generic_map_get_item_nbytes(generic_t x, const char* key);
/* Typed convenience getters -- map counterparts of the generic_array_get_*
   family; keyed by string instead of index. */
bool generic_map_get_bool(generic_t x, const char* key);
int generic_map_get_integer(generic_t x, const char* key);
void* generic_map_get_null(generic_t x, const char* key);
double generic_map_get_number(generic_t x, const char* key);
char* generic_map_get_string(generic_t x, const char* key);
generic_t generic_map_get_object(generic_t x, const char* key);
generic_t generic_map_get_array(generic_t x, const char* key);
char* generic_map_get_direct(generic_t x, const char* key);
ply_t generic_map_get_ply(generic_t x, const char* key);
obj_t generic_map_get_obj(generic_t x, const char* key);
python_t generic_map_get_python_class(generic_t x, const char* key);
python_t generic_map_get_python_function(generic_t x, const char* key);
schema_t generic_map_get_schema(generic_t x, const char* key);
generic_t generic_map_get_any(generic_t x, const char* key);

/*!
  @brief Get a scalar value from a map.
  @param[in] x generic_t Generic object that is presumed to contain a map.
  @param[in] key const char* Key string for value that should be returned.
  @param[in] subtype const char* Subtype of scalar expected.
  @param[in] precision const int Precision of scalar that is expected.
  @returns void* Pointer to scalar data.
 */
void* generic_map_get_scalar(generic_t x, const char* key, const char *subtype, const size_t precision);
int8_t generic_map_get_int8(generic_t x, const char* key);
int16_t generic_map_get_int16(generic_t x, const char* key);
int32_t generic_map_get_int32(generic_t x, const char* key);
int64_t generic_map_get_int64(generic_t x, const char* key);
uint8_t generic_map_get_uint8(generic_t x, const char* key);
uint16_t generic_map_get_uint16(generic_t x, const char* key);
uint32_t generic_map_get_uint32(generic_t x, const char* key);
uint64_t generic_map_get_uint64(generic_t x, const char* key);
float generic_map_get_float(generic_t x, const char* key);
double generic_map_get_double(generic_t x, const char* key);
long double generic_map_get_long_double(generic_t x, const char* key);
complex_float_t generic_map_get_complex_float(generic_t x, const char* key);
complex_double_t generic_map_get_complex_double(generic_t x, const char* key);
complex_long_double_t generic_map_get_complex_long_double(generic_t x, const char* key);
char* generic_map_get_bytes(generic_t x, const char* key);
char* generic_map_get_unicode(generic_t x, const char* key);

/*!
  @brief Get a 1d array value from a map.
  @param[in] x generic_t Generic object that is presumed to contain a map.
  @param[in] key const char* Key string for value that should be returned.
  @param[in] subtype const char* Subtype of array expected.
  @param[in] precision const size_t Precision of array that is expected.
  @param[out] data void** Pointer to pointer that should be reallocated to store the data.
  @returns size_t Number of elements in the data.
 */
size_t generic_map_get_1darray(generic_t x, const char* key, const char *subtype, const size_t precision, void** data);
size_t generic_map_get_1darray_int8(generic_t x, const char* key, int8_t** data);
size_t generic_map_get_1darray_int16(generic_t x, const char* key, int16_t** data);
size_t generic_map_get_1darray_int32(generic_t x, const char* key, int32_t** data);
size_t generic_map_get_1darray_int64(generic_t x, const char* key, int64_t** data);
size_t generic_map_get_1darray_uint8(generic_t x, const char* key, uint8_t** data);
size_t generic_map_get_1darray_uint16(generic_t x, const char* key, uint16_t** data);
size_t generic_map_get_1darray_uint32(generic_t x, const char* key, uint32_t** data);
size_t generic_map_get_1darray_uint64(generic_t x, const char* key, uint64_t** data);
size_t generic_map_get_1darray_float(generic_t x, const char* key, float** data);
size_t generic_map_get_1darray_double(generic_t x, const char* key, double** data);
size_t generic_map_get_1darray_long_double(generic_t x, const char* key, long double** data);
size_t generic_map_get_1darray_complex_float(generic_t x, const char* key, complex_float_t** data);
size_t generic_map_get_1darray_complex_double(generic_t x, const char* key, complex_double_t** data);
size_t generic_map_get_1darray_complex_long_double(generic_t x, const char* key, complex_long_double_t** data);
size_t generic_map_get_1darray_bytes(generic_t x, const char* key, char** data);
size_t generic_map_get_1darray_unicode(generic_t x, const char* key, char** data);

/*!
  @brief Get a nd array value from a map.
  @param[in] x generic_t Generic object that is presumed to contain a map.
  @param[in] key const char* Key string for value that should be returned.
  @param[in] subtype const char* Subtype of array expected.
  @param[in] precision const size_t Precision of array that is expected.
  @param[out] data void** Pointer to array that should be reallocated to store the data.
  @param[out] shape size_t** Pointer to array that should be reallocated to store the array shape in each dimension.
  @returns size_t Number of dimensions in the array.
 */
size_t generic_map_get_ndarray(generic_t x, const char* key, const char *subtype, const size_t precision, void** data, size_t** shape);
size_t generic_map_get_ndarray_int8(generic_t x, const char* key, int8_t** data, size_t** shape);
size_t generic_map_get_ndarray_int16(generic_t x, const char* key, int16_t** data, size_t** shape);
size_t generic_map_get_ndarray_int32(generic_t x, const char* key, int32_t** data, size_t** shape);
size_t generic_map_get_ndarray_int64(generic_t x, const char* key, int64_t** data, size_t** shape);
size_t generic_map_get_ndarray_uint8(generic_t x, const char* key, uint8_t** data, size_t** shape);
size_t generic_map_get_ndarray_uint16(generic_t x, const char* key, uint16_t** data, size_t** shape);
size_t generic_map_get_ndarray_uint32(generic_t x, const char* key, uint32_t** data, size_t** shape);
size_t generic_map_get_ndarray_uint64(generic_t x, const char* key, uint64_t** data, size_t** shape);
size_t generic_map_get_ndarray_float(generic_t x, const char* key, float** data, size_t** shape);
size_t generic_map_get_ndarray_double(generic_t x, const char* key, double** data, size_t** shape);
size_t generic_map_get_ndarray_long_double(generic_t x, const char* key, long double** data, size_t** shape);
size_t generic_map_get_ndarray_complex_float(generic_t x, const char* key, complex_float_t** data, size_t** shape);
size_t generic_map_get_ndarray_complex_double(generic_t x, const char* key, complex_double_t** data, size_t** shape);
size_t generic_map_get_ndarray_complex_long_double(generic_t x, const char* key, complex_long_double_t** data, size_t** shape);
size_t generic_map_get_ndarray_bytes(generic_t x, const char* key, char** data, size_t** shape);
size_t generic_map_get_ndarray_unicode(generic_t x, const char* key, char** data, size_t** shape);

/*!
  @brief Set an item in an array for types that don't require additional parameters.
  @param[in] x generic_t Generic object that is presumed to contain an array.
  @param[in] index size_t Index for value that should be set.
  @param[in] type const char* Type of value being set.
  @param[in] value void* Pointer to data that item should be set to.
  @returns int -1 if there is an error, 0 otherwise.
 */
int generic_array_set_item(generic_t x, const size_t index, const char *type, void* value);
/* Typed setter counterparts of the generic_array_get_* family. */
int generic_array_set_bool(generic_t x, const size_t index, bool value);
int generic_array_set_integer(generic_t x, const size_t index, int value);
int generic_array_set_null(generic_t x, const size_t index, void* value);
int generic_array_set_number(generic_t x, const size_t index, double value);
int generic_array_set_string(generic_t x, const size_t index, char* value);
int generic_array_set_object(generic_t x, const size_t index, generic_t value);
int generic_array_set_map(generic_t x, const size_t index, generic_t value);
int generic_array_set_array(generic_t x, const size_t index, generic_t value);
int generic_array_set_direct(generic_t x, const size_t index, char* value);
int generic_array_set_ply(generic_t x, const size_t index, ply_t value);
int generic_array_set_obj(generic_t x, const size_t index, obj_t value);
int generic_array_set_python_class(generic_t x, const size_t index, python_t value);
int generic_array_set_python_function(generic_t x, const size_t index, python_t value);
int generic_array_set_schema(generic_t x, const size_t index, schema_t value);
int generic_array_set_any(generic_t x, const size_t index, generic_t value);

/*!
  @brief Set a scalar value in an array.
  @param[in] x generic_t Generic object that is presumed to contain an array.
  @param[in] index size_t Index for value that should be set.
  @param[in] value void* Pointer to scalar data.
  @param[in] subtype const char* Subtype of scalar in value.
  @param[in] precision const int Precision of scalar in value.
@param[in] units const char* Units of value. @returns int -1 if there is an error, 0 otherwise. */ int generic_array_set_scalar(generic_t x, const size_t index, void* value, const char *subtype, const size_t precision, const char* units); int generic_array_set_int8(generic_t x, const size_t index, int8_t value, const char* units); int generic_array_set_int16(generic_t x, const size_t index, int16_t value, const char* units); int generic_array_set_int32(generic_t x, const size_t index, int32_t value, const char* units); int generic_array_set_int64(generic_t x, const size_t index, int64_t value, const char* units); int generic_array_set_uint8(generic_t x, const size_t index, uint8_t value, const char* units); int generic_array_set_uint16(generic_t x, const size_t index, uint16_t value, const char* units); int generic_array_set_uint32(generic_t x, const size_t index, uint32_t value, const char* units); int generic_array_set_uint64(generic_t x, const size_t index, uint64_t value, const char* units); int generic_array_set_float(generic_t x, const size_t index, float value, const char* units); int generic_array_set_double(generic_t x, const size_t index, double value, const char* units); int generic_array_set_long_double(generic_t x, const size_t index, long double value, const char* units); int generic_array_set_complex_float(generic_t x, const size_t index, complex_float_t value, const char* units); int generic_array_set_complex_double(generic_t x, const size_t index, complex_double_t value, const char* units); int generic_array_set_complex_long_double(generic_t x, const size_t index, complex_long_double_t value, const char* units); int generic_array_set_bytes(generic_t x, const size_t index, char* value, const char* units); int generic_array_set_unicode(generic_t x, const size_t index, char* value, const char* units); /*! @brief Set a 1d array value in an array. @param[in] x generic_t Generic object that is presumed to contain an array. 
@param[in] index size_t Index for value that should be set. @param[in] value void* Pointer to array data. @param[in] subtype const char* Subtype of array expected. @param[in] precision const size_t Precision of array that is expected. @param[in] length const size_t Number of elements in value. @param[in] units const char* Units of value. @returns int -1 if there is an error, 0 otherwise. */ int generic_array_set_1darray(generic_t x, const size_t index, void* value, const char *subtype, const size_t precision, const size_t length, const char* units); int generic_array_set_1darray_int8(generic_t x, const size_t index, int8_t* value, const size_t length, const char* units); int generic_array_set_1darray_int16(generic_t x, const size_t index, int16_t* value, const size_t length, const char* units); int generic_array_set_1darray_int32(generic_t x, const size_t index, int32_t* value, const size_t length, const char* units); int generic_array_set_1darray_int64(generic_t x, const size_t index, int64_t* value, const size_t length, const char* units); int generic_array_set_1darray_uint8(generic_t x, const size_t index, uint8_t* value, const size_t length, const char* units); int generic_array_set_1darray_uint16(generic_t x, const size_t index, uint16_t* value, const size_t length, const char* units); int generic_array_set_1darray_uint32(generic_t x, const size_t index, uint32_t* value, const size_t length, const char* units); int generic_array_set_1darray_uint64(generic_t x, const size_t index, uint64_t* value, const size_t length, const char* units); int generic_array_set_1darray_float(generic_t x, const size_t index, float* value, const size_t length, const char* units); int generic_array_set_1darray_double(generic_t x, const size_t index, double* value, const size_t length, const char* units); int generic_array_set_1darray_long_double(generic_t x, const size_t index, long double* value, const size_t length, const char* units); int 
generic_array_set_1darray_complex_float(generic_t x, const size_t index, complex_float_t* value, const size_t length, const char* units); int generic_array_set_1darray_complex_double(generic_t x, const size_t index, complex_double_t* value, const size_t length, const char* units); int generic_array_set_1darray_complex_long_double(generic_t x, const size_t index, complex_long_double_t* value, const size_t length, const char* units); int generic_array_set_1darray_bytes(generic_t x, const size_t index, char** value, const size_t length, const char* units); int generic_array_set_1darray_unicode(generic_t x, const size_t index, char** value, const size_t length, const char* units); /*! @brief Set a nd array value from an array. @param[in] x generic_t Generic object that is presumed to contain an array. @param[in] index size_t Index for value that should be set. @param[in] value void* Pointer to array data. @param[in] subtype const char* Subtype of array in value. @param[in] precision const size_t Precision of array that is in value. @param[in] ndim size_t Number of dimensions in the array. @param[in] shape size_t* Pointer to array containing the size of the array in each dimension. @returns int -1 if there is an error, 0 otherwise. 
*/ int generic_array_set_ndarray(generic_t x, const size_t index, void* data, const char *subtype, const size_t precision, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_int8(generic_t x, const size_t index, int8_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_int16(generic_t x, const size_t index, int16_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_int32(generic_t x, const size_t index, int32_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_int64(generic_t x, const size_t index, int64_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_uint8(generic_t x, const size_t index, uint8_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_uint16(generic_t x, const size_t index, uint16_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_uint32(generic_t x, const size_t index, uint32_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_uint64(generic_t x, const size_t index, uint64_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_float(generic_t x, const size_t index, float* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_double(generic_t x, const size_t index, double* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_long_double(generic_t x, const size_t index, long double* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_complex_float(generic_t x, const size_t index, complex_float_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_complex_double(generic_t x, 
const size_t index, complex_double_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_complex_long_double(generic_t x, const size_t index, complex_long_double_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_bytes(generic_t x, const size_t index, char** data, const size_t ndim, const size_t* shape, const char* units); int generic_array_set_ndarray_unicode(generic_t x, const size_t index, char** data, const size_t ndim, const size_t* shape, const char* units); /*! @brief Set an item from a map for types that don't require additional parameters. @param[in] x generic_t Generic object that is presumed to contain a map. @param[in] key const char* Key string for value that should be set. @param[in] type const char* Type of value being set. @param[in] value void* Pointer to data that item should be set to. @returns int -1 if there is an error, 0 otherwise. */ int generic_map_set_item(generic_t x, const char* key, const char* type, void* value); int generic_map_set_bool(generic_t x, const char* key, bool value); int generic_map_set_integer(generic_t x, const char* key, int value); int generic_map_set_null(generic_t x, const char* key, void* value); int generic_map_set_number(generic_t x, const char* key, double value); int generic_map_set_string(generic_t x, const char* key, char* value); int generic_map_set_object(generic_t x, const char* key, generic_t value); int generic_map_set_map(generic_t x, const char* key, generic_t value); int generic_map_set_array(generic_t x, const char* key, generic_t value); int generic_map_set_direct(generic_t x, const char* key, char* value); int generic_map_set_ply(generic_t x, const char* key, ply_t value); int generic_map_set_obj(generic_t x, const char* key, obj_t value); int generic_map_set_python_class(generic_t x, const char* key, python_t value); int generic_map_set_python_function(generic_t x, const char* key, python_t value); int 
generic_map_set_schema(generic_t x, const char* key, schema_t value); int generic_map_set_any(generic_t x, const char* key, generic_t value); /*! @brief Set a scalar value in a map. @param[in] x generic_t Generic object that is presumed to contain a map. @param[in] key const char* Key string for value that should be set. @param[in] value void* Pointer to scalar data. @param[in] subtype const char* Subtype of scalar in value. @param[in] precision const int Precision of scalar in value. @param[in] units const char* Units of value. @returns int -1 if there is an error, 0 otherwise. */ int generic_map_set_scalar(generic_t x, const char* key, void* value, const char *subtype, const size_t precision, const char* units); int generic_map_set_int8(generic_t x, const char* key, int8_t value, const char* units); int generic_map_set_int16(generic_t x, const char* key, int16_t value, const char* units); int generic_map_set_int32(generic_t x, const char* key, int32_t value, const char* units); int generic_map_set_int64(generic_t x, const char* key, int64_t value, const char* units); int generic_map_set_uint8(generic_t x, const char* key, uint8_t value, const char* units); int generic_map_set_uint16(generic_t x, const char* key, uint16_t value, const char* units); int generic_map_set_uint32(generic_t x, const char* key, uint32_t value, const char* units); int generic_map_set_uint64(generic_t x, const char* key, uint64_t value, const char* units); int generic_map_set_float(generic_t x, const char* key, float value, const char* units); int generic_map_set_double(generic_t x, const char* key, double value, const char* units); int generic_map_set_long_double(generic_t x, const char* key, long double value, const char* units); int generic_map_set_complex_float(generic_t x, const char* key, complex_float_t value, const char* units); int generic_map_set_complex_double(generic_t x, const char* key, complex_double_t value, const char* units); int 
generic_map_set_complex_long_double(generic_t x, const char* key, complex_long_double_t value, const char* units); int generic_map_set_bytes(generic_t x, const char* key, char* value, const char* units); int generic_map_set_unicode(generic_t x, const char* key, char* value, const char* units); /*! @brief Set a 1d array value in a map. @param[in] x generic_t Generic object that is presumed to contain a map. @param[in] key const char* Key string for value that should be set. @param[in] value void* Pointer to array data. @param[in] subtype const char* Subtype of array expected. @param[in] precision const size_t Precision of array that is expected. @param[in] length const size_t Number of elements in value. @param[in] units const char* Units of value. @returns int -1 if there is an error, 0 otherwise. */ int generic_map_set_1darray(generic_t x, const char* key, void* value, const char *subtype, const size_t precision, const size_t length, const char* units); int generic_map_set_1darray_int8(generic_t x, const char* key, int8_t* value, const size_t length, const char* units); int generic_map_set_1darray_int16(generic_t x, const char* key, int16_t* value, const size_t length, const char* units); int generic_map_set_1darray_int32(generic_t x, const char* key, int32_t* value, const size_t length, const char* units); int generic_map_set_1darray_int64(generic_t x, const char* key, int64_t* value, const size_t length, const char* units); int generic_map_set_1darray_uint8(generic_t x, const char* key, uint8_t* value, const size_t length, const char* units); int generic_map_set_1darray_uint16(generic_t x, const char* key, uint16_t* value, const size_t length, const char* units); int generic_map_set_1darray_uint32(generic_t x, const char* key, uint32_t* value, const size_t length, const char* units); int generic_map_set_1darray_uint64(generic_t x, const char* key, uint64_t* value, const size_t length, const char* units); int generic_map_set_1darray_float(generic_t x, const char* 
key, float* value, const size_t length, const char* units); int generic_map_set_1darray_double(generic_t x, const char* key, double* value, const size_t length, const char* units); int generic_map_set_1darray_long_double(generic_t x, const char* key, long double* value, const size_t length, const char* units); int generic_map_set_1darray_complex_float(generic_t x, const char* key, complex_float_t* value, const size_t length, const char* units); int generic_map_set_1darray_complex_double(generic_t x, const char* key, complex_double_t* value, const size_t length, const char* units); int generic_map_set_1darray_complex_long_double(generic_t x, const char* key, complex_long_double_t* value, const size_t length, const char* units); int generic_map_set_1darray_bytes(generic_t x, const char* key, char** value, const size_t length, const char* units); int generic_map_set_1darray_unicode(generic_t x, const char* key, char** value, const size_t length, const char* units); /*! @brief Set a nd array value in a map. @param[in] x generic_t Generic object that is presumed to contain a map. @param[in] key const char* Key string for value that should be set. @param[in] value void* Pointer to array data. @param[in] subtype const char* Subtype of array in value. @param[in] precision const size_t Precision of array that is in value. @param[in] ndim size_t Number of dimensions in the array. @param[in] shape size_t* Pointer to array containing the size of the array in each dimension. @returns int -1 if there is an error, 0 otherwise. 
*/ int generic_map_set_ndarray(generic_t x, const char* key, void* data, const char *subtype, const size_t precision, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_int8(generic_t x, const char* key, int8_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_int16(generic_t x, const char* key, int16_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_int32(generic_t x, const char* key, int32_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_int64(generic_t x, const char* key, int64_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_uint8(generic_t x, const char* key, uint8_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_uint16(generic_t x, const char* key, uint16_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_uint32(generic_t x, const char* key, uint32_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_uint64(generic_t x, const char* key, uint64_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_float(generic_t x, const char* key, float* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_double(generic_t x, const char* key, double* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_long_double(generic_t x, const char* key, long double* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_complex_float(generic_t x, const char* key, complex_float_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_complex_double(generic_t x, const char* key, complex_double_t* data, const size_t ndim, const 
size_t* shape, const char* units); int generic_map_set_ndarray_complex_long_double(generic_t x, const char* key, complex_long_double_t* data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_bytes(generic_t x, const char* key, char** data, const size_t ndim, const size_t* shape, const char* units); int generic_map_set_ndarray_unicode(generic_t x, const char* key, char** data, const size_t ndim, const size_t* shape, const char* units); /*! @brief Destroy a structure containing a Python object. @param[in] x python_t* Pointer to Python object structure that should be freed. */ void destroy_python(python_t *x); /*! @brief Copy a Python object structure (NOTE: this doesn't copy the underlying Python object but does increment the reference count). @param[in] x python_t Structure containing Python object to copy. @returns python_t Copy of x. */ python_t copy_python(python_t x); /*! @brief Display a Python object structure. @param[in] x python_t Structure containing Python object to display. */ void display_python(python_t x); /*! @brief Destroy a structure containing a Python function object. @param[in] x python_function_t* Pointer to Python function structure that should be freed. */ void destroy_python_function(python_function_t *x); /*! @brief Skip datatype arguments. @param[in] dtype dtype_t* Type structure to skip arguments for. @param[in, out] nargs Pointer to number of arguments in ap. @param[in, out] ap va_list_t Variable argument list. @returns int 0 if there are no errors, 1 otherwise. */ int skip_va_elements(const dtype_t* dtype, size_t *nargs, va_list_t *ap); /*! @brief Determine if a datatype is empty. @param[in] dtype dtype_t* Type structure to test. @returns int 1 if dtype is empty, 0 otherwise. */ int is_empty_dtype(const dtype_t* dtype); /*! @brief Get the name of the type from the class. @param[in] type_class dtype_t* Type structure/class. @returns const char* Type name.
*/ const char* dtype_name(const dtype_t* type_class); /*! @brief Get the subtype of the type. @param[in] type_class dtype_t* Type structure/class. @returns const char* The subtype of the class, "" if there is an error. */ const char* dtype_subtype(const dtype_t* type_class); /*! @brief Get the precision of the type. @param[in] type_class dtype_t* Type structure/class. @returns const size_t The precision of the class, 0 if there is an error. */ const size_t dtype_precision(const dtype_t* type_class); /*! @brief Initialize a datatype structure including setting the type string. @param[in] dtype dtype_t* Type structure/class. @returns dtype_t* Initialized type structure/class. */ dtype_t* complete_dtype(dtype_t *dtype, const bool use_generic); /*! @brief Construct and empty type object. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_empty(const bool use_generic); /*! @brief Create a datatype based on a JSON document. @param type_doc void* Pointer to const rapidjson::Value type doc. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_doc(void* type_doc, const bool use_generic); /*! @brief Create a datatype based on a Python dictionary. @param[in] pyobj PyObject* Python dictionary. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_python(PyObject* pyobj, const bool use_generic); /*! @brief Construct a Direct type object. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_direct(const bool use_generic); /*! @brief Construct a type object for one of the default JSON types. 
@param[in] type char* Name of the type. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_default(const char* type, const bool use_generic); /*! @brief Construct a Scalar type object. @param[in] subtype char* Name of the scalar subtype (e.g. int, uint, float, bytes). @param[in] precision size_t Precision of the scalar in bits. @param[in] units char* Units for scalar. (e.g. "cm", "g", "" for unitless) @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_scalar(const char* subtype, const size_t precision, const char* units, const bool use_generic); /*! @brief Construct a 1D array type object. @param[in] subtype char* Name of the array subtype (e.g. int, uint, float, bytes). @param[in] precision size_t Precision of the array in bits. @param[in] length size_t Number of elements in the array. @param[in] units char* Units for array elements. (e.g. "cm", "g", "" for unitless) @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_1darray(const char* subtype, const size_t precision, const size_t length, const char* units, const bool use_generic); /*! @brief Construct a ND array type object. @param[in] subtype char* Name of the array subtype (e.g. int, uint, float, bytes). @param[in] precision size_t Precision of the array in bits. @param[in] ndim size_t Number of dimensions in the array (and therefore also the number of elements in shape). @param[in] shape size_t* Pointer to array where each element is the size of the array in that dimension. @param[in] units char* Units for array elements. (e.g. 
"cm", "g", "" for unitless) @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_ndarray(const char* subtype, const size_t precision, const size_t ndim, const size_t* shape, const char* units, const bool use_generic); /*! @brief Construct a ND array type object. @param[in] subtype char* Name of the array subtype (e.g. int, uint, float, bytes). @param[in] precision size_t Precision of the array in bits. @param[in] ndim size_t Number of dimensions in the array (and therefore also the number of elements in shape). @param[in] shape[] size_t Array where each element is the size of the array in that dimension. @param[in] units char* Units for array elements. (e.g. "cm", "g", "" for unitless) @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_ndarray_arr(const char* subtype, const size_t precision, const size_t ndim, const int64_t shape[], const char* units, const bool use_generic); /*! @brief Construct a JSON array type object. @param[in] nitems size_t Number of types in items. @param[in] items dtype_t** Pointer to array of types describing the array elements. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_json_array(const size_t nitems, dtype_t** items, const bool use_generic); /*! @brief Construct a JSON object type object. @param[in] nitems size_t Number of keys/types in keys and values. @param[in] keys char** Pointer to array of keys for each type. @param[in] values dtype_t** Pointer to array of types describing the values for each key. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. 
*/ dtype_t* create_dtype_json_object(const size_t nitems, char** keys, dtype_t** values, const bool use_generic); /*! @brief Construct a Ply type object. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_ply(const bool use_generic); /*! @brief Construct a Obj type object. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_obj(const bool use_generic); /*! @brief Construct an AsciiTable type object. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_ascii_table(const char *format_str, const int as_array, const bool use_generic); /*! @brief Construct a type object based on the provided format string. @param[in] format_str const char* C-style format string that will be used to determine the type of elements in arrays that will be serialized/deserialized using the resulting type. @param[in] as_array int If 1, the types will be arrays. Otherwise they will be scalars. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_format(const char *format_str, const int as_array, const bool use_generic); /*! @brief Construct a type object for Python objects. @param[in] type char* Type string. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_pyobj(const char* type, const bool use_generic); /*! @brief Construct a type object for Python object instances. @param[in] class_name char* Python class name. 
@param[in] args_dtype dtype_t* Datatype describing the arguments creating the instance. @param[in] kwargs_dtype dtype_t* Datatype describing the keyword arguments creating the instance. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_pyinst(const char* class_name, const dtype_t* args_dtype, const dtype_t* kwargs_dtype, const bool use_generic); /*! @brief Construct a type object for a schema. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_schema(const bool use_generic); /*! @brief Construct a type object for receiving any type. @param[in] use_generic bool If true, serialized/deserialized objects will be expected to be YggGeneric classes. @returns dtype_t* Type structure/class. */ dtype_t* create_dtype_any(const bool use_generic); /*! @brief Wrapper for freeing MetaschemaType class wrapper struct. @param[in] dtype dtype_t** Wrapper struct for C++ Metaschema type class. @returns: int 0 if free was successfull, -1 if there was an error. */ int destroy_dtype(dtype_t** dtype); /*! @brief Initialize a header struct. @param[in] size size_t Size of message to be sent. @param[in] address char* Address that should be used for remainder of message following this header if it is a multipart message. @param[in] id char* Message ID. @returns comm_head_t Structure with provided information, char arrays correctly initialized to empty strings if NULLs provided. 
*/ static inline comm_head_t init_header(const size_t size, const char *address, const char *id) { comm_head_t out; // Parameters set during read out.bodysiz = 0; out.bodybeg = 0; out.flags = HEAD_FLAG_VALID; out.nargs_populated = 0; // Parameters sent in header out.size = size; if (address == NULL) out.address[0] = '\0'; else strncpy(out.address, address, COMMBUFFSIZ); if (id == NULL) out.id[0] = '\0'; else strncpy(out.id, id, COMMBUFFSIZ); out.response_address[0] = '\0'; out.request_id[0] = '\0'; out.zmq_reply[0] = '\0'; out.zmq_reply_worker[0] = '\0'; out.model[0] = '\0'; // Parameters that will be removed out.serializer_type = -1; out.format_str[0] = '\0'; // Parameters used for type out.dtype = NULL; return out; }; /*! @brief Destroy a header object. @param[in] x comm_head_t* Pointer to the header that should be destroyed. @returns int 0 if successful, -1 otherwise. */ static inline int destroy_header(comm_head_t* x) { int ret = 0; if (x->dtype != NULL) { ret = destroy_dtype(&(x->dtype)); } return ret; }; /*! @brief Split header and body of message. @param[in] buf const char* Message that should be split. @param[in] buf_siz size_t Size of buf. @param[out] head const char** pointer to buffer where the extracted header should be stored. @param[out] headsiz size_t reference to memory where size of extracted header should be stored. @returns: int 0 if split is successful, -1 if there was an error. 
*/
static inline
int split_head_body(const char *buf, const size_t buf_siz,
                    char **head, size_t *headsiz) {
  // Split buffer into head and body
  int ret;
  size_t sind, eind, sind_head, eind_head;
  sind = 0;
  eind = 0;
#ifdef _WIN32
  // Windows regex of newline is buggy
  // Locate the two MSG_HEAD_SEP delimiters one at a time instead of using a
  // single "SEP(.*)SEP" pattern.
  UNUSED(buf_siz);
  size_t sind1, eind1, sind2, eind2;
  char re_head_tag[COMMBUFFSIZ];
  sprintf(re_head_tag, "(%s)", MSG_HEAD_SEP);
  ret = find_match(re_head_tag, buf, &sind1, &eind1);
  if (ret > 0) {
    sind = sind1;
    // Offsets from the second search are relative to buf + eind1.
    ret = find_match(re_head_tag, buf + eind1, &sind2, &eind2);
    if (ret > 0)
      eind = eind1 + eind2;
  }
#else
  // Extract just header
  // One regex "SEP(.*)SEP"; sind/eind span both delimiters inclusive.
  char re_head[COMMBUFFSIZ] = MSG_HEAD_SEP;
  strcat(re_head, "(.*)");
  strcat(re_head, MSG_HEAD_SEP);
  // strcat(re_head, ".*");
  ret = find_match(re_head, buf, &sind, &eind);
#endif
  if (ret < 0) {
    ygglog_error("split_head_body: Could not find header in '%.1000s'", buf);
    return -1;
  } else if (ret == 0) {
    // No delimited header present: report an empty header rather than failing.
    ygglog_debug("split_head_body: No header in '%.1000s...'", buf);
    sind_head = 0;
    eind_head = 0;
  } else {
    // Strip the delimiters themselves from the reported header span.
    sind_head = sind + strlen(MSG_HEAD_SEP);
    eind_head = eind - strlen(MSG_HEAD_SEP);
  }
  headsiz[0] = (eind_head - sind_head);
  // Grow (or allocate, when *head is NULL) the caller's buffer to hold the
  // header plus a terminating NUL.
  char* temp = (char*)realloc(*head, *headsiz + 1);
  if (temp == NULL) {
    ygglog_error("split_head_body: Failed to reallocate header.");
    return -1;
  }
  *head = temp;
  memcpy(*head, buf + sind_head, *headsiz);
  (*head)[*headsiz] = '\0';
  return 0;
};

/*!
  @brief Format header to a string.
  @param[in] head comm_head_t* Pointer to header to be formatted.
  @param[out] buf char ** Pointer to buffer where header should be written.
  @param[in] buf_siz size_t Size of buf.
  @param[in] max_header_size size_t Maximum size that header can occupy before
  the type should be moved to the data portion of the message.
  @param[in] no_type int If 1, type information will not be added to the
  header. If 0, it will be.
  @returns: int Size of header written.
*/
int format_comm_header(comm_head_t *head, char **buf, size_t buf_siz,
                       const size_t max_header_size, const int no_type);

/*!
@brief Extract type from data and update the header.
@brief Wrapper for updating the precision of a bytes or unicode scalar type. @param[in] dtype dtype_t* Wrapper struct for C++ Metaschema type class. @param[in] new_precision size_t New precision. @returns: int 0 if free was successfull, -1 if there was an error. */ int update_precision_dtype(const dtype_t* dtype, const size_t new_precision); /*! @brief Wrapper for deserializing from a data type. @param[in] dtype dtype_t* Wrapper struct for C++ Metaschema type class. @param[in] buf character pointer to serialized message. @param[in] buf_siz size_t Size of buf. @param[in] allow_realloc int If 1, variables being filled are assumed to be pointers to pointers for heap memory. If 0, variables are assumed to be pointers to stack memory. If allow_realloc is set to 1, but stack variables are passed, a segfault can occur. @param[in, out] nargs int Number of arguments remaining in argument list. @param[in] ap va_list Arguments to be parsed from message. returns: int The number of populated arguments. -1 indicates an error. */ int deserialize_dtype(const dtype_t *dtype, const char *buf, const size_t buf_siz, const int allow_realloc, size_t *nargs, va_list_t ap); /*! @brief Wrapper for serializing from a data type. @param[in] dtype dtype_t* Wrapper struct for C++ Metaschema type class. @param[in] buf character pointer to pointer to memory where serialized message should be stored. @param[in] buf_siz size_t Size of memory allocated to buf. @param[in] allow_realloc int If 1, buf will be realloced if it is not big enough to hold the serialized emssage. If 0, an error will be returned. @param[in, out] nargs int Number of arguments remaining in argument list. @param[in] ap va_list Arguments to be formatted. returns: int The length of the serialized message or -1 if there is an error. */ int serialize_dtype(const dtype_t *dtype, char **buf, size_t *buf_siz, const int allow_realloc, size_t *nargs, va_list_t ap); /*! @brief Wrapper for displaying a data type. 
@param[in] dtype dtype_t* Wrapper struct for C++ Metaschema type class. @param[in] indent char* Indentation to add to display output. */ void display_dtype(const dtype_t *dtype, const char* indent); /*! @brief Wrapper for determining how many arguments a data type expects. @param[in] dtype dtype_t* Wrapper struct for C++ Metaschema type class. */ size_t nargs_exp_dtype(const dtype_t *dtype); #define free_generic destroy_generic #define init_json_object init_generic #define init_json_array init_generic #define init_schema init_generic #define free_json_object free_generic #define free_json_array free_generic #define free_schema free_generic #define copy_json_object copy_generic #define copy_json_array copy_generic #define copy_schema copy_generic #define display_json_object display_generic #define display_json_array display_generic #define display_schema display_generic #ifdef __cplusplus /* If this is a C++ compiler, end C linkage */ } #endif #endif /*DATATYPES_H_*/
minibatch.c
#include <stdlib.h> #include <stdio.h> #include <math.h> #include <time.h> #include <omp.h> #include "minibatch.h" #include "generate_data.h" #include "distance.h" #define EARLY_TERM_WINDOW (10) void save_double_matrix(double *data, char *filename, int N, int D) { FILE *fd = fopen(filename, "w+"); for(int i=0; i<N; i++) { for(int j=0; j<D; j++) { fprintf(fd, "%f\t", data[i*D + j]); } fprintf(fd, "\n"); } fclose(fd); } void save_int_matrix(int *data, char *filename, int N, int D) { FILE *fd = fopen(filename, "w+"); for(int i=0; i<N; i++) { for(int j=0; j<D; j++) { fprintf(fd, "%d\t", data[i*D + j]); } fprintf(fd, "\n"); } fclose(fd); } /* * Moves the given centroid closer to the given vector with a learning rate * proportional to the number of vectors already in the centroid */ void gradient_step(double *vector, double *centroid, int count, int D) { double eta = 1.0 / count; double eta_compliment = 1.0 - eta; for(int i=0; i<D; i++) { centroid[i] = eta_compliment * centroid[i] + eta * vector[i]; } } /* * Calculate the variance of the model given the current centroids */ double model_variance(double *data, double *centroids, int k, int N, int D) { double variance_distance = 0.0; for(int i=0; i<N; i++) { int c = closest_centroid(data + i*D, centroids, k, D); variance_distance += distance_metric(data + i*D, centroids + c*D, D); } double variance = variance_distance / (double)(N - k); if (variance == 0) { variance = nextafter(0, 1); } return variance; } /* * Calculates the bayesian information criterion for clustered data which * represents how good a model the centroids represents. 
*/ double bayesian_information_criterion(double *data, double *centroids, int k, int N, int D) { /* Calculate the variance of the model and the centroid counts */ int *centroid_count = (int*) malloc(k * sizeof(int)); for(int c=0; c<k; c++) { centroid_count[c] = 0; } double variance_distance = 0.0; for(int i=0; i<N; i++) { int c = closest_centroid(data + i*D, centroids, k, D); centroid_count[c] += 1; variance_distance += distance_metric(data + i*D, centroids + c*D, D); } double variance = variance_distance / (double)(N - k); if (variance == 0) { variance = nextafter(0, 1); } /* Calculate the log likelihood */ double log_likelihood = 0.0; double t1, t2, t3, t4; double ccount; for(int c=0; c<k; c++) { ccount = (double) centroid_count[c]; if (ccount == 0) { ccount = nextafter(0, 1); } t1 = ccount * log(ccount); t2 = ccount * log(N); t3 = (ccount * D) / 2.0 + log(2.0 * PI) * variance; t4 = (ccount - 1.0) / 2.0; log_likelihood += t1 - t2 - t3 - t4; } /* calculate the BIC with the number of free parameters = k * (D + 1) */ double bic = log_likelihood - k * (D + 1) * 2.0 * log(N); free(centroid_count); return bic; } /* * Runs multiple kmeanspp (as given by n_runs) and returns the centroids * that have the best variance */ void kmeanspp_multi(double *data, double *centroids, int n_samples, int n_runs, int n_jobs, int k, int N, int D) { double *all_centroids; double *all_variances = (double*) malloc(n_jobs * sizeof(double)); if (n_jobs > 1) { all_centroids = (double*) malloc(k * D * n_jobs * sizeof(double)); } else { all_centroids = centroids; } #pragma omp parallel shared(all_centroids, all_variances, data, distance_metric) num_threads(n_jobs) { int id = omp_get_thread_num(); double minimum_variance, cur_variance; double *current_centroid = (double*) malloc(k * D * sizeof(double)); int local_iter = 0; #pragma omp for for(int i=0; i<n_runs; i++) { kmeanspp(data, current_centroid, n_samples, k, N, D); cur_variance = model_variance(data, current_centroid, k, N, D); if 
(local_iter == 0 || cur_variance < minimum_variance) { minimum_variance = cur_variance; all_variances[id] = cur_variance; for(int j=0; j<k*D; j++) { all_centroids[id * D * k + j] = current_centroid[j]; } } local_iter++; } free(current_centroid); _LOG("Thread %d is done\n", id); } if (n_jobs > 1) { double min_variance; int min_variance_index; _LOG("Finding min variance\n"); for(int i=0; i<n_jobs; i++) { _LOG("variance[%d] = %e\n", i, all_variances[i]); if (i == 0 || all_variances[i] < min_variance) { min_variance = all_variances[i]; min_variance_index = i; } } _LOG("Min variance = %f\n", min_variance); for(int i=0; i<k*D; i++) { centroids[i] = all_centroids[min_variance_index*k*D + i]; } } free(all_centroids); free(all_variances); } /* * Runs multiple minibatches (as given by n_runs) and returns the centroids * that have the best variance */ void minibatch_multi(double *data, double *centroids, int n_samples, int max_iter, int n_runs, int n_jobs, double bic_ratio_termination, double reassignment_ratio, int k, int N, int D) { double *all_centroids; double *all_variances = (double*) malloc(n_jobs * sizeof(double)); if (n_jobs > 1) { all_centroids = (double*) malloc(k * D * n_jobs * sizeof(double)); } else { all_centroids = centroids; } #pragma omp parallel shared(all_centroids, all_variances, data, distance_metric) num_threads(n_jobs) { int id = omp_get_thread_num(); double minimum_variance, cur_variance; double *current_centroid = (double*) malloc(k * D * sizeof(double)); int local_iter = 0; #pragma omp for for(int i=0; i<n_runs; i++) { for(int j=0; j<k*D; j++) { current_centroid[j] = centroids[j]; } minibatch(data, current_centroid, n_samples, max_iter, bic_ratio_termination, reassignment_ratio, k, N, D); cur_variance = model_variance(data, current_centroid, k, N, D); if (local_iter == 0 || cur_variance < minimum_variance) { minimum_variance = cur_variance; all_variances[id] = cur_variance; for(int j=0; j<k*D; j++) { all_centroids[id * D * k + j] = 
current_centroid[j]; } } local_iter++; } free(current_centroid); _LOG("Thread %d is done\n", id); } if (n_jobs > 1) { double min_variance; int min_variance_index; _LOG("Finding min variance\n"); for(int i=0; i<n_jobs; i++) { _LOG("variance[%d] = %f\n", i, all_variances[i]); if (i == 0 || all_variances[i] < min_variance) { min_variance = all_variances[i]; min_variance_index = i; } } _LOG("Min variance = %f\n", min_variance); for(int i=0; i<k*D; i++) { centroids[i] = all_centroids[min_variance_index*k*D + i]; } } free(all_centroids); free(all_variances); } /* * Does max_iter iterations of minibatch on the given data. The centroids * should already be initialized and each batch will consist of n_samples * samples from the data. */ void minibatch(double *data, double *centroids, int n_samples, int max_iter, double bic_ratio_termination, double reassignment_ratio, int k, int N, int D) { // assert(k < n_samples < N) // assert(data.shape == (N, D) // assert(centoids.shape == (k, D) _LOG("Initializing\n"); int *sample_indicies = (int*) malloc(n_samples * sizeof(int)); int *centroid_counts = (int*) malloc(k * sizeof(int)); int *cluster_cache = (int*) malloc(n_samples * sizeof(int)); int *last_centroid_counts = (int*) malloc(k * sizeof(int)); int *reassign_centroid_indicies = (int*) malloc(k * sizeof(int)); int count_diff = 0, reassign_num = 0, max_count_diff = 0; double current_bic, bic_sum = 0.0; double *historical_bic; int historical_bic_idx = 0; if (bic_ratio_termination > 0.0) { historical_bic = (double*) malloc(EARLY_TERM_WINDOW * sizeof(double)); } for (int i=0; i<k; i++) { centroid_counts[i] = 0; last_centroid_counts[i] = 0; } _LOG("Starting minibatch\n"); for(int iter=0; iter<max_iter; iter++) { _LOG("Iteration %d\n", iter); _LOG("\tGenerating samples\n"); generate_random_indicies(N, n_samples, sample_indicies); minibatch_iteration(data, centroids, sample_indicies, centroid_counts, cluster_cache, n_samples, k, N, D); reassign_num = 0; max_count_diff = 0; for(int 
i=0; i<k; i++) { count_diff = centroid_counts[i] - last_centroid_counts[i]; if (count_diff == 0) { reassign_centroid_indicies[reassign_num] = i; reassign_num += 1; } if (count_diff > max_count_diff) { max_count_diff = count_diff; } } for(int i=0; i<k; i++) { count_diff = centroid_counts[i] - last_centroid_counts[i]; if (count_diff > 0 && count_diff < max_count_diff * reassignment_ratio) { reassign_centroid_indicies[reassign_num] = i; reassign_num += 1; } last_centroid_counts[i] = centroid_counts[i]; } if (reassign_num > 0) { _LOG("Reassigning %d centroids\n", reassign_num); reassign_centroids(data, centroids, reassign_centroid_indicies, n_samples, reassign_num, k, N, D); } if (bic_ratio_termination > 0.0) { _LOG("\tChecking for early termination condition\n"); current_bic = bayesian_information_criterion(data, centroids, k, N, D); if (iter > EARLY_TERM_WINDOW) { _LOG("Current bic ratio: %f\n", fabs(1.0 - current_bic * EARLY_TERM_WINDOW / bic_sum)); if (fabs(1.0 - current_bic * EARLY_TERM_WINDOW / bic_sum) < bic_ratio_termination) { _LOG("Finishing early at iteration %d. ratio = %f, threshold = %f\n", iter, fabs(1.0 - current_bic * EARLY_TERM_WINDOW / bic_sum), bic_ratio_termination ); break; } } bic_sum += current_bic; bic_sum -= historical_bic[historical_bic_idx]; historical_bic[historical_bic_idx] = current_bic; historical_bic_idx = (historical_bic_idx + 1) % EARLY_TERM_WINDOW; } #ifdef DEBUG_OUTPUT char filename[128]; sprintf(filename, "data/centroids-%02d.dat", iter); save_double_matrix(centroids, filename, k, D); sprintf(filename, "data/samples-%02d.dat", iter); save_int_matrix(sample_indicies, filename, n_samples, 1); _LOG("\tBIC of current model: %f\n", bayesian_information_criterion(data, centroids, k, N, D)); #endif } _LOG("Cleaning up\n"); free(centroid_counts); free(sample_indicies); free(cluster_cache); if (bic_ratio_termination > 0.0) { free(historical_bic); } } /* * Does a single iteration of minibatch on the given data. 
* Parameters: * data: the data to cluster centroids: location of the centroids * sample_indicies: list of indexes into data that should be used for the * clustering * centroid_counts: a count of the number of datapoints found * in each centroid * cluster_cache: a cache of which cluster a sample belongs to. */ void minibatch_iteration(double *data, double *centroids, int *sample_indicies, int *centroid_counts, int *cluster_cache, int n_samples, int k, int N, int D) { // assert(k < n_samples < N) // assert(data.shape == (N, D) // assert(centoids.shape == (k, D) // assert(sample_indicies.shape == (n_samples,) // assert(centroid_counts.shape == (k, ) // assert(cluster_cache.shape == (n_samples, ) int idx, cur_cluster; _LOG("\tGenerating cache\n"); for(int i=0; i<n_samples; i++) { idx = sample_indicies[i]; cluster_cache[i] = closest_centroid(data + idx * D, centroids, k, D); } _LOG("\tUpdating centroids\n"); for(int i=0; i<n_samples; i++) { idx = sample_indicies[i]; cur_cluster = cluster_cache[i]; centroid_counts[cur_cluster] += 1; gradient_step(data + idx * D, centroids + cur_cluster * D, centroid_counts[cur_cluster], D); } } void reassign_centroids(double *data, double *centroids, int *reassign_clusters, int n_samples, int K, int k, int N, int D) { unsigned int seed = (int) clock() * (omp_get_thread_num() + 1); srand(seed); double distance, total_distance2; double *distances2 = (double*) malloc(n_samples * sizeof(double)); int *sample_indicies = (int*) malloc(n_samples * sizeof(int)); generate_random_indicies(N, n_samples, sample_indicies); for(int i=0; i<n_samples; i++) { int idx = sample_indicies[i]; distance = distance_to_closest_centroid(data + D*idx, centroids, k, D); distances2[i] = distance * distance; total_distance2 += distances2[i]; } for(int c=0; c<K; c++) { total_distance2 = 0.0; int index; double d = (rand() / ((double)RAND_MAX+1)) * total_distance2; for(index = 0; index < n_samples && d >= 0; index++) { d -= distances2[index]; } if (index) index--; int 
data_index = sample_indicies[index]; int centroid_idx = reassign_clusters[c]; for(int i=0; i<D; i++) { centroids[centroid_idx*D + i] = data[data_index*D + i]; } total_distance2 -= distances2[index]; distances2[index] = 0; } free(distances2); free(sample_indicies); } /* * Initialize centroids using the k-means++ algorithm over the given data. */ void kmeanspp(double *data, double *centroids, int n_samples, int k, int N, int D) { /* The first cluster is centered from a randomly chosen point in the data */ unsigned int seed = (int) clock() * (omp_get_thread_num() + 1); srand(seed); int index = (int) (rand() / ((double)RAND_MAX+1) * N); for(int i=0; i<D; i++) { centroids[i] = data[index*D + i]; } _LOG("Fitted clusters: 1 / %d\n", k); /* * Now we pick random data points to use for centroids using a weighted * probability propotional to the datapoints squared distance to the * closest centroid */ double distance, total_distance2; double *distances2 = (double*) malloc(n_samples * sizeof(double)); int *sample_indicies = (int*) malloc(n_samples * sizeof(int)); for(int c=1; c<k; c++) { total_distance2 = 0.0; generate_random_indicies(N, n_samples, sample_indicies); for(int i=0; i<n_samples; i++) { int idx = sample_indicies[i]; distance = distance_to_closest_centroid(data + D*idx, centroids, c, D); distances2[i] = distance * distance; total_distance2 += distances2[i]; } int index; double d = (rand() / ((double)RAND_MAX+1)) * total_distance2; for(index = 0; index < N && d >= 0; index++) { d -= distances2[index]; } if(index) index--; int data_index = sample_indicies[index]; for(int i=0; i<D; i++) { centroids[c*D + i] = data[data_index*D + i]; } _LOG("Fitted clusters: %d / %d\n", c, k); } free(distances2); free(sample_indicies); } int main(void) { int N = 1000; int D = 2; int k = 256; int n_samples = k*5; int max_iter = 1000; printf("Allocating test data\n"); double *data = (double*) malloc(N * D * sizeof(double)); double *centroids = (double*) malloc(k * D * sizeof(double)); 
printf("Creating synthetic data\n"); gaussian_data(data, 20, N, D); kmeanspp(data, centroids, n_samples, k, N, D); #ifdef DEBUG_OUTPUT save_double_matrix(data, "data/cluster_data.dat", N, D); #endif clock_t start_clock = clock(); minibatch(data, centroids, n_samples, max_iter, 0.001, 0.1, k, N, D); /*minibatch_multi(data, centroids, n_samples, max_iter, 10, 4, -1.0, k, N, D);*/ clock_t end_clock = clock(); printf("BIC of resulting model: %f\n", bayesian_information_criterion(data, centroids, k, N, D)); printf("Time to run: %fs\n", (end_clock - start_clock) / (double)CLOCKS_PER_SEC); free(data); free(centroids); return 1; }
6423.c
/* POLYBENCH/GPU-OPENMP
 *
 * This file is a part of the Polybench/GPU-OpenMP suite
 *
 * Contact:
 * William Killian <killian@udel.edu>
 *
 * Copyright 2013, The University of Delaware
 */
/* 2-D convolution benchmark: applies a fixed 3x3 stencil to A, writing the
 * result into B (interior points only). */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>

/* Include polybench common header. */
#include <polybench.h>

/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"

/* Array initialization. */
/* Fills A with the deterministic pattern (i + j) / nj so runs are
 * reproducible across implementations. */
static
void init_array (int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  // printf("Initializing Array\n");
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      {
        A[i][j] = ((DATA_TYPE) (i + j) / nj);
      }
}

/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
/* NOTE(review): kernel_conv2d never writes B's border rows/columns, so this
 * dump includes values that were never initialized there -- confirm against
 * the reference PolyBench variant before using the dump for verification. */
static
void print_array(int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      {
        fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
        /* Line break every 20 values to keep the dump diffable. */
        if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
      }
  fprintf(stderr, "\n");
}

/* Main computational kernel. The whole function will be timed,
   including the call and return. */
/* B[i][j] = 3x3 stencil over A centered at (i, j); the perfectly nested i/j
 * loops are collapsed into one iteration space and dynamically scheduled
 * (chunk size 1) over 2 threads. */
static
void kernel_conv2d(int ni, int nj,
                   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
                   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

#pragma scop
#pragma omp parallel for private(j) collapse(2) schedule(dynamic, 1) num_threads(2)
  for (i = 1; i < _PB_NI - 1; ++i)
    {
      for (j = 1; j < _PB_NJ - 1; ++j)
        {
          B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
                  + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
                  + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
        }
    }
#pragma endscop
  // printf("Kernal computation complete !!\n");
}

int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);

  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));

  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();

  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));

  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);

  return 0;
}
fs_strategy_for_chimera.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // // License: BSD License // Kratos default license: kratos/license.txt // // Authors: Aditya Ghantasala, https://github.com/adityaghantasala // Navaneeth K Narayanan // Rishith Ellath Meethal // #ifndef KRATOS_FS_STRATEGY_FOR_CHIMERA_H #define KRATOS_FS_STRATEGY_FOR_CHIMERA_H #include "includes/define.h" #include "utilities/openmp_utils.h" // FluidDynamicsApp Includes #include "custom_strategies/strategies/fs_strategy.h" // Application includes #include "chimera_application_variables.h" #include "custom_utilities/fractional_step_settings_for_chimera.h" namespace Kratos { ///@addtogroup ChimeraApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ template<class TSparseSpace, class TDenseSpace, class TLinearSolver > class FSStrategyForChimera : public FSStrategy<TSparseSpace,TDenseSpace,TLinearSolver> { public: ///@name Type Definitions ///@{ /// Counted pointer of FSStrategyForChimera KRATOS_CLASS_POINTER_DEFINITION(FSStrategyForChimera); typedef FSStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef FractionalStepSettingsForChimera<TSparseSpace,TDenseSpace,TLinearSolver> SolverSettingsType; ///@} ///@name Life Cycle ///@{ FSStrategyForChimera(ModelPart& rModelPart, SolverSettingsType& rSolverConfig, bool PredictorCorrector): BaseType(rModelPart,rSolverConfig,PredictorCorrector) { this->InitializeStrategy(rSolverConfig,PredictorCorrector); } /// Destructor. ~FSStrategyForChimera() = default; /// Assignment operator. FSStrategyForChimera& operator=(FSStrategyForChimera const& rOther) = delete; /// Copy constructor. 
FSStrategyForChimera(FSStrategyForChimera const& rOther) = delete;

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override
    {
        std::stringstream buffer;
        buffer << "FSStrategyForChimera" ;
        return buffer.str();
    }

    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override {rOStream << "FSStrategyForChimera";}

    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override {}

    ///@}
    ///@name Friends
    ///@{

    ///@}

protected:
    ///@name Protected Life Cycle
    ///@{

    ///@}
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    /// Sets the ACTIVE flag to ValToSet on every master-slave constraint of
    /// the model part that carries TheFlagToSet; constraints without the flag
    /// are left untouched.  Iteration is partitioned across OpenMP threads.
    void SetActiveStateOnConstraint(const Flags& TheFlagToSet ,const bool ValToSet)
    {
        ModelPart& rModelPart = BaseType::GetModelPart();
#pragma omp parallel
        {
            ModelPart::MasterSlaveConstraintIteratorType constraints_begin;
            ModelPart::MasterSlaveConstraintIteratorType constraints_end;
            OpenMPUtils::PartitionedIterators(rModelPart.MasterSlaveConstraints(),constraints_begin,constraints_end);

            for ( ModelPart::MasterSlaveConstraintIteratorType itConstraint = constraints_begin; itConstraint != constraints_end; ++itConstraint )
            {
                if (itConstraint->Is(TheFlagToSet))
                    itConstraint->Set(ACTIVE, ValToSet);
            }
        }
    }

    /// Performs one fractional-step solve: (1) momentum iterations with the
    /// velocity Chimera constraints active, (2) OSS projection computation,
    /// (3) pressure solve with the pressure Chimera constraints active,
    /// storing the pressure variation in PRESSURE_OLD_IT.  Returns the norm
    /// of the pressure correction.  (Body continues below.)
    double SolveStep() override
    {
        double start_solve_time = OpenMPUtils::GetCurrentTime();
        ModelPart& r_model_part = BaseType::GetModelPart();

        // 1. Fractional step momentum iteration
        r_model_part.GetProcessInfo().SetValue(FRACTIONAL_STEP,1);

        bool converged = false;
        // Activate Constraints for VELOCITY and deactivate PRESSURE
        SetActiveStateOnConstraint(FS_CHIMERA_VELOCITY_CONSTRAINT, true);
        SetActiveStateOnConstraint(FS_CHIMERA_PRESSURE_CONSTRAINT, false);
        for(std::size_t it = 0; it < BaseType::mMaxVelocityIter; ++it)
        {
            KRATOS_INFO("FRACTIONAL STEP :: ")<<it+1<<std::endl;
            // build momentum system and solve for fractional step velocity increment
            r_model_part.GetProcessInfo().SetValue(FRACTIONAL_STEP,1);
            double norm_dv = BaseType::mpMomentumStrategy->Solve();

            // Check convergence
            converged = BaseType::CheckFractionalStepConvergence(norm_dv);

            if (converged)
            {
                KRATOS_INFO_IF("FSStrategyForChimera ", BaseType::GetEchoLevel() > 0 )<< "Fractional velocity converged in " << it+1 << " iterations." << std::endl;
                break;
            }
        }

        // Activate Constraints for PRESSURE and deactivate VELOCITY
        SetActiveStateOnConstraint(FS_CHIMERA_VELOCITY_CONSTRAINT, false);
        SetActiveStateOnConstraint(FS_CHIMERA_PRESSURE_CONSTRAINT, true);

        KRATOS_INFO_IF("FSStrategyForChimera ", (BaseType::GetEchoLevel() > 0) && !converged)<< "Fractional velocity iterations did not converge "<< std::endl;

        // Compute projections (for stabilization)
        r_model_part.GetProcessInfo().SetValue(FRACTIONAL_STEP,4);
        ComputeSplitOssProjections(r_model_part);

        // 2. Pressure solution (store pressure variation in PRESSURE_OLD_IT)
        r_model_part.GetProcessInfo().SetValue(FRACTIONAL_STEP,5);

        // Seed PRESSURE_OLD_IT with -p_old so the post-solve pass below
        // leaves the pressure increment (p_new - p_old) in it.
#pragma omp parallel
        {
            ModelPart::NodeIterator nodes_begin;
            ModelPart::NodeIterator nodes_end;
            OpenMPUtils::PartitionedIterators(r_model_part.Nodes(),nodes_begin,nodes_end);

            for (ModelPart::NodeIterator it_node = nodes_begin; it_node != nodes_end; ++it_node)
            {
                const double old_press = it_node->FastGetSolutionStepValue(PRESSURE);
                it_node->FastGetSolutionStepValue(PRESSURE_OLD_IT) = -old_press;
            }
        }

        KRATOS_INFO_IF("FSStrategyForChimera ", BaseType::GetEchoLevel() > 0 )<< "Calculating Pressure."<< std::endl;
        //double norm_dp = 0;
        double norm_dp = BaseType::mpPressureStrategy->Solve();

        // PRESSURE_OLD_IT now holds the pressure variation of this step.
#pragma omp parallel
        {
            ModelPart::NodeIterator nodes_begin;
            ModelPart::NodeIterator nodes_end;
            OpenMPUtils::PartitionedIterators(r_model_part.Nodes(),nodes_begin,nodes_end);

            for (ModelPart::NodeIterator it_node = nodes_begin; it_node != nodes_end; ++it_node)
                it_node->FastGetSolutionStepValue(PRESSURE_OLD_IT) += it_node->FastGetSolutionStepValue(PRESSURE);
        }

        // 3. Compute end-of-step velocity
        KRATOS_INFO_IF("FSStrategyForChimera ", BaseType::GetEchoLevel() > 0 )<<"Updating Velocity."
<< std::endl; r_model_part.GetProcessInfo().SetValue(FRACTIONAL_STEP,6); CalculateEndOfStepVelocity(); // Activate Constraints for PRESSURE and deactivate VELOCITY SetActiveStateOnConstraint(FS_CHIMERA_VELOCITY_CONSTRAINT, true); SetActiveStateOnConstraint(FS_CHIMERA_PRESSURE_CONSTRAINT, true); // Additional steps for (std::vector<Process::Pointer>::iterator iExtraSteps = BaseType::mExtraIterationSteps.begin(); iExtraSteps != BaseType::mExtraIterationSteps.end(); ++iExtraSteps) (*iExtraSteps)->Execute(); const double stop_solve_time = OpenMPUtils::GetCurrentTime(); KRATOS_INFO_IF("FSStrategyForChimera", BaseType::GetEchoLevel() >= 1) << "Time for solving step : " << stop_solve_time - start_solve_time << std::endl; return norm_dp; } void ComputeSplitOssProjections(ModelPart& rModelPart) override { const array_1d<double,3> zero(3,0.0); array_1d<double,3> out(3,0.0); #pragma omp parallel { ModelPart::NodeIterator nodes_begin; ModelPart::NodeIterator nodes_end; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),nodes_begin,nodes_end); for ( ModelPart::NodeIterator it_node = nodes_begin; it_node != nodes_end; ++it_node ) { it_node->FastGetSolutionStepValue(CONV_PROJ) = zero; it_node->FastGetSolutionStepValue(PRESS_PROJ) = zero; it_node->FastGetSolutionStepValue(DIVPROJ) = 0.0; it_node->FastGetSolutionStepValue(NODAL_AREA) = 0.0; } } #pragma omp parallel { ModelPart::ElementIterator elem_begin; ModelPart::ElementIterator elem_end; OpenMPUtils::PartitionedIterators(rModelPart.Elements(),elem_begin,elem_end); for ( ModelPart::ElementIterator it_elem = elem_begin; it_elem != elem_end; ++it_elem ) { it_elem->Calculate(CONV_PROJ,out,rModelPart.GetProcessInfo()); } } rModelPart.GetCommunicator().AssembleCurrentData(CONV_PROJ); rModelPart.GetCommunicator().AssembleCurrentData(PRESS_PROJ); rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ); rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA); // If there are periodic conditions, add contributions from both 
sides to the periodic nodes //PeriodicConditionProjectionCorrection(rModelPart); ChimeraProjectionCorrection(rModelPart); #pragma omp parallel { ModelPart::NodeIterator nodes_begin; ModelPart::NodeIterator nodes_end; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),nodes_begin,nodes_end); for ( ModelPart::NodeIterator it_node = nodes_begin; it_node != nodes_end; ++it_node ) { const double nodal_area = it_node->FastGetSolutionStepValue(NODAL_AREA); if( nodal_area > mAreaTolerance ) { it_node->FastGetSolutionStepValue(CONV_PROJ) /= nodal_area; it_node->FastGetSolutionStepValue(PRESS_PROJ) /= nodal_area; it_node->FastGetSolutionStepValue(DIVPROJ) /= nodal_area; } } } //For correcting projections for chimera auto &r_pre_modelpart = rModelPart.GetSubModelPart(rModelPart.Name()+"fs_pressure_model_part"); const auto& r_constraints_container = r_pre_modelpart.MasterSlaveConstraints(); for(const auto& constraint : r_constraints_container) { const auto& master_dofs = constraint.GetMasterDofsVector(); const auto& slave_dofs = constraint.GetSlaveDofsVector(); ModelPart::MatrixType r_relation_matrix; ModelPart::VectorType r_constant_vector; constraint.CalculateLocalSystem(r_relation_matrix,r_constant_vector,rModelPart.GetProcessInfo()); IndexType slave_i = 0; for(const auto& slave_dof : slave_dofs) { const auto slave_node_id = slave_dof->Id(); // DOF ID is same as node ID auto& r_slave_node = rModelPart.Nodes()[slave_node_id]; IndexType master_j = 0; for(const auto& master_dof : master_dofs) { const auto master_node_id = master_dof->Id(); const double weight = r_relation_matrix(slave_i, master_j); auto& r_master_node = rModelPart.Nodes()[master_node_id]; auto& conv_proj = r_slave_node.FastGetSolutionStepValue(CONV_PROJ); auto& pres_proj = r_slave_node.FastGetSolutionStepValue(PRESS_PROJ); auto& dive_proj = r_slave_node.FastGetSolutionStepValue(DIVPROJ); auto& nodal_area = r_slave_node.FastGetSolutionStepValue(NODAL_AREA); conv_proj += 
(r_master_node.FastGetSolutionStepValue(CONV_PROJ))*weight; pres_proj += (r_master_node.FastGetSolutionStepValue(PRESS_PROJ))*weight; dive_proj += (r_master_node.FastGetSolutionStepValue(DIVPROJ))*weight; nodal_area += (r_master_node.FastGetSolutionStepValue(NODAL_AREA))*weight; ++master_j; } ++slave_i; } } } void CalculateEndOfStepVelocity() override { ModelPart& r_model_part = BaseType::GetModelPart(); const array_1d<double,3> zero(3,0.0); array_1d<double,3> out(3,0.0); #pragma omp parallel { ModelPart::NodeIterator nodes_begin; ModelPart::NodeIterator nodes_end; OpenMPUtils::PartitionedIterators(r_model_part.Nodes(),nodes_begin,nodes_end); for ( ModelPart::NodeIterator it_node = nodes_begin; it_node != nodes_end; ++it_node ) { it_node->FastGetSolutionStepValue(FRACT_VEL) = zero; } } #pragma omp parallel { ModelPart::ElementIterator elem_begin; ModelPart::ElementIterator elem_end; OpenMPUtils::PartitionedIterators(r_model_part.Elements(),elem_begin,elem_end); for ( ModelPart::ElementIterator it_elem = elem_begin; it_elem != elem_end; ++it_elem ) { it_elem->Calculate(VELOCITY,out,r_model_part.GetProcessInfo()); } } r_model_part.GetCommunicator().AssembleCurrentData(FRACT_VEL); //PeriodicConditionVelocityCorrection(r_model_part); // Force the end of step velocity to verify slip conditions in the model if (BaseType::mUseSlipConditions) BaseType::EnforceSlipCondition(SLIP); if (BaseType::mDomainSize == 2) InterpolateVelocity<2>(r_model_part); if (BaseType::mDomainSize == 3) InterpolateVelocity<3>(r_model_part); } void ChimeraProjectionCorrection(ModelPart& rModelPart) { auto &r_pre_modelpart = rModelPart.GetSubModelPart(rModelPart.Name()+"fs_pressure_model_part"); const auto& r_constraints_container = r_pre_modelpart.MasterSlaveConstraints(); for(const auto& constraint : r_constraints_container) { const auto& slave_dofs = constraint.GetSlaveDofsVector(); for(const auto& slave_dof : slave_dofs) { const auto slave_node_id = slave_dof->Id(); // DOF ID is same as node ID 
auto& r_slave_node = rModelPart.Nodes()[slave_node_id]; r_slave_node.GetValue(NODAL_AREA)= 0; r_slave_node.GetValue(CONV_PROJ)= array_1d<double,3>(3,0.0); r_slave_node.GetValue(PRESS_PROJ)= array_1d<double,3>(3,0.0); r_slave_node.GetValue(DIVPROJ)= 0 ; } } for(const auto& constraint : r_constraints_container) { const auto& master_dofs = constraint.GetMasterDofsVector(); const auto& slave_dofs = constraint.GetSlaveDofsVector(); ModelPart::MatrixType r_relation_matrix; ModelPart::VectorType r_constant_vector; constraint.CalculateLocalSystem(r_relation_matrix,r_constant_vector,rModelPart.GetProcessInfo()); IndexType slave_i = 0; for(const auto& slave_dof : slave_dofs) { const IndexType slave_node_id = slave_dof->Id(); // DOF ID is same as node ID auto& r_slave_node = rModelPart.Nodes()[slave_node_id]; IndexType master_j = 0; for(const auto& master_dof : master_dofs) { const IndexType master_node_id = master_dof->Id(); const double weight = r_relation_matrix(slave_i, master_j); auto& r_master_node = rModelPart.Nodes()[master_node_id]; r_slave_node.GetValue(NODAL_AREA) +=(r_master_node.FastGetSolutionStepValue(NODAL_AREA))*weight; r_slave_node.GetValue(CONV_PROJ) +=(r_master_node.FastGetSolutionStepValue(CONV_PROJ))*weight; r_slave_node.GetValue(PRESS_PROJ) +=(r_master_node.FastGetSolutionStepValue(PRESS_PROJ))*weight; r_slave_node.GetValue(DIVPROJ) +=(r_master_node.FastGetSolutionStepValue(DIVPROJ))*weight; ++master_j; } ++slave_i; } } rModelPart.GetCommunicator().AssembleNonHistoricalData(NODAL_AREA); rModelPart.GetCommunicator().AssembleNonHistoricalData(CONV_PROJ); rModelPart.GetCommunicator().AssembleNonHistoricalData(PRESS_PROJ); rModelPart.GetCommunicator().AssembleNonHistoricalData(DIVPROJ); for (auto it_node = rModelPart.NodesBegin(); it_node != rModelPart.NodesEnd(); it_node++) { if (it_node->GetValue(NODAL_AREA) > mAreaTolerance) { it_node->FastGetSolutionStepValue(NODAL_AREA) = it_node->GetValue(NODAL_AREA); it_node->FastGetSolutionStepValue(CONV_PROJ) = 
it_node->GetValue(CONV_PROJ); it_node->FastGetSolutionStepValue(PRESS_PROJ) = it_node->GetValue(PRESS_PROJ); it_node->FastGetSolutionStepValue(DIVPROJ) = it_node->GetValue(DIVPROJ); // reset for next iteration it_node->GetValue(NODAL_AREA) = 0.0; it_node->GetValue(CONV_PROJ) = array_1d<double,3>(3,0.0); it_node->GetValue(PRESS_PROJ) = array_1d<double,3>(3,0.0); it_node->GetValue(DIVPROJ) = 0.0; } } } void ChimeraVelocityCorrection(ModelPart& rModelPart) { auto &r_pre_modelpart = rModelPart.GetSubModelPart(rModelPart.Name()+"fs_pressure_model_part"); const auto& r_constraints_container = r_pre_modelpart.MasterSlaveConstraints(); for(const auto& constraint : r_constraints_container) { const auto& slave_dofs = constraint.GetSlaveDofsVector(); for(const auto& slave_dof : slave_dofs) { const auto slave_node_id = slave_dof->Id(); // DOF ID is same as node ID auto& r_slave_node = rModelPart.Nodes()[slave_node_id]; r_slave_node.FastGetSolutionStepValue(FRACT_VEL_X)=0; r_slave_node.FastGetSolutionStepValue(FRACT_VEL_Y)=0; } } for(const auto& constraint : r_constraints_container) { const auto& master_dofs = constraint.GetMasterDofsVector(); const auto& slave_dofs = constraint.GetSlaveDofsVector(); ModelPart::MatrixType r_relation_matrix; ModelPart::VectorType r_constant_vector; constraint.CalculateLocalSystem(r_relation_matrix,r_constant_vector,rModelPart.GetProcessInfo()); IndexType slave_i = 0; for(const auto& slave_dof : slave_dofs) { const auto slave_node_id = slave_dof->Id(); // DOF ID is same as node ID auto& r_slave_node = rModelPart.Nodes()[slave_node_id]; IndexType master_j = 0; for(const auto& master_dof : master_dofs) { const auto master_node_id = master_dof->Id(); const double weight = r_relation_matrix(slave_i, master_j); auto& r_master_node = rModelPart.Nodes()[master_node_id]; r_slave_node.GetValue(FRACT_VEL) +=(r_master_node.FastGetSolutionStepValue(FRACT_VEL))*weight; ++master_j; } ++slave_i; } } 
rModelPart.GetCommunicator().AssembleNonHistoricalData(FRACT_VEL); for (typename ModelPart::NodeIterator it_node = rModelPart.NodesBegin(); it_node != rModelPart.NodesEnd(); it_node++) { array_1d<double,3>& r_delta_vel = it_node->GetValue(FRACT_VEL); if ( r_delta_vel[0]*r_delta_vel[0] + r_delta_vel[1]*r_delta_vel[1] + r_delta_vel[2]*r_delta_vel[2] != 0.0) { it_node->FastGetSolutionStepValue(FRACT_VEL) = it_node->GetValue(FRACT_VEL); r_delta_vel = array_1d<double,3>(3,0.0); } } } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ const double mAreaTolerance=1E-12; ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ template <int TDim> void InterpolateVelocity(ModelPart& rModelPart) { #pragma omp parallel { ModelPart::NodeIterator nodes_begin; ModelPart::NodeIterator nodes_end; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), nodes_begin, nodes_end); for (ModelPart::NodeIterator it_node = nodes_begin; it_node != nodes_end; ++it_node) { const double NodalArea = it_node->FastGetSolutionStepValue(NODAL_AREA); if (NodalArea > mAreaTolerance) { if (!it_node->IsFixed(VELOCITY_X)) it_node->FastGetSolutionStepValue(VELOCITY_X) += it_node->FastGetSolutionStepValue(FRACT_VEL_X) / NodalArea; if (!it_node->IsFixed(VELOCITY_Y)) it_node->FastGetSolutionStepValue(VELOCITY_Y) += it_node->FastGetSolutionStepValue(FRACT_VEL_Y) / NodalArea; if(TDim > 2) if (!it_node->IsFixed(VELOCITY_Z)) it_node->FastGetSolutionStepValue(VELOCITY_Z) += it_node->FastGetSolutionStepValue(FRACT_VEL_Z) / NodalArea; } } } auto& r_pre_modelpart = rModelPart.GetSubModelPart(rModelPart.Name()+"fs_pressure_model_part"); const auto& r_constraints_container = r_pre_modelpart.MasterSlaveConstraints(); for (const auto& constraint : r_constraints_container) { const auto& slave_dofs = constraint.GetSlaveDofsVector(); for (const 
auto& slave_dof : slave_dofs) { const auto slave_node_id = slave_dof->Id(); // DOF ID is same as node ID auto& r_slave_node = rModelPart.Nodes()[slave_node_id]; r_slave_node.FastGetSolutionStepValue(VELOCITY_X) = 0; r_slave_node.FastGetSolutionStepValue(VELOCITY_Y) = 0; if(TDim > 2) r_slave_node.FastGetSolutionStepValue(VELOCITY_Z) = 0; } } for (const auto& constraint : r_constraints_container) { const auto& master_dofs = constraint.GetMasterDofsVector(); const auto& slave_dofs = constraint.GetSlaveDofsVector(); ModelPart::MatrixType r_relation_matrix; ModelPart::VectorType r_constant_vector; constraint.CalculateLocalSystem(r_relation_matrix, r_constant_vector, rModelPart.GetProcessInfo()); IndexType slave_i = 0; for (const auto& slave_dof : slave_dofs) { const auto slave_node_id = slave_dof->Id(); // DOF ID is same as node ID auto& r_slave_node = rModelPart.Nodes()[slave_node_id]; IndexType master_j = 0; for (const auto& master_dof : master_dofs) { const auto master_node_id = master_dof->Id(); const double weight = r_relation_matrix(slave_i, master_j); auto& r_master_node = rModelPart.Nodes()[master_node_id]; r_slave_node.FastGetSolutionStepValue(VELOCITY_X) += (r_master_node.FastGetSolutionStepValue(VELOCITY_X)) * weight; r_slave_node.FastGetSolutionStepValue(VELOCITY_Y) += (r_master_node.FastGetSolutionStepValue(VELOCITY_Y)) * weight; if(TDim > 2) r_slave_node.FastGetSolutionStepValue(VELOCITY_Z) += (r_master_node.FastGetSolutionStepValue(VELOCITY_Z)) * weight; ++master_j; } ++slave_i; } } } void InitializeStrategy(SolverSettingsType& rSolverConfig, bool PredictorCorrector) { KRATOS_TRY; BaseType::mTimeOrder = rSolverConfig.GetTimeOrder(); // Check that input parameters are reasonable and sufficient. 
BaseType::Check(); //ModelPart& rModelPart = BaseType::GetModelPart(); BaseType::mDomainSize = rSolverConfig.GetDomainSize(); BaseType::mPredictorCorrector = PredictorCorrector; BaseType::mUseSlipConditions = rSolverConfig.UseSlipConditions(); BaseType::mReformDofSet = rSolverConfig.GetReformDofSet(); BaseType::SetEchoLevel(rSolverConfig.GetEchoLevel()); // Initialize strategies for each step bool HaveVelStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Velocity,BaseType::mpMomentumStrategy); if (HaveVelStrategy) { rSolverConfig.FindTolerance(SolverSettingsType::Velocity,BaseType::mVelocityTolerance); rSolverConfig.FindMaxIter(SolverSettingsType::Velocity,BaseType::mMaxVelocityIter); KRATOS_INFO("FSStrategyForChimera ")<<"Velcoity strategy successfully set !"<<std::endl; } else { KRATOS_THROW_ERROR(std::runtime_error,"FS_Strategy error: No Velocity strategy defined in FractionalStepSettings",""); } bool HavePressStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Pressure,BaseType::mpPressureStrategy); if (HavePressStrategy) { rSolverConfig.FindTolerance(SolverSettingsType::Pressure,BaseType::mPressureTolerance); rSolverConfig.FindMaxIter(SolverSettingsType::Pressure,BaseType::mMaxPressureIter); KRATOS_INFO("FSStrategyForChimera ")<<"Pressure strategy successfully set !"<<std::endl; } else { KRATOS_THROW_ERROR(std::runtime_error,"FS_Strategy error: No Pressure strategy defined in FractionalStepSettings",""); } // Check input parameters BaseType::Check(); KRATOS_CATCH(""); } ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /// Class FStepStrategy ///@} ///@name Type Definitions ///@{ ///@} ///@} // addtogroup } // namespace Kratos. #endif // KRATOS_FS_STRATEGY_FOR_CHIMERA_H