source
stringlengths
3
92
c
stringlengths
26
2.25M
GB_binop__bxor_uint32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bxor_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_08__bxor_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_02__bxor_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_04__bxor_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bxor_uint32)
// A*D function (colscale):         GB (_AxD__bxor_uint32)
// D*A function (rowscale):         GB (_DxB__bxor_uint32)
// C+=B function (dense accum):     GB (_Cdense_accumB__bxor_uint32)
// C+=b function (dense accum):     GB (_Cdense_accumb__bxor_uint32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bxor_uint32)
// C=scalar+B                       GB (_bind1st__bxor_uint32)
// C=scalar+B'                      GB (_bind1st_tran__bxor_uint32)
// C=A+scalar                       GB (_bind2nd__bxor_uint32)
// C=A'+scalar                      GB (_bind2nd_tran__bxor_uint32)

// C type:     uint32_t
// A type:     uint32_t
// A pattern?  0
// B type:     uint32_t
// B pattern?  0

// BinaryOp:   cij = (aij) ^ (bij)

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x) ^ (y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BXOR || GxB_NO_UINT32 || GxB_NO_BXOR_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Not generated for BXOR: the accumulating ewise3 kernel exists only for
// MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, and RDIV.
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__bxor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bxor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bxor_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns first; kept
    // byte-identical because this file is auto-generated.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__bxor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__bxor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bxor_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // eWiseUnion uses alpha/beta in place of missing entries of A and B
    uint32_t alpha_scalar ;
    uint32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bxor_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bxor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bxor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bxor_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bxor_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t   x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x) ^ (bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bxor_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t   y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij) ^ (y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint32_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (x) ^ (aij) ;                     \
}

GrB_Info GB (_bind1st_tran__bxor_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint32_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (aij) ^ (y) ;                     \
}

GrB_Info GB (_bind2nd_tran__bxor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
core_csyssq.c
/**
 *
 * @file
 *
 * Scaled sum-of-squares (SSQ) kernels for a complex *symmetric* matrix,
 * single-complex precision.  A norm is represented by the pair
 * (scale, sumsq) with norm = scale * sqrt(sumsq), the LAPACK xLASSQ
 * convention, which avoids overflow/underflow.
 *
 * PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zsyssq.c, normal z -> c, Fri Sep 28 17:38:23 2018
 *
 **/

#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"

#include <math.h>

/******************************************************************************/
// Fold one (scale_k, sumsq_k) pair into the running (scl, sum) accumulator,
// rescaling so the larger scale wins (same update rule as LAPACK xLASSQ).
static void plasma_fold_ssq(float scale_k, float sumsq_k,
                            float *scl, float *sum)
{
    if (*scl < scale_k) {
        *sum = sumsq_k + (*sum)*((*scl/scale_k)*(*scl/scale_k));
        *scl = scale_k;
    }
    else {
        *sum = *sum + sumsq_k*((scale_k/(*scl))*(scale_k/(*scl)));
    }
}

/******************************************************************************/
// Update (scale, sumsq) with the squares of all entries of the n-by-n complex
// symmetric matrix A (leading dimension lda), reading only the triangle
// selected by uplo and counting each off-diagonal entry twice.
__attribute__((weak))
void plasma_core_csyssq(plasma_enum_t uplo,
                        int n,
                        const plasma_complex32_t *A, int lda,
                        float *scale, float *sumsq)
{
    int inc_one = 1;

    // Strictly-triangular part, column by column.
    if (uplo == PlasmaUpper) {
        // Column `col` holds `col` entries above the diagonal (col >= 1).
        // TODO: Inline this operation.
        for (int col = 1; col < n; col++)
            LAPACK_classq(&col, &A[lda*col], &inc_one, scale, sumsq);
    }
    else { // PlasmaLower
        // Column `col` holds n-col-1 entries below the diagonal.
        for (int col = 0; col < n-1; col++) {
            int below = n-col-1;
            // TODO: Inline this operation.
            LAPACK_classq(&below, &A[lda*col+col+1], &inc_one, scale, sumsq);
        }
    }

    // Symmetry: every off-diagonal entry appears twice in the full matrix.
    *sumsq *= 2.0;

    // Diagonal pass.  The matrix is symmetric (not Hermitian), so the
    // diagonal is genuinely complex — don't ignore the imaginary part.
    for (int i = 0; i < n; i++) {
        float abs_diag = cabsf(A[lda*i+i]);
        if (abs_diag != 0.0) { // != propagates nan
            if (*scale < abs_diag) {
                *sumsq = 1.0 + *sumsq*((*scale/abs_diag)*(*scale/abs_diag));
                *scale = abs_diag;
            }
            else {
                *sumsq = *sumsq + ((abs_diag/(*scale))*(abs_diag/(*scale)));
            }
        }
    }
}

/******************************************************************************/
// OpenMP-task wrapper: reset (scale, sumsq) to the neutral pair (0, 1) and
// run the sequential kernel, with dependences on A (in) and the outputs.
void plasma_core_omp_csyssq(plasma_enum_t uplo,
                            int n,
                            const plasma_complex32_t *A, int lda,
                            float *scale, float *sumsq,
                            plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(in:A[0:lda*n]) \
                     depend(out:scale[0:n]) \
                     depend(out:sumsq[0:n])
    {
        if (sequence->status == PlasmaSuccess) {
            *scale = 0.0;
            *sumsq = 1.0;
            plasma_core_csyssq(uplo, n, A, lda, scale, sumsq);
        }
    }
}

/******************************************************************************/
// Combine the per-tile (scale, sumsq) pairs of an n-by-n tile grid (stored
// column-major with leading dimension m) into a single norm value:
// off-diagonal tiles counted twice, then diagonal tiles, then
// *value = scl*sqrtf(sum).
void plasma_core_omp_csyssq_aux(int m, int n,
                                const float *scale, const float *sumsq,
                                float *value,
                                plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(in:scale[0:n]) \
                     depend(in:sumsq[0:n]) \
                     depend(out:value[0:1])
    {
        if (sequence->status == PlasmaSuccess) {
            float scl = 0.0;
            float sum = 1.0;

            // Strictly-lower tiles of the grid.
            // NOTE(review): the inner bound is n, not m, while idx = m*j+i
            // uses m as the leading dimension — matches the original source;
            // presumably callers pass a square (or m >= n) grid.  Confirm.
            for (int j = 0; j < n; j++)
                for (int i = j+1; i < n; i++) {
                    int idx = m*j+i;
                    plasma_fold_ssq(scale[idx], sumsq[idx], &scl, &sum);
                }

            // Each off-diagonal tile contributes twice (symmetry).
            sum = 2.0*sum;

            // Diagonal tiles.
            for (int j = 0; j < n; j++) {
                int idx = m*j+j;
                plasma_fold_ssq(scale[idx], sumsq[idx], &scl, &sum);
            }

            *value = scl*sqrtf(sum);
        }
    }
}
reduction_utilities.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Riccardo Rossi
//                   Denis Demidov
//

#if !defined(KRATOS_REDUCTION_UTILITIES_H_INCLUDED )
#define KRATOS_REDUCTION_UTILITIES_H_INCLUDED

// System includes
#include <tuple>
#include <limits>
#include <algorithm>

// External includes

// Project includes
#include "includes/define.h"
#include "utilities/atomic_utilities.h"

namespace Kratos
{

namespace Internals
{
/** @brief Helper class for null-initializiation */
template <class TObjectType>
struct NullInitialized
{
    static TObjectType Get()
    {
        return TObjectType();
    }
};

// Specialization: an array_1d is null-initialized by filling every component
// with the null value of its element type.
template <class TValueType, std::size_t ArraySize>
struct NullInitialized<array_1d<TValueType,ArraySize>>
{
    static array_1d<TValueType,ArraySize> Get()
    {
        array_1d<TValueType,ArraySize> array;
        std::fill_n(array.begin(), ArraySize, NullInitialized<TValueType>::Get());
        return array;
    }
};
} // namespace Internals

///@addtogroup KratosCore

//***********************************************************************************
//***********************************************************************************
//***********************************************************************************
/** @brief utility function to do a sum reduction */
template<class TDataType, class TReturnType = TDataType>
class SumReduction
{
public:
    typedef TDataType value_type;
    typedef TReturnType return_type;

    TReturnType mValue = Internals::NullInitialized<TReturnType>::Get(); // deliberately making the member value public, to allow one to change it as needed

    /// access to reduced value
    TReturnType GetValue() const
    {
        return mValue;
    }

    /// NON-THREADSAFE (fast) value of reduction, to be used within a single thread
    void LocalReduce(const TDataType value){
        mValue += value;
    }

    /// THREADSAFE (needs some sort of lock guard) reduction, to be used to sync threads
    void ThreadSafeReduce(const SumReduction<TDataType, TReturnType>& rOther)
    {
        AtomicAdd(mValue, rOther.mValue);
    }
};

//***********************************************************************************
//***********************************************************************************
//***********************************************************************************
/** @brief utility to reduce by subtraction; partial (negative) sums combine by
 *         addition, so the thread-safe merge is still an AtomicAdd. */
template<class TDataType, class TReturnType = TDataType>
class SubReduction
{
public:
    typedef TDataType value_type;
    typedef TReturnType return_type;

    TReturnType mValue = Internals::NullInitialized<TReturnType>::Get(); // deliberately making the member value public, to allow one to change it as needed

    /// access to reduced value
    TReturnType GetValue() const
    {
        return mValue;
    }

    /// NON-THREADSAFE (fast) value of reduction, to be used within a single thread
    void LocalReduce(const TDataType value){
        mValue -= value;
    }

    /// THREADSAFE (needs some sort of lock guard) reduction, to be used to sync threads
    void ThreadSafeReduce(const SubReduction<TDataType, TReturnType>& rOther)
    {
        AtomicAdd(mValue, rOther.mValue);
    }
};

//***********************************************************************************
//***********************************************************************************
//***********************************************************************************
/** @brief utility to compute the maximum; seeded with lowest() so any value wins. */
template<class TDataType, class TReturnType = TDataType>
class MaxReduction
{
public:
    typedef TDataType value_type;
    typedef TReturnType return_type;

    TReturnType mValue = std::numeric_limits<TReturnType>::lowest(); // deliberately making the member value public, to allow one to change it as needed

    /// access to reduced value
    TReturnType GetValue() const
    {
        return mValue;
    }

    /// NON-THREADSAFE (fast) value of reduction, to be used within a single thread
    void LocalReduce(const TDataType value){
        mValue = std::max(mValue,value);
    }

    /// THREADSAFE (needs some sort of lock guard) reduction, to be used to sync threads
    void ThreadSafeReduce(const MaxReduction<TDataType, TReturnType>& rOther)
    {
        // no atomic max available, so an omp critical section guards the merge
        #pragma omp critical
        mValue = std::max(mValue,rOther.mValue);
    }
};

//***********************************************************************************
//***********************************************************************************
//***********************************************************************************
/** @brief utility to compute the minimum; seeded with max() so any value wins. */
template<class TDataType, class TReturnType = TDataType>
class MinReduction
{
public:
    typedef TDataType value_type;
    typedef TReturnType return_type;

    TReturnType mValue = std::numeric_limits<TReturnType>::max(); // deliberately making the member value public, to allow one to change it as needed

    /// access to reduced value
    TReturnType GetValue() const
    {
        return mValue;
    }

    /// NON-THREADSAFE (fast) value of reduction, to be used within a single thread
    void LocalReduce(const TDataType value){
        mValue = std::min(mValue,value);
    }

    /// THREADSAFE (needs some sort of lock guard) reduction, to be used to sync threads
    void ThreadSafeReduce(const MinReduction<TDataType, TReturnType>& rOther)
    {
        #pragma omp critical
        mValue = std::min(mValue,rOther.mValue);
    }
};

//***********************************************************************************
//***********************************************************************************
//***********************************************************************************
/** @brief utility to gather all reduced values into a vector (order is
 *         whatever the thread merge order produces — not deterministic). */
template<class TDataType, class TReturnType = std::vector<TDataType>>
class AccumReduction
{
public:
    typedef TDataType value_type;
    typedef TReturnType return_type;

    TReturnType mValue = TReturnType(); // deliberately making the member value public, to allow one to change it as needed

    /// access to reduced value
    TReturnType GetValue() const
    {
        return mValue;
    }

    /// NON-THREADSAFE (fast) value of reduction, to be used within a single thread
    void LocalReduce(const TDataType value){
        mValue.push_back(value);
    }

    /// THREADSAFE (needs some sort of lock guard) reduction, to be used to sync threads
    void ThreadSafeReduce(const AccumReduction<TDataType, TReturnType>& rOther)
    {
        #pragma omp critical
        mValue.insert(mValue.end(), rOther.mValue.begin(), rOther.mValue.end());
    }
};

/** @brief combine several reducers into one, reducing tuples element-wise.
 *         The static recursion over the parameter pack is terminated by the
 *         enable_if<I == sizeof...(Reducer)> overloads. */
template <class... Reducer>
struct CombinedReduction {
    typedef std::tuple<typename Reducer::value_type...> value_type;
    typedef std::tuple<typename Reducer::return_type...> return_type;

    std::tuple<Reducer...> mChild;

    CombinedReduction() {}

    /// access to reduced value
    return_type GetValue(){
        return_type return_value;
        fill_value<0>(return_value);
        return return_value;
    }

    template <int I, class T>
    typename std::enable_if<(I < sizeof...(Reducer)), void>::type
    fill_value(T& v) {
        std::get<I>(v) = std::get<I>(mChild).GetValue();
        fill_value<I+1>(v);
    };

    template <int I, class T>
    typename std::enable_if<(I == sizeof...(Reducer)), void>::type
    fill_value(T& v) {}

    /// NON-THREADSAFE (fast) value of reduction, to be used within a single thread
    template <class... T>
    void LocalReduce(const std::tuple<T...> &&v) {
        // Static recursive loop over tuple elements
        reduce_local<0>(v);
    }

    /// THREADSAFE (needs some sort of lock guard) reduction, to be used to sync threads
    void ThreadSafeReduce(const CombinedReduction &other) {
        reduce_global<0>(other);
    }

private:

    template <int I, class T>
    typename std::enable_if<(I < sizeof...(Reducer)), void>::type
    reduce_local(T &&v) {
        std::get<I>(mChild).LocalReduce(std::get<I>(v));
        reduce_local<I+1>(std::forward<T>(v));
    };

    template <int I, class T>
    typename std::enable_if<(I == sizeof...(Reducer)), void>::type
    reduce_local(T &&v) {
        // Exit static recursion
    }

    template <int I>
    typename std::enable_if<(I < sizeof...(Reducer)), void>::type
    reduce_global(const CombinedReduction &other) {
        std::get<I>(mChild).ThreadSafeReduce(std::get<I>(other.mChild));
        reduce_global<I+1>(other);
    }

    template <int I>
    typename std::enable_if<(I == sizeof...(Reducer)), void>::type
    reduce_global(const CombinedReduction &other) {
        // Exit static recursion
    }
};

} // namespace Kratos.

#endif // KRATOS_REDUCTION_UTILITIES_H_INCLUDED defined
draw.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%                          DDDD   RRRR    AAA   W   W                         %
%                          D   D  R   R  A   A  W   W                         %
%                          D   D  RRRR   AAAAA  W W W                         %
%                          D   D  R RN   A   A  WW WW                         %
%                          DDDD   R   R  A   A  W   W                         %
%                                                                             %
%                                                                             %
%                    MagickCore Image Drawing Methods                         %
%                                                                             %
%                                                                             %
%                              Software Design                                %
%                                   Cristy                                    %
%                                 July 1998                                   %
%                                                                             %
%                                                                             %
%  Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    https://imagemagick.org/script/license.php                               %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990.  Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/

/*
  Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"

/*
  Define declarations.
*/
#define BezierQuantum  200
#define PrimitiveExtentPad  2048
#define MaxBezierCoordinates  4194304
#define ThrowPointExpectedException(token,exception) \
{ \
  (void) ThrowMagickException(exception,GetMagickModule(),DrawError, \
    "NonconformingDrawingPrimitiveDefinition","`%s'",token); \
  status=MagickFalse; \
  break; \
}

/*
  Typedef declarations.
*/
/* One polygon edge in the sorted active-edge representation. */
typedef struct _EdgeInfo
{
  SegmentInfo
    bounds;

  double
    scanline;

  PointInfo
    *points;

  size_t
    number_points;

  ssize_t
    direction;

  MagickBooleanType
    ghostline;

  size_t
    highwater;
} EdgeInfo;

/* Ellipse/arc element: center, semi-axes, rotation angle. */
typedef struct _ElementInfo
{
  double
    cx,
    cy,
    major,
    minor,
    angle;
} ElementInfo;

/* Growable primitive buffer threaded through the MVG tracing routines. */
typedef struct _MVGInfo
{
  PrimitiveInfo
    **primitive_info;

  size_t
    *extent;

  ssize_t
    offset;

  PointInfo
    point;

  ExceptionInfo
    *exception;
} MVGInfo;

typedef struct _PolygonInfo
{
  EdgeInfo
    *edges;

  size_t
    number_edges;
} PolygonInfo;

typedef enum
{
  MoveToCode,
  OpenCode,
  GhostlineCode,
  LineToCode,
  EndCode
} PathInfoCode;

typedef struct _PathInfo
{
  PointInfo
    point;

  PathInfoCode
    code;
} PathInfo;

/*
  Forward declarations.
*/
static Image
  *DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
    ExceptionInfo *);

static MagickBooleanType
  DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *,
    ExceptionInfo *),
  RenderMVGContent(Image *,const DrawInfo *,const size_t,ExceptionInfo *),
  TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
  TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
    const double,const MagickBooleanType,const MagickBooleanType),
  TraceBezier(MVGInfo *,const size_t),
  TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
  TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
  TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
  TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
  TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
  TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);

static PrimitiveInfo
  *TraceStrokePolygon(const Image *,const DrawInfo *,const PrimitiveInfo *);

static size_t
  TracePath(MVGInfo *,const char *,ExceptionInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e D r a w I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
%  The format of the AcquireDrawInfo method is:
%
%      DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
  DrawInfo
    *draw_info;

  /* AcquireCriticalMemory aborts on allocation failure, so no NULL check. */
  draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info));
  GetDrawInfo((ImageInfo *) NULL,draw_info);
  return(draw_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e D r a w I n f o                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneDrawInfo() makes a copy of the given draw_info structure.  If NULL
%  is specified, a new DrawInfo structure is created initialized to default
%  values.
%
%  The format of the CloneDrawInfo method is:
%
%      DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
%        const DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
  const DrawInfo *draw_info)
{
  DrawInfo
    *clone_info;

  ExceptionInfo
    *exception;

  clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetDrawInfo(image_info,clone_info);
  if (draw_info == (DrawInfo *) NULL)
    return(clone_info);
  exception=AcquireExceptionInfo();
  /* Deep-copy owned strings and images; plain members are copied by value. */
  if (draw_info->primitive != (char *) NULL)
    (void) CloneString(&clone_info->primitive,draw_info->primitive);
  if (draw_info->geometry != (char *) NULL)
    (void) CloneString(&clone_info->geometry,draw_info->geometry);
  clone_info->compliance=draw_info->compliance;
  clone_info->viewbox=draw_info->viewbox;
  clone_info->affine=draw_info->affine;
  clone_info->gravity=draw_info->gravity;
  clone_info->fill=draw_info->fill;
  clone_info->stroke=draw_info->stroke;
  clone_info->stroke_width=draw_info->stroke_width;
  if (draw_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
      exception);
  if (draw_info->stroke_pattern != (Image *) NULL)
    clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke_antialias=draw_info->stroke_antialias;
  clone_info->text_antialias=draw_info->text_antialias;
  clone_info->fill_rule=draw_info->fill_rule;
  clone_info->linecap=draw_info->linecap;
  clone_info->linejoin=draw_info->linejoin;
  clone_info->miterlimit=draw_info->miterlimit;
  clone_info->dash_offset=draw_info->dash_offset;
  clone_info->decorate=draw_info->decorate;
  clone_info->compose=draw_info->compose;
  if (draw_info->text != (char *) NULL)
    (void) CloneString(&clone_info->text,draw_info->text);
  if (draw_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    (void) CloneString(&clone_info->metrics,draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    (void) CloneString(&clone_info->family,draw_info->family);
  clone_info->style=draw_info->style;
  clone_info->stretch=draw_info->stretch;
  clone_info->weight=draw_info->weight;
  if (draw_info->encoding != (char *) NULL)
    (void) CloneString(&clone_info->encoding,draw_info->encoding);
  clone_info->pointsize=draw_info->pointsize;
  clone_info->kerning=draw_info->kerning;
  clone_info->interline_spacing=draw_info->interline_spacing;
  clone_info->interword_spacing=draw_info->interword_spacing;
  clone_info->direction=draw_info->direction;
  if (draw_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,draw_info->density);
  clone_info->align=draw_info->align;
  clone_info->undercolor=draw_info->undercolor;
  clone_info->border_color=draw_info->border_color;
  if (draw_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    {
      register ssize_t
        x;

      /* The dash pattern is a list terminated by a ~zero entry: count it,
         then copy into a zero-filled buffer with room to spare. */
      for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
      clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
        sizeof(*clone_info->dash_pattern));
      if (clone_info->dash_pattern == (double *) NULL)
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
        sizeof(*clone_info->dash_pattern));
      (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
        (x+1)*sizeof(*clone_info->dash_pattern));
    }
  clone_info->gradient=draw_info->gradient;
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    {
      size_t
        number_stops;

      number_stops=clone_info->gradient.number_stops;
      clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
        number_stops,sizeof(*clone_info->gradient.stops));
      if (clone_info->gradient.stops == (StopInfo *) NULL)
        /* NOTE(review): message tag reused from the dash-pattern branch above;
           a gradient-stops-specific tag would be clearer, but changing it
           requires a matching entry in the exception message tables. */
        ThrowFatalException(ResourceLimitFatalError,
          "UnableToAllocateDashPattern");
      (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
        (size_t) number_stops*sizeof(*clone_info->gradient.stops));
    }
  clone_info->bounds=draw_info->bounds;
  clone_info->fill_alpha=draw_info->fill_alpha;
  clone_info->stroke_alpha=draw_info->stroke_alpha;
  clone_info->element_reference=draw_info->element_reference;
  clone_info->clip_path=draw_info->clip_path;
  clone_info->clip_units=draw_info->clip_units;
  if (draw_info->clip_mask != (char *) NULL)
    (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
      MagickTrue,exception);
  if (draw_info->composite_mask != (Image *) NULL)
    clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
      MagickTrue,exception);
  clone_info->render=draw_info->render;
  clone_info->debug=IsEventLogging();
  exception=DestroyExceptionInfo(exception);
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n v e r t P a t h T o P o l y g o n                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConvertPathToPolygon() converts a path to the more efficient sorted
%  rendering form.
%
%  The format of the ConvertPathToPolygon method is:
%
%      PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
%
%  A description of each parameter follows:
%
%    o Method ConvertPathToPolygon returns the path in a more efficient sorted
%      rendering form of type PolygonInfo.
%
%    o draw_info: Specifies a pointer to an DrawInfo structure.
%
%    o path_info: Specifies a pointer to an PathInfo structure.
%
%
*/

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  qsort() comparator: orders edges by start y, then start x, then initial
  slope, then end y, then end x.  The macro returns from the enclosing
  function on the first unequal key.
*/
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
#define DrawCompareEdge(p,q) \
{ \
  if (((p)-(q)) < 0.0) \
    return(-1); \
  if (((p)-(q)) > 0.0) \
    return(1); \
}

  register const PointInfo
    *p,
    *q;

  /*
    Edge sorting for right-handed coordinate system.
  */
  p=((const EdgeInfo *) p_edge)->points;
  q=((const EdgeInfo *) q_edge)->points;
  DrawCompareEdge(p[0].y,q[0].y);
  DrawCompareEdge(p[0].x,q[0].x);
  DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)*
    (q[1].x-q[0].x));
  DrawCompareEdge(p[1].y,q[1].y);
  DrawCompareEdge(p[1].x,q[1].x);
  return(0);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

/*
  Log every edge of the polygon (direction, ghostline flag, bounds, points)
  to the drawing event log.  Debug aid only; no side effects on the polygon.
*/
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
  register EdgeInfo
    *p;

  register ssize_t
    i,
    j;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
  p=polygon_info->edges;
  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
  {
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
      (double) i);
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
      p->direction != MagickFalse ? "down" : "up");
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
      p->ghostline != MagickFalse ? "transparent" : "opaque");
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
      p->bounds.x2,p->bounds.y2);
    for (j=0; j < (ssize_t) p->number_points; j++)
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
        p->points[j].x,p->points[j].y);
    p++;
  }
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}

/*
  Reverse a point array in place (swap ends toward the middle).  Used so
  every stored edge runs in increasing y regardless of path direction.
*/
static void ReversePoints(PointInfo *points,const size_t number_points)
{
  PointInfo
    point;

  register ssize_t
    i;

  for (i=0; i < (ssize_t) (number_points >> 1); i++)
  {
    point=points[i];
    points[i]=points[number_points-(i+1)];
    points[number_points-(i+1)]=point;
  }
}

/*
  Convert a PathInfo sequence into a PolygonInfo: split the path into
  monotonic-in-y edges (an edge is closed whenever the y direction flips or
  a new subpath starts), normalize each edge to increasing y, and sort the
  edges with DrawCompareEdges for scanline rendering.

  NOTE(review): on any Acquire/ResizeQuantumMemory failure this returns NULL
  without releasing the partially built polygon_info/edges/points — a leak on
  the out-of-memory path; confirm against upstream before changing.
*/
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
{
  long
    direction,
    next_direction;

  PointInfo
    point,
    *points;

  PolygonInfo
    *polygon_info;

  SegmentInfo
    bounds;

  register ssize_t
    i,
    n;

  MagickBooleanType
    ghostline;

  size_t
    edge,
    number_edges,
    number_points;

  /*
    Convert a path to the more efficient sorted rendering form.
  */
  polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
  if (polygon_info == (PolygonInfo *) NULL)
    return((PolygonInfo *) NULL);
  number_edges=16;
  polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
    sizeof(*polygon_info->edges));
  if (polygon_info->edges == (EdgeInfo *) NULL)
    return((PolygonInfo *) NULL);
  (void) memset(polygon_info->edges,0,number_edges*
    sizeof(*polygon_info->edges));
  direction=0;
  edge=0;
  ghostline=MagickFalse;
  n=0;
  number_points=0;
  points=(PointInfo *) NULL;
  (void) memset(&point,0,sizeof(point));
  (void) memset(&bounds,0,sizeof(bounds));
  polygon_info->edges[edge].number_points=(size_t) n;
  polygon_info->edges[edge].scanline=0.0;
  polygon_info->edges[edge].highwater=0;
  polygon_info->edges[edge].ghostline=ghostline;
  polygon_info->edges[edge].direction=(ssize_t) direction;
  polygon_info->edges[edge].points=points;
  polygon_info->edges[edge].bounds=bounds;
  polygon_info->number_edges=0;
  for (i=0; path_info[i].code != EndCode; i++)
  {
    if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
        (path_info[i].code == GhostlineCode))
      {
        /*
          Move to.
        */
        if ((points != (PointInfo *) NULL) && (n >= 2))
          {
            /* flush the edge accumulated so far before starting a new one */
            if (edge == number_edges)
              {
                number_edges<<=1;
                polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                  polygon_info->edges,(size_t) number_edges,
                  sizeof(*polygon_info->edges));
                if (polygon_info->edges == (EdgeInfo *) NULL)
                  return((PolygonInfo *) NULL);
              }
            polygon_info->edges[edge].number_points=(size_t) n;
            polygon_info->edges[edge].scanline=(-1.0);
            polygon_info->edges[edge].highwater=0;
            polygon_info->edges[edge].ghostline=ghostline;
            polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
            if (direction < 0)
              ReversePoints(points,(size_t) n);
            polygon_info->edges[edge].points=points;
            polygon_info->edges[edge].bounds=bounds;
            polygon_info->edges[edge].bounds.y1=points[0].y;
            polygon_info->edges[edge].bounds.y2=points[n-1].y;
            points=(PointInfo *) NULL;
            ghostline=MagickFalse;
            edge++;
          }
        if (points == (PointInfo *) NULL)
          {
            number_points=16;
            points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
              sizeof(*points));
            if (points == (PointInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
        point=path_info[i].point;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        direction=0;
        n=1;
        continue;
      }
    /*
      Line to.
    */
    next_direction=((path_info[i].point.y > point.y) ||
      ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
       (path_info[i].point.x > point.x))) ? 1 : -1;
    if ((points != (PointInfo *) NULL) && (direction != 0) &&
        (direction != next_direction))
      {
        /*
          New edge.
        */
        point=points[n-1];
        if (edge == number_edges)
          {
            number_edges<<=1;
            polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
              polygon_info->edges,(size_t) number_edges,
              sizeof(*polygon_info->edges));
            if (polygon_info->edges == (EdgeInfo *) NULL)
              return((PolygonInfo *) NULL);
          }
        polygon_info->edges[edge].number_points=(size_t) n;
        polygon_info->edges[edge].scanline=(-1.0);
        polygon_info->edges[edge].highwater=0;
        polygon_info->edges[edge].ghostline=ghostline;
        polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
        if (direction < 0)
          ReversePoints(points,(size_t) n);
        polygon_info->edges[edge].points=points;
        polygon_info->edges[edge].bounds=bounds;
        polygon_info->edges[edge].bounds.y1=points[0].y;
        polygon_info->edges[edge].bounds.y2=points[n-1].y;
        number_points=16;
        points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
        n=1;
        ghostline=MagickFalse;
        points[0]=point;
        bounds.x1=point.x;
        bounds.x2=point.x;
        edge++;
      }
    direction=next_direction;
    if (points == (PointInfo *) NULL)
      continue;
    if (n == (ssize_t) number_points)
      {
        number_points<<=1;
        points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
          sizeof(*points));
        if (points == (PointInfo *) NULL)
          return((PolygonInfo *) NULL);
      }
    point=path_info[i].point;
    points[n]=point;
    if (point.x < bounds.x1)
      bounds.x1=point.x;
    if (point.x > bounds.x2)
      bounds.x2=point.x;
    n++;
  }
  if (points != (PointInfo *) NULL)
    {
      if (n < 2)
        points=(PointInfo *) RelinquishMagickMemory(points);
      else
        {
          /* flush the final pending edge */
          if (edge == number_edges)
            {
              number_edges<<=1;
              polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
                polygon_info->edges,(size_t) number_edges,
                sizeof(*polygon_info->edges));
              if (polygon_info->edges == (EdgeInfo *) NULL)
                return((PolygonInfo *) NULL);
            }
          polygon_info->edges[edge].number_points=(size_t) n;
          polygon_info->edges[edge].scanline=(-1.0);
          polygon_info->edges[edge].highwater=0;
          polygon_info->edges[edge].ghostline=ghostline;
          polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
          if (direction < 0)
            ReversePoints(points,(size_t) n);
          polygon_info->edges[edge].points=points;
          polygon_info->edges[edge].bounds=bounds;
          polygon_info->edges[edge].bounds.y1=points[0].y;
          polygon_info->edges[edge].bounds.y2=points[n-1].y;
          ghostline=MagickFalse;
          edge++;
        }
    }
  polygon_info->number_edges=edge;
  qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
    sizeof(*polygon_info->edges),DrawCompareEdges);
  if (IsEventLogging() != MagickFalse)
    LogPolygonInfo(polygon_info);
  return(polygon_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n v e r t P r i m i t i v e T o P a t h                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
%  path structure.
%
%  The format of the ConvertPrimitiveToPath method is:
%
%      PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
%        const PrimitiveInfo *primitive_info)
%
%  A description of each parameter follows:
%
%    o Method ConvertPrimitiveToPath returns a vector path structure of type
%      PathInfo.
%
%    o draw_info: a structure of type DrawInfo.
%
%    o primitive_info: Specifies a pointer to an PrimitiveInfo structure.
%
%
*/

/*
  Log a vector path (code + coordinates per element) to the drawing event
  log.  Debug aid only.
*/
static void LogPathInfo(const PathInfo *path_info)
{
  register const PathInfo
    *p;

  (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
  for (p=path_info; p->code != EndCode; p++)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
      "moveto ghostline" : p->code == OpenCode ? "moveto open" :
      p->code == MoveToCode ? "moveto" : p->code == LineToCode ?
      "lineto" : "?");
  (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}

/*
  Translate a PrimitiveInfo point list into PathInfo codes (MoveTo/LineTo/
  Open/Ghostline/End), dropping consecutive duplicate points and appending a
  ghostline back to the start point for subpaths that are not closed.
  Returns NULL for primitive kinds that have no path form.
*/
static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info)
{
  MagickBooleanType
    closed_subpath;

  PathInfo
    *path_info;

  PathInfoCode
    code;

  PointInfo
    p,
    q;

  register ssize_t
    i,
    n;

  ssize_t
    coordinates,
    start;

  /*
    Converts a PrimitiveInfo structure into a vector path structure.
  */
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    case ColorPrimitive:
    case ImagePrimitive:
    case PointPrimitive:
    case TextPrimitive:
      return((PathInfo *) NULL);
    default:
      break;
  }
  /* worst case: each input point expands to 3 path elements, plus EndCode */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
    sizeof(*path_info));
  if (path_info == (PathInfo *) NULL)
    return((PathInfo *) NULL);
  coordinates=0;
  closed_subpath=MagickFalse;
  n=0;
  p.x=(-1.0);
  p.y=(-1.0);
  q.x=(-1.0);
  q.y=(-1.0);
  start=0;
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    code=LineToCode;
    if (coordinates <= 0)
      {
        /*
          New subpath.
        */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        p=primitive_info[i].point;
        start=n;
        code=MoveToCode;
        closed_subpath=primitive_info[i].closed_subpath;
      }
    coordinates--;
    if ((code == MoveToCode) || (coordinates <= 0) ||
        (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
        (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
      {
        /*
          Eliminate duplicate points.
        */
        path_info[n].code=code;
        path_info[n].point=primitive_info[i].point;
        q=primitive_info[i].point;
        n++;
      }
    if (coordinates > 0)
      continue;  /* next point in current subpath */
    if (closed_subpath != MagickFalse)
      {
        closed_subpath=MagickFalse;
        continue;
      }
    /*
      Mark the p point as open if the subpath is not closed.
    */
    path_info[start].code=OpenCode;
    path_info[n].code=GhostlineCode;
    path_info[n].point=primitive_info[i].point;
    n++;
    path_info[n].code=LineToCode;
    path_info[n].point=p;
    n++;
  }
  path_info[n].code=EndCode;
  path_info[n].point.x=0.0;
  path_info[n].point.y=0.0;
  if (IsEventLogging() != MagickFalse)
    LogPathInfo(path_info);
  /* shrink the worst-case allocation down to the n+1 elements actually used */
  path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
    sizeof(*path_info));
  return(path_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y D r a w I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyDrawInfo() deallocates memory associated with an DrawInfo structure.
%
%  The format of the DestroyDrawInfo method is:
%
%      DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
  /*
    Release every heap-allocated member, invalidate the signature, then free
    the structure itself.  Always returns NULL so callers can write
    draw_info=DestroyDrawInfo(draw_info).
  */
  assert(draw_info != (DrawInfo *) NULL);
  if (draw_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info->signature == MagickCoreSignature);
  if (draw_info->primitive != (char *) NULL)
    draw_info->primitive=DestroyString(draw_info->primitive);
  if (draw_info->text != (char *) NULL)
    draw_info->text=DestroyString(draw_info->text);
  if (draw_info->geometry != (char *) NULL)
    draw_info->geometry=DestroyString(draw_info->geometry);
  if (draw_info->fill_pattern != (Image *) NULL)
    draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
  if (draw_info->stroke_pattern != (Image *) NULL)
    draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
  if (draw_info->font != (char *) NULL)
    draw_info->font=DestroyString(draw_info->font);
  if (draw_info->metrics != (char *) NULL)
    draw_info->metrics=DestroyString(draw_info->metrics);
  if (draw_info->family != (char *) NULL)
    draw_info->family=DestroyString(draw_info->family);
  if (draw_info->encoding != (char *) NULL)
    draw_info->encoding=DestroyString(draw_info->encoding);
  if (draw_info->density != (char *) NULL)
    draw_info->density=DestroyString(draw_info->density);
  if (draw_info->server_name != (char *) NULL)
    draw_info->server_name=(char *)
      RelinquishMagickMemory(draw_info->server_name);
  if (draw_info->dash_pattern != (double *) NULL)
    draw_info->dash_pattern=(double *) RelinquishMagickMemory(
      draw_info->dash_pattern);
  if (draw_info->gradient.stops != (StopInfo *) NULL)
    draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
      draw_info->gradient.stops);
  if (draw_info->clip_mask != (char *) NULL)
    draw_info->clip_mask=DestroyString(draw_info->clip_mask);
  if (draw_info->clipping_mask != (Image *) NULL)
    draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
  if (draw_info->composite_mask != (Image *) NULL)
    draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
  draw_info->signature=(~MagickCoreSignature);
  draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
  return(draw_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y E d g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyEdge() destroys the specified polygon edge.
%
%  The format of the DestroyEdge method is:
%
%      ssize_t DestroyEdge(PolygonInfo *polygon_info,const int edge)
%
%  A description of each parameter follows:
%
%    o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
%    o edge: the polygon edge number to destroy.
% */ static size_t DestroyEdge(PolygonInfo *polygon_info, const size_t edge) { assert(edge < polygon_info->number_edges); polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory( polygon_info->edges[edge].points); polygon_info->number_edges--; if (edge < polygon_info->number_edges) (void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1, (size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges)); return(polygon_info->number_edges); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y P o l y g o n I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPolygonInfo() destroys the PolygonInfo data structure. % % The format of the DestroyPolygonInfo method is: % % PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info) % % A description of each parameter follows: % % o polygon_info: Specifies a pointer to an PolygonInfo structure. % */ static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info) { register ssize_t i; for (i=0; i < (ssize_t) polygon_info->number_edges; i++) polygon_info->edges[i].points=(PointInfo *) RelinquishMagickMemory(polygon_info->edges[i].points); polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(polygon_info->edges); return((PolygonInfo *) RelinquishMagickMemory(polygon_info)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w A f f i n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawAffineImage() composites the source over the destination image as % dictated by the affine transform. % % The format of the DrawAffineImage method is: % % MagickBooleanType DrawAffineImage(Image *image,const Image *source, % const AffineMatrix *affine,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o source: the source image. 
% % o affine: the affine transform. % % o exception: return any errors or warnings in this structure. % */ static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine, const double y,const SegmentInfo *edge) { double intercept, z; register double x; SegmentInfo inverse_edge; /* Determine left and right edges. */ inverse_edge.x1=edge->x1; inverse_edge.y1=edge->y1; inverse_edge.x2=edge->x2; inverse_edge.y2=edge->y2; z=affine->ry*y+affine->tx; if (affine->sx >= MagickEpsilon) { intercept=(-z/affine->sx); x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z+(double) image->columns)/affine->sx; x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if (affine->sx < -MagickEpsilon) { intercept=(-z+(double) image->columns)/affine->sx; x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z/affine->sx); x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns)) { inverse_edge.x2=edge->x1; return(inverse_edge); } /* Determine top and bottom edges. 
*/ z=affine->sy*y+affine->ty; if (affine->rx >= MagickEpsilon) { intercept=(-z/affine->rx); x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z+(double) image->rows)/affine->rx; x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if (affine->rx < -MagickEpsilon) { intercept=(-z+(double) image->rows)/affine->rx; x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z/affine->rx); x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows)) { inverse_edge.x2=edge->x2; return(inverse_edge); } return(inverse_edge); } static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine) { AffineMatrix inverse_affine; double determinant; determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx* affine->ry); inverse_affine.sx=determinant*affine->sy; inverse_affine.rx=determinant*(-affine->rx); inverse_affine.ry=determinant*(-affine->ry); inverse_affine.sy=determinant*affine->sx; inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty* inverse_affine.ry; inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty* inverse_affine.sy; return(inverse_affine); } MagickExport MagickBooleanType DrawAffineImage(Image *image, const Image *source,const AffineMatrix *affine,ExceptionInfo *exception) { AffineMatrix inverse_affine; CacheView *image_view, *source_view; MagickBooleanType status; PixelInfo zero; PointInfo extent[4], min, max; register ssize_t i; SegmentInfo edge; ssize_t start, stop, y; /* Determine bounding box. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(source != (const Image *) NULL); assert(source->signature == MagickCoreSignature); assert(affine != (AffineMatrix *) NULL); extent[0].x=0.0; extent[0].y=0.0; extent[1].x=(double) source->columns-1.0; extent[1].y=0.0; extent[2].x=(double) source->columns-1.0; extent[2].y=(double) source->rows-1.0; extent[3].x=0.0; extent[3].y=(double) source->rows-1.0; for (i=0; i < 4; i++) { PointInfo point; point=extent[i]; extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx; extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty; } min=extent[0]; max=extent[0]; for (i=1; i < 4; i++) { if (min.x > extent[i].x) min.x=extent[i].x; if (min.y > extent[i].y) min.y=extent[i].y; if (max.x < extent[i].x) max.x=extent[i].x; if (max.y < extent[i].y) max.y=extent[i].y; } /* Affine transform image. */ if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; edge.x1=MagickMax(min.x,0.0); edge.y1=MagickMax(min.y,0.0); edge.x2=MagickMin(max.x,(double) image->columns-1.0); edge.y2=MagickMin(max.y,(double) image->rows-1.0); inverse_affine=InverseAffineMatrix(affine); GetPixelInfo(image,&zero); start=(ssize_t) ceil(edge.y1-0.5); stop=(ssize_t) floor(edge.y2+0.5); source_view=AcquireVirtualCacheView(source,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(source,image,stop-start,1) #endif for (y=start; y <= stop; y++) { PixelInfo composite, pixel; PointInfo point; register ssize_t x; register Quantum *magick_restrict q; SegmentInfo inverse_edge; ssize_t x_offset; inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge); if (inverse_edge.x2 < inverse_edge.x1) continue; 
q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1- 0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1), 1,exception); if (q == (Quantum *) NULL) continue; pixel=zero; composite=zero; x_offset=0; for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(inverse_edge.x2+0.5); x++) { point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+ inverse_affine.tx; point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+ inverse_affine.ty; status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel, point.x,point.y,&pixel,exception); if (status == MagickFalse) break; GetPixelInfoPixel(image,q,&composite); CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha, &composite); SetPixelViaPixelInfo(image,&composite,q); x_offset++; q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w B o u n d i n g R e c t a n g l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawBoundingRectangles() draws the bounding rectangles on the image. This % is only useful for developers debugging the rendering algorithm. % % The format of the DrawBoundingRectangles method is: % % MagickBooleanType DrawBoundingRectangles(Image *image, % const DrawInfo *draw_info,PolygonInfo *polygon_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o polygon_info: Specifies a pointer to a PolygonInfo structure. % % o exception: return any errors or warnings in this structure. 
%
*/

/*
  Clamp the configured stroke width to a value proportional to the image
  diagonal so absurdly wide strokes cannot blow up the debug rectangles.
*/
static inline double SaneStrokeWidth(const Image *image,
  const DrawInfo *draw_info)
{
  return(MagickMin((double) draw_info->stroke_width,
    (2.0*sqrt(2.0)+MagickEpsilon)*MagickMax(image->columns,image->rows)));
}

/*
  Debug aid: outline every polygon edge (red for "down" edges, green for
  "up") and the overall bounds (blue) on the image.  Works on a clone of
  draw_info so the caller's settings are untouched.
*/
static MagickBooleanType DrawBoundingRectangles(Image *image,
  const DrawInfo *draw_info,const PolygonInfo *polygon_info,
  ExceptionInfo *exception)
{
  double
    mid;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PointInfo
    end,
    resolution,
    start;

  PrimitiveInfo
    primitive_info[6];

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    coordinates;

  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  status=QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  /* default to 96 DPI unless the draw info carries a density */
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;
    }
  /* half the effective stroke width: padding around each rectangle */
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    SaneStrokeWidth(image,clone_info)/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /* union of all edge bounds, padded by mid and clamped to the image */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].direction != 0)
          status=QueryColorCompliance("#f00",AllCompliance,
            &clone_info->stroke,exception);
        else
          status=QueryColorCompliance("#0f0",AllCompliance,
            &clone_info->stroke,exception);
        if (status == MagickFalse)
          break;
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        status&=TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        status=DrawPrimitive(image,clone_info,primitive_info,exception);
        if (status == MagickFalse)
          break;
      }
      if (i < (ssize_t) polygon_info->number_edges)
        {
          clone_info=DestroyDrawInfo(clone_info);
          return(status == 0 ? MagickFalse : MagickTrue);
        }
    }
  /* finally outline the overall bounds in blue */
  status=QueryColorCompliance("#00f",AllCompliance,&clone_info->stroke,
    exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(MagickFalse);
    }
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  status&=TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  status=DrawPrimitive(image,clone_info,primitive_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  return(status == 0 ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w C l i p P a t h                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawClipPath() draws the clip path on the image mask.
%
%  The format of the DrawClipPath method is:
%
%      MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
%        const char *id,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the clip path id.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id,ExceptionInfo *exception)
{
  const char
    *clip_path;

  Image
    *clipping_mask;

  MagickBooleanType
    status;

  /*
    Look up the clip-path MVG stored as an image artifact under `id`, render
    it into a mask image, and install it as the image's write mask.
  */
  clip_path=GetImageArtifact(image,id);
  if (clip_path == (const char *) NULL)
    return(MagickFalse);
  clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,
    clip_path,exception);
  if (clipping_mask == (Image *) NULL)
    return(MagickFalse);
  status=SetImageMask(image,WritePixelMask,clipping_mask,exception);
  clipping_mask=DestroyImage(clipping_mask);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w C l i p p i n g M a s k                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawClippingMask() draws the clip path and returns it as an image clipping
%  mask.
%
%  The format of the DrawClippingMask method is:
%
%      Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
%        const char *id,const char *clip_path,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the clip path id.
%
%    o clip_path: the clip path.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *clip_path,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  Image
    *clip_mask,
    *separate_mask;

  MagickStatusType
    status;

  /*
    Draw a clip path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  /* fresh transparent canvas the size of the target image */
  clip_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(clip_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(clip_mask));
  status=SetImageMask(clip_mask,WritePixelMask,(Image *) NULL,exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  clip_mask->background_color.alpha_trait=BlendPixelTrait;
  status=SetImageBackgroundColor(clip_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      id);
  /* render the clip-path MVG in white fill / transparent stroke */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,clip_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  clone_info->clip_path=MagickTrue;
  status=RenderMVGContent(clip_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  /* extract the alpha channel as the mask and invert it */
  separate_mask=SeparateImage(clip_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      clip_mask=DestroyImage(clip_mask);
      clip_mask=separate_mask;
      status=NegateImage(clip_mask,MagickFalse,exception);
      if (status == MagickFalse)
        clip_mask=DestroyImage(clip_mask);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(clip_mask);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w C o m p o s i t e M a s k                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawCompositeMask() draws the mask path and returns it as an image mask.
%
%  The format of the DrawCompositeMask method is:
%
%      Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
%        const char *id,const char *mask_path,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the mask path id.
%
%    o mask_path: the mask path.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *mask_path,ExceptionInfo *exception)
{
  Image
    *composite_mask,
    *separate_mask;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  /*
    Draw a mask path.  Returns a grayscale mask image sized like `image`,
    or NULL on failure (caller owns the returned image).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  /*
    Create a canvas the size of the target image to render the mask into.
  */
  composite_mask=AcquireImage((const ImageInfo *) NULL,exception);
  status=SetImageExtent(composite_mask,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImage(composite_mask));
  /*
    Detach any inherited composite mask so it cannot clip this rendering,
    then start from a fully transparent background.
  */
  status=SetImageMask(composite_mask,CompositePixelMask,(Image *) NULL,
    exception);
  status=QueryColorCompliance("#0000",AllCompliance,
    &composite_mask->background_color,exception);
  composite_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  composite_mask->background_color.alpha_trait=BlendPixelTrait;
  (void) SetImageBackgroundColor(composite_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
      id);
  /*
    Render the mask path with an opaque white fill and a fully transparent,
    zero-width stroke.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,mask_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->alpha=OpaqueAlpha;
  status=RenderMVGContent(composite_mask,clone_info,0,exception);
  clone_info=DestroyDrawInfo(clone_info);
  /*
    Reduce the rendering to a grayscale mask: extract the alpha channel and
    negate it.  Ownership transfers from composite_mask to separate_mask;
    on NegateImage() failure the mask is destroyed and NULL is returned.
  */
  separate_mask=SeparateImage(composite_mask,AlphaChannel,exception);
  if (separate_mask != (Image *) NULL)
    {
      composite_mask=DestroyImage(composite_mask);
      composite_mask=separate_mask;
      status=NegateImage(composite_mask,MagickFalse,exception);
      if (status == MagickFalse)
        composite_mask=DestroyImage(composite_mask);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
  return(composite_mask);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w D a s h P o l y g o n                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
%  image while respecting the dash offset and dash pattern attributes.
%
%  The format of the DrawDashPolygon method is:
%
%      MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
%        const PrimitiveInfo *primitive_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
  double
    length,
    maximum_length,
    offset,
    scale,
    total_length;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PrimitiveInfo
    *dash_polygon;

  register double
    dx,
    dy;

  register ssize_t
    i;

  size_t
    number_vertices;

  ssize_t
    j,
    n;

  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
  /*
    Count the vertices of the primitive (terminated by UndefinedPrimitive).
  */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  /*
    Scratch polygon for the dash segments; sized with headroom (2n+32) since
    a single edge can emit several dash vertices.
  */
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    return(MagickFalse);
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  /*
    dash_pattern[] lengths are in user units; scale them into device space.
  */
  scale=ExpandAffine(&draw_info->affine);
  length=scale*draw_info->dash_pattern[0];
  offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  j=1;
  /*
    Consume the dash offset: advance `n` through the pattern until the offset
    is used up, leaving `length` as the remainder of the current pattern entry.
  */
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*draw_info->dash_pattern[n];
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  /*
    Walk each polygon edge, slicing it into dash/gap segments.  Pattern
    parity drives the state: odd `n` (gap) records the start point of the
    next dash in dash_polygon[0]; even `n` appends the dash end point and
    strokes the accumulated segment.
  */
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot(dx,dy);
    if (maximum_length > MaxBezierCoordinates)
      break;  /* reject pathologically long edges */
    if (fabs(length) < MagickEpsilon)
      {
        /* Current pattern entry exhausted: advance, wrapping at a zero. */
        if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
          n++;
        if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
          n=0;
        length=scale*draw_info->dash_pattern[n];
      }
    for (total_length=0.0; (length >= 0.0) && (maximum_length >=
      (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /* Gap entry: remember where the next dash starts. */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          j=1;
        }
      else
        {
          /* Dash entry: close the segment and stroke it. */
          if ((j+1) > (ssize_t) number_vertices)
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
        }
      if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
        n++;
      if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
        n=0;
      length=scale*draw_info->dash_pattern[n];
    }
    /* Carry the unconsumed remainder of this edge into the next edge. */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  /*
    Flush a trailing partial dash, nudged by epsilon so it has extent.
  */
  if ((total_length < maximum_length) && ((n & 0x01) == 0) && (j > 1))
    {
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w G r a d i e n t I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawGradientImage() draws a linear gradient on the image.
%
%  The format of the DrawGradientImage method is:
%
%      MagickBooleanType DrawGradientImage(Image *image,
%        const DrawInfo *draw_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  GetStopColorOffset() returns the raw gradient offset of pixel (x,y):
  for linear gradients, the (unnormalized) projection of the pixel onto the
  gradient vector; for radial gradients, the distance from the gradient
  center (scaled by the gradient radii/angle unless the spread is
  RepeatSpread).
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  switch (gradient->type)
  {
    case UndefinedGradient:
    case LinearGradient:
    {
      double
        gamma,
        length,
        offset,
        scale;

      PointInfo
        p,
        q;

      const SegmentInfo
        *gradient_vector;

      /*
        Project (x,y)-origin onto the gradient vector; gamma normalizes by
        |p|*|q| with PerceptibleReciprocal() guarding division by zero.
      */
      gradient_vector=(&gradient->gradient_vector);
      p.x=gradient_vector->x2-gradient_vector->x1;
      p.y=gradient_vector->y2-gradient_vector->y1;
      q.x=(double) x-gradient_vector->x1;
      q.y=(double) y-gradient_vector->y1;
      length=sqrt(q.x*q.x+q.y*q.y);
      gamma=sqrt(p.x*p.x+p.y*p.y)*length;
      gamma=PerceptibleReciprocal(gamma);
      scale=p.x*q.x+p.y*q.y;
      offset=gamma*scale*length;
      return(offset);
    }
    case RadialGradient:
    {
      PointInfo
        v;

      if (gradient->spread == RepeatSpread)
        {
          /* Repeat spread uses the raw Euclidean distance from center. */
          v.x=(double) x-gradient->center.x;
          v.y=(double) y-gradient->center.y;
          return(sqrt(v.x*v.x+v.y*v.y));
        }
      /*
        Rotate into the gradient's frame and normalize by the ellipse radii.
      */
      v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
        gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
      v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
        gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
      return(sqrt(v.x*v.x+v.y*v.y));
    }
  }
  return(0.0);
}

/*
  qsort() comparator ordering gradient stops by ascending offset.
*/
static int StopInfoCompare(const void *x,const void *y)
{
  StopInfo
    *stop_1,
    *stop_2;

  stop_1=(StopInfo *) x;
  stop_2=(StopInfo *) y;
  if (stop_1->offset > stop_2->offset)
    return(1);
  if (fabs(stop_1->offset-stop_2->offset) <= MagickEpsilon)
    return(0);
  return(-1);
}

MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const GradientInfo
    *gradient;

  const SegmentInfo
    *gradient_vector;

  double
    length;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    point;

  RectangleInfo
    bounding_box;

  ssize_t
    y;

  /*
    Draw linear or radial gradient on image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  /* Stops must be sorted by offset for the interval search below. */
  qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
    StopInfoCompare);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    PixelInfo
      composite,
      pixel;

    double
      alpha,
      offset;

    register Quantum
      *magick_restrict q;

    register ssize_t
      i,
      x;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    composite=zero;
    /* Seed the row's offset at x=0; refined per pixel inside the loop. */
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /* Pad: clamp offsets outside [0,1] to the first/last stop. */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                /* Blend the two stops bracketing this offset. */
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /* Reflect: mirror the offset on every other repetition. */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          MagickBooleanType
            antialias;

          double
            repeat;

          /*
            Repeat: wrap the offset modulo the gradient length (linear) or
            radius (radial); antialias the seam where consecutive tiles meet.
          */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /* At a tile seam blend the last stop into the first. */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawImage() draws a graphic primitive on your image.  The primitive
%  may be represented as a string or filename.  Precede the filename with an
%  "at" sign (@) and the contents of the file are drawn on the image.  You
%  can affect how text is drawn by setting one or more members of the draw
%  info structure.
%
%  The format of the DrawImage method is:
%
%      MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  CheckPrimitiveExtent() ensures the shared primitive-info buffer can hold
  `pad` more entries past mvg_info->offset (plus PrimitiveExtentPad slack),
  growing it if necessary.  Returns MagickTrue when enough storage is
  available; on allocation failure it throws, replaces the buffer with a
  minimal one so callers can unwind safely, and returns MagickFalse.
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const size_t pad)
{
  double
    extent;

  size_t
    quantum;

  /*
    Check if there is enough storage for drawing primitives.  The size
    check is done in double precision to avoid integer overflow.
  */
  extent=(double) mvg_info->offset+pad+PrimitiveExtentPad;
  quantum=sizeof(**mvg_info->primitive_info);
  if (((extent*quantum) < (double) SSIZE_MAX) &&
      ((extent*quantum) < (double) GetMaxMemoryRequest()))
    {
      if (extent <= (double) *mvg_info->extent)
        return(MagickTrue);
      *mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
        *mvg_info->primitive_info,(size_t) extent,quantum);
      if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
        {
          register ssize_t
            i;

          *mvg_info->extent=(size_t) extent;
          /* Mark the newly grown tail as unused. */
          for (i=mvg_info->offset+1; i < (ssize_t) extent; i++)
            (*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
          return(MagickTrue);
        }
    }
  /*
    Reallocation failed, allocate a primitive to facilitate unwinding.
  */
  (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
  if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
    *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
      *mvg_info->primitive_info);
  *mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory(
    PrimitiveExtentPad*quantum);
  (void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum);
  *mvg_info->extent=1;
  return(MagickFalse);
}

/*
  Splay-tree key comparator for MVG macro names (plain strcmp order).
*/
MagickExport int MVGMacroCompare(const void *target,const void *source)
{
  const char
    *p,
    *q;

  p=(const char *) target;
  q=(const char *) source;
  return(strcmp(p,q));
}

/*
  GetMVGMacros() scans an MVG primitive stream for named push/pop pairs
  (e.g. `push graphic-context "wheel" ... pop`) and returns a splay tree
  mapping each macro name to its body text, or NULL when `primitive` is
  NULL.  Caller owns the returned tree.
*/
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
  char
    *macro,
    *token;

  const char
    *q;

  size_t
    extent;

  SplayTreeInfo
    *macros;

  /*
    Scan graphic primitives for definitions and classes.
  */
  if (primitive == (const char *) NULL)
    return((SplayTreeInfo *) NULL);
  macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
    RelinquishMagickMemory);
  macro=AcquireString(primitive);
  token=AcquireString(primitive);
  extent=strlen(token)+MagickPathExtent;
  for (q=primitive; *q != '\0'; )
  {
    GetNextToken(q,&q,extent,token);
    if (*token == '\0')
      break;
    if (LocaleCompare("push",token) == 0)
      {
        register const char
          *end,
          *start;

        GetNextToken(q,&q,extent,token);
        if (*q == '"')
          {
            char
              name[MagickPathExtent];

            const char
              *p;

            ssize_t
              n;

            /*
              Named macro (e.g. push graphic-context "wheel").  Track
              push/pop nesting depth `n` until the matching pop closes it.
            */
            GetNextToken(q,&q,extent,token);
            start=q;
            end=q;
            (void) CopyMagickString(name,token,MagickPathExtent);
            n=1;
            for (p=q; *p != '\0'; )
            {
              GetNextToken(p,&p,extent,token);
              if (*token == '\0')
                break;
              if (LocaleCompare(token,"pop") == 0)
                {
                  end=p-strlen(token)-1;
                  n--;
                }
              if (LocaleCompare(token,"push") == 0)
                n++;
              if ((n == 0) && (end > start))
                {
                  /*
                    Extract macro.
                  */
                  GetNextToken(p,&p,extent,token);
                  (void) CopyMagickString(macro,start,(size_t) (end-start));
                  (void) AddValueToSplayTree(macros,ConstantString(name),
                    ConstantString(macro));
                  break;
                }
            }
          }
      }
  }
  token=DestroyString(token);
  macro=DestroyString(macro);
  return(macros);
}

/*
  IsPoint() reports whether `point` parses as a coordinate: true when
  StringToDouble() consumed characters or yielded a non-zero value.
*/
static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *p;

  double
    value;

  value=StringToDouble(point,&p);
  return((fabs(value) < MagickEpsilon) && (p == point) ?
MagickFalse : MagickTrue); } static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info, const PointInfo point) { primitive_info->coordinates=1; primitive_info->closed_subpath=MagickFalse; primitive_info->point=point; return(MagickTrue); } static MagickBooleanType RenderMVGContent(Image *image, const DrawInfo *draw_info,const size_t depth,ExceptionInfo *exception) { #define RenderImageTag "Render/Image" AffineMatrix affine, current; char keyword[MagickPathExtent], geometry[MagickPathExtent], *next_token, pattern[MagickPathExtent], *primitive, *token; const char *q; double angle, coordinates, cursor, factor, primitive_extent; DrawInfo *clone_info, **graphic_context; MagickBooleanType proceed; MagickStatusType status; MVGInfo mvg_info; PointInfo point; PrimitiveInfo *primitive_info; PrimitiveType primitive_type; register const char *p; register ssize_t i, x; SegmentInfo bounds; size_t extent, number_points, number_stops; SplayTreeInfo *macros; ssize_t defsDepth, j, k, n, symbolDepth; StopInfo *stops; TypeMetric metrics; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if (depth > MagickMaxRecursionDepth) ThrowBinaryException(DrawError,"VectorGraphicsNestedTooDeeply", image->filename); if ((draw_info->primitive == (char *) NULL) || (*draw_info->primitive == '\0')) return(MagickFalse); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image"); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (image->alpha_trait == UndefinedPixelTrait) { status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); if (status == MagickFalse) return(MagickFalse); 
} if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) && (*(draw_info->primitive+1) != '-') && (depth == 1)) primitive=FileToString(draw_info->primitive+1,~0UL,exception); else primitive=AcquireString(draw_info->primitive); if (primitive == (char *) NULL) return(MagickFalse); primitive_extent=(double) strlen(primitive); (void) SetImageArtifact(image,"mvg:vector-graphics",primitive); n=0; number_stops=0; stops=(StopInfo *) NULL; /* Allocate primitive info memory. */ graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { primitive=DestroyString(primitive); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } number_points=PrimitiveExtentPad; primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*primitive_info)); if (primitive_info == (PrimitiveInfo *) NULL) { primitive=DestroyString(primitive); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(primitive_info,0,(size_t) number_points* sizeof(*primitive_info)); (void) memset(&mvg_info,0,sizeof(mvg_info)); mvg_info.primitive_info=(&primitive_info); mvg_info.extent=(&number_points); mvg_info.exception=exception; graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info); graphic_context[n]->viewbox=image->page; if ((image->page.width == 0) || (image->page.height == 0)) { graphic_context[n]->viewbox.width=image->columns; graphic_context[n]->viewbox.height=image->rows; } token=AcquireString(primitive); extent=strlen(token)+MagickPathExtent; defsDepth=0; symbolDepth=0; cursor=0.0; macros=GetMVGMacros(primitive); status=MagickTrue; for (q=primitive; *q != '\0'; ) { /* Interpret graphic primitive. 
*/ GetNextToken(q,&q,MagickPathExtent,keyword); if (*keyword == '\0') break; if (*keyword == '#') { /* Comment. */ while ((*q != '\n') && (*q != '\0')) q++; continue; } p=q-strlen(keyword)-1; primitive_type=UndefinedPrimitive; current=graphic_context[n]->affine; GetAffineMatrix(&affine); switch (*keyword) { case ';': break; case 'a': case 'A': { if (LocaleCompare("affine",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.sx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.rx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ry=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.sy=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.tx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ty=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("alpha",keyword) == 0) { primitive_type=AlphaPrimitive; break; } if (LocaleCompare("arc",keyword) == 0) { primitive_type=ArcPrimitive; break; } status=MagickFalse; break; } case 'b': case 'B': { if (LocaleCompare("bezier",keyword) == 0) { primitive_type=BezierPrimitive; break; } if (LocaleCompare("border-color",keyword) == 0) { GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance, 
&graphic_context[n]->border_color,exception); break; } status=MagickFalse; break; } case 'c': case 'C': { if (LocaleCompare("class",keyword) == 0) { const char *mvg_class; GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } mvg_class=(const char *) GetValueFromSplayTree(macros,token); if (mvg_class != (const char *) NULL) { char *elements; ssize_t offset; /* Inject class elements in stream. */ offset=(ssize_t) (p-primitive); elements=AcquireString(primitive); elements[offset]='\0'; (void) ConcatenateString(&elements,mvg_class); (void) ConcatenateString(&elements,"\n"); (void) ConcatenateString(&elements,q); primitive=DestroyString(primitive); primitive=elements; q=primitive+offset; } break; } if (LocaleCompare("clip-path",keyword) == 0) { const char *clip_path; /* Take a node from within the MVG document, and duplicate it here. */ GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } (void) CloneString(&graphic_context[n]->clip_mask,token); clip_path=(const char *) GetValueFromSplayTree(macros,token); if (clip_path != (const char *) NULL) { if (graphic_context[n]->clipping_mask != (Image *) NULL) graphic_context[n]->clipping_mask= DestroyImage(graphic_context[n]->clipping_mask); graphic_context[n]->clipping_mask=DrawClippingMask(image, graphic_context[n],token,clip_path,exception); if (draw_info->compliance != SVGCompliance) { const char *clip_path; clip_path=(const char *) GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image, graphic_context[n]->clip_mask,clip_path); status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); } } break; } if (LocaleCompare("clip-rule",keyword) == 0) { ssize_t fill_rule; GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) 
fill_rule; break; } if (LocaleCompare("clip-units",keyword) == 0) { ssize_t clip_units; GetNextToken(q,&q,extent,token); clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse, token); if (clip_units == -1) { status=MagickFalse; break; } graphic_context[n]->clip_units=(ClipPathUnits) clip_units; if (clip_units == ObjectBoundingBox) { GetAffineMatrix(&current); affine.sx=draw_info->bounds.x2; affine.sy=draw_info->bounds.y2; affine.tx=draw_info->bounds.x1; affine.ty=draw_info->bounds.y1; break; } break; } if (LocaleCompare("circle",keyword) == 0) { primitive_type=CirclePrimitive; break; } if (LocaleCompare("color",keyword) == 0) { primitive_type=ColorPrimitive; break; } if (LocaleCompare("compliance",keyword) == 0) { /* MVG compliance associates a clipping mask with an image; SVG compliance associates a clipping mask with a graphics context. */ GetNextToken(q,&q,extent,token); graphic_context[n]->compliance=(ComplianceType) ParseCommandOption( MagickComplianceOptions,MagickFalse,token); break; } status=MagickFalse; break; } case 'd': case 'D': { if (LocaleCompare("decorate",keyword) == 0) { ssize_t decorate; GetNextToken(q,&q,extent,token); decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse, token); if (decorate == -1) { status=MagickFalse; break; } graphic_context[n]->decorate=(DecorationType) decorate; break; } if (LocaleCompare("density",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->density,token); break; } if (LocaleCompare("direction",keyword) == 0) { ssize_t direction; GetNextToken(q,&q,extent,token); direction=ParseCommandOption(MagickDirectionOptions,MagickFalse, token); if (direction == -1) status=MagickFalse; else graphic_context[n]->direction=(DirectionType) direction; break; } status=MagickFalse; break; } case 'e': case 'E': { if (LocaleCompare("ellipse",keyword) == 0) { primitive_type=EllipsePrimitive; break; } if (LocaleCompare("encoding",keyword) == 0) { GetNextToken(q,&q,extent,token); 
(void) CloneString(&graphic_context[n]->encoding,token); break; } status=MagickFalse; break; } case 'f': case 'F': { if (LocaleCompare("fill",keyword) == 0) { GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->fill_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->fill,exception); if (graphic_context[n]->fill_alpha != OpaqueAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; } break; } if (LocaleCompare("fill-opacity",keyword) == 0) { double opacity; GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; opacity=MagickMin(MagickMax(factor* StringToDouble(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); graphic_context[n]->fill_alpha*=opacity; if (graphic_context[n]->fill.alpha != TransparentAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; else graphic_context[n]->fill.alpha=(MagickRealType) ClampToQuantum(QuantumRange*(1.0-opacity)); break; } if (LocaleCompare("fill-rule",keyword) == 0) { ssize_t fill_rule; GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("font",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->font,token); if (LocaleCompare("none",token) == 0) graphic_context[n]->font=(char *) RelinquishMagickMemory( graphic_context[n]->font); break; } if (LocaleCompare("font-family",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->family,token); 
break; } if (LocaleCompare("font-size",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->pointsize=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("font-stretch",keyword) == 0) { ssize_t stretch; GetNextToken(q,&q,extent,token); stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token); if (stretch == -1) { status=MagickFalse; break; } graphic_context[n]->stretch=(StretchType) stretch; break; } if (LocaleCompare("font-style",keyword) == 0) { ssize_t style; GetNextToken(q,&q,extent,token); style=ParseCommandOption(MagickStyleOptions,MagickFalse,token); if (style == -1) { status=MagickFalse; break; } graphic_context[n]->style=(StyleType) style; break; } if (LocaleCompare("font-weight",keyword) == 0) { ssize_t weight; GetNextToken(q,&q,extent,token); weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token); if (weight == -1) weight=(ssize_t) StringToUnsignedLong(token); graphic_context[n]->weight=(size_t) weight; break; } status=MagickFalse; break; } case 'g': case 'G': { if (LocaleCompare("gradient-units",keyword) == 0) { GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("gravity",keyword) == 0) { ssize_t gravity; GetNextToken(q,&q,extent,token); gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token); if (gravity == -1) { status=MagickFalse; break; } graphic_context[n]->gravity=(GravityType) gravity; break; } status=MagickFalse; break; } case 'i': case 'I': { if (LocaleCompare("image",keyword) == 0) { ssize_t compose; primitive_type=ImagePrimitive; GetNextToken(q,&q,extent,token); compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token); if (compose == -1) { status=MagickFalse; break; } graphic_context[n]->compose=(CompositeOperator) compose; break; } if (LocaleCompare("interline-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->interline_spacing=StringToDouble(token, &next_token); if 
(token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("interword-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'k': case 'K': { if (LocaleCompare("kerning",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->kerning=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'l': case 'L': { if (LocaleCompare("letter-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); clone_info->text=AcquireString(" "); status&=GetTypeMetrics(image,clone_info,&metrics,exception); graphic_context[n]->kerning=metrics.width* StringToDouble(token,&next_token); clone_info=DestroyDrawInfo(clone_info); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("line",keyword) == 0) { primitive_type=LinePrimitive; break; } status=MagickFalse; break; } case 'm': case 'M': { if (LocaleCompare("mask",keyword) == 0) { const char *mask_path; /* Take a node from within the MVG document, and duplicate it here. 
*/ GetNextToken(q,&q,extent,token); mask_path=(const char *) GetValueFromSplayTree(macros,token); if (mask_path != (const char *) NULL) { if (graphic_context[n]->composite_mask != (Image *) NULL) graphic_context[n]->composite_mask= DestroyImage(graphic_context[n]->composite_mask); graphic_context[n]->composite_mask=DrawCompositeMask(image, graphic_context[n],token,mask_path,exception); if (draw_info->compliance != SVGCompliance) status=SetImageMask(image,CompositePixelMask, graphic_context[n]->composite_mask,exception); } break; } break; } case 'o': case 'O': { if (LocaleCompare("offset",keyword) == 0) { GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("opacity",keyword) == 0) { double opacity; GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; opacity=MagickMin(MagickMax(factor* StringToDouble(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); graphic_context[n]->fill_alpha*=opacity; graphic_context[n]->stroke_alpha*=opacity; break; } status=MagickFalse; break; } case 'p': case 'P': { if (LocaleCompare("path",keyword) == 0) { primitive_type=PathPrimitive; break; } if (LocaleCompare("point",keyword) == 0) { primitive_type=PointPrimitive; break; } if (LocaleCompare("polyline",keyword) == 0) { primitive_type=PolylinePrimitive; break; } if (LocaleCompare("polygon",keyword) == 0) { primitive_type=PolygonPrimitive; break; } if (LocaleCompare("pop",keyword) == 0) { GetNextToken(q,&q,extent,token); if (LocaleCompare("class",token) == 0) break; if (LocaleCompare("clip-path",token) == 0) break; if (LocaleCompare("defs",token) == 0) { defsDepth--; graphic_context[n]->render=defsDepth > 0 ? 
MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) break; if (LocaleCompare("graphic-context",token) == 0) { if (n <= 0) { (void) ThrowMagickException(exception,GetMagickModule(), DrawError,"UnbalancedGraphicContextPushPop","`%s'",token); status=MagickFalse; n=0; break; } if ((graphic_context[n]->clip_mask != (char *) NULL) && (draw_info->compliance != SVGCompliance)) if (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0) status=SetImageMask(image,WritePixelMask,(Image *) NULL, exception); graphic_context[n]=DestroyDrawInfo(graphic_context[n]); n--; break; } if (LocaleCompare("mask",token) == 0) break; if (LocaleCompare("pattern",token) == 0) break; if (LocaleCompare("symbol",token) == 0) { symbolDepth--; graphic_context[n]->render=symbolDepth > 0 ? MagickFalse : MagickTrue; break; } status=MagickFalse; break; } if (LocaleCompare("push",keyword) == 0) { GetNextToken(q,&q,extent,token); if (LocaleCompare("class",token) == 0) { /* Class context. */ for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"class") != 0) continue; break; } GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("clip-path",token) == 0) { GetNextToken(q,&q,extent,token); for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"clip-path") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("defs",token) == 0) { defsDepth++; graphic_context[n]->render=defsDepth > 0 ? 
MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent], type[MagickPathExtent]; SegmentInfo segment; GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); GetNextToken(q,&q,extent,token); (void) CopyMagickString(type,token,MagickPathExtent); GetNextToken(q,&q,extent,token); segment.x1=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.y1=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.x2=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.y2=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (LocaleCompare(type,"radial") == 0) { GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); } for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"gradient") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); bounds.x1=graphic_context[n]->affine.sx*segment.x1+ graphic_context[n]->affine.ry*segment.y1+ graphic_context[n]->affine.tx; bounds.y1=graphic_context[n]->affine.rx*segment.x1+ graphic_context[n]->affine.sy*segment.y1+ graphic_context[n]->affine.ty; bounds.x2=graphic_context[n]->affine.sx*segment.x2+ graphic_context[n]->affine.ry*segment.y2+ graphic_context[n]->affine.tx; 
bounds.y2=graphic_context[n]->affine.rx*segment.x2+ graphic_context[n]->affine.sy*segment.y2+ graphic_context[n]->affine.ty; (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-type",name); (void) SetImageArtifact(image,key,type); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%gx%g%+.15g%+.15g", MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0), MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0), bounds.x1,bounds.y1); (void) SetImageArtifact(image,key,geometry); GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("graphic-context",token) == 0) { n++; graphic_context=(DrawInfo **) ResizeQuantumMemory( graphic_context,(size_t) (n+1),sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL, graphic_context[n-1]); if (*q == '"') GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("mask",token) == 0) { GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("pattern",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent]; RectangleInfo bounds; GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); GetNextToken(q,&q,extent,token); bounds.x=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); bounds.y=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); bounds.width=(size_t) floor(StringToDouble(token,&next_token)+ 0.5); if (token == 
next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); bounds.height=(size_t) floor(StringToDouble(token,&next_token)+ 0.5); if (token == next_token) ThrowPointExpectedException(token,exception); for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"pattern") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-geometry", name); (void) FormatLocaleString(geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double) bounds.height,(double) bounds.x,(double) bounds.y); (void) SetImageArtifact(image,key,geometry); GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("symbol",token) == 0) { symbolDepth++; graphic_context[n]->render=symbolDepth > 0 ? 
MagickFalse : MagickTrue; break; } status=MagickFalse; break; } status=MagickFalse; break; } case 'r': case 'R': { if (LocaleCompare("rectangle",keyword) == 0) { primitive_type=RectanglePrimitive; break; } if (LocaleCompare("rotate",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0))); affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0))); affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0)))); affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0))); break; } if (LocaleCompare("roundRectangle",keyword) == 0) { primitive_type=RoundRectanglePrimitive; break; } status=MagickFalse; break; } case 's': case 'S': { if (LocaleCompare("scale",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.sx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.sy=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("skewX",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.ry=sin(DegreesToRadians(angle)); break; } if (LocaleCompare("skewY",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); affine.rx=(-tan(DegreesToRadians(angle)/2.0)); break; } if (LocaleCompare("stop-color",keyword) == 0) { PixelInfo stop_color; number_stops++; if (number_stops == 1) stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops)); else if (number_stops > 2) stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops, sizeof(*stops)); if (stops == (StopInfo *) NULL) { (void) 
ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance,&stop_color, exception); stops[number_stops-1].color=stop_color; GetNextToken(q,&q,extent,token); factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; stops[number_stops-1].offset=factor*StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("stroke",keyword) == 0) { GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->stroke_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->stroke,exception); if (graphic_context[n]->stroke_alpha != OpaqueAlpha) graphic_context[n]->stroke.alpha= graphic_context[n]->stroke_alpha; } break; } if (LocaleCompare("stroke-antialias",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ? 
MagickTrue : MagickFalse; break; } if (LocaleCompare("stroke-dasharray",keyword) == 0) { if (graphic_context[n]->dash_pattern != (double *) NULL) graphic_context[n]->dash_pattern=(double *) RelinquishMagickMemory(graphic_context[n]->dash_pattern); if (IsPoint(q) != MagickFalse) { const char *r; r=q; GetNextToken(r,&r,extent,token); if (*token == ',') GetNextToken(r,&r,extent,token); for (x=0; IsPoint(token) != MagickFalse; x++) { GetNextToken(r,&r,extent,token); if (*token == ',') GetNextToken(r,&r,extent,token); } graphic_context[n]->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2), sizeof(*graphic_context[n]->dash_pattern)); if (graphic_context[n]->dash_pattern == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); status=MagickFalse; break; } (void) memset(graphic_context[n]->dash_pattern,0,(size_t) (2*x+2)*sizeof(*graphic_context[n]->dash_pattern)); for (j=0; j < x; j++) { GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->dash_pattern[j]=StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); if (graphic_context[n]->dash_pattern[j] < 0.0) status=MagickFalse; } if ((x & 0x01) != 0) for ( ; j < (2*x); j++) graphic_context[n]->dash_pattern[j]= graphic_context[n]->dash_pattern[j-x]; graphic_context[n]->dash_pattern[j]=0.0; break; } GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("stroke-dashoffset",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->dash_offset=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } if (LocaleCompare("stroke-linecap",keyword) == 0) { ssize_t linecap; GetNextToken(q,&q,extent,token); linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token); if (linecap == -1) { status=MagickFalse; break; } graphic_context[n]->linecap=(LineCap) 
linecap; break; } if (LocaleCompare("stroke-linejoin",keyword) == 0) { ssize_t linejoin; GetNextToken(q,&q,extent,token); linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse, token); if (linejoin == -1) { status=MagickFalse; break; } graphic_context[n]->linejoin=(LineJoin) linejoin; break; } if (LocaleCompare("stroke-miterlimit",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->miterlimit=StringToUnsignedLong(token); break; } if (LocaleCompare("stroke-opacity",keyword) == 0) { double opacity; GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; opacity=MagickMin(MagickMax(factor* StringToDouble(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(token,exception); graphic_context[n]->stroke_alpha*=opacity; if (graphic_context[n]->stroke.alpha != TransparentAlpha) graphic_context[n]->stroke.alpha=graphic_context[n]->stroke_alpha; else graphic_context[n]->stroke.alpha=(MagickRealType) ClampToQuantum(QuantumRange*(1.0-opacity)); break; } if (LocaleCompare("stroke-width",keyword) == 0) { GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; graphic_context[n]->stroke_width=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 't': case 'T': { if (LocaleCompare("text",keyword) == 0) { primitive_type=TextPrimitive; break; } if (LocaleCompare("text-align",keyword) == 0) { ssize_t align; GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-anchor",keyword) == 0) { ssize_t align; GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } 
graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-antialias",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->text_antialias=StringToLong(token) != 0 ? MagickTrue : MagickFalse; break; } if (LocaleCompare("text-undercolor",keyword) == 0) { GetNextToken(q,&q,extent,token); status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->undercolor,exception); break; } if (LocaleCompare("translate",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.tx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ty=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); cursor=0.0; break; } status=MagickFalse; break; } case 'u': case 'U': { if (LocaleCompare("use",keyword) == 0) { const char *use; /* Get a macro from the MVG document, and "use" it here. 
*/ GetNextToken(q,&q,extent,token); use=(const char *) GetValueFromSplayTree(macros,token); if (use != (const char *) NULL) { clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); (void) CloneString(&clone_info->primitive,use); status=RenderMVGContent(image,clone_info,depth+1,exception); clone_info=DestroyDrawInfo(clone_info); } break; } break; } case 'v': case 'V': { if (LocaleCompare("viewbox",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble( token,&next_token)+0.5); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble( token,&next_token)+0.5); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } case 'w': case 'W': { if (LocaleCompare("word-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(token,exception); break; } status=MagickFalse; break; } default: { status=MagickFalse; break; } } if (status == MagickFalse) break; if ((fabs(affine.sx-1.0) >= MagickEpsilon) || (fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) || (fabs(affine.sy-1.0) >= MagickEpsilon) || (fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon)) { 
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx; graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx; graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy; graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy; graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+ current.tx; graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+ current.ty; } if (primitive_type == UndefinedPrimitive) { if (*q == '\0') { if (number_stops > 1) { GradientType type; type=LinearGradient; if (draw_info->gradient.type == RadialGradient) type=RadialGradient; (void) GradientImage(image,type,PadSpread,stops,number_stops, exception); } if (number_stops > 0) stops=(StopInfo *) RelinquishMagickMemory(stops); } if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),p); continue; } /* Parse the primitive attributes. */ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) if ((primitive_info[i].primitive == TextPrimitive) || (primitive_info[i].primitive == ImagePrimitive)) if (primitive_info[i].text != (char *) NULL) primitive_info[i].text=DestroyString(primitive_info[i].text); i=0; mvg_info.offset=i; j=0; primitive_info[0].point.x=0.0; primitive_info[0].point.y=0.0; primitive_info[0].coordinates=0; primitive_info[0].method=FloodfillMethod; primitive_info[0].closed_subpath=MagickFalse; for (x=0; *q != '\0'; x++) { /* Define points. 
*/ if (IsPoint(q) == MagickFalse) break; GetNextToken(q,&q,extent,token); point.x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); point.y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(token,exception); GetNextToken(q,(const char **) NULL,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); primitive_info[i].primitive=primitive_type; primitive_info[i].point=point; primitive_info[i].coordinates=0; primitive_info[i].method=FloodfillMethod; primitive_info[i].closed_subpath=MagickFalse; i++; mvg_info.offset=i; if (i < (ssize_t) number_points) continue; status&=CheckPrimitiveExtent(&mvg_info,number_points); } if (status == MagickFalse) break; if ((primitive_info[j].primitive == TextPrimitive) || (primitive_info[j].primitive == ImagePrimitive)) if (primitive_info[j].text != (char *) NULL) primitive_info[j].text=DestroyString(primitive_info[j].text); primitive_info[j].primitive=primitive_type; primitive_info[j].coordinates=(size_t) x; primitive_info[j].method=FloodfillMethod; primitive_info[j].closed_subpath=MagickFalse; /* Circumscribe primitive within a circle. */ bounds.x1=primitive_info[j].point.x; bounds.y1=primitive_info[j].point.y; bounds.x2=primitive_info[j].point.x; bounds.y2=primitive_info[j].point.y; for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++) { point=primitive_info[j+k].point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.y < bounds.y1) bounds.y1=point.y; if (point.x > bounds.x2) bounds.x2=point.x; if (point.y > bounds.y2) bounds.y2=point.y; } /* Speculate how many points our primitive might consume. 
*/ coordinates=(double) primitive_info[j].coordinates; switch (primitive_type) { case RectanglePrimitive: { coordinates*=5.0; break; } case RoundRectanglePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot((double) alpha,(double) beta); coordinates*=5.0; coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0* BezierQuantum+360.0; break; } case BezierPrimitive: { coordinates=(double) (BezierQuantum*primitive_info[j].coordinates); if (primitive_info[j].coordinates > (107*BezierQuantum)) { (void) ThrowMagickException(exception,GetMagickModule(),DrawError, "TooManyBezierCoordinates","`%s'",token); status=MagickFalse; break; } break; } case PathPrimitive: { char *s, *t; GetNextToken(q,&q,extent,token); coordinates=1.0; t=token; for (s=token; *s != '\0'; s=t) { double value; value=StringToDouble(s,&t); (void) value; if (s == t) { t++; continue; } coordinates++; } for (s=token; *s != '\0'; s++) if (strspn(s,"AaCcQqSsTt") != 0) coordinates+=(20.0*BezierQuantum)+360.0; break; } case CirclePrimitive: case ArcPrimitive: case EllipsePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot(alpha,beta); coordinates=2.0*(ceil(MagickPI*radius))+6.0*BezierQuantum+360.0; break; } default: break; } if (coordinates > MaxBezierCoordinates) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"TooManyBezierCoordinates","`%s'",token); status=MagickFalse; } if (status == MagickFalse) break; if (((size_t) (i+coordinates)) >= number_points) { /* Resize based on speculative points required by primitive. 
*/ number_points+=coordinates+1; if (number_points < (size_t) coordinates) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } mvg_info.offset=i; status&=CheckPrimitiveExtent(&mvg_info,number_points); } status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad); if (status == MagickFalse) break; mvg_info.offset=j; switch (primitive_type) { case PointPrimitive: default: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } status&=TracePoint(primitive_info+j,primitive_info[j].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case LinePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceLine(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RectanglePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceRectangle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RoundRectanglePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+2].point.x < 0.0) || (primitive_info[j+2].point.y < 0.0)) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0) { status=MagickFalse; break; } if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0) { status=MagickFalse; break; } status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case ArcPrimitive: { if (primitive_info[j].coordinates != 3) { primitive_type=UndefinedPrimitive; break; } status&=TraceArc(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case 
EllipsePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x < 0.0) || (primitive_info[j+1].point.y < 0.0)) { status=MagickFalse; break; } status&=TraceEllipse(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case CirclePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceCircle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PolylinePrimitive: { if (primitive_info[j].coordinates < 1) { status=MagickFalse; break; } break; } case PolygonPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } primitive_info[i]=primitive_info[j]; primitive_info[i].coordinates=0; primitive_info[j].coordinates++; primitive_info[j].closed_subpath=MagickTrue; i++; break; } case BezierPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } status&=TraceBezier(&mvg_info,primitive_info[j].coordinates); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PathPrimitive: { coordinates=(double) TracePath(&mvg_info,token,exception); if (coordinates == 0.0) { status=MagickFalse; break; } i=(ssize_t) (j+coordinates); break; } case AlphaPrimitive: case ColorPrimitive: { ssize_t method; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } GetNextToken(q,&q,extent,token); method=ParseCommandOption(MagickMethodOptions,MagickFalse,token); if (method == -1) { status=MagickFalse; break; } primitive_info[j].method=(PaintMethod) method; break; } case TextPrimitive: { char geometry[MagickPathExtent]; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } if (*token != ',') GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); /* Compute text cursor offset. 
*/ clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) && (fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon)) { mvg_info.point=primitive_info->point; primitive_info->point.x+=cursor; } else { mvg_info.point=primitive_info->point; cursor=0.0; } (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f", primitive_info->point.x,primitive_info->point.y); clone_info->render=MagickFalse; clone_info->text=AcquireString(token); (void) ConcatenateString(&clone_info->text," "); status&=GetTypeMetrics(image,clone_info,&metrics,exception); clone_info=DestroyDrawInfo(clone_info); cursor+=metrics.width; break; } case ImagePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); break; } } mvg_info.offset=i; if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1), p); if (status == MagickFalse) break; primitive_info[i].primitive=UndefinedPrimitive; if (i == 0) continue; /* Transform points. 
*/ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+ graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx; primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+ graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty; point=primitive_info[i].point; if (point.x < graphic_context[n]->bounds.x1) graphic_context[n]->bounds.x1=point.x; if (point.y < graphic_context[n]->bounds.y1) graphic_context[n]->bounds.y1=point.y; if (point.x > graphic_context[n]->bounds.x2) graphic_context[n]->bounds.x2=point.x; if (point.y > graphic_context[n]->bounds.y2) graphic_context[n]->bounds.y2=point.y; if (primitive_info[i].primitive == ImagePrimitive) break; if (i >= (ssize_t) number_points) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); } if (graphic_context[n]->render != MagickFalse) { if ((n != 0) && (draw_info->compliance != SVGCompliance) && (graphic_context[n]->clip_mask != (char *) NULL) && (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0)) { const char *clip_path; clip_path=(const char *) GetValueFromSplayTree(macros, graphic_context[n]->clip_mask); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image,graphic_context[n]->clip_mask, clip_path); status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); } status&=DrawPrimitive(image,graphic_context[n],primitive_info, exception); } proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType) primitive_extent); if (proceed == MagickFalse) break; if (status == 0) break; } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image"); /* Relinquish resources. 
*/ macros=DestroySplayTree(macros); token=DestroyString(token); if (primitive_info != (PrimitiveInfo *) NULL) { for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) if ((primitive_info[i].primitive == TextPrimitive) || (primitive_info[i].primitive == ImagePrimitive)) if (primitive_info[i].text != (char *) NULL) primitive_info[i].text=DestroyString(primitive_info[i].text); primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info); } primitive=DestroyString(primitive); if (stops != (StopInfo *) NULL) stops=(StopInfo *) RelinquishMagickMemory(stops); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); if (status == MagickFalse) ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition", keyword); return(status != 0 ? MagickTrue : MagickFalse); } MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info, ExceptionInfo *exception) { return(RenderMVGContent(image,draw_info,0,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P a t t e r n P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPatternPath() draws a pattern. % % The format of the DrawPatternPath method is: % % MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info, % const char *name,Image **pattern,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o name: the pattern name. % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType DrawPatternPath(Image *image, const DrawInfo *draw_info,const char *name,Image **pattern, ExceptionInfo *exception) { char property[MagickPathExtent]; const char *geometry, *path, *type; DrawInfo *clone_info; ImageInfo *image_info; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); assert(name != (const char *) NULL); (void) FormatLocaleString(property,MagickPathExtent,"%s",name); path=GetImageArtifact(image,property); if (path == (const char *) NULL) return(MagickFalse); (void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name); geometry=GetImageArtifact(image,property); if (geometry == (const char *) NULL) return(MagickFalse); if ((*pattern) != (Image *) NULL) *pattern=DestroyImage(*pattern); image_info=AcquireImageInfo(); image_info->size=AcquireString(geometry); *pattern=AcquireImage(image_info,exception); image_info=DestroyImageInfo(image_info); (void) QueryColorCompliance("#000000ff",AllCompliance, &(*pattern)->background_color,exception); (void) SetImageBackgroundColor(*pattern,exception); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), "begin pattern-path %s %s",name,geometry); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->fill_pattern=NewImageList(); clone_info->stroke_pattern=NewImageList(); (void) FormatLocaleString(property,MagickPathExtent,"%s-type",name); type=GetImageArtifact(image,property); if (type != (const char *) NULL) clone_info->gradient.type=(GradientType) ParseCommandOption( MagickGradientOptions,MagickFalse,type); (void) CloneString(&clone_info->primitive,path); status=RenderMVGContent(*pattern,clone_info,0,exception); clone_info=DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void) 
LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w P o l y g o n P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPolygonPrimitive() draws a polygon on the image. % % The format of the DrawPolygonPrimitive method is: % % MagickBooleanType DrawPolygonPrimitive(Image *image, % const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % o exception: return any errors or warnings in this structure. % */ static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info) { register ssize_t i; assert(polygon_info != (PolygonInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (polygon_info[i] != (PolygonInfo *) NULL) polygon_info[i]=DestroyPolygonInfo(polygon_info[i]); polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info); return(polygon_info); } static PolygonInfo **AcquirePolygonThreadSet( const PrimitiveInfo *primitive_info) { PathInfo *magick_restrict path_info; PolygonInfo **polygon_info; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads, sizeof(*polygon_info)); if (polygon_info == (PolygonInfo **) NULL) return((PolygonInfo **) NULL); (void) memset(polygon_info,0,number_threads*sizeof(*polygon_info)); path_info=ConvertPrimitiveToPath(primitive_info); if (path_info == (PathInfo *) NULL) return(DestroyPolygonThreadSet(polygon_info)); for (i=0; i < (ssize_t) number_threads; i++) { polygon_info[i]=ConvertPathToPolygon(path_info); if (polygon_info[i] == (PolygonInfo *) NULL) 
return(DestroyPolygonThreadSet(polygon_info));
  }
  path_info=(PathInfo *) RelinquishMagickMemory(path_info);
  return(polygon_info);
}

/*
  GetFillAlpha() returns the fill opacity (0.0 .. 1.0) of the polygon at
  pixel location (x,y) and, through *stroke_alpha, the stroke opacity.
  `mid' is half the stroke width in device units; `fill' selects whether a
  fill opacity is computed at all; `fill_rule' chooses even-odd vs.
  non-zero winding (applied in the winding-number pass later in this
  routine).  The function mutates per-edge scanline caches (scanline,
  highwater) in polygon_info, so each thread needs its own PolygonInfo.
*/
static double GetFillAlpha(PolygonInfo *polygon_info,const double mid,
  const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
  const ssize_t y,double *stroke_alpha)
{
  double
    alpha,
    beta,
    distance,
    subpath_alpha;

  PointInfo
    delta;

  register const PointInfo
    *q;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  ssize_t
    j,
    winding_number;

  /*
    Compute fill & stroke opacity for this (x,y) point.
  */
  *stroke_alpha=0.0;
  subpath_alpha=0.0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    /*
      The break below assumes edges are ordered by bounds.y1: once an edge
      starts below the current scanline, no later edge can intersect it.
    */
    if ((double) y <= (p->bounds.y1-mid-0.5))
      break;
    if ((double) y > (p->bounds.y2+mid+0.5))
      {
        /* Edge lies entirely above the scanline; discard it. */
        (void) DestroyEdge(polygon_info,(size_t) j);
        continue;
      }
    if (((double) x <= (p->bounds.x1-mid-0.5)) ||
        ((double) x > (p->bounds.x2+mid+0.5)))
      continue;
    /* Resume at the highwater index cached from the previous scanline. */
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
    {
      if ((double) y <= (p->points[i-1].y-mid-0.5))
        break;
      if ((double) y > (p->points[i].y+mid+0.5))
        continue;
      if (p->scanline != (double) y)
        {
          p->scanline=(double) y;
          p->highwater=(size_t) i;
        }
      /*
        Compute distance between a point and an edge: project (x,y) onto
        the segment q..q+1 and take the squared distance to the nearest
        of {start point, end point, interior of the segment}.
      */
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);
      if (beta <= 0.0)
        {
          /* Projection falls before the start point. */
          delta.x=(double) x-q->x;
          delta.y=(double) y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;
          if (beta >= alpha)
            {
              /* Projection falls past the end point. */
              delta.x=(double) x-(q+1)->x;
              delta.y=(double) y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              /*
                Perpendicular squared distance to the segment; the cross
                product `beta' is scaled by 1/|segment|^2 (guarded against
                degenerate zero-length segments by
                PerceptibleReciprocal).
              */
              alpha=PerceptibleReciprocal(alpha);
              beta=delta.x*(y-q->y)-delta.y*(x-q->x);
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.
*/ beta=0.0; if (p->ghostline == MagickFalse) { alpha=mid+0.5; if ((*stroke_alpha < 1.0) && (distance <= ((alpha+0.25)*(alpha+0.25)))) { alpha=mid-0.5; if (distance <= ((alpha+0.25)*(alpha+0.25))) *stroke_alpha=1.0; else { beta=1.0; if (fabs(distance-1.0) >= MagickEpsilon) beta=sqrt((double) distance); alpha=beta-mid-0.5; if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25))) *stroke_alpha=(alpha-0.25)*(alpha-0.25); } } } if ((fill == MagickFalse) || (distance > 1.0) || (subpath_alpha >= 1.0)) continue; if (distance <= 0.0) { subpath_alpha=1.0; continue; } if (distance > 1.0) continue; if (fabs(beta) < MagickEpsilon) { beta=1.0; if (fabs(distance-1.0) >= MagickEpsilon) beta=sqrt(distance); } alpha=beta-1.0; if (subpath_alpha < (alpha*alpha)) subpath_alpha=alpha*alpha; } } /* Compute fill opacity. */ if (fill == MagickFalse) return(0.0); if (subpath_alpha >= 1.0) return(1.0); /* Determine winding number. */ winding_number=0; p=polygon_info->edges; for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double) y <= p->bounds.y1) break; if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1)) continue; if ((double) x > p->bounds.x2) { winding_number+=p->direction ? 1 : -1; continue; } i=(ssize_t) MagickMax((double) p->highwater,1.0); for ( ; i < (ssize_t) (p->number_points-1); i++) if ((double) y <= p->points[i].y) break; q=p->points+i-1; if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x))) winding_number+=p->direction ? 
1 : -1; } if (fill_rule != NonZeroRule) { if ((MagickAbsoluteValue(winding_number) & 0x01) != 0) return(1.0); } else if (MagickAbsoluteValue(winding_number) != 0) return(1.0); return(subpath_alpha); } static MagickBooleanType DrawPolygonPrimitive(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType fill, status; double mid; PolygonInfo **magick_restrict polygon_info; register EdgeInfo *p; register ssize_t i; SegmentInfo bounds; ssize_t start_y, stop_y, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); assert(primitive_info != (PrimitiveInfo *) NULL); if (primitive_info->coordinates <= 1) return(MagickTrue); /* Compute bounding box. */ polygon_info=AcquirePolygonThreadSet(primitive_info); if (polygon_info == (PolygonInfo **) NULL) return(MagickFalse); DisableMSCWarning(4127) if (0) { status=DrawBoundingRectangles(image,draw_info,polygon_info[0],exception); if (status == MagickFalse) { polygon_info=DestroyPolygonThreadSet(polygon_info); return(status); } } RestoreMSCWarning if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon"); fill=(primitive_info->method == FillToBorderMethod) || (primitive_info->method == FloodfillMethod) ? 
MagickTrue : MagickFalse; mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0; bounds=polygon_info[0]->edges[0].bounds; for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++) { p=polygon_info[0]->edges+i; if (p->bounds.x1 < bounds.x1) bounds.x1=p->bounds.x1; if (p->bounds.y1 < bounds.y1) bounds.y1=p->bounds.y1; if (p->bounds.x2 > bounds.x2) bounds.x2=p->bounds.x2; if (p->bounds.y2 > bounds.y2) bounds.y2=p->bounds.y2; } bounds.x1-=(mid+1.0); bounds.y1-=(mid+1.0); bounds.x2+=(mid+1.0); bounds.y2+=(mid+1.0); if ((bounds.x1 >= (double) image->columns) || (bounds.y1 >= (double) image->rows) || (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0)) { polygon_info=DestroyPolygonThreadSet(polygon_info); return(MagickTrue); /* virtual polygon */ } bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ? (double) image->columns-1.0 : bounds.x1; bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ? (double) image->rows-1.0 : bounds.y1; bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ? (double) image->columns-1.0 : bounds.x2; bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ? (double) image->rows-1.0 : bounds.y2; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); if ((primitive_info->coordinates == 1) || (polygon_info[0]->number_edges == 0)) { /* Draw point. 
*/ start_y=(ssize_t) ceil(bounds.y1-0.5); stop_y=(ssize_t) floor(bounds.y2+0.5); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,stop_y-start_y+1,1) #endif for (y=start_y; y <= stop_y; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum *magick_restrict q; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x=(ssize_t) ceil(bounds.x1-0.5); stop_x=(ssize_t) floor(bounds.x2+0.5); x=start_x; q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } GetPixelInfo(image,&pixel); for ( ; x <= stop_x; x++) { if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) && (y == (ssize_t) ceil(primitive_info->point.y-0.5))) { GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); } q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); polygon_info=DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " end draw-polygon"); return(status); } /* Draw polygon or line. 
*/ start_y=(ssize_t) ceil(bounds.y1-0.5); stop_y=(ssize_t) floor(bounds.y2+0.5); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,stop_y-start_y+1,1) #endif for (y=start_y; y <= stop_y; y++) { const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; register ssize_t x; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x=(ssize_t) ceil(bounds.x1-0.5); stop_x=(ssize_t) floor(bounds.x2+0.5); q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+ 1),1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=start_x; x <= stop_x; x++) { double fill_alpha, stroke_alpha; PixelInfo fill_color, stroke_color; /* Fill and/or stroke. */ fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule, x,y,&stroke_alpha); if (draw_info->stroke_antialias == MagickFalse) { fill_alpha=fill_alpha > 0.25 ? 1.0 : 0.0; stroke_alpha=stroke_alpha > 0.25 ? 
1.0 : 0.0;
        }
      /* Composite fill first, then stroke, over the existing pixel. */
      GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,fill_alpha*fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
      CompositePixelOver(image,&stroke_color,stroke_alpha*stroke_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P r i m i t i v e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
%  The format of the DrawPrimitive method is:
%
%      MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
%        PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Clamp a double to the range representable by ssize_t so the subsequent
  cast in DrawPrimitive() cannot overflow (undefined behavior).
*/
static inline double ConstrainCoordinate(double x)
{
  if (x < (double) -SSIZE_MAX)
    return((double) -SSIZE_MAX);
  if (x > (double) SSIZE_MAX)
    return((double) SSIZE_MAX);
  return(x);
}

/*
  LogPrimitiveInfo() writes a human-readable trace of a primitive (its
  type, method, and coordinate list) to the drawing event log.
*/
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
}; PointInfo p, point, q; register ssize_t i, x; ssize_t coordinates, y; x=(ssize_t) ceil(primitive_info->point.x-0.5); y=(ssize_t) ceil(primitive_info->point.y-0.5); switch (primitive_info->primitive) { case AlphaPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case ColorPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case ImagePrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ImagePrimitive %.20g,%.20g",(double) x,(double) y); return; } case PointPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "PointPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case TextPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "TextPrimitive %.20g,%.20g",(double) x,(double) y); return; } default: break; } coordinates=0; p=primitive_info[0].point; q.x=(-1.0); q.y=(-1.0); for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; if (coordinates <= 0) { coordinates=(ssize_t) primitive_info[i].coordinates; (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin open (%.20g)",(double) coordinates); p=point; } point=primitive_info[i].point; if ((fabs(q.x-point.x) >= MagickEpsilon) || (fabs(q.y-point.y) >= MagickEpsilon)) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y); else (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y); q=point; coordinates--; if (coordinates > 0) continue; if ((fabs(p.x-point.x) >= MagickEpsilon) || (fabs(p.y-point.y) >= MagickEpsilon)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)", (double) coordinates); else (void) LogMagickEvent(DrawEvent,GetMagickModule()," 
end open (%.20g)", (double) coordinates); } } MagickExport MagickBooleanType DrawPrimitive(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { CacheView *image_view; MagickStatusType status; register ssize_t i, x; ssize_t y; if (image->debug != MagickFalse) { (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin draw-primitive"); (void) LogMagickEvent(DrawEvent,GetMagickModule(), " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx, draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy, draw_info->affine.tx,draw_info->affine.ty); } status=MagickTrue; if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) || (IsPixelInfoGray(&draw_info->stroke) == MagickFalse))) status=SetImageColorspace(image,sRGBColorspace,exception); if (draw_info->compliance == SVGCompliance) { status&=SetImageMask(image,WritePixelMask,draw_info->clipping_mask, exception); status&=SetImageMask(image,CompositePixelMask,draw_info->composite_mask, exception); } x=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.x-0.5)); y=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.y-0.5)); image_view=AcquireAuthenticCacheView(image,exception); switch (primitive_info->primitive) { case AlphaPrimitive: { if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; register Quantum *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); (void) SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { MagickBooleanType sync; PixelInfo pixel, target; (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target, exception); GetPixelInfo(image,&pixel); for (y=0; y < (ssize_t) image->rows; 
y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse) { q+=GetPixelChannels(image); continue; } GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { ChannelType channel_mask; PixelInfo target; (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y, &target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(double) draw_info->border_color.red; target.green=(double) draw_info->border_color.green; target.blue=(double) draw_info->border_color.blue; } channel_mask=SetImageChannelMask(image,AlphaChannel); status&=FloodfillPaintImage(image,draw_info,&target,x,y, primitive_info->method == FloodfillMethod ? 
MagickFalse : MagickTrue,exception); (void) SetImageChannelMask(image,channel_mask); break; } case ResetMethod: { MagickBooleanType sync; PixelInfo pixel; for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info,x,y,&pixel,exception); SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } } break; } case ColorPrimitive: { switch (primitive_info->method) { case PointMethod: default: { PixelInfo pixel; register Quantum *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetPixelInfo(image,&pixel); GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); (void) SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { MagickBooleanType sync; PixelInfo pixel, target; (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target, exception); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse) { q+=GetPixelChannels(image); continue; } GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { PixelInfo target; (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y, &target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(double) 
draw_info->border_color.red; target.green=(double) draw_info->border_color.green; target.blue=(double) draw_info->border_color.blue; } status&=FloodfillPaintImage(image,draw_info,&target,x,y, primitive_info->method == FloodfillMethod ? MagickFalse : MagickTrue,exception); break; } case ResetMethod: { MagickBooleanType sync; PixelInfo pixel; GetPixelInfo(image,&pixel); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { GetFillColor(draw_info,x,y,&pixel,exception); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } } break; } case ImagePrimitive: { AffineMatrix affine; char composite_geometry[MagickPathExtent]; Image *composite_image, *composite_images; ImageInfo *clone_info; RectangleInfo geometry; ssize_t x1, y1; if (primitive_info->text == (char *) NULL) break; clone_info=AcquireImageInfo(); if (LocaleNCompare(primitive_info->text,"data:",5) == 0) composite_images=ReadInlineImage(clone_info,primitive_info->text, exception); else { (void) CopyMagickString(clone_info->filename,primitive_info->text, MagickPathExtent); composite_images=ReadImage(clone_info,exception); } clone_info=DestroyImageInfo(clone_info); if (composite_images == (Image *) NULL) { status=0; break; } composite_image=RemoveFirstImageFromList(&composite_images); composite_images=DestroyImageList(composite_images); (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor) NULL,(void *) NULL); x1=(ssize_t) ceil(primitive_info[1].point.x-0.5); y1=(ssize_t) ceil(primitive_info[1].point.y-0.5); if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) || ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows))) { /* Resize image. 
*/ (void) FormatLocaleString(composite_geometry,MagickPathExtent, "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y); composite_image->filter=image->filter; (void) TransformImage(&composite_image,(char *) NULL, composite_geometry,exception); } if (composite_image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(composite_image,OpaqueAlphaChannel, exception); if (draw_info->alpha != OpaqueAlpha) (void) SetImageAlpha(composite_image,draw_info->alpha,exception); SetGeometry(image,&geometry); image->gravity=draw_info->gravity; geometry.x=x; geometry.y=y; (void) FormatLocaleString(composite_geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double) composite_image->rows,(double) geometry.x,(double) geometry.y); (void) ParseGravityGeometry(image,composite_geometry,&geometry,exception); affine=draw_info->affine; affine.tx=(double) geometry.x; affine.ty=(double) geometry.y; composite_image->interpolate=image->interpolate; if ((draw_info->compose == OverCompositeOp) || (draw_info->compose == SrcOverCompositeOp)) (void) DrawAffineImage(image,composite_image,&affine,exception); else (void) CompositeImage(image,composite_image,draw_info->compose, MagickTrue,geometry.x,geometry.y,exception); composite_image=DestroyImage(composite_image); break; } case PointPrimitive: { PixelInfo fill_color; register Quantum *q; if ((y < 0) || (y >= (ssize_t) image->rows)) break; if ((x < 0) || (x >= (ssize_t) image->columns)) break; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (Quantum *) NULL) break; GetFillColor(draw_info,x,y,&fill_color,exception); CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q, (double) GetPixelAlpha(image,q),q); (void) SyncCacheViewAuthenticPixels(image_view,exception); break; } case TextPrimitive: { char geometry[MagickPathExtent]; DrawInfo *clone_info; if (primitive_info->text == (char *) NULL) break; clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) 
CloneString(&clone_info->text,primitive_info->text); (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f", primitive_info->point.x,primitive_info->point.y); (void) CloneString(&clone_info->geometry,geometry); status&=AnnotateImage(image,clone_info,exception); clone_info=DestroyDrawInfo(clone_info); break; } default: { double mid, scale; DrawInfo *clone_info; if (IsEventLogging() != MagickFalse) LogPrimitiveInfo(primitive_info); scale=ExpandAffine(&draw_info->affine); if ((draw_info->dash_pattern != (double *) NULL) && (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) && (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) && (draw_info->stroke.alpha != (Quantum) TransparentAlpha)) { /* Draw dash polygon. */ clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->stroke_width=0.0; clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; status&=DrawPolygonPrimitive(image,clone_info,primitive_info, exception); clone_info=DestroyDrawInfo(clone_info); status=DrawDashPolygon(draw_info,primitive_info,image,exception); break; } mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0; if ((mid > 1.0) && ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) || (draw_info->stroke_pattern != (Image *) NULL))) { double x, y; MagickBooleanType closed_path; /* Draw strokes while respecting line cap/join attributes. 
*/ closed_path=primitive_info[0].closed_subpath; i=(ssize_t) primitive_info[0].coordinates; x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x); y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y); if ((x < MagickEpsilon) && (y < MagickEpsilon)) closed_path=MagickTrue; if ((((draw_info->linecap == RoundCap) || (closed_path != MagickFalse)) && (draw_info->linejoin == RoundJoin)) || (primitive_info[i].primitive != UndefinedPrimitive)) { status=DrawPolygonPrimitive(image,draw_info,primitive_info, exception); break; } clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->stroke_width=0.0; clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; status&=DrawPolygonPrimitive(image,clone_info,primitive_info, exception); clone_info=DestroyDrawInfo(clone_info); status&=DrawStrokePolygon(image,draw_info,primitive_info,exception); break; } status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception); break; } } image_view=DestroyCacheView(image_view); if (draw_info->compliance == SVGCompliance) { status&=SetImageMask(image,WritePixelMask,(Image *) NULL,exception); status&=SetImageMask(image,CompositePixelMask,(Image *) NULL,exception); } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w S t r o k e P o l y g o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on % the image while respecting the line cap and join attributes. % % The format of the DrawStrokePolygon method is: % % MagickBooleanType DrawStrokePolygon(Image *image, % const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. 
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/

/*
  DrawRoundLinecap() stamps a degenerate 4-point polygon (2*MagickEpsilon
  on a side) at the given end point so that a stroked segment is rendered
  with a round cap by DrawPolygonPrimitive().
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  PrimitiveInfo
    linecap[5];

  register ssize_t
    i;

  /* Four copies of the endpoint, nudged to form a tiny quad. */
  for (i=0; i < 4; i++)
    linecap[i]=(*primitive_info);
  linecap[0].coordinates=4;
  linecap[1].point.x+=2.0*MagickEpsilon;
  linecap[2].point.x+=2.0*MagickEpsilon;
  linecap[2].point.y+=2.0*MagickEpsilon;
  linecap[3].point.y+=2.0*MagickEpsilon;
  linecap[4].primitive=UndefinedPrimitive;  /* list terminator sentinel */
  return(DrawPolygonPrimitive(image,draw_info,linecap,exception));
}

/*
  DrawStrokePolygon() strokes each subpath of primitive_info by tracing
  its stroke outline (TraceStrokePolygon) and filling that outline with
  the stroke color; round line caps are added for open subpaths when
  requested.  Returns MagickTrue on success.
*/
static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  register const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " begin draw-stroke-polygon");
  /*
    The stroke is rendered as a fill of the traced outline: the clone's
    fill takes the stroke color/pattern and its own stroke is disabled.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,exception);
  clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
  {
    if (p->coordinates == 1)
      continue;  /* a lone point has no stroke outline */
    stroke_polygon=TraceStrokePolygon(image,draw_info,p);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        /* Trace failed; stroke_polygon is NULL so Relinquish is a no-op. */
        status=0;
        stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
        break;
      }
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if
(status == 0) break; q=p+p->coordinates-1; closed_path=p->closed_subpath; if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse)) { status&=DrawRoundLinecap(image,draw_info,p,exception); status&=DrawRoundLinecap(image,draw_info,q,exception); } } clone_info=DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " end draw-stroke-polygon"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A f f i n e M a t r i x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAffineMatrix() returns an AffineMatrix initialized to the identity % matrix. % % The format of the GetAffineMatrix method is: % % void GetAffineMatrix(AffineMatrix *affine_matrix) % % A description of each parameter follows: % % o affine_matrix: the affine matrix. % */ MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix) { (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(affine_matrix != (AffineMatrix *) NULL); (void) memset(affine_matrix,0,sizeof(*affine_matrix)); affine_matrix->sx=1.0; affine_matrix->sy=1.0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetDrawInfo() initializes draw_info to default values from image_info. % % The format of the GetDrawInfo method is: % % void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info) % % A description of each parameter follows: % % o image_info: the image info.. % % o draw_info: the draw info. % */ MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info) { char *next_token; const char *option; ExceptionInfo *exception; ImageInfo *clone_info; /* Initialize draw attributes. 
*/ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(draw_info != (DrawInfo *) NULL); (void) memset(draw_info,0,sizeof(*draw_info)); clone_info=CloneImageInfo(image_info); GetAffineMatrix(&draw_info->affine); exception=AcquireExceptionInfo(); (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill, exception); (void) QueryColorCompliance("#FFF0",AllCompliance,&draw_info->stroke, exception); draw_info->stroke_antialias=clone_info->antialias; draw_info->stroke_width=1.0; draw_info->fill_rule=EvenOddRule; draw_info->alpha=OpaqueAlpha; draw_info->fill_alpha=OpaqueAlpha; draw_info->stroke_alpha=OpaqueAlpha; draw_info->linecap=ButtCap; draw_info->linejoin=MiterJoin; draw_info->miterlimit=10; draw_info->decorate=NoDecoration; draw_info->pointsize=12.0; draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha; draw_info->compose=OverCompositeOp; draw_info->render=MagickTrue; draw_info->clip_path=MagickFalse; draw_info->debug=IsEventLogging(); if (clone_info->font != (char *) NULL) draw_info->font=AcquireString(clone_info->font); if (clone_info->density != (char *) NULL) draw_info->density=AcquireString(clone_info->density); draw_info->text_antialias=clone_info->antialias; if (fabs(clone_info->pointsize) >= MagickEpsilon) draw_info->pointsize=clone_info->pointsize; draw_info->border_color=clone_info->border_color; if (clone_info->server_name != (char *) NULL) draw_info->server_name=AcquireString(clone_info->server_name); option=GetImageOption(clone_info,"direction"); if (option != (const char *) NULL) draw_info->direction=(DirectionType) ParseCommandOption( MagickDirectionOptions,MagickFalse,option); else draw_info->direction=UndefinedDirection; option=GetImageOption(clone_info,"encoding"); if (option != (const char *) NULL) (void) CloneString(&draw_info->encoding,option); option=GetImageOption(clone_info,"family"); if (option != (const char *) NULL) (void) CloneString(&draw_info->family,option); option=GetImageOption(clone_info,"fill"); if 
(option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill, exception); option=GetImageOption(clone_info,"gravity"); if (option != (const char *) NULL) draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions, MagickFalse,option); option=GetImageOption(clone_info,"interline-spacing"); if (option != (const char *) NULL) draw_info->interline_spacing=StringToDouble(option,&next_token); option=GetImageOption(clone_info,"interword-spacing"); if (option != (const char *) NULL) draw_info->interword_spacing=StringToDouble(option,&next_token); option=GetImageOption(clone_info,"kerning"); if (option != (const char *) NULL) draw_info->kerning=StringToDouble(option,&next_token); option=GetImageOption(clone_info,"stroke"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke, exception); option=GetImageOption(clone_info,"strokewidth"); if (option != (const char *) NULL) draw_info->stroke_width=StringToDouble(option,&next_token); option=GetImageOption(clone_info,"style"); if (option != (const char *) NULL) draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions, MagickFalse,option); option=GetImageOption(clone_info,"undercolor"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor, exception); option=GetImageOption(clone_info,"weight"); if (option != (const char *) NULL) { ssize_t weight; weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option); if (weight == -1) weight=(ssize_t) StringToUnsignedLong(option); draw_info->weight=(size_t) weight; } exception=DestroyExceptionInfo(exception); draw_info->signature=MagickCoreSignature; clone_info=DestroyImageInfo(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P e r m u t a t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Permutate() returns 
%  the permutation of the (n,k).
%
%  The format of the Permutate method is:
%
%      void Permutate(ssize_t n,ssize_t k)
%
%  A description of each parameter follows:
%
%    o n:
%
%    o k:
%
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    r;

  register ssize_t
    i;

  /* binomial coefficient C(n,k) = n!/(k!(n-k)!), accumulated in a double */
  r=1.0;
  for (i=k+1; i <= n; i++)
    r*=i;
  for (i=1; i <= (n-k); i++)
    r/=i;
  return(r);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   T r a c e P r i m i t i v e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TracePrimitive is a collection of methods for generating graphic
%  primitives such as arcs, ellipses, paths, etc.
%
*/

/*
  Trace an arc primitive: rendered as an ellipse segment whose center is the
  midpoint of start/end and whose radii span half the bounding box.
*/
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    center,
    radius;

  center.x=0.5*(end.x+start.x);
  center.y=0.5*(end.y+start.y);
  radius.x=fabs(center.x-start.x);
  radius.y=fabs(center.y-start.y);
  return(TraceEllipse(mvg_info,center,radius,degrees));
}

/*
  Trace an SVG-style elliptical arc path segment from start to end, with the
  given radii (arc), x-axis rotation (angle, degrees), and the large-arc /
  sweep flags.  The arc is converted to a sequence of cubic Bezier segments,
  one per quarter turn, appended at mvg_info->offset.
*/
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  MagickStatusType
    status;

  PointInfo
    center,
    points[3],
    radii;

  register double
    cosine,
    sine;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    arc_segments;

  ssize_t
    offset;

  offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  /* degenerate cases: coincident endpoints -> point; zero radii -> line */
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    return(TracePoint(primitive_info,end));
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return(TraceLine(primitive_info,start,end));
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  /* transform to the ellipse's rotated coordinate frame */
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < MagickEpsilon)
    return(TraceLine(primitive_info,start,end));
  if (delta > 1.0)
    {
      /* radii too small for the endpoints: scale up uniformly */
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=2.0*MagickPI;
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=2.0*MagickPI;
  /* one Bezier segment per (slightly less than a) quarter turn */
  arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+
    MagickEpsilon))));
  status=MagickTrue;
  p=primitive_info;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    /* chain: segment starts where the previous one ended */
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;  /* pin the final segment exactly on the endpoint */
    status&=TraceBezier(mvg_info,4);
    /* TraceBezier may reallocate; re-derive p from the MVG cursor */
    p=(*mvg_info->primitive_info)+mvg_info->offset;
    mvg_info->offset+=p->coordinates;
    p+=p->coordinates;
  }
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* back-fill the primitive type over every generated coordinate */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(status == 0 ? MagickFalse : MagickTrue);
}

/*
  Trace a Bezier curve through the number_coordinates control points already
  stored at mvg_info->offset, replacing them with a polyline approximation
  (de Casteljau via Bernstein polynomials).
*/
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
  const size_t number_coordinates)
{
  double
    alpha,
    *coefficients,
    weight;

  PointInfo
    end,
    point,
    *points;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i,
    j;

  size_t
    control_points,
    quantum;

  /*
    Allocate coefficients.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  quantum=number_coordinates;
  /* scale tessellation density with the extent of the control polygon */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  quantum=(size_t) MagickMin((double) quantum/number_coordinates,
    (double) BezierQuantum);
  control_points=quantum*number_coordinates;
  if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse)
    return(MagickFalse);
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  coefficients=(double *) AcquireQuantumMemory((size_t)
    number_coordinates,sizeof(*coefficients));
  points=(PointInfo *) AcquireQuantumMemory((size_t) control_points,
    sizeof(*points));
  if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    Compute bezier points.
  */
  end=primitive_info[number_coordinates-1].point;
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  weight=0.0;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    /* Bernstein basis: alpha walks binomial(n-1,j)*w^j*(1-w)^(n-1-j) */
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      alpha*=weight/(1.0-weight);
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys.
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    if (TracePoint(p,points[i]) == MagickFalse)
      return(MagickFalse);
    p+=p->coordinates;
  }
  if (TracePoint(p,end) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
  return(MagickTrue);
}

/*
  Trace a circle whose center is `start' and whose radius is the distance to
  `end'; delegates to TraceEllipse with equal radii and a full 0-360 sweep.
*/
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
  const PointInfo end)
{
  double
    alpha,
    beta,
    radius;

  PointInfo
    offset,
    degrees;

  alpha=end.x-start.x;
  beta=end.y-start.y;
  radius=hypot((double) alpha,(double) beta);
  offset.x=(double) radius;
  offset.y=(double) radius;
  degrees.x=0.0;
  degrees.y=360.0;
  return(TraceEllipse(mvg_info,start,offset,degrees));
}

/*
  Trace an ellipse (or elliptical arc from arc.x to arc.y degrees) as a
  segmented polyline appended at mvg_info->offset.
*/
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
  const PointInfo radii,const PointInfo arc)
{
  double
    coordinates,
    delta,
    step,
    x,
    y;

  PointInfo
    angle,
    point;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  /*
    Ellipses are just short segmented polys.
  */
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  primitive_info->coordinates=0;
  if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
    return(MagickTrue);  /* degenerate ellipse: nothing to trace */
  /* finer step for larger radii, capped at pi/8 */
  delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
  step=MagickPI/8.0;
  if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
    step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
  angle.x=DegreesToRadians(arc.x);
  y=arc.y;
  while (y < arc.x)
    y+=360.0;  /* normalize so the sweep is non-negative */
  angle.y=DegreesToRadians(y);
  coordinates=ceil((angle.y-angle.x)/step+1.0);
  /* guard against pathological arcs requesting absurd vertex counts */
  if ((coordinates > (double) SSIZE_MAX) ||
      (coordinates > (double) GetMaxMemoryRequest()))
    {
      (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'","");
      return(MagickFalse);
    }
  if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse)
    return(MagickFalse);
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  for (p=primitive_info; angle.x < angle.y; angle.x+=step)
  {
    point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
    point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
    if (TracePoint(p,point) == MagickFalse)
      return(MagickFalse);
    p+=p->coordinates;
  }
  /* always emit the exact endpoint of the sweep */
  point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
  point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickFalse;
  /* a full sweep closes on itself: mark the subpath closed */
  x=fabs(primitive_info[0].point.x-
    primitive_info[primitive_info->coordinates-1].point.x);
  y=fabs(primitive_info[0].point.y-
    primitive_info[primitive_info->coordinates-1].point.y);
  if ((x < MagickEpsilon) && (y < MagickEpsilon))
    primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

/*
  Trace a line segment; a zero-length line collapses to a point primitive.
*/
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  if (TracePoint(primitive_info,start) == MagickFalse)
    return(MagickFalse);
  if ((fabs(start.x-end.x) < MagickEpsilon) &&
      (fabs(start.y-end.y) < MagickEpsilon))
    {
      primitive_info->primitive=PointPrimitive;
      primitive_info->coordinates=1;
      return(MagickTrue);
    }
  if (TracePoint(primitive_info+1,end) == MagickFalse)
    return(MagickFalse);
  (primitive_info+1)->primitive=primitive_info->primitive;
  primitive_info->coordinates=2;
  primitive_info->closed_subpath=MagickFalse;
  return(MagickTrue);
}

/*
  Parse an SVG-style path string (M/L/H/V/C/S/Q/T/A/Z commands, upper case =
  absolute, lower case = relative) and append the traced coordinates at
  mvg_info->offset.  Returns the total number of coordinates traced, or 0 on
  parse/allocation failure.
*/
static size_t TracePath(MVGInfo *mvg_info,const char *path,
  ExceptionInfo *exception)
{
  char
    *next_token,
    token[MagickPathExtent];

  const char
    *p;

  double
    x,
    y;

  int
    attribute,
    last_attribute;

  MagickBooleanType
    status;

  PointInfo
    end = {0.0, 0.0},
    points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
    point = {0.0, 0.0},
    start = {0.0, 0.0};

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  register PrimitiveInfo
    *q;

  register ssize_t
    i;

  size_t
    number_coordinates,
    z_count;

  ssize_t
    subpath_offset;

  subpath_offset=mvg_info->offset;
  primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
  status=MagickTrue;
  attribute=0;
  number_coordinates=0;
  z_count=0;
  primitive_type=primitive_info->primitive;
  q=primitive_info;
  for (p=path; *p != '\0'; )
  {
    if (status == MagickFalse)
      break;
    while (isspace((int) ((unsigned char) *p)) != 0)
      p++;
    if (*p == '\0')
      break;
    last_attribute=attribute;
    attribute=(int) (*p++);
    switch (attribute)
    {
      case 'a':
      case 'A':
      {
        double
          angle = 0.0;

        MagickBooleanType
          large_arc = MagickFalse,
          sweep = MagickFalse;

        PointInfo
          arc = {0.0, 0.0};

        /*
          Elliptical arc.
        */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          arc.x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          arc.y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          angle=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
          end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
          if (TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'c':
      case 'C':
      {
        /*
          Cubic Bézier curve.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 4; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'H':
      case 'h':
      {
        /* horizontal line to */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'l':
      case 'L':
      {
        /*
          Line to.
        */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'M':
      case 'm':
      {
        /*
          Move to.
        */
        if (mvg_info->offset != subpath_offset)
          {
            /* close out the previous subpath before starting a new one */
            primitive_info=(*mvg_info->primitive_info)+subpath_offset;
            primitive_info->coordinates=(size_t) (q-primitive_info);
            number_coordinates+=primitive_info->coordinates;
            primitive_info=q;
            subpath_offset=mvg_info->offset;
          }
        i=0;
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          x=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
          point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
          if (i == 0)
            start=point;  /* remember subpath start for 'Z' */
          i++;
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'q':
      case 'Q':
      {
        /*
          Quadratic Bézier curve.
        */
        do
        {
          points[0]=point;
          for (i=1; i < 3; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
            points[i]=end;
          }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 's':
      case 'S':
      {
        /*
          Cubic Bézier curve (smooth: first control point reflects the
          previous curve's second control point).
        */
        do
        {
          points[0]=points[3];
          points[1].x=2.0*points[3].x-points[2].x;
          points[1].y=2.0*points[3].y-points[2].y;
          for (i=2; i < 4; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            if (*p == ',')
              p++;
            end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
            points[i]=end;
          }
          if (strchr("CcSs",last_attribute) == (char *) NULL)
            {
              /* no preceding cubic: reflection degenerates to the point */
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 4; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,4) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 't':
      case 'T':
      {
        /*
          Quadratic Bézier curve (smooth: control point reflects the
          previous curve's control point).
        */
        do
        {
          points[0]=points[2];
          points[1].x=2.0*points[2].x-points[1].x;
          points[1].y=2.0*points[2].y-points[1].y;
          for (i=2; i < 3; i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            x=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            y=StringToDouble(token,&next_token);
            if (token == next_token)
              ThrowPointExpectedException(token,exception);
            end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
            end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
            points[i]=end;
          }
          if (status == MagickFalse)
            break;
          if (strchr("QqTt",last_attribute) == (char *) NULL)
            {
              points[0]=point;
              points[1]=point;
            }
          for (i=0; i < 3; i++)
            (q+i)->point=points[i];
          if (TraceBezier(mvg_info,3) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          point=end;
          last_attribute=attribute;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'v':
      case 'V':
      {
        /*
          Line to (vertical).
        */
        do
        {
          GetNextToken(p,&p,MagickPathExtent,token);
          if (*token == ',')
            GetNextToken(p,&p,MagickPathExtent,token);
          y=StringToDouble(token,&next_token);
          if (token == next_token)
            ThrowPointExpectedException(token,exception);
          point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          while (isspace((int) ((unsigned char) *p)) != 0)
            p++;
          if (*p == ',')
            p++;
        } while (IsPoint(p) != MagickFalse);
        break;
      }
      case 'z':
      case 'Z':
      {
        /*
          Close path.
        */
        point=start;
        if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
          return(0);
        q=(*mvg_info->primitive_info)+mvg_info->offset;
        if (TracePoint(q,point) == MagickFalse)
          return(0);
        mvg_info->offset+=q->coordinates;
        q+=q->coordinates;
        primitive_info=(*mvg_info->primitive_info)+subpath_offset;
        primitive_info->coordinates=(size_t) (q-primitive_info);
        primitive_info->closed_subpath=MagickTrue;
        number_coordinates+=primitive_info->coordinates;
        primitive_info=q;
        subpath_offset=mvg_info->offset;
        z_count++;
        break;
      }
      default:
      {
        ThrowPointExpectedException(token,exception);
        break;
      }
    }
  }
  if (status == MagickFalse)
    return(0);
  primitive_info=(*mvg_info->primitive_info)+subpath_offset;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  number_coordinates+=primitive_info->coordinates;
  /* back-fill the primitive type; multiple closed subpaths may be holes */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    q--;
    q->primitive=primitive_type;
    if (z_count > 1)
      q->method=FillToBorderMethod;
  }
  q=primitive_info;
  return(number_coordinates);
}

/*
  Trace an axis-aligned rectangle as a closed 5-point polyline.
*/
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  PointInfo
    point;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  p=primitive_info;
  if (TracePoint(p,start) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  point.x=start.x;
  point.y=end.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  if (TracePoint(p,end) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  point.x=end.x;
  point.y=start.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  if (TracePoint(p,start) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

/*
  Trace a rounded rectangle: four quarter-ellipse corners (radii `arc',
  clamped to half the rectangle extent) joined into one closed subpath.
*/
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
  const PointInfo start,const PointInfo end,PointInfo arc)
{
  PointInfo
    degrees,
    point,
    segment;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  ssize_t
    offset;

  offset=mvg_info->offset;
  segment.x=fabs(end.x-start.x);
  segment.y=fabs(end.y-start.y);
  if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
    {
      /* degenerate rectangle: emit nothing */
      (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
      return(MagickTrue);
    }
  /* corner radii cannot exceed half the rectangle extent */
  if (arc.x > (0.5*segment.x))
    arc.x=0.5*segment.x;
  if (arc.y > (0.5*segment.y))
    arc.y=0.5*segment.y;
  /* top-right corner */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+arc.y;
  degrees.x=270.0;
  degrees.y=360.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* bottom-right corner */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=0.0;
  degrees.y=90.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* bottom-left corner */
  point.x=start.x+arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=90.0;
  degrees.y=180.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* top-left corner */
  point.x=start.x+arc.x;
  point.y=start.y+arc.y;
  degrees.x=180.0;
  degrees.y=270.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  /* close the path back to its first point */
  if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+offset;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
p->primitive=primitive_info->primitive; p--; } return(MagickTrue); } static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info, const size_t number_vertices,const double offset) { double distance; register double dx, dy; register ssize_t i; ssize_t j; dx=0.0; dy=0.0; for (i=1; i < (ssize_t) number_vertices; i++) { dx=primitive_info[0].point.x-primitive_info[i].point.x; dy=primitive_info[0].point.y-primitive_info[i].point.y; if ((fabs((double) dx) >= MagickEpsilon) || (fabs((double) dy) >= MagickEpsilon)) break; } if (i == (ssize_t) number_vertices) i=(ssize_t) number_vertices-1L; distance=hypot((double) dx,(double) dy); primitive_info[0].point.x=(double) (primitive_info[i].point.x+ dx*(distance+offset)/distance); primitive_info[0].point.y=(double) (primitive_info[i].point.y+ dy*(distance+offset)/distance); for (j=(ssize_t) number_vertices-2; j >= 0; j--) { dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x; dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y; if ((fabs((double) dx) >= MagickEpsilon) || (fabs((double) dy) >= MagickEpsilon)) break; } distance=hypot((double) dx,(double) dy); primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+ dx*(distance+offset)/distance); primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+ dy*(distance+offset)/distance); return(MagickTrue); } static PrimitiveInfo *TraceStrokePolygon(const Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) { #define CheckPathExtent(pad) \ if ((ssize_t) (q+(pad)) >= (ssize_t) max_strokes) \ { \ if (~max_strokes < (pad)) \ { \ path_p=(PointInfo *) RelinquishMagickMemory(path_p); \ path_q=(PointInfo *) RelinquishMagickMemory(path_q); \ } \ else \ { \ max_strokes+=(pad); \ path_p=(PointInfo *) ResizeQuantumMemory(path_p,max_strokes, \ sizeof(*path_p)); \ path_q=(PointInfo *) ResizeQuantumMemory(path_q,max_strokes, \ sizeof(*path_q)); \ } \ if ((path_p == (PointInfo *) 
NULL) || (path_q == (PointInfo *) NULL)) \ { \ if (path_p != (PointInfo *) NULL) \ path_p=(PointInfo *) RelinquishMagickMemory(path_p); \ if (path_q != (PointInfo *) NULL) \ path_q=(PointInfo *) RelinquishMagickMemory(path_q); \ polygon_primitive=(PrimitiveInfo *) \ RelinquishMagickMemory(polygon_primitive); \ return((PrimitiveInfo *) NULL); \ } \ } typedef struct _LineSegment { double p, q; } LineSegment; double delta_theta, dot_product, mid, miterlimit; LineSegment dx = {0,0}, dy = {0,0}, inverse_slope = {0,0}, slope = {0,0}, theta = {0,0}; MagickBooleanType closed_path; PointInfo box_p[5], box_q[5], center, offset, *path_p, *path_q; PrimitiveInfo *polygon_primitive, *stroke_polygon; register ssize_t i; size_t arc_segments, max_strokes, number_vertices; ssize_t j, n, p, q; /* Allocate paths. */ number_vertices=primitive_info->coordinates; max_strokes=2*number_vertices+6*BezierQuantum+360; polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_vertices+2UL,sizeof(*polygon_primitive)); if (polygon_primitive == (PrimitiveInfo *) NULL) return((PrimitiveInfo *) NULL); (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices* sizeof(*polygon_primitive)); closed_path=primitive_info[0].closed_subpath; if (((draw_info->linejoin == RoundJoin) || (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse)) { polygon_primitive[number_vertices]=primitive_info[1]; number_vertices++; } polygon_primitive[number_vertices].primitive=UndefinedPrimitive; /* Compute the slope for the first line segment, p. */ dx.p=0.0; dy.p=0.0; for (n=1; n < (ssize_t) number_vertices; n++) { dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x; dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y; if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon)) break; } if (n == (ssize_t) number_vertices) { if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse)) { /* Zero length subpath. 
*/ stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory( sizeof(*stroke_polygon)); stroke_polygon[0]=polygon_primitive[0]; stroke_polygon[0].coordinates=0; polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory( polygon_primitive); return(stroke_polygon); } n=(ssize_t) number_vertices-1L; } path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes, sizeof(*path_p)); if (path_p == (PointInfo *) NULL) { polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory( polygon_primitive); return((PrimitiveInfo *) NULL); } path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes, sizeof(*path_q)); if (path_q == (PointInfo *) NULL) { path_p=(PointInfo *) RelinquishMagickMemory(path_p); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory( polygon_primitive); return((PrimitiveInfo *) NULL); } slope.p=0.0; inverse_slope.p=0.0; if (fabs(dx.p) < MagickEpsilon) { if (dx.p >= 0.0) slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else if (fabs(dy.p) < MagickEpsilon) { if (dy.p >= 0.0) inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else inverse_slope.p=dx.p < 0.0 ? 
1.0/MagickEpsilon : -1.0/MagickEpsilon; } else { slope.p=dy.p/dx.p; inverse_slope.p=(-1.0/slope.p); } mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0; miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid); if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse)) (void) TraceSquareLinecap(polygon_primitive,number_vertices,mid); offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0))); offset.y=(double) (offset.x*inverse_slope.p); if ((dy.p*offset.x-dx.p*offset.y) > 0.0) { box_p[0].x=polygon_primitive[0].point.x-offset.x; box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p; box_p[1].x=polygon_primitive[n].point.x-offset.x; box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p; box_q[0].x=polygon_primitive[0].point.x+offset.x; box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p; box_q[1].x=polygon_primitive[n].point.x+offset.x; box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p; } else { box_p[0].x=polygon_primitive[0].point.x+offset.x; box_p[0].y=polygon_primitive[0].point.y+offset.y; box_p[1].x=polygon_primitive[n].point.x+offset.x; box_p[1].y=polygon_primitive[n].point.y+offset.y; box_q[0].x=polygon_primitive[0].point.x-offset.x; box_q[0].y=polygon_primitive[0].point.y-offset.y; box_q[1].x=polygon_primitive[n].point.x-offset.x; box_q[1].y=polygon_primitive[n].point.y-offset.y; } /* Create strokes for the line join attribute: bevel, miter, round. */ p=0; q=0; path_q[p++]=box_q[0]; path_p[q++]=box_p[0]; for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++) { /* Compute the slope for this line segment, q. */ dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x; dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y; dot_product=dx.q*dx.q+dy.q*dy.q; if (dot_product < 0.25) continue; slope.q=0.0; inverse_slope.q=0.0; if (fabs(dx.q) < MagickEpsilon) { if (dx.q >= 0.0) slope.q=dy.q < 0.0 ? 
-1.0/MagickEpsilon : 1.0/MagickEpsilon; else slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else if (fabs(dy.q) < MagickEpsilon) { if (dy.q >= 0.0) inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon; else inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon; } else { slope.q=dy.q/dx.q; inverse_slope.q=(-1.0/slope.q); } offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0))); offset.y=(double) (offset.x*inverse_slope.q); dot_product=dy.q*offset.x-dx.q*offset.y; if (dot_product > 0.0) { box_p[2].x=polygon_primitive[n].point.x-offset.x; box_p[2].y=polygon_primitive[n].point.y-offset.y; box_p[3].x=polygon_primitive[i].point.x-offset.x; box_p[3].y=polygon_primitive[i].point.y-offset.y; box_q[2].x=polygon_primitive[n].point.x+offset.x; box_q[2].y=polygon_primitive[n].point.y+offset.y; box_q[3].x=polygon_primitive[i].point.x+offset.x; box_q[3].y=polygon_primitive[i].point.y+offset.y; } else { box_p[2].x=polygon_primitive[n].point.x+offset.x; box_p[2].y=polygon_primitive[n].point.y+offset.y; box_p[3].x=polygon_primitive[i].point.x+offset.x; box_p[3].y=polygon_primitive[i].point.y+offset.y; box_q[2].x=polygon_primitive[n].point.x-offset.x; box_q[2].y=polygon_primitive[n].point.y-offset.y; box_q[3].x=polygon_primitive[i].point.x-offset.x; box_q[3].y=polygon_primitive[i].point.y-offset.y; } if (fabs((double) (slope.p-slope.q)) < MagickEpsilon) { box_p[4]=box_p[1]; box_q[4]=box_q[1]; } else { box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+ box_p[3].y)/(slope.p-slope.q)); box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y); box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+ box_q[3].y)/(slope.p-slope.q)); box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y); } CheckPathExtent(6*BezierQuantum+360); dot_product=dx.q*dy.p-dx.p*dy.q; if (dot_product <= 0.0) switch (draw_info->linejoin) { case BevelJoin: { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; 
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_p[p++]=box_p[4]; else { path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { path_q[q++]=box_q[4]; path_p[p++]=box_p[4]; } else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_p[p++]=box_p[4]; else { path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x); theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x); if (theta.q < theta.p) theta.q+=2.0*MagickPI; arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/ (2.0*sqrt((double) (1.0/mid))))); CheckPathExtent(arc_segments+6*BezierQuantum+360); path_q[q].x=box_q[1].x; path_q[q].y=box_q[1].y; q++; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); path_q[q].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); path_q[q].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); q++; } path_q[q++]=box_q[2]; break; } default: break; } else switch (draw_info->linejoin) { case BevelJoin: { path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_q[q++]=box_q[4]; else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= 
miterlimit) { path_q[q++]=box_q[4]; path_p[p++]=box_p[4]; } else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_q[q++]=box_q[4]; else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x); theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x); if (theta.p < theta.q) theta.p+=2.0*MagickPI; arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/ (2.0*sqrt((double) (1.0/mid))))); CheckPathExtent(arc_segments+6*BezierQuantum+360); path_p[p++]=box_p[1]; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); path_p[p].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); path_p[p].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); p++; } path_p[p++]=box_p[2]; break; } default: break; } slope.p=slope.q; inverse_slope.p=inverse_slope.q; box_p[0]=box_p[2]; box_p[1]=box_p[3]; box_q[0]=box_q[2]; box_q[1]=box_q[3]; dx.p=dx.q; dy.p=dy.q; n=i; } path_p[p++]=box_p[1]; path_q[q++]=box_q[1]; /* Trace stroked polygon. 
*/ stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon)); if (stroke_polygon != (PrimitiveInfo *) NULL) { for (i=0; i < (ssize_t) p; i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=path_p[i]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; } for ( ; i < (ssize_t) (p+q+closed_path); i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[p+closed_path].point; i++; } stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; stroke_polygon[i].primitive=UndefinedPrimitive; stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1); } path_p=(PointInfo *) RelinquishMagickMemory(path_p); path_q=(PointInfo *) RelinquishMagickMemory(path_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return(stroke_polygon); }
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 8; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,12);t1++) { lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24)); ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(3*t1-1,2)),ceild(24*t2-Nz-4,8));t3<=min(min(min(floord(Nt+Ny-4,8),floord(12*t1+Ny+21,8)),floord(24*t2+Ny+20,8)),floord(24*t1-24*t2+Nz+Ny+19,8));t3++) { for (t4=max(max(max(0,ceild(3*t1-31,32)),ceild(24*t2-Nz-124,128)),ceild(8*t3-Ny-124,128));t4<=min(min(min(min(floord(Nt+Nx-4,128),floord(12*t1+Nx+21,128)),floord(24*t2+Nx+20,128)),floord(8*t3+Nx+4,128)),floord(24*t1-24*t2+Nz+Nx+19,128));t4++) { for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),8*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),8*t3+6),128*t4+126),24*t1-24*t2+Nz+21);t5++) { for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) { lbv=max(128*t4,t5+1); ubv=min(128*t4+127,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 
1]));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
correlation.c
/**
 * correlation.c This file was adapted from PolyBench/GPU 1.0 test suite
 * to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
 *
 * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
 *
 * Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
 *           Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
 *           Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
 */
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#ifdef _OPENMP
#include <omp.h>
#endif

#include "BenchmarksUtil.h"

/* Problem size (SIZE is expected to come from BenchmarksUtil.h or the
 * build system; arrays are (M+1) x (N+1) because PolyBench uses 1-based
 * indexing and row/column 0 is unused). */
#define M SIZE
#define N SIZE

#define sqrt_of_array_cell(x, j) sqrt(x[j])

/* FLOAT_N is PolyBench's fixed normalization constant (NOT derived from
 * N here); EPS guards against division by a near-zero std deviation. */
#define FLOAT_N 3214212.01f
#define EPS 0.005f

/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;

/**
 * @brief Initialize matrice
 *
 * Fills the full (M+1) x (N+1) buffer (including the unused 0 row/col)
 * with the deterministic pattern i*j/(M+1).
 *
 * @param data
 */
void init_arrays(DATA_TYPE *data) {
  int i, j;

  for (i = 0; i < (M + 1); i++) {
    for (j = 0; j < (N + 1); j++) {
      data[i * (N + 1) + j] = ((DATA_TYPE)i * j) / (M + 1);
    }
  }
}

/**
 * @brief Compare the two result matrices element-wise over the 1-based
 *        region, using percentDiff against ERROR_THRESHOLD.
 *
 * @param symmat
 * @param symmat_outputFromGpu
 * @return int  number of mismatching elements (0 == results agree)
 */
int compareResults(DATA_TYPE *symmat, DATA_TYPE *symmat_outputFromGpu) {
  int i, j, fail;
  fail = 0;

  for (i = 1; i < (M + 1); i++) {
    for (j = 1; j < (N + 1); j++) {
      if (percentDiff(symmat[i * (N + 1) + j],
                      symmat_outputFromGpu[i * (N + 1) + j]) >
          ERROR_THRESHOLD) {
        fail++;
      }
    }
  }

  return fail;
}

/**
 * @brief Sequential reference: mean/stddev per column, center-and-reduce
 *        the data, then build the symmetric M x M correlation matrix.
 *        NOTE(review): mutates `data` in place (centering step), so it
 *        must run on a fresh copy if results are compared afterwards.
 *
 * @param data
 * @param mean
 * @param stddev
 * @param symmat
 */
void correlation(DATA_TYPE *data, DATA_TYPE *mean, DATA_TYPE *stddev,
                 DATA_TYPE *symmat) {
  int i, j, j1, j2;

  // Determine mean of column vectors of input data matrix
  for (j = 1; j < (M + 1); j++) {
    mean[j] = 0.0;

    for (i = 1; i < (N + 1); i++) {
      mean[j] += data[i * (M + 1) + j];
    }

    mean[j] /= (DATA_TYPE)FLOAT_N;
  }

  // Determine standard deviations of column vectors of data matrix.
  for (j = 1; j < (M + 1); j++) {
    stddev[j] = 0.0;

    for (i = 1; i < (N + 1); i++) {
      stddev[j] += (data[i * (M + 1) + j] - mean[j]) *
                   (data[i * (M + 1) + j] - mean[j]);
    }

    stddev[j] /= FLOAT_N;
    stddev[j] = sqrt_of_array_cell(stddev, j);
    /* Near-zero deviation is clamped to 1.0 so the later division is a
     * no-op instead of blowing up. */
    stddev[j] = stddev[j] <= EPS ? 1.0 : stddev[j];
  }

  // i - threadIdx.x, j = threadIdx.y
  // Center and reduce the column vectors.
  for (i = 1; i < (N + 1); i++) {
    for (j = 1; j < (M + 1); j++) {
      data[i * (M + 1) + j] -= mean[j];
      data[i * (M + 1) + j] /= (sqrt(FLOAT_N) * stddev[j]);
    }
  }

  // Calculate the m * m correlation matrix.
  // Only the upper triangle is computed; the lower is mirrored.
  for (j1 = 1; j1 < M; j1++) {
    symmat[j1 * (M + 1) + j1] = 1.0;

    for (j2 = j1 + 1; j2 < (M + 1); j2++) {
      symmat[j1 * (M + 1) + j2] = 0.0;

      for (i = 1; i < (N + 1); i++) {
        symmat[j1 * (M + 1) + j2] +=
            (data[i * (M + 1) + j1] * data[i * (M + 1) + j2]);
      }

      symmat[j2 * (M + 1) + j1] = symmat[j1 * (M + 1) + j2];
    }
  }

  /* The last diagonal element is never reached by the j1 loop above. */
  symmat[M * (M + 1) + M] = 1.0;
}

/**
 * @brief Offloaded variant of correlation(): same four phases as the
 *        sequential version, each expressed as an OpenMP `target teams
 *        distribute parallel for` region over a single device data
 *        environment.  Behavior mirrors correlation() except stddev
 *        clamping is written as an explicit if.
 *
 * @param data
 * @param mean
 * @param stddev
 * @param symmat
 */
void correlation_OMP(DATA_TYPE *data, DATA_TYPE *mean, DATA_TYPE *stddev,
                     DATA_TYPE *symmat) {
  int i, j, k;

/* Map inputs once for all four kernels; only symmat is copied back. */
#pragma omp target data map(to: data[:(M+1)*(N+1)], mean[:(M+1)], stddev[:(M+1)]) map(tofrom: symmat[:(M+1)*(N+1)]) device(OMP_DEVICE_ID)
  {
// Determine mean of column vectors of input data matrix
#pragma omp target teams distribute parallel for private(i) device(OMP_DEVICE_ID)
    for (j = 1; j < (M + 1); j++) {
      mean[j] = 0.0;

      for (i = 1; i < (N + 1); i++) {
        mean[j] += data[i * (M + 1) + j];
      }

      mean[j] /= (DATA_TYPE)FLOAT_N;
    }

// Determine standard deviations of column vectors of data matrix.
#pragma omp target teams distribute parallel for private(i) device(OMP_DEVICE_ID)
    for (j = 1; j < (M + 1); j++) {
      stddev[j] = 0.0;

      for (i = 1; i < (N + 1); i++) {
        stddev[j] += (data[i * (M + 1) + j] - mean[j]) *
                     (data[i * (M + 1) + j] - mean[j]);
      }

      stddev[j] /= FLOAT_N;
      stddev[j] = sqrt(stddev[j]);
      if (stddev[j] <= EPS) {
        stddev[j] = 1.0;
      }
    }

// Center and reduce the column vectors.
#pragma omp target teams distribute parallel for collapse(2) device(OMP_DEVICE_ID)
    for (i = 1; i < (N + 1); i++) {
      for (j = 1; j < (M + 1); j++) {
        data[i * (M + 1) + j] -= mean[j];
        data[i * (M + 1) + j] /= (sqrt(FLOAT_N) * stddev[j]);
      }
    }

// Calculate the m * m correlation matrix.
#pragma omp target teams distribute parallel for private(j, i) device(OMP_DEVICE_ID)
    for (k = 1; k < M; k++) {
      symmat[k * (M + 1) + k] = 1.0;

      for (j = k + 1; j < (M + 1); j++) {
        symmat[k * (M + 1) + j] = 0.0;

        for (i = 1; i < (N + 1); i++) {
          symmat[k * (M + 1) + j] +=
              (data[i * (M + 1) + k] * data[i * (M + 1) + j]);
        }

        symmat[j * (M + 1) + k] = symmat[k * (M + 1) + j];
      }
    }
  }

  /* Set on the host, after the device region has copied symmat back. */
  symmat[M * (M + 1) + M] = 1.0;
}

/**
 * @brief Driver: run the enabled variants (build-time macros select
 *        OMP/GPU, sequential CPU, and testing), optionally compare them,
 *        and return the mismatch count as the exit status.
 */
int main() {
  fprintf(stdout, "<< Correlation Computation >>\n");

  // declare arrays and allocate memory
  DATA_TYPE *data =
      (DATA_TYPE *) malloc((M + 1) * (N + 1) * sizeof(DATA_TYPE));
  DATA_TYPE *mean = (DATA_TYPE *) malloc((M + 1) * sizeof(DATA_TYPE));
  DATA_TYPE *stddev = (DATA_TYPE *) malloc((M + 1) * sizeof(DATA_TYPE));
  DATA_TYPE *symmat = NULL;       /* allocated only when RUN_CPU_SEQ */
  DATA_TYPE *symmat_GPU = NULL;   /* allocated only when an OMP mode runs */

  // init operand matrices
  init_arrays(data);

// run OMP on GPU or CPU if enabled
#if defined(RUN_OMP_GPU) || defined(RUN_OMP_CPU)
  symmat_GPU = (DATA_TYPE *) malloc((M + 1) * (N + 1) * sizeof(DATA_TYPE));

  BENCHMARK_OMP(correlation_OMP(data, mean, stddev, symmat_GPU));
  // prevent dead-code elimination
  DCE_PREVENT(symmat_GPU, (M+1)*(N+1));
#endif

// run sequential version if enabled
#ifdef RUN_CPU_SEQ
  symmat = (DATA_TYPE *) malloc((M + 1) * (N + 1) * sizeof(DATA_TYPE));

  BENCHMARK_CPU(correlation(data, mean, stddev, symmat));
  // prevent dead-code elimination
  DCE_PREVENT(symmat, (M+1)*(N+1));
#endif

  int fail = 0;
// if TEST is enabled, then compare OMP results against sequential mode
// NOTE(review): RUN_TEST presumes both variants above ran — verify build flags
#ifdef RUN_TEST
  fail = compareResults(symmat, symmat_GPU);
  printf("Errors on OMP (threshold %4.2lf): %d\n", ERROR_THRESHOLD, fail);
#endif

  // release memory (free(NULL) is a no-op for the unallocated variants)
  free(data);
  free(mean);
  free(stddev);
  free(symmat_GPU);
  free(symmat);

  return fail;
}
GB_unaryop__lnot_uint16_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__lnot_uint16_fp32
// op(A') function: GB_tran__lnot_uint16_fp32

// C type:   uint16_t
// A type:   float
// cast:     uint16_t cij ; GB_CAST_UNSIGNED(cij,aij,16)
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: logical NOT (result is 1 iff the input is zero)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting: float input is first cast to uint16_t
#define GB_CASTING(z, x) \
    uint16_t z ; GB_CAST_UNSIGNED(z,x,16) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT16 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_uint16_fp32
(
    uint16_t *restrict Cx,
    const float *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // each output entry depends only on the matching input entry, so the
    // loop is embarrassingly parallel with a static schedule
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_uint16_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template does the work, driven by the macros above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__minv_fp64_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__minv_fp64_uint16
// op(A') function: GB_tran__minv_fp64_uint16

// C type:   double
// A type:   uint16_t
// cast:     double cij = (double) aij
// unaryop:  cij = 1./aij

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: multiplicative inverse (after the cast to double, an
// input of 0 yields IEEE infinity rather than trapping)
#define GB_OP(z, x) \
    z = 1./x ;

// casting
#define GB_CASTING(z, aij) \
    double z = (double) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_FP64 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__minv_fp64_uint16
(
    double *Cx,         // Cx and Ax may be aliased
    uint16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // elementwise and independent per entry; static schedule is optimal
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__minv_fp64_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template does the work, driven by the macros above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__isne_fc64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__isne_fc64)
// A.*B function (eWiseMult):       GB (_AemultB_01__isne_fc64)
// A.*B function (eWiseMult):       GB (_AemultB_02__isne_fc64)
// A.*B function (eWiseMult):       GB (_AemultB_03__isne_fc64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__isne_fc64)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__isne_fc64)
// C+=b function (dense accum):     GB (_Cdense_accumb__isne_fc64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__isne_fc64)
// C=scalar+B                       GB (_bind1st__isne_fc64)
// C=scalar+B'                      GB (_bind1st_tran__isne_fc64)
// C=A+scalar                       GB (_bind2nd__isne_fc64)
// C=A'+scalar                      GB (_bind2nd_tran__isne_fc64)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// B,b type: GxB_FC64_t
// BinaryOp: cij = GB_FC64_isne (aij, bij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_BTYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    GxB_FC64_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    GxB_FC64_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    GxB_FC64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

// access an entry of C by position
#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_FC64_isne (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISNE || GxB_NO_FC64 || GxB_NO_ISNE_FC64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// This variant is not generated for ISNE_FC64:
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__isne_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isne_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isne_fc64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC64_t
        GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: unreachable — the block above always returns (generator artifact)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

// colscale is not generated for ISNE_FC64
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

// rowscale is not generated for ISNE_FC64
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isne_fc64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__isne_fc64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isne_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant. For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__isne_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isne_fc64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isne_fc64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t   x = (*((GxB_FC64_t *) x_input)) ;
    GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb (if any)
        if (!GBB (Bb, p)) continue ;
        GxB_FC64_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_FC64_isne (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isne_fc64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
    GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
    GxB_FC64_t   y = (*((GxB_FC64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab (if any)
        if (!GBB (Ab, p)) continue ;
        GxB_FC64_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_FC64_isne (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC64_t aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = GB_FC64_isne (x, aij) ;           \
}

GrB_Info GB (_bind1st_tran__isne_fc64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        GxB_FC64_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    #undef  GB_ATYPE
    #define GB_ATYPE \
        GxB_FC64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC64_t aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = GB_FC64_isne (aij, y) ;           \
}

GrB_Info GB (_bind2nd_tran__isne_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
calculate_global_physical_properties.h
#ifndef CALCULATE_GLOBAL_PHYSICAL_PROPERTIES_H #define CALCULATE_GLOBAL_PHYSICAL_PROPERTIES_H // /* External includes */ // System includes // Project includes #include "utilities/timer.h" #include "custom_utilities/create_and_destroy.h" #include "custom_elements/Particle_Contact_Element.h" #include "includes/variables.h" /* System includes */ #include <limits> #include <iostream> #include <iomanip> /* External includes */ #ifdef _OPENMP #include <omp.h> #endif /* Project includes */ #include "includes/define.h" #include "utilities/openmp_utils.h" namespace Kratos { class SphericElementGlobalPhysicsCalculator { public: typedef ModelPart::ElementsContainerType ElementsArrayType; KRATOS_CLASS_POINTER_DEFINITION(SphericElementGlobalPhysicsCalculator); /// Default constructor. SphericElementGlobalPhysicsCalculator(ModelPart& r_model_part) { mInitialCenterOfMassAndMass = CalculateCenterOfMass(r_model_part); mInitialMass = CalculateTotalMass(r_model_part); } /// Destructor. virtual ~SphericElementGlobalPhysicsCalculator(){} //*************************************************************************************************************** //*************************************************************************************************************** double CalculateTotalVolume(ModelPart& r_model_part) { OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition); double added_volume = 0.0; #pragma omp parallel for reduction(+ : added_volume) for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){ for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){ if (it->GetGeometry()[0].Is(BLOCKED)) { // we exclude blocked elements from the volume calculation (e.g., inlet injectors) continue; } if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) { SphericParticle& r_spheric_particle = dynamic_cast<Kratos::SphericParticle&> (*it); 
const double particle_radius = r_spheric_particle.GetRadius(); added_volume += 4.0 / 3.0 * Globals::Pi * particle_radius * particle_radius * particle_radius; } } } return added_volume; } //*************************************************************************************************************** //*************************************************************************************************************** // Returns the minimum value of a double variable in the model part. double CalculateMaxNodalVariable(ModelPart& r_model_part, const Variable<double>& r_variable) { ElementsArrayType& pElements = r_model_part.GetCommunicator().LocalMesh().Elements(); if (pElements.size() == 0){ KRATOS_THROW_ERROR(std::invalid_argument, "Cannot compute maximum of the required nodal variable. Empty model part. Could not compute the maximum of the required variable ", r_variable); } ElementsArrayType::iterator it_begin = pElements.ptr_begin(); if (!it_begin->GetGeometry()[0].SolutionStepsDataHas(r_variable)){ KRATOS_THROW_ERROR(std::invalid_argument, "Cannot compute maximum of the required nodal variable. 
Missing nodal variable ", r_variable); } std::vector<double> max_values; double max_val = - std::numeric_limits<double>::max(); max_values.resize(OpenMPUtils::GetNumThreads()); for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){ max_values[k] = max_val; } OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), pElements.size(), mElementsPartition); unsigned int elem_counter; #pragma omp parallel for private(elem_counter) for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){ elem_counter = mElementsPartition[k]; for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){ max_values[k] = std::max(max_values[k], (it)->GetGeometry()[0].FastGetSolutionStepValue(r_variable)); elem_counter++; } } // getting the maximum between threads: for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){ max_val = std::max(max_val, max_values[k]); } return max_val; } //*************************************************************************************************************** //*************************************************************************************************************** double CalculateMinNodalVariable(ModelPart& r_model_part, const Variable<double>& r_variable) { ElementsArrayType& pElements = r_model_part.GetCommunicator().LocalMesh().Elements(); if (pElements.size() == 0){ KRATOS_THROW_ERROR(std::invalid_argument, "Cannot compute minimum of the required nodal variable. Empty model part. Could not compute the maximum of the required variable ", r_variable); } ElementsArrayType::iterator it_begin = pElements.ptr_begin(); if (!it_begin->GetGeometry()[0].SolutionStepsDataHas(r_variable)){ KRATOS_THROW_ERROR(std::invalid_argument, "Cannot compute minimum of the required nodal variable. 
Missing variable ", r_variable); } std::vector<double> min_values; double min_val = std::numeric_limits<double>::max(); min_values.resize(OpenMPUtils::GetNumThreads()); for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){ min_values[k] = min_val; } OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), pElements.size(), mElementsPartition); unsigned int elem_counter; #pragma omp parallel for private(elem_counter) for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){ elem_counter = mElementsPartition[k]; for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){ min_values[k] = std::min(min_values[k], (it)->GetGeometry()[0].FastGetSolutionStepValue(r_variable)); elem_counter++; } } // getting the minimum between threads: for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){ min_val = std::min(min_val, min_values[k]); } return min_val; } //*************************************************************************************************************** //*************************************************************************************************************** double CalculateD50(ModelPart& r_model_part) { const unsigned int size = r_model_part.GetCommunicator().LocalMesh().Elements().size(); OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), size, mElementsPartition); std::vector<double> radii; radii.resize(size); unsigned int particle_counter = 0; #pragma omp parallel for private(particle_counter) for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){ particle_counter = mElementsPartition[k]; for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){ SphericParticle& r_spheric_particle = dynamic_cast<Kratos::SphericParticle&> (*it); radii[particle_counter] = r_spheric_particle.GetRadius(); particle_counter++; } } if (particle_counter) { std::sort(radii.begin(), radii.end()); int half = div(size, 
2).quot; bool even = (size%2 == 0); double d50 = even ? 2 * radii[half] : radii[half] + radii[half + 1]; return d50; } else { return 0.00; } } //*************************************************************************************************************** //*************************************************************************************************************** double CalculateTotalMass(ModelPart& r_model_part) { OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(),r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition); double added_mass = 0.0; #pragma omp parallel for reduction(+ : added_mass) for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){ for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){ if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) { double particle_mass = (it)->GetGeometry()[0].FastGetSolutionStepValue(NODAL_MASS); added_mass += particle_mass; } } } return added_mass; } //*************************************************************************************************************** //*************************************************************************************************************** array_1d<double, 3> CalculateCenterOfMass(ModelPart& r_model_part) { OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition); const double total_mass_inv = 1 / CalculateTotalMass(r_model_part); double cm_x = 0.0; double cm_y = 0.0; double cm_z = 0.0; #pragma omp parallel for reduction(+ : cm_x, cm_y, cm_z) for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){ for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){ if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) { double particle_mass = (it)->GetGeometry()[0].FastGetSolutionStepValue(NODAL_MASS); cm_x += particle_mass * 
(it)->GetGeometry()[0].Coordinates()[0]; cm_y += particle_mass * (it)->GetGeometry()[0].Coordinates()[1]; cm_z += particle_mass * (it)->GetGeometry()[0].Coordinates()[2]; } } } array_1d<double, 3> center_of_mass; center_of_mass[0] = total_mass_inv * cm_x; center_of_mass[1] = total_mass_inv * cm_y; center_of_mass[2] = total_mass_inv * cm_z; return center_of_mass; } //*************************************************************************************************************** //*************************************************************************************************************** double CalculateGravitationalPotentialEnergy(ModelPart& r_model_part, const array_1d<double, 3> reference_point) { double gravitational_energy; const double total_mass = CalculateTotalMass(r_model_part); if (total_mass == 0) gravitational_energy = 0.0; else { const array_1d<double, 3>& gravity = r_model_part.GetProcessInfo()[GRAVITY]; const array_1d<double, 3> center_of_mass = CalculateCenterOfMass(r_model_part); const array_1d<double, 3> center_of_mass_to_reference = reference_point - center_of_mass; gravitational_energy = total_mass * (center_of_mass_to_reference[0] * gravity[0] + center_of_mass_to_reference[1] * gravity[1] + center_of_mass_to_reference[2] * gravity[2]); } return gravitational_energy; } //*************************************************************************************************************** //*************************************************************************************************************** double CalculateTranslationalKinematicEnergy(ModelPart& r_model_part) { OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition); double kinematic_energy = 0.0; #pragma omp parallel for reduction(+ : kinematic_energy) for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){ for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != 
GetElementPartitionEnd(r_model_part, k); ++it){ if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) { double particle_translational_kinematic_energy = 0.0; (it)->Calculate(PARTICLE_TRANSLATIONAL_KINEMATIC_ENERGY, particle_translational_kinematic_energy, r_model_part.GetProcessInfo()); kinematic_energy += particle_translational_kinematic_energy; } } } return kinematic_energy; } //*************************************************************************************************************** //*************************************************************************************************************** double CalculateRotationalKinematicEnergy(ModelPart& r_model_part) { OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition); double rotational_kinematic_energy = 0.0; #pragma omp parallel for reduction(+ : rotational_kinematic_energy) for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){ for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){ if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) { double particle_rotational_kinematic_energy = 0.0; (it)->Calculate(PARTICLE_ROTATIONAL_KINEMATIC_ENERGY, particle_rotational_kinematic_energy, r_model_part.GetProcessInfo()); rotational_kinematic_energy += particle_rotational_kinematic_energy; } } } return rotational_kinematic_energy; } //*************************************************************************************************************** //*************************************************************************************************************** double CalculateElasticEnergy(ModelPart& r_model_part) { OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition); double elastic_energy = 0.0; #pragma omp parallel for reduction(+ : elastic_energy) for (int k = 0; k < 
OpenMPUtils::GetNumThreads(); k++){ for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){ if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) { double particle_elastic_energy = 0.0; (it)->Calculate(PARTICLE_ELASTIC_ENERGY, particle_elastic_energy, r_model_part.GetProcessInfo()); elastic_energy += particle_elastic_energy; } } } return elastic_energy; } //*************************************************************************************************************** //*************************************************************************************************************** double CalculateInelasticFrictionalEnergy(ModelPart& r_model_part) { OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition); double frictional_energy = 0.0; #pragma omp parallel for reduction(+ : frictional_energy) for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){ for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){ if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) { double particle_frictional_energy = 0.0; (it)->Calculate(PARTICLE_INELASTIC_FRICTIONAL_ENERGY, particle_frictional_energy, r_model_part.GetProcessInfo()); frictional_energy += particle_frictional_energy; } } } return frictional_energy; } double CalculateInelasticViscodampingEnergy(ModelPart& r_model_part) { OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition); double viscodamping_energy = 0.0; #pragma omp parallel for reduction(+ : viscodamping_energy) for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){ for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){ if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) { double 
particle_viscodamping_energy = 0.0; (it)->Calculate(PARTICLE_INELASTIC_VISCODAMPING_ENERGY, particle_viscodamping_energy, r_model_part.GetProcessInfo()); viscodamping_energy += particle_viscodamping_energy; } } } return viscodamping_energy; } //*************************************************************************************************************** //*************************************************************************************************************** array_1d<double, 3> CalculateTotalMomentum(ModelPart& r_model_part) { OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition); double m_x = 0.0; double m_y = 0.0; double m_z = 0.0; #pragma omp parallel for reduction(+ : m_x, m_y, m_z) for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){ for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){ if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) { array_1d<double, 3> particle_momentum; (it)->Calculate(MOMENTUM, particle_momentum, r_model_part.GetProcessInfo()); m_x += particle_momentum[0]; m_y += particle_momentum[1]; m_z += particle_momentum[2]; } } } array_1d<double, 3> momentum; momentum[0] = m_x; momentum[1] = m_y; momentum[2] = m_z; return momentum; } //*************************************************************************************************************** //*************************************************************************************************************** array_1d<double, 3> CalulateTotalAngularMomentum(ModelPart& r_model_part) { OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition); const array_1d<double, 3> center_of_mass = CalculateCenterOfMass(r_model_part); double am_x = 0.0; double am_y = 0.0; double am_z = 0.0; #pragma omp parallel for reduction(+ : am_x, am_y, am_z) for (int k 
= 0; k < OpenMPUtils::GetNumThreads(); k++){ for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){ if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) { array_1d<double, 3> particle_momentum; array_1d<double, 3> particle_local_angular_momentum; array_1d<double, 3> center_of_mass_to_particle = (it)->GetGeometry()[0].Coordinates() - center_of_mass; (it)->Calculate(MOMENTUM, particle_momentum, r_model_part.GetProcessInfo()); (it)->Calculate(ANGULAR_MOMENTUM, particle_local_angular_momentum, r_model_part.GetProcessInfo()); array_1d<double, 3> aux; Kratos::MathUtils<double>::CrossProduct(aux, particle_momentum, center_of_mass_to_particle); am_x += particle_local_angular_momentum[0] + aux[0]; am_y += particle_local_angular_momentum[1] + aux[1]; am_z += particle_local_angular_momentum[2] + aux[2]; } } } array_1d<double, 3> angular_momentum; angular_momentum[0] = am_x; angular_momentum[1] = am_y; angular_momentum[2] = am_z; return angular_momentum; } //*************************************************************************************************************** //*************************************************************************************************************** // Check by how much Newton's Third Law is violated array_1d<double, 3> CalculateSumOfInternalForces(ModelPart& r_model_part) { OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(),r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition); double sum_of_contact_forces_x = 0.0; double sum_of_contact_forces_y = 0.0; double sum_of_contact_forces_z = 0.0; #pragma omp parallel for reduction(+ : sum_of_contact_forces_x, sum_of_contact_forces_y, sum_of_contact_forces_z) for (int k = 0; k < OpenMPUtils::GetNumThreads(); ++k){ for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){ if 
((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)){ const array_1d<double, 3>& contact_force = (it)->GetGeometry()[0].FastGetSolutionStepValue(CONTACT_FORCES); sum_of_contact_forces_x += contact_force[0]; sum_of_contact_forces_y += contact_force[1]; sum_of_contact_forces_z += contact_force[2]; } } } array_1d<double, 3> sum_of_contact_forces; sum_of_contact_forces[0] = sum_of_contact_forces_x; sum_of_contact_forces[1] = sum_of_contact_forces_y; sum_of_contact_forces[2] = sum_of_contact_forces_z; return sum_of_contact_forces; } //*************************************************************************************************************** //*************************************************************************************************************** ///@} ///@name Access ///@{ array_1d<double, 3> GetInitialCenterOfMass() { return mInitialCenterOfMassAndMass; } ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a stemplate<class T, std::size_t dim> tring. virtual std::string Info() const { return ""; } /// Print information about this object. virtual void PrintInfo(std::ostream& rOStream) const { } /// Print object's data. 
virtual void PrintData(std::ostream& rOStream) const { } ///@} ///@name Friends ///@{ std::vector<unsigned int>& GetElementPartition() { return (mElementsPartition); } ElementsArrayType::iterator GetElementPartitionBegin(ModelPart& r_model_part, unsigned int k) { ElementsArrayType& pElements = r_model_part.GetCommunicator().LocalMesh().Elements(); return (pElements.ptr_begin() + mElementsPartition[k]); } ElementsArrayType::iterator GetElementPartitionEnd(ModelPart& r_model_part, unsigned int k) { ElementsArrayType& pElements = r_model_part.GetCommunicator().LocalMesh().Elements(); return (pElements.ptr_begin() + mElementsPartition[k + 1]); } ///@} protected: ///@name Protected static Member r_variables ///@{ ///@} ///@name Protected member r_variables ///@{ template<class T, std::size_t dim> ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ std::vector<unsigned int> mElementsPartition; ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member r_variables ///@{ ///@} ///@name Member r_variables ///@{ array_1d<double, 3> mInitialCenterOfMassAndMass; double mInitialMass; ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. SphericElementGlobalPhysicsCalculator & operator=(SphericElementGlobalPhysicsCalculator const& rOther); ///@} }; // Class SphericElementGlobalPhysicsCalculator ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ } // namespace Kratos. #endif // CALCULATE_GLOBAL_PHYSICAL_PROPERTIES_H
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 32; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,16);t1++) { lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32)); ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-1,2)),ceild(32*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(16*t1+Ny+29,32)),floord(32*t2+Ny+28,32)),floord(32*t1-32*t2+Nz+Ny+27,32));t3++) { for (t4=max(max(max(0,ceild(t1-7,8)),ceild(32*t2-Nz-124,128)),ceild(32*t3-Ny-124,128));t4<=min(min(min(min(floord(Nt+Nx-4,128),floord(16*t1+Nx+29,128)),floord(32*t2+Nx+28,128)),floord(32*t3+Nx+28,128)),floord(32*t1-32*t2+Nz+Nx+27,128));t4++) { for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),32*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),32*t3+30),128*t4+126),32*t1-32*t2+Nz+29);t5++) { for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) { lbv=max(128*t4,t5+1); ubv=min(128*t4+127,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ 
(-t5+t8) + 1]));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
GB_unop__lgamma_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__lgamma_fp32_fp32)
// op(A') function: GB (_unop_tran__lgamma_fp32_fp32)

// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = lgammaf (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: log of the absolute value of the gamma function
#define GB_OP(z, x) \
    z = lgammaf (x) ;

// casting (identity cast here: A and C are both float)
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    float aij = Ax [pA] ;           \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ;                 \
    Cx [pC] = lgammaf (z) ;         \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LGAMMA || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies lgammaf entrywise to the anz entries of Ax, writing into Cx.
// Ab is the bitmap of A (NULL unless A is bitmap); entries with Ab[p]==0
// are skipped. The loop is embarrassingly parallel over entries.
GrB_Info GB (_unop_apply__lgamma_fp32_fp32)
(
    float *Cx,              // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = lgammaf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = lgammaf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose kernel body is textually included from
// GB_unop_transpose.c, which uses the GB_* macros defined above.
GrB_Info GB (_unop_tran__lgamma_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
pdgstrf.c
/*! \file Copyright (c) 2003, The Regents of the University of California, through Lawrence Berkeley National Laboratory (subject to receipt of any required approvals from U.S. Dept. of Energy) All rights reserved. The source code is distributed under BSD license, see the file License.txt at the top-level directory. */ /*! @file * \brief Performs LU factorization in parallel * * <pre> * -- Distributed SuperLU routine (version 6.1) -- * Lawrence Berkeley National Lab, Univ. of California Berkeley. * October 1, 2014 * * Modified: * September 1, 1999 * Feburary 7, 2001 use MPI_Isend/MPI_Irecv * October 15, 2008 latency-reducing panel factorization * July 12, 2011 static scheduling and arbitrary look-ahead * March 13, 2013 change NTAGS to MPI_TAG_UB value * September 24, 2015 replace xLAMCH by xMACH, using C99 standard. * December 31, 2015 rename xMACH to xMACH_DIST. * September 30, 2017 optimization for Intel Knights Landing (KNL) node . * June 1, 2018 add parallel AWPM pivoting; add back arrive_at_ublock() * February 8, 2019 version 6.1.1 * * Sketch of the algorithm * * ======================= * * The following relations hold: * * A_kk = L_kk * U_kk * * L_ik = Aik * U_kk^(-1) * * U_kj = L_kk^(-1) * A_kj * * ---------------------------------- * | | | * ----|----------------------------- * | | \ U_kk| | * | | \ | U_kj | * | |L_kk \ | || | * ----|-------|---------||---------- * | | | \/ | * | | | | * | | | | * | | | | * | | L_ik ==> A_ij | * | | | | * | | | | * | | | | * ---------------------------------- * * Handle the first block of columns separately. * * Factor diagonal and subdiagonal blocks and test for exact * singularity. ( pdgstrf2(0), one column at a time ) * * Compute block row of U * * Update trailing matrix * * Loop over the remaining blocks of columns. 
* mycol = MYCOL( iam, grid ); * myrow = MYROW( iam, grid ); * N = nsupers; * For (k = 1; k < N; ++k) { * krow = PROW( k, grid ); * kcol = PCOL( k, grid ); * Pkk = PNUM( krow, kcol, grid ); * * * Factor diagonal and subdiagonal blocks and test for exact * singularity. * if ( mycol == kcol ) { * pdgstrf2(k), one column at a time * } * * * Parallel triangular solve * if ( iam == Pkk ) multicast L_k,k to this process row; * if ( myrow == krow && mycol != kcol ) { * Recv L_k,k from process Pkk; * for (j = k+1; j < N; ++j) * if ( PCOL( j, grid ) == mycol && A_k,j != 0 ) * U_k,j = L_k,k \ A_k,j; * } * * * Parallel rank-k update * if ( myrow == krow ) multicast U_k,k+1:N to this process column; * if ( mycol == kcol ) multicast L_k+1:N,k to this process row; * if ( myrow != krow ) { * Pkj = PNUM( krow, mycol, grid ); * Recv U_k,k+1:N from process Pkj; * } * if ( mycol != kcol ) { * Pik = PNUM( myrow, kcol, grid ); * Recv L_k+1:N,k from process Pik; * } * for (j = k+1; k < N; ++k) { * for (i = k+1; i < N; ++i) * if ( myrow == PROW( i, grid ) && mycol == PCOL( j, grid ) * && L_i,k != 0 && U_k,j != 0 ) * A_i,j = A_i,j - L_i,k * U_k,j; * } * } * * </pre> */ #include <math.h> /*#include "mkl.h"*/ #include "superlu_ddefs.h" #ifdef GPU_ACC #include "cublas_utils.h" /*#include "cublas_dgemm.h"*/ // #define NUM_CUDA_STREAMS 16 // #define NUM_CUDA_STREAMS 16 #endif /* Various defininations */ /* Name : SUPERNODE_PROFILE Purpose : For SuperNode Level profiling of various measurements such as gigaflop/sec obtained,bandwidth achieved: Overhead : Low */ // #define SUPERNODE_PROFILE /* Name : BAELINE Purpose : baseline to compare performance against Overhead : NA : this won't be used for running experiments */ // #define BASELINE /* Name : PHI_FRAMEWORK Purpose : To simulate and test algorithm used for offloading Phi Overhead : NA : this won't be used for running experiments */ #define PHI_FRAMEWORK #if 0 #define CACHELINE 64 /* bytes, Xeon Phi KNL */ #else #define CACHELINE 0 /* not 
worry about false sharing of different threads */ #endif //#define GEMM_PADLEN 1 #define GEMM_PADLEN 8 #define PDGSTRF2 pdgstrf2_trsm #define PDGSTRS2 pdgstrs2_omp extern void PDGSTRF2 (superlu_dist_options_t *, int_t, int_t, double, Glu_persist_t *, gridinfo_t *, LocalLU_t *, MPI_Request *, int, SuperLUStat_t *, int *); #ifdef _CRAY extern void PDGSTRS2 (int_t, int_t, Glu_persist_t *, gridinfo_t *, LocalLU_t *, SuperLUStat_t *, _fcd, _fcd, _fcd); #else extern void PDGSTRS2 (int_t, int_t, Glu_persist_t *, gridinfo_t *, LocalLU_t *, SuperLUStat_t *); #endif #ifdef ISORT extern void isort (int_t N, int_t * ARRAY1, int_t * ARRAY2); extern void isort1 (int_t N, int_t * ARRAY); #else int superlu_sort_perm (const void *arg1, const void *arg2) { const int_t *val1 = (const int_t *) arg1; const int_t *val2 = (const int_t *) arg2; return (*val2 < *val1); } #endif /************************************************************************/ #include "dscatter.c" /************************************************************************/ /*! \brief * * <pre> * Purpose * ======= * * PDGSTRF performs the LU factorization in parallel. * * Arguments * ========= * * options (input) superlu_dist_options_t* * The structure defines the input parameters to control * how the LU decomposition will be performed. * The following field should be defined: * o ReplaceTinyPivot (yes_no_t) * Specifies whether to replace the tiny diagonals by * sqrt(epsilon)*norm(A) during LU factorization. * * m (input) int * Number of rows in the matrix. * * n (input) int * Number of columns in the matrix. * * anorm (input) double * The norm of the original matrix A, or the scaled A if * equilibration was done. * * LUstruct (input/output) LUstruct_t* * The data structures to store the distributed L and U factors. 
* The following fields should be defined: * * o Glu_persist (input) Glu_persist_t* * Global data structure (xsup, supno) replicated on all processes, * describing the supernode partition in the factored matrices * L and U: * xsup[s] is the leading column of the s-th supernode, * supno[i] is the supernode number to which column i belongs. * * o Llu (input/output) LocalLU_t* * The distributed data structures to store L and U factors. * See superlu_ddefs.h for the definition of 'LocalLU_t'. * * grid (input) gridinfo_t* * The 2D process mesh. It contains the MPI communicator, the number * of process rows (NPROW), the number of process columns (NPCOL), * and my process rank. It is an input argument to all the * parallel routines. * Grid can be initialized by subroutine SUPERLU_GRIDINIT. * See superlu_ddefs.h for the definition of 'gridinfo_t'. * * stat (output) SuperLUStat_t* * Record the statistics on runtime and floating-point operation count. * See util.h for the definition of 'SuperLUStat_t'. * * info (output) int* * = 0: successful exit * < 0: if info = -i, the i-th argument had an illegal value * > 0: if info = i, U(i,i) is exactly zero. The factorization has * been completed, but the factor U is exactly singular, * and division by zero will occur if it is used to solve a * system of equations. 
* </pre> */ int_t pdgstrf(superlu_dist_options_t * options, int m, int n, double anorm, LUstruct_t * LUstruct, gridinfo_t * grid, SuperLUStat_t * stat, int *info) { #ifdef _CRAY _fcd ftcs = _cptofcd ("N", strlen ("N")); _fcd ftcs1 = _cptofcd ("L", strlen ("L")); _fcd ftcs2 = _cptofcd ("N", strlen ("N")); _fcd ftcs3 = _cptofcd ("U", strlen ("U")); #endif double zero = 0.0, alpha = 1.0, beta = 0.0; int_t *xsup; int_t *lsub, *lsub1, *usub, *Usub_buf; int_t **Lsub_buf_2, **Usub_buf_2; double **Lval_buf_2, **Uval_buf_2; /* pointers to starts of bufs */ double *lusup, *lusup1, *uval, *Uval_buf; /* pointer to current buf */ int_t fnz, i, ib, ijb, ilst, it, iukp, jb, jj, klst, knsupc, lb, lib, ldv, ljb, lptr, lptr0, lptrj, luptr, luptr0, luptrj, nlb, nub, nsupc, rel, rukp, il, iu; int_t Pc, Pr; int iam, kcol, krow, yourcol, mycol, myrow, pi, pj; int j, k, lk, nsupers; /* k - current panel to work on */ int k0; /* counter of the next supernode to be factored */ int kk, kk0, kk1, kk2, jj0; /* panels in the look-ahead window */ int iukp0, rukp0, flag0, flag1; int nsupr, nbrow, segsize; int msg0, msg2; int_t **Ufstnz_br_ptr, **Lrowind_bc_ptr; double **Unzval_br_ptr, **Lnzval_bc_ptr; int_t *index; double *nzval; double *ucol; int *indirect, *indirect2; int_t *tempi; double *tempu, *tempv, *tempr; /* double *tempv2d, *tempU2d; Sherry */ int iinfo; int *ToRecv, *ToSendD, **ToSendR; Glu_persist_t *Glu_persist = LUstruct->Glu_persist; LocalLU_t *Llu = LUstruct->Llu; superlu_scope_t *scp; float s_eps; double thresh; /*int full;*/ int ldt, ldu, lead_zero, ncols, ncb, nrb, p, pr, pc, nblocks; int_t *etree_supno_l, *etree_supno, *blocks, *blockr, *Ublock, *Urows, *Lblock, *Lrows, *perm_u, *sf_block, *sf_block_l, *nnodes_l, *nnodes_u, *edag_supno_l, *recvbuf, **edag_supno; float edag_supno_l_bytes; #ifdef ISORT int_t *iperm_u; #endif int *msgcnt; /* Count the size of the message xfer'd in each buffer: * 0 : transferred in Lsub_buf[] * 1 : transferred in Lval_buf[] * 2 : transferred in 
Usub_buf[] * 3 : transferred in Uval_buf[] */ int **msgcnts, **msgcntsU; /* counts in the look-ahead window */ int *factored; /* factored[j] == 0 : L col panel j is factorized. */ int *factoredU; /* factoredU[i] == 1 : U row panel i is factorized. */ int nnodes, *sendcnts, *sdispls, *recvcnts, *rdispls, *srows, *rrows; etree_node *head, *tail, *ptr; int *num_child; int num_look_aheads, look_id; int *look_ahead; /* global look_ahead table */ int_t *perm_c_supno, *iperm_c_supno; /* perm_c_supno[k] = j means at the k-th step of elimination, * the j-th supernode is chosen. */ MPI_Request *recv_req, **recv_reqs, **send_reqs, **send_reqs_u, **recv_reqs_u; MPI_Request *send_req, *U_diag_blk_send_req = NULL; MPI_Status status; void *attr_val; int flag; /* The following variables are used to pad GEMM dimensions so that each is a multiple of vector length (8 doubles for KNL) */ int gemm_m_pad = GEMM_PADLEN, gemm_k_pad = GEMM_PADLEN, gemm_n_pad = GEMM_PADLEN; int gemm_padding = 0; int iword = sizeof (int_t); int dword = sizeof (double); /* For measuring load imbalence in omp threads */ double omp_load_imblc = 0.0; double *omp_loop_time; double schur_flop_timer = 0.0; double pdgstrf2_timer = 0.0; double pdgstrs2_timer = 0.0; double lookaheadupdatetimer = 0.0; double InitTimer = 0.0; /* including compute schedule, malloc */ double tt_start, tt_end; /* #if !defined( GPU_ACC ) */ /* Counters for memory operations and timings */ double scatter_mem_op_counter = 0.0; double scatter_mem_op_timer = 0.0; double scatterL_mem_op_counter = 0.0; double scatterL_mem_op_timer = 0.0; double scatterU_mem_op_counter = 0.0; double scatterU_mem_op_timer = 0.0; /* Counters for flops/gather/scatter and timings */ double GatherLTimer = 0.0; double LookAheadRowSepMOP = 0.0; double GatherUTimer = 0.0; double GatherMOP = 0.0; double LookAheadGEMMTimer = 0.0; double LookAheadGEMMFlOp = 0.0; double LookAheadScatterTimer = 0.0; double LookAheadScatterMOP = 0.0; double RemainGEMMTimer = 0.0; double 
RemainGEMM_flops = 0.0; double RemainScatterTimer = 0.0; double NetSchurUpTimer = 0.0; double schur_flop_counter = 0.0; /* #endif */ #if ( PRNTlevel>= 1) /* count GEMM max dimensions */ int gemm_max_m = 0, gemm_max_n = 0, gemm_max_k = 0; #endif #if ( DEBUGlevel>=2 ) int_t num_copy = 0, num_update = 0; #endif #if ( PRNTlevel==3 ) int zero_msg = 0, total_msg = 0; #endif #if ( PROFlevel>=1 ) double t1, t2; float msg_vol = 0, msg_cnt = 0; double comm_wait_time = 0.0; /* Record GEMM dimensions and times */ FILE *fopen(), *fgemm; int gemm_count = 0; typedef struct { int m, n, k; double microseconds; } gemm_profile; gemm_profile *gemm_stats; #endif /* Test the input parameters. */ *info = 0; if (m < 0) *info = -2; else if (n < 0) *info = -3; if (*info) { pxerr_dist ("pdgstrf", grid, -*info); return (-1); } /* Quick return if possible. */ if (m == 0 || n == 0) return 0; double tt1 = SuperLU_timer_ (); /* * Initialization. */ iam = grid->iam; Pc = grid->npcol; Pr = grid->nprow; myrow = MYROW (iam, grid); mycol = MYCOL (iam, grid); nsupers = Glu_persist->supno[n - 1] + 1; xsup = Glu_persist->xsup; s_eps = smach_dist("Epsilon"); thresh = s_eps * anorm; MPI_Comm_get_attr (MPI_COMM_WORLD, MPI_TAG_UB, &attr_val, &flag); if (!flag) { fprintf (stderr, "Could not get TAG_UB\n"); return (-1); } int tag_ub = *(int *) attr_val; #if ( PRNTlevel>=1 ) if (!iam) { printf ("MPI tag upper bound = %d\n", tag_ub); fflush(stdout); } #endif #if ( DEBUGlevel>=1 ) if (s_eps == 0.0) printf (" ***** warning s_eps = %e *****\n", s_eps); CHECK_MALLOC (iam, "Enter pdgstrf()"); #endif #if (PROFlevel >= 1 ) gemm_stats = (gemm_profile *) SUPERLU_MALLOC(nsupers * sizeof(gemm_profile)); if (iam == 0) fgemm = fopen("dgemm_mnk.dat", "w"); int *prof_sendR = intCalloc_dist(nsupers); #endif stat->ops[FACT] = 0.0; stat->current_buffer = 0.0; stat->peak_buffer = 0.0; stat->gpu_buffer = 0.0; /* make sure the range of look-ahead window [0, MAX_LOOKAHEADS-1] */ num_look_aheads = SUPERLU_MAX(0, 
SUPERLU_MIN(options->num_lookaheads, MAX_LOOKAHEADS - 1)); if (Pr * Pc > 1) { if (!(U_diag_blk_send_req = (MPI_Request *) SUPERLU_MALLOC (Pr * sizeof (MPI_Request)))) ABORT ("Malloc fails for U_diag_blk_send_req[]."); /* flag no outstanding Isend */ U_diag_blk_send_req[myrow] = MPI_REQUEST_NULL; /* used 0 before */ /* allocating buffers for look-ahead */ i = Llu->bufmax[0]; if (i != 0) { if ( !(Llu->Lsub_buf_2[0] = intMalloc_dist ((num_look_aheads + 1) * ((size_t) i))) ) ABORT ("Malloc fails for Lsub_buf."); tempi = Llu->Lsub_buf_2[0]; for (jj = 0; jj < num_look_aheads; jj++) Llu->Lsub_buf_2[jj+1] = tempi + i*(jj+1); /* vectorize */ //Llu->Lsub_buf_2[jj + 1] = Llu->Lsub_buf_2[jj] + i; } i = Llu->bufmax[1]; if (i != 0) { if (!(Llu->Lval_buf_2[0] = doubleMalloc_dist ((num_look_aheads + 1) * ((size_t) i)))) ABORT ("Malloc fails for Lval_buf[]."); tempr = Llu->Lval_buf_2[0]; for (jj = 0; jj < num_look_aheads; jj++) Llu->Lval_buf_2[jj+1] = tempr + i*(jj+1); /* vectorize */ //Llu->Lval_buf_2[jj + 1] = Llu->Lval_buf_2[jj] + i; } i = Llu->bufmax[2]; if (i != 0) { if (!(Llu->Usub_buf_2[0] = intMalloc_dist ((num_look_aheads + 1) * i))) ABORT ("Malloc fails for Usub_buf_2[]."); tempi = Llu->Usub_buf_2[0]; for (jj = 0; jj < num_look_aheads; jj++) Llu->Usub_buf_2[jj+1] = tempi + i*(jj+1); /* vectorize */ //Llu->Usub_buf_2[jj + 1] = Llu->Usub_buf_2[jj] + i; } i = Llu->bufmax[3]; if (i != 0) { if (!(Llu->Uval_buf_2[0] = doubleMalloc_dist ((num_look_aheads + 1) * i))) ABORT ("Malloc fails for Uval_buf_2[]."); tempr = Llu->Uval_buf_2[0]; for (jj = 0; jj < num_look_aheads; jj++) Llu->Uval_buf_2[jj+1] = tempr + i*(jj+1); /* vectorize */ //Llu->Uval_buf_2[jj + 1] = Llu->Uval_buf_2[jj] + i; } } log_memory( (Llu->bufmax[0] + Llu->bufmax[2]) * (num_look_aheads + 1) * iword + (Llu->bufmax[1] + Llu->bufmax[3]) * (num_look_aheads + 1) * dword, stat ); /* creating pointers to the look-ahead buffers */ if (! 
(Lsub_buf_2 = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (int_t *)))) ABORT ("Malloc fails for Lsub_buf_2[]."); if (! (Lval_buf_2 = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (double *)))) ABORT ("Malloc fails for Lval_buf_2[]."); if (! (Usub_buf_2 = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (int_t *)))) ABORT ("Malloc fails for Uval_buf_2[]."); if (! (Uval_buf_2 = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (double *)))) ABORT ("Malloc fails for buf_2[]."); for (i = 0; i <= num_look_aheads; i++) { Lval_buf_2[i] = Llu->Lval_buf_2[i]; Lsub_buf_2[i] = Llu->Lsub_buf_2[i]; Uval_buf_2[i] = Llu->Uval_buf_2[i]; Usub_buf_2[i] = Llu->Usub_buf_2[i]; } if (!(msgcnts = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (int *)))) ABORT ("Malloc fails for msgcnts[]."); if (!(msgcntsU = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (int *)))) ABORT ("Malloc fails for msgcntsU[]."); for (i = 0; i <= num_look_aheads; i++) { if (!(msgcnts[i] = SUPERLU_MALLOC (4 * sizeof (int)))) ABORT ("Malloc fails for msgcnts[]."); if (!(msgcntsU[i] = SUPERLU_MALLOC (4 * sizeof (int)))) ABORT ("Malloc fails for msgcntsU[]."); } if (! (recv_reqs_u = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (MPI_Request *)))) ABORT ("Malloc fails for recv_reqs_u[]."); if (! (send_reqs_u = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (MPI_Request *)))) ABORT ("Malloc fails for send_reqs_u[]."); if (! (send_reqs = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (MPI_Request *)))) ABORT ("Malloc fails for send_reqs_u[]."); if (! 
(recv_reqs = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (MPI_Request *)))) ABORT ("Malloc fails for recv_reqs[]."); for (i = 0; i <= num_look_aheads; i++) { if (!(recv_reqs_u[i] = (MPI_Request *) SUPERLU_MALLOC (2 * sizeof (MPI_Request)))) ABORT ("Malloc fails for recv_req_u[i]."); if (!(send_reqs_u[i] = (MPI_Request *) SUPERLU_MALLOC (2 * Pr * sizeof (MPI_Request)))) ABORT ("Malloc fails for send_req_u[i]."); if (!(send_reqs[i] = (MPI_Request *) SUPERLU_MALLOC (2 * Pc * sizeof (MPI_Request)))) ABORT ("Malloc fails for send_reqs[i]."); if (!(recv_reqs[i] = (MPI_Request *) SUPERLU_MALLOC (4 * sizeof (MPI_Request)))) ABORT ("Malloc fails for recv_req[]."); send_reqs[i][0] = send_reqs[i][1] = MPI_REQUEST_NULL; recv_reqs[i][0] = recv_reqs[i][1] = MPI_REQUEST_NULL; } if (!(factored = SUPERLU_MALLOC (nsupers * sizeof (int_t)))) ABORT ("Malloc fails for factored[]."); if (!(factoredU = SUPERLU_MALLOC (nsupers * sizeof (int_t)))) ABORT ("Malloc fails for factoredU[]."); for (i = 0; i < nsupers; i++) factored[i] = factoredU[i] = -1; log_memory(2 * nsupers * iword, stat); int num_threads = 1; #ifdef _OPENMP #pragma omp parallel default(shared) #pragma omp master { //if (omp_get_thread_num () == 0) num_threads = omp_get_num_threads (); } #endif #if 0 omp_loop_time = (double *) _mm_malloc (sizeof (double) * num_threads,64); #else omp_loop_time = (double *) doubleMalloc_dist(num_threads); #endif #if ( PRNTlevel>=1 ) if(!iam) { printf(".. 
Starting with %d OpenMP threads \n", num_threads ); fflush(stdout); } #endif nblocks = 0; ncb = nsupers / Pc; /* number of column blocks, horizontal */ nrb = nsupers / Pr; /* number of row blocks, vertical */ /* in order to have dynamic scheduling */ int *full_u_cols; int *blk_ldu; #if 0 full_u_cols = (int_t *) _mm_malloc (sizeof (int_t) * ncb,64); blk_ldu = (int_t *) _mm_malloc (sizeof (int_t) * ncb,64); #else full_u_cols = SUPERLU_MALLOC(ncb * sizeof(int)); blk_ldu = SUPERLU_MALLOC(ncb * sizeof(int)); #endif log_memory(2 * ncb * iword, stat); #if 0 /* Sherry: not used? */ /* This bunch is used for static scheduling */ pair *full_col_count = (pair *) _mm_malloc (sizeof (pair) * ncb,64); int_t *count_cols, *sum_cols, *partition; count_cols = (int_t *) _mm_malloc (sizeof (int_t) * num_threads,64); sum_cols = (int_t *) _mm_malloc (sizeof (int_t) * num_threads,64); partition = (int_t *) _mm_malloc (sizeof (int_t) * num_threads * ncb,64); int_t ldp = ncb; #endif /* ################################################################## * Compute a good static schedule based on the factorization task graph. * ################################################################## */ perm_c_supno = SUPERLU_MALLOC (2 * nsupers * sizeof (int_t)); iperm_c_supno = perm_c_supno + nsupers; static_schedule(options, m, n, LUstruct, grid, stat, perm_c_supno, iperm_c_supno, info); #if ( DEBUGlevel >= 2 ) PrintInt10("schedule:perm_c_supno", nsupers, perm_c_supno); /* Turn off static schedule */ printf("[%d] .. 
Turn off static schedule for debugging ..\n", iam); for (i = 0; i < nsupers; ++i) perm_c_supno[i] = iperm_c_supno[i] = i; #endif /* ################################################################## */ /* constructing look-ahead table to indicate the last dependency */ int *look_ahead_l; /* Sherry: add comment on look_ahead_l[] */ stat->num_look_aheads = num_look_aheads; look_ahead_l = SUPERLU_MALLOC (nsupers * sizeof (int)); look_ahead = SUPERLU_MALLOC (nsupers * sizeof (int)); for (lb = 0; lb < nsupers; lb++) look_ahead_l[lb] = -1; /* vectorized */ log_memory(3 * nsupers * iword, stat); /* Sherry: omp parallel? not worth doing, due to concurrent write to look_ahead_l[jb] */ for (lb = 0; lb < nrb; ++lb) { /* go through U-factor */ ib = lb * Pr + myrow; index = Llu->Ufstnz_br_ptr[lb]; if (index) { /* Not an empty row */ k = BR_HEADER; for (j = 0; j < index[0]; ++j) { jb = index[k]; /* global block number */ if (jb != ib) look_ahead_l[jb] = SUPERLU_MAX (iperm_c_supno[ib], look_ahead_l[jb]); k += UB_DESCRIPTOR + SuperSize (index[k]); } } } if (myrow < nsupers % grid->nprow) { /* leftover block rows */ ib = nrb * Pr + myrow; index = Llu->Ufstnz_br_ptr[nrb]; if (index) { /* Not an empty row */ k = BR_HEADER; for (j = 0; j < index[0]; ++j) { jb = index[k]; if (jb != ib) look_ahead_l[jb] = SUPERLU_MAX (iperm_c_supno[ib], look_ahead_l[jb]); k += UB_DESCRIPTOR + SuperSize (index[k]); } } } if (options->SymPattern == NO) { /* Sherry: omp parallel? 
not worth doing, due to concurrent write to look_ahead_l[jb] */ for (lb = 0; lb < ncb; lb++) { /* go through L-factor */ ib = lb * Pc + mycol; index = Llu->Lrowind_bc_ptr[lb]; if (index) { k = BC_HEADER; for (j = 0; j < index[0]; j++) { jb = index[k]; if (jb != ib) look_ahead_l[jb] = SUPERLU_MAX (iperm_c_supno[ib], look_ahead_l[jb]); k += LB_DESCRIPTOR + index[k + 1]; } } } if (mycol < nsupers % grid->npcol) { /* leftover block columns */ ib = ncb * Pc + mycol; index = Llu->Lrowind_bc_ptr[ncb]; if (index) { k = BC_HEADER; for (j = 0; j < index[0]; j++) { jb = index[k]; if (jb != ib) look_ahead_l[jb] = SUPERLU_MAX (iperm_c_supno[ib], look_ahead_l[jb]); k += LB_DESCRIPTOR + index[k + 1]; } } } } MPI_Allreduce (look_ahead_l, look_ahead, nsupers, MPI_INT, MPI_MAX, grid->comm); SUPERLU_FREE (look_ahead_l); #ifdef ISORT iperm_u = SUPERLU_MALLOC (nsupers * sizeof (int_t)); perm_u = SUPERLU_MALLOC (nsupers * sizeof (int_t)); #else perm_u = SUPERLU_MALLOC (2 * nsupers * sizeof (int_t)); #endif log_memory(nsupers * iword, stat); k = sp_ienv_dist (3); /* max supernode size */ #if 0 if ( !(Llu->ujrow = doubleMalloc_dist(k*(k+1)/2)) ) ABORT("Malloc fails for ujrow[]."); #else /* Instead of half storage, we'll do full storage */ if (!(Llu->ujrow = doubleCalloc_dist (k * k))) ABORT ("Malloc fails for ujrow[]."); #endif log_memory(k * k * iword, stat); #if ( PRNTlevel>=1 ) if (!iam) { printf (".. thresh = s_eps %e * anorm %e = %e\n", s_eps, anorm, thresh); printf (".. 
Buffer size: Lsub %ld\tLval %ld\tUsub %ld\tUval %ld\tLDA %ld\n", (long int) Llu->bufmax[0], (long int) Llu->bufmax[1], (long int) Llu->bufmax[2], (long int) Llu->bufmax[3], (long int) Llu->bufmax[4]); fflush(stdout); } #endif Lrowind_bc_ptr = Llu->Lrowind_bc_ptr; Lnzval_bc_ptr = Llu->Lnzval_bc_ptr; Ufstnz_br_ptr = Llu->Ufstnz_br_ptr; Unzval_br_ptr = Llu->Unzval_br_ptr; ToRecv = Llu->ToRecv; ToSendD = Llu->ToSendD; ToSendR = Llu->ToSendR; ldt = sp_ienv_dist (3); /* Size of maximum supernode */ k = CEILING (nsupers, Pr); /* Number of local block rows */ /* Following code is for finding maximum row dimension of all L panels */ int local_max_row_size = 0; int max_row_size; #if 0 #if defined _OPENMP // Sherry: parallel reduction -- seems slower? #pragma omp parallel for reduction(max :local_max_row_size) private(lk,lsub) #endif #endif for (int i = mycol; i < nsupers; i += Pc) { /* grab my local columns */ //int tpc = PCOL (i, grid); lk = LBj (i, grid); lsub = Lrowind_bc_ptr[lk]; if (lsub != NULL) { if (lsub[1] > local_max_row_size) local_max_row_size = lsub[1]; } } /* Max row size is global reduction within a row */ MPI_Allreduce (&local_max_row_size, &max_row_size, 1, MPI_INT, MPI_MAX, (grid->rscp.comm)); /* Buffer size is max of look-ahead window */ /* int_t buffer_size = SUPERLU_MAX (max_row_size * num_threads * ldt, get_max_buffer_size ()); */ #ifdef GPU_ACC int cublas_nb = get_cublas_nb(); int nstreams = get_num_cuda_streams (); int buffer_size = SUPERLU_MAX(max_row_size*nstreams*cublas_nb,get_max_buffer_size()); /* array holding last column blk for each partition, used in SchCompUdt--CUDA.c */ #if 0 int *stream_end_col = (int_t *) _mm_malloc (sizeof (int_t) * nstreams,64); #else int *stream_end_col = SUPERLU_MALLOC( nstreams * sizeof(int) ); #endif #else /* not to use GPU */ int Threads_per_process = get_thread_per_process(); int buffer_size = SUPERLU_MAX(max_row_size*Threads_per_process*ldt,get_max_buffer_size()); #endif /* end ifdef GPU_ACC */ int_t max_ncols = 
0; #if 0 /* symmetric assumption -- using L's supernode to estimate. */ /* Note that in following expression 8 can be anything as long as its not too big */ int bigu_size = 8 * sp_ienv_dist (3) * (max_row_size); #else int_t bigu_size = estimate_bigu_size( nsupers, Ufstnz_br_ptr, Glu_persist, grid, perm_u, &max_ncols ); #endif /* +16 to avoid cache line false sharing */ // int_t bigv_size = SUPERLU_MAX(max_row_size * (bigu_size / ldt), int_t bigv_size = SUPERLU_MAX(max_row_size * max_ncols, (ldt*ldt + CACHELINE / dword) * num_threads); /* bigU and bigV are either on CPU or on GPU, not both. */ double* bigU; /* for storing entire U(k,:) panel, prepare for GEMM. bigU has the same size either on CPU or on CPU. */ double* bigV; /* for storing GEMM output matrix, i.e. update matrix. bigV is large to hold the aggregate GEMM output.*/ bigU = NULL; bigV = NULL; #if ( PRNTlevel>=1 ) if(!iam) { printf("\t.. GEMM buffer size: max_row_size X max_ncols = %d x " IFMT "\n", max_row_size, max_ncols); printf(".. BIG U size " IFMT "\t BIG V size " IFMT "\n", bigu_size, bigv_size); fflush(stdout); } #endif #ifdef GPU_ACC if ( checkCuda(cudaHostAlloc((void**)&bigU, bigu_size * sizeof(double), cudaHostAllocDefault)) ) ABORT("Malloc fails for dgemm buffer U "); bigv_size = buffer_size; #if ( PRNTlevel>=1 ) if (!iam) printf("[%d] .. 
BIG V bigv_size %d, using buffer_size %d (on GPU)\n", iam, bigv_size, buffer_size); #endif if ( checkCuda(cudaHostAlloc((void**)&bigV, bigv_size * sizeof(double) ,cudaHostAllocDefault)) ) ABORT("Malloc fails for dgemm buffer V"); DisplayHeader(); #if ( PRNTlevel>=1 ) printf(" Starting with %d Cuda Streams \n",nstreams ); #endif cublasHandle_t *handle; handle = (cublasHandle_t *) SUPERLU_MALLOC(sizeof(cublasHandle_t)*nstreams); for(int i = 0; i < nstreams; i++) handle[i] = create_handle(); // creating streams cudaStream_t *streams; streams = (cudaStream_t *) SUPERLU_MALLOC(sizeof(cudaStream_t)*nstreams); for (int i = 0; i < nstreams; ++i) checkCuda( cudaStreamCreate(&streams[i]) ); // allocating data in device double *dA, *dB, *dC; cudaError_t cudaStat; #if 0 // cudaStat = cudaMalloc( (void**)&dA, m*k*sizeof(double)); // HOw much should be the size of dA? // for time being just making it // cudaStat = cudaMalloc( (void**)&dA, ((max_row_size*sp_ienv_dist(3)))* sizeof(double)); #endif cudaStat = cudaMalloc( (void**)&dA, max_row_size*sp_ienv_dist(3)* sizeof(double)); if (cudaStat!= cudaSuccess) { fprintf(stderr, "!!!! Error in allocating A in the device %ld \n",m*k*sizeof(double) ); return 1; } // size of B should be max_supernode_size*buffer cudaStat = cudaMalloc((void**)&dB, bigu_size * sizeof(double)); if (cudaStat!= cudaSuccess) { fprintf(stderr, "!!!! Error in allocating B in the device %ld \n",n*k*sizeof(double)); return 1; } cudaStat = cudaMalloc((void**)&dC, buffer_size* sizeof(double) ); if (cudaStat!= cudaSuccess) { fprintf(stderr, "!!!! 
Error in allocating C in the device \n" ); return 1; } stat->gpu_buffer += ( max_row_size * sp_ienv_dist(3) + bigu_size + buffer_size ) * dword; #else /* not CUDA */ // for GEMM padding 0 j = bigu_size / ldt; bigu_size += (gemm_k_pad * (j + ldt + gemm_n_pad)); bigv_size += (gemm_m_pad * (j + max_row_size + gemm_n_pad)); //#ifdef __INTEL_COMPILER // bigU = _mm_malloc(bigu_size * sizeof(double), 1<<12); // align at 4K page // bigV = _mm_malloc(bigv_size * sizeof(double), 1<<12); //#else if ( !(bigU = doubleMalloc_dist(bigu_size)) ) ABORT ("Malloc fails for dgemm U buffer"); //Maximum size of bigU= sqrt(buffsize) ? // int bigv_size = 8 * ldt * ldt * num_threads; if ( !(bigV = doubleMalloc_dist(bigv_size)) ) ABORT ("Malloc failed for dgemm V buffer"); //#endif #endif /* end ifdef GPU_ACC */ log_memory((bigv_size + bigu_size) * dword, stat); // mlock(bigU,(bigu_size) * sizeof (double)); #if ( PRNTlevel>=1 ) if(!iam) { printf (" Max row size is %d \n", max_row_size); printf (" Threads per process %d \n", num_threads); fflush(stdout); } #endif #if 0 /* Sherry */ if (!(tempv2d = doubleCalloc_dist (2 * ((size_t) ldt) * ldt))) ABORT ("Calloc fails for tempv2d[]."); tempU2d = tempv2d + ldt * ldt; #endif /* Sherry: (ldt + 16), avoid cache line false sharing. 
KNL cacheline size = 64 bytes = 16 int */ iinfo = ldt + CACHELINE / sizeof(int); if (!(indirect = SUPERLU_MALLOC (iinfo * num_threads * sizeof(int)))) ABORT ("Malloc fails for indirect[]."); if (!(indirect2 = SUPERLU_MALLOC (iinfo * num_threads * sizeof(int)))) ABORT ("Malloc fails for indirect[]."); log_memory(2 * ldt*ldt * dword + 2 * iinfo * num_threads * iword, stat); int_t *lookAheadFullRow,*lookAheadStRow,*lookAhead_lptr,*lookAhead_ib, *RemainStRow,*Remain_lptr,*Remain_ib; lookAheadFullRow = intMalloc_dist( (num_look_aheads+1) ); lookAheadStRow = intMalloc_dist( (num_look_aheads+1) ); lookAhead_lptr = intMalloc_dist( (num_look_aheads+1) ); lookAhead_ib = intMalloc_dist( (num_look_aheads+1) ); int_t mrb = (nsupers + Pr - 1) / Pr; int_t mcb = (nsupers + Pc - 1) / Pc; RemainStRow = intMalloc_dist(mrb); #if 0 Remain_lptr = (int *) _mm_malloc(sizeof(int)*mrb,1); #else Remain_lptr = intMalloc_dist(mrb); #endif // mlock(Remain_lptr, sizeof(int)*mrb ); Remain_ib = intMalloc_dist(mrb); Remain_info_t *Remain_info; #if 0 Remain_info = (Remain_info_t *) _mm_malloc(mrb*sizeof(Remain_info_t),64); #else Remain_info = (Remain_info_t *) SUPERLU_MALLOC(mrb*sizeof(Remain_info_t)); #endif double *lookAhead_L_buff, *Remain_L_buff; /* Stores entire L-panel */ Ublock_info_t *Ublock_info; ldt = sp_ienv_dist (3); /* max supernode size */ /* The following is quite loose */ lookAhead_L_buff = doubleMalloc_dist(ldt*ldt* (num_look_aheads+1) ); #if 0 Remain_L_buff = (double *) _mm_malloc( sizeof(double)*(Llu->bufmax[1]),64); Ublock_info = (Ublock_info_t *) _mm_malloc(mcb*sizeof(Ublock_info_t),64); int * Ublock_info_iukp = (int *) _mm_malloc(mcb*sizeof(int),64); int * Ublock_info_rukp = (int *) _mm_malloc(mcb*sizeof(int),64); int * Ublock_info_jb = (int *) _mm_malloc(mcb*sizeof(int),64); #else j = gemm_m_pad * (ldt + max_row_size + gemm_k_pad); Remain_L_buff = doubleMalloc_dist(Llu->bufmax[1] + j); /* This is loose */ Ublock_info = (Ublock_info_t *) 
SUPERLU_MALLOC(mcb*sizeof(Ublock_info_t)); int *Ublock_info_iukp = (int *) SUPERLU_MALLOC(mcb*sizeof(int)); int *Ublock_info_rukp = (int *) SUPERLU_MALLOC(mcb*sizeof(int)); int *Ublock_info_jb = (int *) SUPERLU_MALLOC(mcb*sizeof(int)); #endif long long alloc_mem = 3 * mrb * iword + mrb * sizeof(Remain_info_t) + ldt * ldt * (num_look_aheads+1) * dword + Llu->bufmax[1] * dword ; log_memory(alloc_mem, stat); InitTimer = SuperLU_timer_() - tt1; double pxgstrfTimer = SuperLU_timer_(); /* ################################################################## ** Handle first block column separately to start the pipeline. ** ################################################################## */ look_id = 0; msgcnt = msgcnts[0]; /* Lsub[0] to be transferred */ send_req = send_reqs[0]; recv_req = recv_reqs[0]; k0 = 0; k = perm_c_supno[0]; kcol = PCOL (k, grid); krow = PROW (k, grid); if (mycol == kcol) { double ttt1 = SuperLU_timer_(); /* panel factorization */ PDGSTRF2 (options, k0, k, thresh, Glu_persist, grid, Llu, U_diag_blk_send_req, tag_ub, stat, info); pdgstrf2_timer += SuperLU_timer_()-ttt1; scp = &grid->rscp; /* The scope of process row. */ /* Multicasts numeric values of L(:,0) to process rows. */ lk = LBj (k, grid); /* Local block number. 
*/ lsub = Lrowind_bc_ptr[lk]; lusup = Lnzval_bc_ptr[lk]; if (lsub) { /* number of entries in Lsub_buf[] to be transferred */ msgcnt[0] = lsub[1] + BC_HEADER + lsub[0] * LB_DESCRIPTOR; /* number of entries in Lval_buf[] to be transferred */ msgcnt[1] = lsub[1] * SuperSize (k); } else { msgcnt[0] = msgcnt[1] = 0; } for (pj = 0; pj < Pc; ++pj) { if (ToSendR[lk][pj] != EMPTY) { #if ( PROFlevel>=1 ) TIC (t1); #endif MPI_Isend (lsub, msgcnt[0], mpi_int_t, pj, SLU_MPI_TAG (0, 0) /* 0 */, scp->comm, &send_req[pj]); MPI_Isend (lusup, msgcnt[1], MPI_DOUBLE, pj, SLU_MPI_TAG (1, 0) /* 1 */, scp->comm, &send_req[pj + Pc]); #if ( DEBUGlevel>=2 ) printf ("[%d] first block cloumn Send L(:,%4d): lsub %4d, lusup %4d to Pc %2d\n", iam, 0, msgcnt[0], msgcnt[1], pj); #endif #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; stat->utime[COMM_RIGHT] += t2; ++prof_sendR[lk]; msg_cnt += 2; msg_vol += msgcnt[0] * iword + msgcnt[1] * dword; #endif } /* end if */ } /* end for pj ... */ } else { /* Post immediate receives. */ if (ToRecv[k] >= 1) { /* Recv block column L(:,0). */ scp = &grid->rscp; /* The scope of process row. */ #if ( PROFlevel>=1 ) TIC (t1); #endif MPI_Irecv (Lsub_buf_2[0], Llu->bufmax[0], mpi_int_t, kcol, SLU_MPI_TAG (0, 0) /* 0 */ , scp->comm, &recv_req[0]); MPI_Irecv (Lval_buf_2[0], Llu->bufmax[1], MPI_DOUBLE, kcol, SLU_MPI_TAG (1, 0) /* 1 */ , scp->comm, &recv_req[1]); #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; stat->utime[COMM_RIGHT] += t2; #endif } } /* end if mycol == 0 */ factored[k] = 0; /* flag column k as factored. */ /* post receive of first U-row */ if (myrow != krow) { if (ToRecv[k] == 2) { /* Recv block row U(k,:). */ scp = &grid->cscp; /* The scope of process column. 
*/ Usub_buf = Llu->Usub_buf_2[0]; Uval_buf = Llu->Uval_buf_2[0]; #if ( PROFlevel>=1 ) TIC (t1); #endif MPI_Irecv (Usub_buf, Llu->bufmax[2], mpi_int_t, krow, SLU_MPI_TAG (2, 0) /* 2%tag_ub */ , scp->comm, &recv_reqs_u[0][0]); MPI_Irecv (Uval_buf, Llu->bufmax[3], MPI_DOUBLE, krow, SLU_MPI_TAG (3, 0) /* 3%tag_ub */ , scp->comm, &recv_reqs_u[0][1]); #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; stat->utime[COMM_DOWN] += t2; #endif } } /* ################################################################## **** MAIN LOOP **** ################################################################## */ for (k0 = 0; k0 < nsupers; ++k0) { k = perm_c_supno[k0]; /* ============================================ * * ======= look-ahead the new L columns ======= * * ============================================ */ /* tt1 = SuperLU_timer_(); */ if (k0 == 0) { /* look-ahead all the columns in the window */ kk1 = k0 + 1; kk2 = SUPERLU_MIN (k0 + num_look_aheads, nsupers - 1); } else { /* look-ahead one new column after the current window */ kk1 = k0 + num_look_aheads; kk2 = SUPERLU_MIN (kk1, nsupers - 1); } for (kk0 = kk1; kk0 <= kk2; kk0++) { /* loop through look-ahead window in L */ kk = perm_c_supno[kk0]; /* use the ordering from static schedule */ look_id = kk0 % (1 + num_look_aheads); /* which column in window */ if (look_ahead[kk] < k0) { /* does not depend on current column k */ kcol = PCOL (kk, grid); if (mycol == kcol) { /* I own this panel */ /* Panel factorization -- Factor diagonal and subdiagonal L blocks and test for exact singularity. */ factored[kk] = 0; /* flag column kk as factored */ double ttt1 = SuperLU_timer_(); PDGSTRF2 (options, kk0, kk, thresh, Glu_persist, grid, Llu, U_diag_blk_send_req, tag_ub, stat, info); pdgstrf2_timer += SuperLU_timer_() - ttt1; /* Multicasts numeric values of L(:,kk) to process rows. 
*/ /* ttt1 = SuperLU_timer_(); */ msgcnt = msgcnts[look_id]; /* point to the proper count array */ send_req = send_reqs[look_id]; lk = LBj (kk, grid); /* Local block number in L. */ lsub1 = Lrowind_bc_ptr[lk]; if (lsub1) { msgcnt[0] = lsub1[1] + BC_HEADER + lsub1[0] * LB_DESCRIPTOR; /* size of metadata */ msgcnt[1] = lsub1[1] * SuperSize (kk); /* Lval_buf[] size */ } else { msgcnt[0] = 0; msgcnt[1] = 0; } scp = &grid->rscp; /* The scope of process row. */ for (pj = 0; pj < Pc; ++pj) { if (ToSendR[lk][pj] != EMPTY) { lusup1 = Lnzval_bc_ptr[lk]; #if ( PROFlevel>=1 ) TIC (t1); #endif MPI_Isend (lsub1, msgcnt[0], mpi_int_t, pj, SLU_MPI_TAG (0, kk0), /* (4*kk0)%tag_ub */ scp->comm, &send_req[pj]); MPI_Isend (lusup1, msgcnt[1], MPI_DOUBLE, pj, SLU_MPI_TAG (1, kk0), /* (4*kk0+1)%tag_ub */ scp->comm, &send_req[pj + Pc]); #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; stat->utime[COMM_RIGHT] += t2; ++prof_sendR[lk]; #endif #if ( DEBUGlevel>=2 ) printf ("[%d] -1- Send L(:,%4d): #lsub1 %4d, #lusup1 %4d right to Pj %2d\n", iam, kk, msgcnt[0], msgcnt[1], pj); #endif } } /* stat->time9 += SuperLU_timer_() - ttt1; */ } else { /* Post Recv of block column L(:,kk). */ /* double ttt1 = SuperLU_timer_(); */ if (ToRecv[kk] >= 1) { scp = &grid->rscp; /* The scope of process row. */ recv_req = recv_reqs[look_id]; #if ( PROFlevel>=1 ) TIC (t1); #endif MPI_Irecv (Lsub_buf_2[look_id], Llu->bufmax[0], mpi_int_t, kcol, SLU_MPI_TAG (0, kk0), /* (4*kk0)%tag_ub */ scp->comm, &recv_req[0]); MPI_Irecv (Lval_buf_2[look_id], Llu->bufmax[1], MPI_DOUBLE, kcol, SLU_MPI_TAG (1, kk0), /* (4*kk0+1)%tag_ub */ scp->comm, &recv_req[1]); #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; stat->utime[COMM_RIGHT] += t2; #endif } /* stat->time10 += SuperLU_timer_() - ttt1; */ } /* end if mycol == Pc(kk) */ } /* end if look-ahead in L panels */ /* Pre-post irecv for U-row look-ahead */ krow = PROW (kk, grid); if (myrow != krow) { if (ToRecv[kk] == 2) { /* post iRecv block row U(kk,:). 
*/ scp = &grid->cscp; /* The scope of process column. */ Usub_buf = Llu->Usub_buf_2[look_id]; Uval_buf = Llu->Uval_buf_2[look_id]; #if ( PROFlevel>=1 ) TIC (t1); #endif MPI_Irecv (Usub_buf, Llu->bufmax[2], mpi_int_t, krow, SLU_MPI_TAG (2, kk0) /* (4*kk0+2)%tag_ub */ , scp->comm, &recv_reqs_u[look_id][0]); MPI_Irecv (Uval_buf, Llu->bufmax[3], MPI_DOUBLE, krow, SLU_MPI_TAG (3, kk0) /* (4*kk0+3)%tag_ub */ , scp->comm, &recv_reqs_u[look_id][1]); #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; stat->utime[COMM_DOWN] += t2; #endif } } } /* end for each column in look-ahead window for L panels */ /* stat->time4 += SuperLU_timer_()-tt1; */ /* ================================= * * ==== look-ahead the U rows === * * ================================= */ kk1 = k0; kk2 = SUPERLU_MIN (k0 + num_look_aheads, nsupers - 1); for (kk0 = kk1; kk0 < kk2; kk0++) { kk = perm_c_supno[kk0]; /* order determined from static schedule */ if (factoredU[kk0] != 1 && look_ahead[kk] < k0) { /* does not depend on current column k */ kcol = PCOL (kk, grid); krow = PROW (kk, grid); lk = LBj (kk, grid); /* Local block number across row. NOT USED?? 
-- Sherry */ look_id = kk0 % (1 + num_look_aheads); msgcnt = msgcntsU[look_id]; recv_req = recv_reqs[look_id]; /* ================================================= * * Check if diagonal block has been received * * for panel factorization of U in look-ahead window * * ================================================= */ if (mycol == kcol) { /* I own this column panel, no need to receive L */ flag0 = flag1 = 1; msgcnt[0] = msgcnt[1] = -1; /* No need to transfer Lsub, nor Lval */ } else { /* Check to receive L(:,kk) from the left */ flag0 = flag1 = 0; if ( ToRecv[kk] >= 1 ) { #if ( PROFlevel>=1 ) TIC (t1); #endif if ( recv_req[0] != MPI_REQUEST_NULL ) { MPI_Test (&recv_req[0], &flag0, &status); if ( flag0 ) { MPI_Get_count (&status, mpi_int_t, &msgcnt[0]); recv_req[0] = MPI_REQUEST_NULL; } } else flag0 = 1; if ( recv_req[1] != MPI_REQUEST_NULL ) { MPI_Test (&recv_req[1], &flag1, &status); if ( flag1 ) { MPI_Get_count (&status, mpi_int_t, &msgcnt[1]); recv_req[1] = MPI_REQUEST_NULL; } } else flag1 = 1; #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; stat->utime[COMM_RIGHT] += t2; #endif } else { msgcnt[0] = 0; } } if (flag0 && flag1) { /* L(:,kk) is ready */ /* tt1 = SuperLU_timer_(); */ scp = &grid->cscp; /* The scope of process column. */ if (myrow == krow) { factoredU[kk0] = 1; /* Parallel triangular solve across process row *krow* -- U(k,j) = L(k,k) \ A(k,j). */ double ttt2 = SuperLU_timer_(); #ifdef _OPENMP /* #pragma omp parallel */ /* Sherry -- parallel done inside pdgstrs2 */ #endif { PDGSTRS2 (kk0, kk, Glu_persist, grid, Llu, stat); } pdgstrs2_timer += SuperLU_timer_()-ttt2; /* stat->time8 += SuperLU_timer_()-ttt2; */ /* Multicasts U(kk,:) to process columns. 
*/ lk = LBi (kk, grid); usub = Ufstnz_br_ptr[lk]; uval = Unzval_br_ptr[lk]; if (usub) { msgcnt[2] = usub[2]; /* metadata size */ msgcnt[3] = usub[1]; /* Uval[] size */ } else { msgcnt[2] = msgcnt[3] = 0; } if (ToSendD[lk] == YES) { for (pi = 0; pi < Pr; ++pi) { if (pi != myrow) { #if ( PROFlevel>=1 ) TIC (t1); #endif MPI_Isend (usub, msgcnt[2], mpi_int_t, pi, SLU_MPI_TAG (2, kk0), /* (4*kk0+2)%tag_ub */ scp->comm, &send_reqs_u[look_id][pi]); MPI_Isend (uval, msgcnt[3], MPI_DOUBLE, pi, SLU_MPI_TAG (3, kk0), /* (4*kk0+3)%tag_ub */ scp->comm, &send_reqs_u[look_id][pi + Pr]); #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; msg_cnt += 2; msg_vol += msgcnt[2] * iword + msgcnt[3] * dword; #endif #if ( DEBUGlevel>=2 ) printf ("[%d] Send U(%4d,:) to Pr %2d\n", iam, k, pi); #endif } /* if pi ... */ } /* for pi ... */ } /* if ToSendD ... */ /* stat->time2 += SuperLU_timer_()-tt1; */ } /* end if myrow == krow */ } /* end if flag0 & flag1 ... */ } /* end if factoredU[] ... */ } /* end for kk0 ... */ /* ============================================== * * == start processing the current row of U(k,:) * * ============================================== */ knsupc = SuperSize (k); krow = PROW (k, grid); kcol = PCOL (k, grid); /* tt1 = SuperLU_timer_(); */ look_id = k0 % (1 + num_look_aheads); recv_req = recv_reqs[look_id]; send_req = send_reqs[look_id]; msgcnt = msgcnts[look_id]; Usub_buf = Llu->Usub_buf_2[look_id]; Uval_buf = Llu->Uval_buf_2[look_id]; if (mycol == kcol) { lk = LBj (k, grid); /* Local block number in L */ #if ( PROFlevel>=1 ) TIC(t1); #endif for (pj = 0; pj < Pc; ++pj) { /* Wait for Isend to complete before using lsub/lusup buffer. */ if (ToSendR[lk][pj] != EMPTY) { MPI_Wait (&send_req[pj], &status); MPI_Wait (&send_req[pj + Pc], &status); } } #if ( PROFlevel>=1 ) TOC(t2, t1); stat->utime[COMM] += t2; stat->utime[COMM_RIGHT] += t2; #endif lsub = Lrowind_bc_ptr[lk]; lusup = Lnzval_bc_ptr[lk]; } else { if (ToRecv[k] >= 1) { /* Recv block column L(:,k). 
*/ scp = &grid->rscp; /* The scope of process row. */ /* ============================================= * * Waiting for L(:,kk) for outer-product uptate * * if iam in U(kk,:), then the diagonal block * * did not reach in time for panel factorization * * of U(k,:). * * ============================================= */ #if ( PROFlevel>=1 ) TIC (t1); #endif if (recv_req[0] != MPI_REQUEST_NULL) { MPI_Wait (&recv_req[0], &status); MPI_Get_count (&status, mpi_int_t, &msgcnt[0]); recv_req[0] = MPI_REQUEST_NULL; } else { msgcnt[0] = msgcntsU[look_id][0]; #if (DEBUGlevel>=2) printf("\t[%d] k=%d, look_id=%d, recv_req[0] == MPI_REQUEST_NULL, msgcnt[0] = %d\n", iam, k, look_id, msgcnt[0]); #endif } if (recv_req[1] != MPI_REQUEST_NULL) { MPI_Wait (&recv_req[1], &status); MPI_Get_count (&status, MPI_DOUBLE, &msgcnt[1]); recv_req[1] = MPI_REQUEST_NULL; } else { msgcnt[1] = msgcntsU[look_id][1]; #if (DEBUGlevel>=2) printf("\t[%d] k=%d, look_id=%d, recv_req[1] == MPI_REQUEST_NULL, msgcnt[1] = %d\n", iam, k, look_id, msgcnt[1]); #endif } #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; stat->utime[COMM_RIGHT] += t2; #endif #if ( DEBUGlevel>=2 ) printf("[%d] Recv L(:,%4d): #lsub %4d, #lusup %4d from Pc %2d\n", iam, k, msgcnt[0], msgcnt[1], kcol); fflush (stdout); #endif #if ( PRNTlevel==3 ) ++total_msg; if (!msgcnt[0]) ++zero_msg; #endif } else { msgcnt[0] = 0; } lsub = Lsub_buf_2[look_id]; lusup = Lval_buf_2[look_id]; } /* else if mycol = Pc(k) */ /* stat->time1 += SuperLU_timer_()-tt1; */ scp = &grid->cscp; /* The scope of process column. */ /* tt1 = SuperLU_timer_(); */ if (myrow == krow) { /* I own U(k,:) */ lk = LBi (k, grid); usub = Ufstnz_br_ptr[lk]; uval = Unzval_br_ptr[lk]; if (factoredU[k0] == -1) { /* Parallel triangular solve across process row *krow* -- U(k,j) = L(k,k) \ A(k,j). 
*/ double ttt2 = SuperLU_timer_(); #ifdef _OPENMP /* #pragma omp parallel */ /* Sherry -- parallel done inside pdgstrs2 */ #endif { PDGSTRS2 (k0, k, Glu_persist, grid, Llu, stat); } pdgstrs2_timer += SuperLU_timer_() - ttt2; /* Sherry -- need to set factoredU[k0] = 1; ?? */ /* Multicasts U(k,:) along process columns. */ if ( usub ) { msgcnt[2] = usub[2]; /* metadata size */ msgcnt[3] = usub[1]; /* Uval[] size */ } else { msgcnt[2] = msgcnt[3] = 0; } if (ToSendD[lk] == YES) { for (pi = 0; pi < Pr; ++pi) { if (pi != myrow) { /* Matching recv was pre-posted before */ #if ( PROFlevel>=1 ) TIC (t1); #endif MPI_Send (usub, msgcnt[2], mpi_int_t, pi, SLU_MPI_TAG (2, k0), /* (4*k0+2)%tag_ub */ scp->comm); MPI_Send (uval, msgcnt[3], MPI_DOUBLE, pi, SLU_MPI_TAG (3, k0), /* (4*k0+3)%tag_ub */ scp->comm); #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; stat->utime[COMM_DOWN] += t2; msg_cnt += 2; msg_vol += msgcnt[2] * iword + msgcnt[3] * dword; #endif #if ( DEBUGlevel>=2 ) printf ("[%d] Send U(%4d,:) down to Pr %2d\n", iam, k, pi); #endif } /* if pi ... */ } /* for pi ... */ } /* if ToSendD ... */ } else { /* Panel U(k,:) already factorized from previous look-ahead */ /* ================================================ * * Wait for downward sending of U(k,:) to complete * * for outer-product update. * * ================================================ */ if (ToSendD[lk] == YES) { #if ( PROFlevel>=1 ) TIC (t1); #endif for (pi = 0; pi < Pr; ++pi) { if (pi != myrow) { MPI_Wait (&send_reqs_u[look_id][pi], &status); MPI_Wait (&send_reqs_u[look_id][pi + Pr], &status); } } #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; stat->utime[COMM_DOWN] += t2; #endif } msgcnt[2] = msgcntsU[look_id][2]; msgcnt[3] = msgcntsU[look_id][3]; } /* stat->time2 += SuperLU_timer_()-tt1; */ } else { /* myrow != krow */ /* ========================================== * * Wait for U(k,:) for outer-product updates. 
* * ========================================== */ if (ToRecv[k] == 2) { /* Recv block row U(k,:). */ #if ( PROFlevel>=1 ) TIC (t1); #endif MPI_Wait (&recv_reqs_u[look_id][0], &status); MPI_Get_count (&status, mpi_int_t, &msgcnt[2]); MPI_Wait (&recv_reqs_u[look_id][1], &status); MPI_Get_count (&status, MPI_DOUBLE, &msgcnt[3]); #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; stat->utime[COMM_DOWN] += t2; #endif usub = Usub_buf; uval = Uval_buf; #if ( DEBUGlevel>=2 ) printf ("[%d] Recv U(%4d,:) from Pr %2d\n", iam, k, krow); #endif #if ( PRNTlevel==3 ) ++total_msg; if (!msgcnt[2]) ++zero_msg; #endif } else { msgcnt[2] = 0; } /* stat->time6 += SuperLU_timer_()-tt1; */ } /* end if myrow == Pr(k) */ /* * Parallel rank-k update; pair up blocks L(i,k) and U(k,j). * for (j = k+1; k < N; ++k) { * for (i = k+1; i < N; ++i) * if ( myrow == PROW( i, grid ) && mycol == PCOL( j, grid ) * && L(i,k) != 0 && U(k,j) != 0 ) * A(i,j) = A(i,j) - L(i,k) * U(k,j); */ msg0 = msgcnt[0]; msg2 = msgcnt[2]; /* tt1 = SuperLU_timer_(); */ if (msg0 && msg2) { /* L(:,k) and U(k,:) are not empty. */ nsupr = lsub[1]; /* LDA of lusup. */ if (myrow == krow) { /* Skip diagonal block L(k,k). */ lptr0 = BC_HEADER + LB_DESCRIPTOR + lsub[BC_HEADER + 1]; luptr0 = knsupc; nlb = lsub[0] - 1; } else { lptr0 = BC_HEADER; luptr0 = 0; nlb = lsub[0]; } iukp = BR_HEADER; /* Skip header; Pointer to index[] of U(k,:) */ rukp = 0; /* Pointer to nzval[] of U(k,:) */ nub = usub[0]; /* Number of blocks in the block row U(k,:) */ klst = FstBlockC (k + 1); /* ------------------------------------------------------------- Update the look-ahead block columns A(:,k+1:k+num_look_ahead) ------------------------------------------------------------- */ iukp0 = iukp; rukp0 = rukp; /* reorder the remaining columns in bottome-up */ /* TAU_STATIC_TIMER_START("LOOK_AHEAD_UPDATE"); */ for (jj = 0; jj < nub; jj++) { #ifdef ISORT iperm_u[jj] = iperm_c_supno[usub[iukp]]; /* Global block number of block U(k,j). 
*/ perm_u[jj] = jj; #else perm_u[2 * jj] = iperm_c_supno[usub[iukp]]; /* Global block number of block U(k,j). */ perm_u[2 * jj + 1] = jj; #endif jb = usub[iukp]; /* Global block number of block U(k,j). */ nsupc = SuperSize (jb); iukp += UB_DESCRIPTOR; /* Start fstnz of block U(k,j). */ iukp += nsupc; } iukp = iukp0; #ifdef ISORT /* iperm_u is sorted based on elimination order; perm_u reorders the U blocks to match the elimination order. */ isort (nub, iperm_u, perm_u); #else qsort (perm_u, (size_t) nub, 2 * sizeof (int_t), &superlu_sort_perm); #endif /************************************************************************/ double ttx =SuperLU_timer_(); //#include "dlook_ahead_update_v4.c" #include "dlook_ahead_update.c" lookaheadupdatetimer += SuperLU_timer_() - ttx; /************************************************************************/ /*ifdef OMP_LOOK_AHEAD */ /* TAU_STATIC_TIMER_STOP("LOOK_AHEAD_UPDATE"); */ } /* if L(:,k) and U(k,:) not empty */ /* stat->time3 += SuperLU_timer_()-tt1; */ /* ================== */ /* == post receive == */ /* ================== */ kk1 = SUPERLU_MIN (k0 + num_look_aheads, nsupers - 1); for (kk0 = k0 + 1; kk0 <= kk1; kk0++) { kk = perm_c_supno[kk0]; kcol = PCOL (kk, grid); if (look_ahead[kk] == k0) { if (mycol != kcol) { if (ToRecv[kk] >= 1) { scp = &grid->rscp; /* The scope of process row. */ look_id = kk0 % (1 + num_look_aheads); recv_req = recv_reqs[look_id]; #if ( PROFlevel>=1 ) TIC (t1); #endif MPI_Irecv (Lsub_buf_2[look_id], Llu->bufmax[0], mpi_int_t, kcol, SLU_MPI_TAG (0, kk0), /* (4*kk0)%tag_ub */ scp->comm, &recv_req[0]); MPI_Irecv (Lval_buf_2[look_id], Llu->bufmax[1], MPI_DOUBLE, kcol, SLU_MPI_TAG (1, kk0), /* (4*kk0+1)%tag_ub */ scp->comm, &recv_req[1]); #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; stat->utime[COMM_RIGHT] += t2; #endif } } else { lk = LBj (kk, grid); /* Local block number. 
*/ lsub1 = Lrowind_bc_ptr[lk]; lusup1 = Lnzval_bc_ptr[lk]; if (factored[kk] == -1) { /* Factor diagonal and subdiagonal blocks and test for exact singularity. */ factored[kk] = 0; /* flag column kk as factored */ double ttt1 = SuperLU_timer_(); PDGSTRF2 (options, kk0, kk, thresh, Glu_persist, grid, Llu, U_diag_blk_send_req, tag_ub, stat, info); pdgstrf2_timer += SuperLU_timer_() - ttt1; /* Process column *kcol+1* multicasts numeric values of L(:,k+1) to process rows. */ look_id = kk0 % (1 + num_look_aheads); send_req = send_reqs[look_id]; msgcnt = msgcnts[look_id]; if (lsub1) { msgcnt[0] = lsub1[1] + BC_HEADER + lsub1[0] * LB_DESCRIPTOR; msgcnt[1] = lsub1[1] * SuperSize (kk); } else { msgcnt[0] = 0; msgcnt[1] = 0; } scp = &grid->rscp; /* The scope of process row. */ for (pj = 0; pj < Pc; ++pj) { if (ToSendR[lk][pj] != EMPTY) { #if ( PROFlevel>=1 ) TIC (t1); #endif MPI_Isend (lsub1, msgcnt[0], mpi_int_t, pj, SLU_MPI_TAG (0, kk0), /* (4*kk0)%tag_ub */ scp->comm, &send_req[pj]); MPI_Isend (lusup1, msgcnt[1], MPI_DOUBLE, pj, SLU_MPI_TAG (1, kk0), /* (4*kk0+1)%tag_ub */ scp->comm, &send_req[pj + Pc]); #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; stat->utime[COMM_RIGHT] += t2; ++prof_sendR[lk]; #endif } } /* end for pj ... */ } /* if factored[kk] ... */ } } } double tsch = SuperLU_timer_(); /*******************************************************************/ #ifdef GPU_ACC #include "dSchCompUdt-cuda.c" #else /*#include "SchCompUdt--Phi-2Ddynamic-alt.c"*/ //#include "dSchCompUdt-2Ddynamic_v6.c" #include "dSchCompUdt-2Ddynamic.c" #endif /*uncomment following to compare against SuperLU 3.3 baseline*/ /* #include "SchCompUdt--baseline.c" */ /************************************************************************/ NetSchurUpTimer += SuperLU_timer_() - tsch; } /* MAIN LOOP for k0 = 0, ... */ /* ################################################################## ** END MAIN LOOP: for k0 = ... 
################################################################## */ pxgstrfTimer = SuperLU_timer_() - pxgstrfTimer; #if ( PRNTlevel>=2 ) /* Print detailed statistics */ /* Updating total flops */ double allflops; MPI_Reduce(&RemainGEMM_flops, &allflops, 1, MPI_DOUBLE, MPI_SUM, 0, grid->comm); if ( iam==0 ) { printf("\nInitialization time\t%8.2lf seconds\n" "\t Serial: compute static schedule, allocate storage\n", InitTimer); printf("\n==== Time breakdown in factorization (rank 0) ====\n"); printf("Panel factorization \t %8.2lf seconds\n", pdgstrf2_timer + pdgstrs2_timer); printf(".. L-panel pxgstrf2 \t %8.2lf seconds\n", pdgstrf2_timer); printf(".. U-panel pxgstrs2 \t %8.2lf seconds\n", pdgstrs2_timer); printf("Time in Look-ahead update \t %8.2lf seconds\n", lookaheadupdatetimer); printf("Time in Schur update \t\t %8.2lf seconds\n", NetSchurUpTimer); printf(".. Time to Gather L buffer\t %8.2lf (Separate L panel by Lookahead/Remain)\n", GatherLTimer); printf(".. Time to Gather U buffer\t %8.2lf \n", GatherUTimer); printf(".. Time in GEMM %8.2lf \n", LookAheadGEMMTimer + RemainGEMMTimer); printf("\t* Look-ahead\t %8.2lf \n", LookAheadGEMMTimer); printf("\t* Remain\t %8.2lf\tFlops %8.2le\tGflops %8.2lf\n", RemainGEMMTimer, allflops, allflops/RemainGEMMTimer*1e-9); printf(".. 
Time to Scatter %8.2lf \n", LookAheadScatterTimer + RemainScatterTimer); printf("\t* Look-ahead\t %8.2lf \n", LookAheadScatterTimer); printf("\t* Remain\t %8.2lf \n", RemainScatterTimer); printf("Total factorization time \t: %8.2lf seconds, \n", pxgstrfTimer); printf("--------\n"); printf("GEMM maximum block: %d-%d-%d\n", gemm_max_m, gemm_max_k, gemm_max_n); } #endif #if ( DEBUGlevel>=3 ) for (i = 0; i < Pr * Pc; ++i) { if (iam == i) { dPrintLblocks(iam, nsupers, grid, Glu_persist, Llu); dPrintUblocks(iam, nsupers, grid, Glu_persist, Llu); printf ("(%d)\n", iam); PrintInt10 ("Recv", nsupers, Llu->ToRecv); } MPI_Barrier (grid->comm); } #endif /******************************************************** * Free memory * ********************************************************/ if (Pr * Pc > 1) { SUPERLU_FREE (Lsub_buf_2[0]); /* also free Lsub_buf_2[1] */ SUPERLU_FREE (Lval_buf_2[0]); /* also free Lval_buf_2[1] */ if (Llu->bufmax[2] != 0) SUPERLU_FREE (Usub_buf_2[0]); if (Llu->bufmax[3] != 0) SUPERLU_FREE (Uval_buf_2[0]); if (U_diag_blk_send_req[myrow] != MPI_REQUEST_NULL) { /* wait for last Isend requests to complete, deallocate objects */ for (krow = 0; krow < Pr; ++krow) { if (krow != myrow) MPI_Wait (U_diag_blk_send_req + krow, &status); } } SUPERLU_FREE (U_diag_blk_send_req); } log_memory( -((Llu->bufmax[0] + Llu->bufmax[2]) * (num_look_aheads + 1) * iword + (Llu->bufmax[1] + Llu->bufmax[3]) * (num_look_aheads + 1) * dword), stat ); SUPERLU_FREE (Lsub_buf_2); SUPERLU_FREE (Lval_buf_2); SUPERLU_FREE (Usub_buf_2); SUPERLU_FREE (Uval_buf_2); SUPERLU_FREE (perm_c_supno); SUPERLU_FREE (perm_u); #ifdef ISORT SUPERLU_FREE (iperm_u); #endif SUPERLU_FREE (look_ahead); SUPERLU_FREE (factoredU); SUPERLU_FREE (factored); log_memory(-(6 * nsupers * iword), stat); for (i = 0; i <= num_look_aheads; i++) { SUPERLU_FREE (msgcnts[i]); SUPERLU_FREE (msgcntsU[i]); } SUPERLU_FREE (msgcnts); SUPERLU_FREE (msgcntsU); for (i = 0; i <= num_look_aheads; i++) { SUPERLU_FREE (send_reqs_u[i]); 
SUPERLU_FREE (recv_reqs_u[i]); SUPERLU_FREE (send_reqs[i]); SUPERLU_FREE (recv_reqs[i]); } SUPERLU_FREE (recv_reqs_u); SUPERLU_FREE (send_reqs_u); SUPERLU_FREE (recv_reqs); SUPERLU_FREE (send_reqs); #ifdef GPU_ACC checkCuda (cudaFreeHost (bigV)); checkCuda (cudaFreeHost (bigU)); cudaFree( (void*)dA ); /* Sherry added */ cudaFree( (void*)dB ); cudaFree( (void*)dC ); SUPERLU_FREE( handle ); SUPERLU_FREE( streams ); SUPERLU_FREE( stream_end_col ); #else // #ifdef __INTEL_COMPILER // _mm_free (bigU); // _mm_free (bigV); // #else SUPERLU_FREE (bigV); SUPERLU_FREE (bigU); // #endif /* Decrement freed memory from memory stat. */ log_memory(-(bigv_size + bigu_size) * dword, stat); #endif SUPERLU_FREE (Llu->ujrow); // SUPERLU_FREE (tempv2d);/* Sherry */ SUPERLU_FREE (indirect); SUPERLU_FREE (indirect2); /* Sherry added */ ldt = sp_ienv_dist(3); log_memory( -(3 * ldt *ldt * dword + 2 * ldt * num_threads * iword), stat ); /* Sherry added */ SUPERLU_FREE(omp_loop_time); SUPERLU_FREE(full_u_cols); SUPERLU_FREE(blk_ldu); #if ( PRNTlevel>=1 ) log_memory(-2 * ncb * dword, stat); #endif SUPERLU_FREE(lookAheadFullRow); SUPERLU_FREE(lookAheadStRow); SUPERLU_FREE(lookAhead_lptr); SUPERLU_FREE(lookAhead_ib); SUPERLU_FREE(RemainStRow); SUPERLU_FREE(Remain_lptr); SUPERLU_FREE(Remain_ib); SUPERLU_FREE(Remain_info); SUPERLU_FREE(lookAhead_L_buff); SUPERLU_FREE(Remain_L_buff); log_memory( -(3 * mrb * iword + mrb * sizeof(Remain_info_t) + ldt * ldt * (num_look_aheads + 1) * dword + Llu->bufmax[1] * dword), stat ); SUPERLU_FREE(Ublock_info); SUPERLU_FREE(Ublock_info_iukp); SUPERLU_FREE(Ublock_info_rukp); SUPERLU_FREE(Ublock_info_jb); #if ( PROFlevel>=1 ) TIC (t1); #endif /* Prepare error message - find the smallesr index i that U(i,i)==0 */ if ( *info == 0 ) *info = n + 1; MPI_Allreduce (info, &iinfo, 1, MPI_INT, MPI_MIN, grid->comm); if ( iinfo == n + 1 ) *info = 0; else *info = iinfo; #if ( PROFlevel>=1 ) TOC (t2, t1); stat->utime[COMM] += t2; { float msg_vol_max, msg_vol_sum, msg_cnt_max, 
msg_cnt_sum; MPI_Reduce (&msg_cnt, &msg_cnt_sum, 1, MPI_FLOAT, MPI_SUM, 0, grid->comm); MPI_Reduce (&msg_cnt, &msg_cnt_max, 1, MPI_FLOAT, MPI_MAX, 0, grid->comm); MPI_Reduce (&msg_vol, &msg_vol_sum, 1, MPI_FLOAT, MPI_SUM, 0, grid->comm); MPI_Reduce (&msg_vol, &msg_vol_max, 1, MPI_FLOAT, MPI_MAX, 0, grid->comm); if ( iam==0 ) { printf ("\tPDGSTRF comm stat:" "\tAvg\tMax\t\tAvg\tMax\n" "\t\t\tCount:\t%.0f\t%.0f\tVol(MB)\t%.2f\t%.2f\n", msg_cnt_sum / Pr / Pc, msg_cnt_max, msg_vol_sum / Pr / Pc * 1e-6, msg_vol_max * 1e-6); printf("\t\tcomm time on task 0: %8.2lf\n" "\t\t\tcomm down DIAG block %8.2lf\n" "\t\t\tcomm right L panel %8.2lf\n" "\t\t\tcomm down U panel %8.2lf\n", stat->utime[COMM], stat->utime[COMM_DIAG], stat->utime[COMM_RIGHT], stat->utime[COMM_DOWN]); //#include <float.h> //int Digs = DECIMAL_DIG; printf("gemm_count %d\n", gemm_count); for (i = 0; i < gemm_count; ++i) fprintf(fgemm, "%8d%8d%8d\t %20.16e\t%8d\n", gemm_stats[i].m, gemm_stats[i].n, gemm_stats[i].k, gemm_stats[i].microseconds, prof_sendR[i]); fclose(fgemm); } SUPERLU_FREE(gemm_stats); SUPERLU_FREE(prof_sendR); } #endif #if ( PRNTlevel==3 ) MPI_Allreduce (&zero_msg, &iinfo, 1, MPI_INT, MPI_SUM, grid->comm); if (!iam) printf (".. # msg of zero size\t%d\n", iinfo); MPI_Allreduce (&total_msg, &iinfo, 1, MPI_INT, MPI_SUM, grid->comm); if (!iam) printf (".. # total msg\t%d\n", iinfo); #endif #if ( DEBUGlevel>=3 ) for (i = 0; i < Pr * Pc; ++i) { if (iam == i) { dPrintLblocks (iam, nsupers, grid, Glu_persist, Llu); dPrintUblocks (iam, nsupers, grid, Glu_persist, Llu); printf ("(%d)\n", iam); PrintInt10 ("Recv", nsupers, Llu->ToRecv); } MPI_Barrier (grid->comm); } #endif #if ( DEBUGlevel>=3 ) printf ("(%d) num_copy=%d, num_update=%d\n", iam, num_copy, num_update); #endif #if ( DEBUGlevel>=1 ) CHECK_MALLOC (iam, "Exit pdgstrf()"); #endif return 0; } /* PDGSTRF */
depend-6.c
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */

/* Compile-only negative tests for the OpenMP 'depend' clause: every pragma
   below names a dependence item that is not a valid lvalue/array-section
   combination (an array section followed by a member access, a non-lvalue
   expression, or a bit-field), so the compiler must emit the diagnostic
   recorded in the trailing dg-error comment.  */

struct T
{
  int c[3];
};
struct S
{
  int a;
  struct T *b;
  struct T g;
};
struct U
{
  int a : 5;	/* bit-field member: never a valid dependence item */
};
struct S d[10];
struct S *e[10];
struct S *f;
struct S h;
struct U i;

void
foo (void)
{
  /* An array section (d[:2], d[1:], d[0:1]) may not be followed by a
     member access or further dereference.  */
  #pragma omp task depend(in: d[:2].b->c[2])	/* { dg-error "expected" } */
  ;
  #pragma omp task depend(inout: d[1:].b->c[2])	/* { dg-error "expected" } */
  ;
  #pragma omp task depend(out: d[0:1].a)	/* { dg-error "expected" } */
  ;
  #pragma omp task depend(in: e[3:2]->a)	/* { dg-error "expected" } */
  ;
  #pragma omp task depend(inout: e[2:2]->b->c)	/* { dg-error "expected" } */
  ;
  /* A section is also rejected after a chain of member accesses.  */
  #pragma omp task depend(in: e[1]->b->c[2:1])	/* { dg-error "expected" } */
  ;
  /* 'f + 0' is not an lvalue, so it cannot carry a dependence.  */
  #pragma omp task depend(out: f + 0)	/* { dg-error "not lvalue expression" } */
  ;
  #pragma omp task depend(inout: f[0:1].a)	/* { dg-error "expected" } */
  ;
  #pragma omp task depend(inout: h.g.c[2:1])	/* { dg-error "expected" } */
  ;
  /* Bit-fields are addressless and therefore disallowed.  */
  #pragma omp task depend(in: i.a)	/* { dg-error "bit-field '\[^\n\r]*' in 'depend' clause" } */
  ;
}
GB_unaryop__ainv_int8_int16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__ainv_int8_int16
// op(A') function: GB_tran__ainv_int8_int16

// C type:  int8_t
// A type:  int16_t
// cast:    int8_t cij = (int8_t) aij
// unaryop: cij = -aij

// type of the A matrix entries
#define GB_ATYPE \
    int16_t

// type of the C matrix entries
#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (arithmetic negation, AINV)
#define GB_OP(z, x) \
    z = -x ;

// casting (int16_t value narrowed to int8_t)
#define GB_CASTING(z, x) \
    int8_t z = (int8_t) x ;

// cij = op (cast (aij)): get, typecast, then apply the operator
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT8 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise pass over the anz entries of A, parallelized with a static
// schedule across nthreads OpenMP threads.  Returns GrB_NO_VALUE when the
// operator/type combination has been compiled out via GB_DISABLE.
GrB_Info GB_unop__ainv_int8_int16
(
    int8_t *restrict Cx,            // output array, anz entries
    const int16_t *restrict Ax,     // input array, anz entries
    int64_t anz,                    // number of entries
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose kernel lives in GB_unaryop_transpose.c, which is
// textually included here with the macros above defining types and operator.
GrB_Info GB_tran__ainv_int8_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,            // per-slice row counts (phase-2 workspace)
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice                     // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
jacobi-omp3.c
/* * BSD 2-Clause License * * Copyright (c) 2020, Alessandro Capotondi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file jacobi.c * @author Alessandro Capotondi * @date 27 Mar 2020 * @brief This code solves the steady state heat equation on a rectangular region. * This code solves the steady state heat equation on a rectangular region. * The sequential version of this program needs approximately * 18/epsilon iterations to complete. * The physical region, and the boundary conditions, are suggested * by this diagram; * W = 0 * +------------------+ * | | * W = 100 | | W = 100 * | | * +------------------+ * W = 100 * The region is covered with a grid of M by N nodes, and an N by N * array W is used to record the temperature. 
The correspondence between
 * array indices and locations in the region is suggested by giving the
 * indices of the four corners:
 *              I = 0
 *     [0][0]-------------[0][N-1]
 *        |                  |
 * J = 0  |                  |  J = N-1
 *        |                  |
 *   [M-1][0]-----------[M-1][N-1]
 *             I = M-1
 * The steady state solution to the discrete heat equation satisfies the
 * following condition at an interior grid point:
 *   W[Central] = (1/4) * ( W[North] + W[South] + W[East] + W[West] )
 * where "Central" is the index of the grid point, "North" is the index
 * of its immediate neighbor to the "north", and so on.
 *
 * Given an approximate solution of the steady state heat equation, a
 * "better" solution is given by replacing each interior point by the
 * average of its 4 neighbors - in other words, by using the condition
 * as an ASSIGNMENT statement:
 *   W[Central] <= (1/4) * ( W[North] + W[South] + W[East] + W[West] )
 * If this process is repeated often enough, the difference between successive
 * estimates of the solution will go to zero.
 * This program carries out such an iteration, using a tolerance specified by
 * the user, and writes the final estimate of the solution to a file that can
 * be used for graphic processing.
 * Licensing:
 *   This code is distributed under the GNU LGPL license.
 * Modified:
 *   18 October 2011
 * Author:
 *   Original C version by Michael Quinn.
 *   This C version by John Burkardt.
 * Reference:
 *   Michael Quinn,
 *   Parallel Programming in C with MPI and OpenMP,
 *   McGraw-Hill, 2004,
 *   ISBN13: 978-0071232654,
 *   LC: QA76.73.C15.Q55.
 * Local parameters:
 *   Local, double DIFF, the norm of the change in the solution from one iteration
 *   to the next.
 *   Local, double MEAN, the average of the boundary values, used to initialize
 *   the values of the solution in the interior.
 *   Local, double U[M][N], the solution at the previous iteration.
 *   Local, double W[M][N], the solution computed at the latest iteration.
* * * @see https://en.wikipedia.org/wiki/Jacobi_method * @see http://algo.ing.unimo.it/people/andrea/Didattica/HPC/index.html */ #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include "utils.h" static int N; static int MAX_ITERATIONS; static int SEED; static double CONVERGENCE_THRESHOLD; static FILE *data; #define SEPARATOR "------------------------------------\n" // Return the current time in seconds since the Epoch double get_timestamp(); // Parse command line arguments to set solver parameters void parse_arguments(int argc, char *argv[]); // Run the Jacobi solver // Returns the number of iterations performed int run(double *A, double *xtmp) { int iter = 0, iterations_print = 1; double err = 0.0; #pragma omp parallel shared(err) firstprivate(iter, iterations_print) num_threads(NTHREADS) { do { #pragma omp barrier #pragma omp single err = 0.0; #pragma omp for reduction(max \ : err) nowait for (int i = 1; i < N - 1; i++) { #pragma omp simd for (int j = 1; j < N - 1; j++) { xtmp[i * N + j] = 0.25 * (A[(i - 1) * N + j] + A[(i + 1) * N + j] + A[i * N + j - 1] + A[i * N + j + 1]); err = fmax(err, fabs(xtmp[i * N + j] - A[i * N + j])); } } #pragma omp for for (int i = 0; i < N; i++) { #pragma omp simd for (int j = 0; j < N; j++) { A[i * N + j] = xtmp[i * N + j]; } } iter++; #ifdef DEBUG if (iter == iterations_print) { printf(" %8d %f\n", iter, err); iterations_print = 2 * iterations_print; } #endif } while (err > CONVERGENCE_THRESHOLD && iter < MAX_ITERATIONS); } return iter; } int main(int argc, char *argv[]) { parse_arguments(argc, argv); double *A = malloc(N * N * sizeof(double)); double *xtmp = malloc(N * N * sizeof(double)); printf(SEPARATOR); printf("Matrix size: %dx%d\n", N, N); printf("Maximum iterations: %d\n", MAX_ITERATIONS); printf("Convergence threshold: %lf\n", CONVERGENCE_THRESHOLD); printf(SEPARATOR); for (int ii = 0; ii < N; ii++) { for (int jj = 0; jj < N; jj++) { double f; fread(&f, sizeof(double), 
1, data); A[ii * N + jj] = f; } } // Run Jacobi solver start_timer(); int itr = run(A, xtmp); stop_timer(); printf("Iterations = %d\n", itr); printf("Solver runtime = %lf ms\n", elapsed_ns() / 1E6); if (itr == MAX_ITERATIONS) printf("WARNING: solution did not converge\n"); printf(SEPARATOR); free(A); free(xtmp); fclose(data); return 0; } int parse_int(const char *str) { char *next; int value = strtoul(str, &next, 10); return strlen(next) ? -1 : value; } double parse_double(const char *str) { char *next; double value = strtod(str, &next); return strlen(next) ? -1 : value; } void parse_arguments(int argc, char *argv[]) { // Set default values N = 500; MAX_ITERATIONS = 1000; CONVERGENCE_THRESHOLD = 0.001; SEED = 0; for (int i = 1; i < argc; i++) { if (!strcmp(argv[i], "--convergence") || !strcmp(argv[i], "-c")) { if (++i >= argc || (CONVERGENCE_THRESHOLD = parse_double(argv[i])) < 0) { printf("Invalid convergence threshold\n"); exit(1); } } else if (!strcmp(argv[i], "--iterations") || !strcmp(argv[i], "-i")) { if (++i >= argc || (MAX_ITERATIONS = parse_int(argv[i])) < 0) { printf("Invalid number of iterations\n"); exit(1); } } else if (!strcmp(argv[i], "--norder") || !strcmp(argv[i], "-n")) { if (++i >= argc || (N = parse_int(argv[i])) < 0) { printf("Invalid matrix order\n"); exit(1); } } else if (!strcmp(argv[i], "--help") || !strcmp(argv[i], "-h")) { printf("\n"); printf("Usage: ./jacobi [OPTIONS]\n\n"); printf("Options:\n"); printf(" -h --help Print this message\n"); printf(" -c --convergence C Set convergence threshold\n"); printf(" -i --iterations I Set maximum number of iterations\n"); printf(" -n --norder N Set maxtrix order (500 or 1000)\n"); printf("\n"); exit(0); } else { printf("Unrecognized argument '%s' (try '--help')\n", argv[i]); exit(1); } } if (N == 1000) data = fopen("data/jacobi-1000.bin", "rb"); else if (N == 500) data = fopen("data/jacobi-500.bin", "rb"); else { printf("Invalid matrix order\n"); exit(1); } }
enhance.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE N N H H AAA N N CCCC EEEEE % % E NN N H H A A NN N C E % % EEE N N N HHHHH AAAAA N N N C EEE % % E N NN H H A A N NN C E % % EEEEE N N H H A A N N CCCC EEEEE % % % % % % MagickCore Image Enhancement Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/accelerate.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite-private.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/histogram.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/opencl.h" #include "magick/opencl-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/resample.h" #include "magick/resample-private.h" #include "magick/resource_.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/token.h" #include "magick/xml-tree.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoGammaImage() extract the 'mean' from the image and adjust the image % to try make set its gamma appropriatally. % % The format of the AutoGammaImage method is: % % MagickBooleanType AutoGammaImage(Image *image) % MagickBooleanType AutoGammaImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: The image to auto-level % % o channel: The channels to auto-level. 
If the special 'SyncChannels'
%      flag is set all given channels is adjusted in the same way using the
%      mean average of those channels.
%
*/

/* Trivial wrapper: auto-gamma over the default channel set. */
MagickExport MagickBooleanType AutoGammaImage(Image *image)
{
  return(AutoGammaImageChannel(image,DefaultChannels));
}

MagickExport MagickBooleanType AutoGammaImageChannel(Image *image,
  const ChannelType channel)
{
  double
    gamma,
    mean,
    logmean,
    sans;

  MagickStatusType
    status;

  /* log(0.5): gamma is solved so the channel mean maps to mid-intensity. */
  logmean=log(0.5);
  if ((channel & SyncChannels) != 0)
    {
      /*
        Apply gamma correction equally across all given channels.
      */
      (void) GetImageChannelMean(image,channel,&mean,&sans,&image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      return(LevelImageChannel(image,channel,0.0,(double) QuantumRange,gamma));
    }
  /*
    Auto-gamma each channel separately.
  */
  status = MagickTrue;
  if ((channel & RedChannel) != 0)
    {
      (void) GetImageChannelMean(image,RedChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,RedChannel,0.0,(double) QuantumRange,
        gamma);
    }
  if ((channel & GreenChannel) != 0)
    {
      (void) GetImageChannelMean(image,GreenChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,GreenChannel,0.0,(double) QuantumRange,
        gamma);
    }
  if ((channel & BlueChannel) != 0)
    {
      (void) GetImageChannelMean(image,BlueChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,BlueChannel,0.0,(double) QuantumRange,
        gamma);
    }
  /* The opacity channel is only adjusted when the image has a matte. */
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    {
      (void) GetImageChannelMean(image,OpacityChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,OpacityChannel,0.0,(double) QuantumRange,
        gamma);
    }
  /* The index (black) channel only exists for CMYK images. */
  if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
    {
      (void) GetImageChannelMean(image,IndexChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,IndexChannel,0.0,(double) QuantumRange,
        gamma);
    }
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A u t o L e v e l I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AutoLevelImage() adjusts the levels of a particular image channel by
%  scaling the minimum and maximum values to the full quantum range.
%
%  The format of the LevelImage method is:
%
%      MagickBooleanType AutoLevelImage(Image *image)
%      MagickBooleanType AutoLevelImageChannel(Image *image,
%        const ChannelType channel)
%
%  A description of each parameter follows:
%
%    o image: The image to auto-level
%
%    o channel: The channels to auto-level.  If the special 'SyncChannels'
%      flag is set the min/max/mean value of all given channels is used for
%      all given channels, to all channels in the same way.
%
*/

/* Trivial wrapper: auto-level over the default channel set. */
MagickExport MagickBooleanType AutoLevelImage(Image *image)
{
  return(AutoLevelImageChannel(image,DefaultChannels));
}

MagickExport MagickBooleanType AutoLevelImageChannel(Image *image,
  const ChannelType channel)
{
  /*
    Convenience method for a min/max histogram stretch.
  */
  return(MinMaxStretchImage(image,channel,0.0,0.0));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B r i g h t n e s s C o n t r a s t I m a g e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BrightnessContrastImage() changes the brightness and/or contrast of an
%  image.  It converts the brightness and contrast parameters into slope and
%  intercept and calls a polynomial function to apply to the image.
%
%  The format of the BrightnessContrastImage method is:
%
%      MagickBooleanType BrightnessContrastImage(Image *image,
%        const double brightness,const double contrast)
%      MagickBooleanType BrightnessContrastImageChannel(Image *image,
%        const ChannelType channel,const double brightness,
%        const double contrast)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o brightness: the brightness percent (-100 .. 100).
%
%    o contrast: the contrast percent (-100 .. 100).
%
*/

MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
  const double brightness,const double contrast)
{
  MagickBooleanType
    status;

  status=BrightnessContrastImageChannel(image,DefaultChannels,brightness,
    contrast);
  return(status);
}

MagickExport MagickBooleanType BrightnessContrastImageChannel(Image *image,
  const ChannelType channel,const double brightness,const double contrast)
{
/* NOTE(review): tag is misspelled ("Contast") upstream and is unused in this
   function body; left untouched because it is a runtime string. */
#define BrightnessContastImageTag  "BrightnessContast/Image"

  double
    alpha,
    intercept,
    coefficients[2],
    slope;

  MagickBooleanType
    status;

  /*
    Compute slope and intercept.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  alpha=contrast;
  /* Contrast of -100..100 maps to a slope of tan(0..pi/2), clamped at 0. */
  slope=tan((double) (MagickPI*(alpha/100.0+1.0)/4.0));
  if (slope < 0.0)
    slope=0.0;
  intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope);
  coefficients[0]=slope;
  coefficients[1]=intercept;
  /* Apply y = slope*x + intercept per pixel via the polynomial function. */
  status=FunctionImageChannel(image,channel,PolynomialFunction,2,coefficients,
    &image->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o l o r D e c i s i o n L i s t I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ColorDecisionListImage() accepts a lightweight Color Correction Collection
%  (CCC) file which solely contains one or more color corrections and applies
%  the correction to the image.  Here is a sample CCC file:
%
%    <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
%          <ColorCorrection id="cc03345">
%                <SOPNode>
%                     <Slope> 0.9 1.2 0.5 </Slope>
%                     <Offset> 0.4 -0.5 0.6 </Offset>
%                     <Power> 1.0 0.8 1.5 </Power>
%                </SOPNode>
%                <SATNode>
%                     <Saturation> 0.85 </Saturation>
%                </SATNode>
%          </ColorCorrection>
%    </ColorCorrectionCollection>
%
%  which includes the slope, offset, and power for each of the RGB channels
%  as well as the saturation.
%
%  The format of the ColorDecisionListImage method is:
%
%      MagickBooleanType ColorDecisionListImage(Image *image,
%        const char *color_correction_collection)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o color_correction_collection: the color correction collection in XML.
% */ MagickExport MagickBooleanType ColorDecisionListImage(Image *image, const char *color_correction_collection) { #define ColorDecisionListCorrectImageTag "ColorDecisionList/Image" typedef struct _Correction { double slope, offset, power; } Correction; typedef struct _ColorCorrection { Correction red, green, blue; double saturation; } ColorCorrection; CacheView *image_view; char token[MaxTextExtent]; ColorCorrection color_correction; const char *content, *p; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; PixelPacket *cdl_map; register ssize_t i; ssize_t y; XMLTreeInfo *cc, *ccc, *sat, *sop; /* Allocate and initialize cdl maps. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (color_correction_collection == (const char *) NULL) return(MagickFalse); ccc=NewXMLTree((const char *) color_correction_collection,&image->exception); if (ccc == (XMLTreeInfo *) NULL) return(MagickFalse); cc=GetXMLTreeChild(ccc,"ColorCorrection"); if (cc == (XMLTreeInfo *) NULL) { ccc=DestroyXMLTree(ccc); return(MagickFalse); } color_correction.red.slope=1.0; color_correction.red.offset=0.0; color_correction.red.power=1.0; color_correction.green.slope=1.0; color_correction.green.offset=0.0; color_correction.green.power=1.0; color_correction.blue.slope=1.0; color_correction.blue.offset=0.0; color_correction.blue.power=1.0; color_correction.saturation=0.0; sop=GetXMLTreeChild(cc,"SOPNode"); if (sop != (XMLTreeInfo *) NULL) { XMLTreeInfo *offset, *power, *slope; slope=GetXMLTreeChild(sop,"Slope"); if (slope != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(slope); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { GetMagickToken(p,&p,token); if (*token == ',') GetMagickToken(p,&p,token); switch (i) { case 0: { color_correction.red.slope=StringToDouble(token,(char **) NULL); break; } case 1: { 
color_correction.green.slope=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.slope=StringToDouble(token, (char **) NULL); break; } } } } offset=GetXMLTreeChild(sop,"Offset"); if (offset != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(offset); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { GetMagickToken(p,&p,token); if (*token == ',') GetMagickToken(p,&p,token); switch (i) { case 0: { color_correction.red.offset=StringToDouble(token, (char **) NULL); break; } case 1: { color_correction.green.offset=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.offset=StringToDouble(token, (char **) NULL); break; } } } } power=GetXMLTreeChild(sop,"Power"); if (power != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(power); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { GetMagickToken(p,&p,token); if (*token == ',') GetMagickToken(p,&p,token); switch (i) { case 0: { color_correction.red.power=StringToDouble(token,(char **) NULL); break; } case 1: { color_correction.green.power=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.power=StringToDouble(token, (char **) NULL); break; } } } } } sat=GetXMLTreeChild(cc,"SATNode"); if (sat != (XMLTreeInfo *) NULL) { XMLTreeInfo *saturation; saturation=GetXMLTreeChild(sat,"Saturation"); if (saturation != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(saturation); p=(const char *) content; GetMagickToken(p,&p,token); color_correction.saturation=StringToDouble(token,(char **) NULL); } } ccc=DestroyXMLTree(ccc); if (image->debug != MagickFalse) { (void) LogMagickEvent(TransformEvent,GetMagickModule(), " Color Correction Collection:"); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.slope: %g",color_correction.red.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.offset: %g",color_correction.red.offset); (void) 
LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.power: %g",color_correction.red.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.slope: %g",color_correction.green.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.offset: %g",color_correction.green.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.power: %g",color_correction.green.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.slope: %g",color_correction.blue.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.offset: %g",color_correction.blue.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.power: %g",color_correction.blue.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.saturation: %g",color_correction.saturation); } cdl_map=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map)); if (cdl_map == (PixelPacket *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); for (i=0; i <= (ssize_t) MaxMap; i++) { cdl_map[i].red=ClampToQuantum((MagickRealType) ScaleMapToQuantum(( MagickRealType) (MaxMap*(pow(color_correction.red.slope*i/MaxMap+ color_correction.red.offset,color_correction.red.power))))); cdl_map[i].green=ClampToQuantum((MagickRealType) ScaleMapToQuantum(( MagickRealType) (MaxMap*(pow(color_correction.green.slope*i/MaxMap+ color_correction.green.offset,color_correction.green.power))))); cdl_map[i].blue=ClampToQuantum((MagickRealType) ScaleMapToQuantum(( MagickRealType) (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+ color_correction.blue.offset,color_correction.blue.power))))); } if (image->storage_class == PseudoClass) { /* Apply transfer function to colormap. 
*/ for (i=0; i < (ssize_t) image->colors; i++) { double luma; luma=0.212656*image->colormap[i].red+0.715158*image->colormap[i].green+ 0.072186*image->colormap[i].blue; image->colormap[i].red=ClampToQuantum(luma+color_correction.saturation* cdl_map[ScaleQuantumToMap(image->colormap[i].red)].red-luma); image->colormap[i].green=ClampToQuantum(luma+ color_correction.saturation*cdl_map[ScaleQuantumToMap( image->colormap[i].green)].green-luma); image->colormap[i].blue=ClampToQuantum(luma+color_correction.saturation* cdl_map[ScaleQuantumToMap(image->colormap[i].blue)].blue-luma); } } /* Apply transfer function to image. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double luma; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { luma=0.212656*GetPixelRed(q)+0.715158*GetPixelGreen(q)+ 0.072186*GetPixelBlue(q); SetPixelRed(q,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelRed(q))].red-luma))); SetPixelGreen(q,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelGreen(q))].green-luma))); SetPixelBlue(q,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelBlue(q))].blue-luma))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ColorDecisionListImageChannel) #endif 
proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag, progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); cdl_map=(PixelPacket *) RelinquishMagickMemory(cdl_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l u t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClutImage() replaces each color value in the given image, by using it as an % index to lookup a replacement color value in a Color Look UP Table in the % form of an image. The values are extracted along a diagonal of the CLUT % image so either a horizontal or vertial gradient image can be used. % % Typically this is used to either re-color a gray-scale image according to a % color gradient in the CLUT image, or to perform a freeform histogram % (level) adjustment according to the (typically gray-scale) gradient in the % CLUT image. % % When the 'channel' mask includes the matte/alpha transparency channel but % one image has no such channel it is assumed that that image is a simple % gray-scale image that will effect the alpha channel values, either for % gray-scale coloring (with transparent or semi-transparent colors), or % a histogram adjustment of existing alpha channel values. If both images % have matte channels, direct and normal indexing is applied, which is rarely % used. % % The format of the ClutImage method is: % % MagickBooleanType ClutImage(Image *image,Image *clut_image) % MagickBooleanType ClutImageChannel(Image *image, % const ChannelType channel,Image *clut_image) % % A description of each parameter follows: % % o image: the image, which is replaced by indexed CLUT values % % o clut_image: the color lookup table image for replacement color values. % % o channel: the channel. 
%
*/

/*
  ClutImage() applies the color lookup table to the default channels; thin
  wrapper around ClutImageChannel().
*/
MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image)
{
  return(ClutImageChannel(image,DefaultChannels,clut_image));
}

MagickExport MagickBooleanType ClutImageChannel(Image *image,
  const ChannelType channel,const Image *clut_image)
{
#define ClutImageTag  "Clut/Image"

  CacheView
    *clut_view,
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    *clut_map;

  register ssize_t
    i;

  ssize_t
    adjust,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clut_image != (Image *) NULL);
  assert(clut_image->signature == MagickSignature);
  /*
    The image is rewritten pixel-by-pixel, so it must be DirectClass.
  */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsGrayColorspace(clut_image->colorspace) == MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace);
  clut_map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*clut_map));
  if (clut_map == (MagickPixelPacket *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Clut image.
  */
  status=MagickTrue;
  progress=0;
  /*
    With integer interpolation sample the full [0..columns-1]/[0..rows-1]
    range; otherwise shrink the sampled span by one so the interpolator does
    not read past the CLUT edge.
  */
  adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1);
  exception=(&image->exception);
  clut_view=AcquireAuthenticCacheView(clut_image,exception);
  /*
    Pre-sample the CLUT along its diagonal into MaxMap+1 entries so the
    per-pixel work below is a simple table lookup.
  */
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    GetMagickPixelPacket(clut_image,clut_map+i);
    (void) InterpolateMagickPixelPacket(clut_image,clut_view,
      UndefinedInterpolatePixel,(double) i*(clut_image->columns-adjust)/MaxMap,
      (double) i*(clut_image->rows-adjust)/MaxMap,clut_map+i,exception);
  }
  clut_view=DestroyCacheView(clut_view);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    GetMagickPixelPacket(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Capture the original pixel before any channel is overwritten; the
        opacity branch below may need its pre-lookup intensity.
      */
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampPixelRed(clut_map+
          ScaleQuantumToMap(GetPixelRed(q))));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampPixelGreen(clut_map+
          ScaleQuantumToMap(GetPixelGreen(q))));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampPixelBlue(clut_map+
          ScaleQuantumToMap(GetPixelBlue(q))));
      if ((channel & OpacityChannel) != 0)
        {
          /*
            Three cases: CLUT has no matte (use CLUT intensity as alpha),
            image has no matte (index by the pixel's intensity), or both
            have matte (direct opacity lookup).
          */
          if (clut_image->matte == MagickFalse)
            SetPixelAlpha(q,MagickPixelIntensityToQuantum(clut_map+
              ScaleQuantumToMap((Quantum) GetPixelAlpha(q))));
          else
            if (image->matte == MagickFalse)
              SetPixelOpacity(q,ClampPixelOpacity(clut_map+
                ScaleQuantumToMap((Quantum) MagickPixelIntensity(&pixel))));
            else
              SetPixelOpacity(q,ClampPixelOpacity(
                clut_map+ScaleQuantumToMap(GetPixelOpacity(q))));
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampToQuantum((clut_map+(ssize_t)
          GetPixelIndex(indexes+x))->index));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ClutImageChannel)
#endif
        proceed=SetImageProgress(image,ClutImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  clut_map=(MagickPixelPacket *) RelinquishMagickMemory(clut_map);
  if ((clut_image->matte != MagickFalse) && ((channel & OpacityChannel) != 0))
    (void) SetImageAlphaChannel(image,ActivateAlphaChannel);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o n t r a s t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ContrastImage() enhances the intensity differences between the lighter and
%  darker elements of the image.  Set sharpen to a MagickTrue to increase the
%  image contrast otherwise the contrast is reduced.
%
%  The format of the ContrastImage method is:
%
%      MagickBooleanType ContrastImage(Image *image,
%        const MagickBooleanType sharpen)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o sharpen: Increase or decrease image contrast.
%
*/

/*
  Apply a sigmoidal (sine-shaped) push to the HSB brightness of one RGB
  triplet, in place.  sign=+1 sharpens (increases contrast), sign=-1 dulls.
*/
static void Contrast(const int sign,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    brightness,
    hue,
    saturation;

  /*
    Enhance contrast: dark color become darker, light color become lighter.
  */
  assert(red != (Quantum *) NULL);
  assert(green != (Quantum *) NULL);
  assert(blue != (Quantum *) NULL);
  hue=0.0;
  saturation=0.0;
  brightness=0.0;
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  /* move brightness toward the sine curve; hue/saturation are untouched */
  brightness+=0.5*sign*(0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)-
    brightness);
  if (brightness > 1.0)
    brightness=1.0;
  else
    if (brightness < 0.0)
      brightness=0.0;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}

MagickExport MagickBooleanType ContrastImage(Image *image,
  const MagickBooleanType sharpen)
{
#define ContrastImageTag  "Contrast/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  int
    sign;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  sign=sharpen != MagickFalse ? 1 : -1;
  if (image->storage_class == PseudoClass)
    {
      /*
        Contrast enhance colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
        Contrast(sign,&image->colormap[i].red,&image->colormap[i].green,
          &image->colormap[i].blue);
    }
  /*
    Contrast enhance image.
  */
  /* try the accelerated (OpenCL) path first; non-false means it handled it */
  status = AccelerateContrastImage(image, sharpen, &image->exception);
  if (status != MagickFalse)
    return status;
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      blue,
      green,
      red;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      red=GetPixelRed(q);
      green=GetPixelGreen(q);
      blue=GetPixelBlue(q);
      Contrast(sign,&red,&green,&blue);
      SetPixelRed(q,red);
      SetPixelGreen(q,green);
      SetPixelBlue(q,blue);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ContrastImage)
#endif
        proceed=SetImageProgress(image,ContrastImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o n t r a s t S t r e t c h I m a g e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ContrastStretchImage() is a simple image enhancement technique that attempts
%  to improve the contrast in an image by `stretching' the range of intensity
%  values it contains to span a desired range of values. It differs from the
%  more sophisticated histogram equalization in that it can only apply a
%  linear scaling function to the image pixel values.
As a result the % `enhancement' is less harsh. % % The format of the ContrastStretchImage method is: % % MagickBooleanType ContrastStretchImage(Image *image, % const char *levels) % MagickBooleanType ContrastStretchImageChannel(Image *image, % const size_t channel,const double black_point, % const double white_point) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o black_point: the black point. % % o white_point: the white point. % % o levels: Specify the levels where the black and white points have the % range of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.). % */ MagickExport MagickBooleanType ContrastStretchImage(Image *image, const char *levels) { double black_point, white_point; GeometryInfo geometry_info; MagickBooleanType status; MagickStatusType flags; /* Parse levels. */ if (levels == (char *) NULL) return(MagickFalse); flags=ParseGeometry(levels,&geometry_info); black_point=geometry_info.rho; white_point=(double) image->columns*image->rows; if ((flags & SigmaValue) != 0) white_point=geometry_info.sigma; if ((flags & PercentValue) != 0) { black_point*=(double) QuantumRange/100.0; white_point*=(double) QuantumRange/100.0; } if ((flags & SigmaValue) == 0) white_point=(double) image->columns*image->rows-black_point; status=ContrastStretchImageChannel(image,DefaultChannels,black_point, white_point); return(status); } MagickExport MagickBooleanType ContrastStretchImageChannel(Image *image, const ChannelType channel,const double black_point,const double white_point) { #define MaxRange(color) ((MagickRealType) ScaleQuantumToMap((Quantum) (color))) #define ContrastStretchImageTag "ContrastStretch/Image" CacheView *image_view; double intensity; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket black, *histogram, white; QuantumPixelPacket *stretch_map; register ssize_t i; ssize_t y; /* Allocate histogram and stretch map. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); /* Call OpenCL version */ status=AccelerateContrastStretchImageChannel(image,channel,black_point, white_point,&image->exception); if (status == MagickTrue) return status; histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*histogram)); stretch_map=(QuantumPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*stretch_map)); if ((histogram == (MagickPixelPacket *) NULL) || (stretch_map == (QuantumPixelPacket *) NULL)) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Form histogram. */ exception=(&image->exception); if (IsGrayImage(image,exception) != MagickFalse) (void) SetImageColorspace(image,GRAYColorspace); status=MagickTrue; (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram)); image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *restrict p; register IndexPacket *restrict indexes; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); if ((channel & SyncChannels) != 0) for (x=0; x < (ssize_t) image->columns; x++) { Quantum intensity; intensity=ClampToQuantum(GetPixelIntensity(image,p)); histogram[ScaleQuantumToMap(intensity)].red++; histogram[ScaleQuantumToMap(intensity)].green++; histogram[ScaleQuantumToMap(intensity)].blue++; histogram[ScaleQuantumToMap(intensity)].index++; p++; } else for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) histogram[ScaleQuantumToMap(GetPixelRed(p))].red++; if ((channel & GreenChannel) != 0) histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++; if ((channel & BlueChannel) != 
0) histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++; if ((channel & OpacityChannel) != 0) histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++; p++; } } /* Find the histogram boundaries by locating the black/white levels. */ black.red=0.0; white.red=MaxRange(QuantumRange); if ((channel & RedChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].red; if (intensity > black_point) break; } black.red=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].red; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.red=(MagickRealType) i; } black.green=0.0; white.green=MaxRange(QuantumRange); if ((channel & GreenChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].green; if (intensity > black_point) break; } black.green=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].green; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.green=(MagickRealType) i; } black.blue=0.0; white.blue=MaxRange(QuantumRange); if ((channel & BlueChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].blue; if (intensity > black_point) break; } black.blue=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].blue; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.blue=(MagickRealType) i; } black.opacity=0.0; white.opacity=MaxRange(QuantumRange); if ((channel & OpacityChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].opacity; if (intensity > black_point) break; } black.opacity=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { 
intensity+=histogram[i].opacity; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.opacity=(MagickRealType) i; } black.index=0.0; white.index=MaxRange(QuantumRange); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].index; if (intensity > black_point) break; } black.index=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].index; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.index=(MagickRealType) i; } histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram); /* Stretch the histogram to create the stretched image mapping. */ (void) ResetMagickMemory(stretch_map,0,(MaxMap+1)*sizeof(*stretch_map)); for (i=0; i <= (ssize_t) MaxMap; i++) { if ((channel & RedChannel) != 0) { if (i < (ssize_t) black.red) stretch_map[i].red=(Quantum) 0; else if (i > (ssize_t) white.red) stretch_map[i].red=QuantumRange; else if (black.red != white.red) stretch_map[i].red=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.red)/(white.red-black.red))); } if ((channel & GreenChannel) != 0) { if (i < (ssize_t) black.green) stretch_map[i].green=0; else if (i > (ssize_t) white.green) stretch_map[i].green=QuantumRange; else if (black.green != white.green) stretch_map[i].green=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.green)/(white.green-black.green))); } if ((channel & BlueChannel) != 0) { if (i < (ssize_t) black.blue) stretch_map[i].blue=0; else if (i > (ssize_t) white.blue) stretch_map[i].blue= QuantumRange; else if (black.blue != white.blue) stretch_map[i].blue=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.blue)/(white.blue-black.blue))); } if ((channel & OpacityChannel) != 0) { if (i < (ssize_t) black.opacity) stretch_map[i].opacity=0; else if (i > (ssize_t) white.opacity) stretch_map[i].opacity=QuantumRange; else if (black.opacity != 
white.opacity) stretch_map[i].opacity=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.opacity)/(white.opacity-black.opacity))); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { if (i < (ssize_t) black.index) stretch_map[i].index=0; else if (i > (ssize_t) white.index) stretch_map[i].index=QuantumRange; else if (black.index != white.index) stretch_map[i].index=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.index)/(white.index-black.index))); } } /* Stretch the image. */ if (((channel & OpacityChannel) != 0) || (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))) image->storage_class=DirectClass; if (image->storage_class == PseudoClass) { /* Stretch colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & RedChannel) != 0) { if (black.red != white.red) image->colormap[i].red=stretch_map[ ScaleQuantumToMap(image->colormap[i].red)].red; } if ((channel & GreenChannel) != 0) { if (black.green != white.green) image->colormap[i].green=stretch_map[ ScaleQuantumToMap(image->colormap[i].green)].green; } if ((channel & BlueChannel) != 0) { if (black.blue != white.blue) image->colormap[i].blue=stretch_map[ ScaleQuantumToMap(image->colormap[i].blue)].blue; } if ((channel & OpacityChannel) != 0) { if (black.opacity != white.opacity) image->colormap[i].opacity=stretch_map[ ScaleQuantumToMap(image->colormap[i].opacity)].opacity; } } } /* Stretch image. 
*/ status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) { if (black.red != white.red) SetPixelRed(q,stretch_map[ ScaleQuantumToMap(GetPixelRed(q))].red); } if ((channel & GreenChannel) != 0) { if (black.green != white.green) SetPixelGreen(q,stretch_map[ ScaleQuantumToMap(GetPixelGreen(q))].green); } if ((channel & BlueChannel) != 0) { if (black.blue != white.blue) SetPixelBlue(q,stretch_map[ ScaleQuantumToMap(GetPixelBlue(q))].blue); } if ((channel & OpacityChannel) != 0) { if (black.opacity != white.opacity) SetPixelOpacity(q,stretch_map[ ScaleQuantumToMap(GetPixelOpacity(q))].opacity); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { if (black.index != white.index) SetPixelIndex(indexes+x,stretch_map[ ScaleQuantumToMap(GetPixelIndex(indexes+x))].index); } q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ContrastStretchImageChannel) #endif proceed=SetImageProgress(image,ContrastStretchImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); stretch_map=(QuantumPixelPacket *) RelinquishMagickMemory(stretch_map); return(status); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E n h a n c e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EnhanceImage() applies a digital filter that improves the quality of a
%  noisy image.
%
%  The format of the EnhanceImage method is:
%
%      Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
{
/*
  Accumulate neighbor r into the weighted average, but only when its color
  distance to the center pixel is below threshold, then advance r.  Uses
  (and mutates) mean, distance, distance_squared, aggregate, total_weight,
  pixel from the enclosing scope.
*/
#define Enhance(weight) \
  mean=((MagickRealType) GetPixelRed(r)+pixel.red)/2; \
  distance=(MagickRealType) GetPixelRed(r)-(MagickRealType) pixel.red; \
  distance_squared=QuantumScale*(2.0*((MagickRealType) QuantumRange+1.0)+ \
    mean)*distance*distance; \
  mean=((MagickRealType) GetPixelGreen(r)+pixel.green)/2; \
  distance=(MagickRealType) GetPixelGreen(r)-(MagickRealType) pixel.green; \
  distance_squared+=4.0*distance*distance; \
  mean=((MagickRealType) GetPixelBlue(r)+pixel.blue)/2; \
  distance=(MagickRealType) GetPixelBlue(r)-(MagickRealType) pixel.blue; \
  distance_squared+=QuantumScale*(3.0*((MagickRealType) QuantumRange+1.0)-1.0- \
    mean)*distance*distance; \
  mean=((MagickRealType) r->opacity+pixel.opacity)/2; \
  distance=(MagickRealType) r->opacity-(MagickRealType) pixel.opacity; \
  distance_squared+=QuantumScale*(3.0*((MagickRealType) QuantumRange+1.0)-1.0- \
    mean)*distance*distance; \
  if (distance_squared < ((MagickRealType) QuantumRange*(MagickRealType) \
      QuantumRange/25.0f)) \
    { \
      aggregate.red+=(weight)*GetPixelRed(r); \
      aggregate.green+=(weight)*GetPixelGreen(r); \
      aggregate.blue+=(weight)*GetPixelBlue(r); \
      aggregate.opacity+=(weight)*GetPixelOpacity(r); \
      total_weight+=(weight); \
    } \
  r++;
#define EnhanceImageTag  "Enhance/Image"

  CacheView
    *enhance_view,
    *image_view;

  Image
    *enhance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  /*
    Initialize enhanced image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* the 5x5 filter window needs at least a 5x5 image */
  if ((image->columns < 5) || (image->rows < 5))
    return((Image *) NULL);
  enhance_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (enhance_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(enhance_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&enhance_image->exception);
      enhance_image=DestroyImage(enhance_image);
      return((Image *) NULL);
    }
  /*
    Enhance image.
  */
  status=MagickTrue;
  progress=0;
  (void) ResetMagickMemory(&zero,0,sizeof(zero));
  image_view=AcquireAuthenticCacheView(image,exception);
  enhance_view=AcquireAuthenticCacheView(enhance_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,enhance_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    /*
      Read another scan line.
    */
    if (status == MagickFalse)
      continue;
    /* fetch a 5-row window centered on y, padded 2 pixels on each side */
    p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception);
    q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickPixelPacket
        aggregate;

      MagickRealType
        distance,
        distance_squared,
        mean,
        total_weight;

      PixelPacket
        pixel;

      register const PixelPacket
        *restrict r;

      /*
        Compute weighted average of target pixel color components.
      */
      aggregate=zero;
      total_weight=0.0;
      /* center of the 5x5 window is the pixel being enhanced */
      r=p+2*(image->columns+4)+2;
      pixel=(*r);
      r=p;
      Enhance(5.0); Enhance(8.0); Enhance(10.0); Enhance(8.0); Enhance(5.0);
      r=p+(image->columns+4);
      Enhance(8.0); Enhance(20.0); Enhance(40.0); Enhance(20.0); Enhance(8.0);
      r=p+2*(image->columns+4);
      Enhance(10.0); Enhance(40.0); Enhance(80.0); Enhance(40.0); Enhance(10.0);
      r=p+3*(image->columns+4);
      Enhance(8.0); Enhance(20.0); Enhance(40.0); Enhance(20.0); Enhance(8.0);
      r=p+4*(image->columns+4);
      Enhance(5.0); Enhance(8.0); Enhance(10.0); Enhance(8.0); Enhance(5.0);
      SetPixelRed(q,(aggregate.red+(total_weight/2)-1)/total_weight);
      SetPixelGreen(q,(aggregate.green+(total_weight/2)-1)/
        total_weight);
      SetPixelBlue(q,(aggregate.blue+(total_weight/2)-1)/total_weight);
      SetPixelOpacity(q,(aggregate.opacity+(total_weight/2)-1)/
        total_weight);
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_EnhanceImage)
#endif
        proceed=SetImageProgress(image,EnhanceImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  enhance_view=DestroyCacheView(enhance_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    enhance_image=DestroyImage(enhance_image);
  return(enhance_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E q u a l i z e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EqualizeImage() applies a histogram equalization to the image.
%
%  The format of the EqualizeImage method is:
%
%      MagickBooleanType EqualizeImage(Image *image)
%      MagickBooleanType EqualizeImageChannel(Image *image,
%        const ChannelType channel)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
% */ MagickExport MagickBooleanType EqualizeImage(Image *image) { return(EqualizeImageChannel(image,DefaultChannels)); } MagickExport MagickBooleanType EqualizeImageChannel(Image *image, const ChannelType channel) { #define EqualizeImageTag "Equalize/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket black, *histogram, intensity, *map, white; QuantumPixelPacket *equalize_map; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); /* Call OpenCL version */ status = AccelerateEqualizeImage(image, channel, &image->exception); if (status != MagickFalse) return status; /* Allocate and initialize histogram arrays. */ equalize_map=(QuantumPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*equalize_map)); histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*histogram)); map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*map)); if ((equalize_map == (QuantumPixelPacket *) NULL) || (histogram == (MagickPixelPacket *) NULL) || (map == (MagickPixelPacket *) NULL)) { if (map != (MagickPixelPacket *) NULL) map=(MagickPixelPacket *) RelinquishMagickMemory(map); if (histogram != (MagickPixelPacket *) NULL) histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram); if (equalize_map != (QuantumPixelPacket *) NULL) equalize_map=(QuantumPixelPacket *) RelinquishMagickMemory( equalize_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } /* Form histogram. 
*/ (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram)); exception=(&image->exception); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); if ((channel & SyncChannels) != 0) for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType intensity=GetPixelIntensity(image,p); histogram[ScaleQuantumToMap(ClampToQuantum(intensity))].red++; p++; } else for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) histogram[ScaleQuantumToMap(GetPixelRed(p))].red++; if ((channel & GreenChannel) != 0) histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++; if ((channel & BlueChannel) != 0) histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++; if ((channel & OpacityChannel) != 0) histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++; p++; } } image_view=DestroyCacheView(image_view); /* Integrate the histogram to get the equalization map. 
*/ (void) ResetMagickMemory(&intensity,0,sizeof(intensity)); for (i=0; i <= (ssize_t) MaxMap; i++) { if ((channel & SyncChannels) != 0) { intensity.red+=histogram[i].red; map[i]=intensity; continue; } if ((channel & RedChannel) != 0) intensity.red+=histogram[i].red; if ((channel & GreenChannel) != 0) intensity.green+=histogram[i].green; if ((channel & BlueChannel) != 0) intensity.blue+=histogram[i].blue; if ((channel & OpacityChannel) != 0) intensity.opacity+=histogram[i].opacity; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) intensity.index+=histogram[i].index; map[i]=intensity; } black=map[0]; white=map[(int) MaxMap]; (void) ResetMagickMemory(equalize_map,0,(MaxMap+1)*sizeof(*equalize_map)); for (i=0; i <= (ssize_t) MaxMap; i++) { if ((channel & SyncChannels) != 0) { if (white.red != black.red) equalize_map[i].red=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].red-black.red))/(white.red-black.red))); continue; } if (((channel & RedChannel) != 0) && (white.red != black.red)) equalize_map[i].red=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].red-black.red))/(white.red-black.red))); if (((channel & GreenChannel) != 0) && (white.green != black.green)) equalize_map[i].green=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].green-black.green))/(white.green-black.green))); if (((channel & BlueChannel) != 0) && (white.blue != black.blue)) equalize_map[i].blue=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].blue-black.blue))/(white.blue-black.blue))); if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity)) equalize_map[i].opacity=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].opacity-black.opacity))/(white.opacity-black.opacity))); if ((((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) && (white.index != black.index)) equalize_map[i].index=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].index-black.index))/(white.index-black.index))); } histogram=(MagickPixelPacket *) 
RelinquishMagickMemory(histogram); map=(MagickPixelPacket *) RelinquishMagickMemory(map); if (image->storage_class == PseudoClass) { /* Equalize colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & SyncChannels) != 0) { if (white.red != black.red) { image->colormap[i].red=equalize_map[ ScaleQuantumToMap(image->colormap[i].red)].red; image->colormap[i].green=equalize_map[ ScaleQuantumToMap(image->colormap[i].green)].red; image->colormap[i].blue=equalize_map[ ScaleQuantumToMap(image->colormap[i].blue)].red; image->colormap[i].opacity=equalize_map[ ScaleQuantumToMap(image->colormap[i].opacity)].red; } continue; } if (((channel & RedChannel) != 0) && (white.red != black.red)) image->colormap[i].red=equalize_map[ ScaleQuantumToMap(image->colormap[i].red)].red; if (((channel & GreenChannel) != 0) && (white.green != black.green)) image->colormap[i].green=equalize_map[ ScaleQuantumToMap(image->colormap[i].green)].green; if (((channel & BlueChannel) != 0) && (white.blue != black.blue)) image->colormap[i].blue=equalize_map[ ScaleQuantumToMap(image->colormap[i].blue)].blue; if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity)) image->colormap[i].opacity=equalize_map[ ScaleQuantumToMap(image->colormap[i].opacity)].opacity; } } /* Equalize image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & SyncChannels) != 0) { if (white.red != black.red) { SetPixelRed(q,equalize_map[ ScaleQuantumToMap(GetPixelRed(q))].red); SetPixelGreen(q,equalize_map[ ScaleQuantumToMap(GetPixelGreen(q))].red); SetPixelBlue(q,equalize_map[ ScaleQuantumToMap(GetPixelBlue(q))].red); SetPixelOpacity(q,equalize_map[ ScaleQuantumToMap(GetPixelOpacity(q))].red); if (image->colorspace == CMYKColorspace) SetPixelIndex(indexes+x,equalize_map[ ScaleQuantumToMap(GetPixelIndex(indexes+x))].red); } q++; continue; } if (((channel & RedChannel) != 0) && (white.red != black.red)) SetPixelRed(q,equalize_map[ ScaleQuantumToMap(GetPixelRed(q))].red); if (((channel & GreenChannel) != 0) && (white.green != black.green)) SetPixelGreen(q,equalize_map[ ScaleQuantumToMap(GetPixelGreen(q))].green); if (((channel & BlueChannel) != 0) && (white.blue != black.blue)) SetPixelBlue(q,equalize_map[ ScaleQuantumToMap(GetPixelBlue(q))].blue); if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity)) SetPixelOpacity(q,equalize_map[ ScaleQuantumToMap(GetPixelOpacity(q))].opacity); if ((((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) && (white.index != black.index)) SetPixelIndex(indexes+x,equalize_map[ ScaleQuantumToMap(GetPixelIndex(indexes+x))].index); q++; } if 
(SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_EqualizeImageChannel) #endif proceed=SetImageProgress(image,EqualizeImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); equalize_map=(QuantumPixelPacket *) RelinquishMagickMemory(equalize_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GammaImage() gamma-corrects a particular image channel. The same % image viewed on different devices will have perceptual differences in the % way the image's intensities are represented on the screen. Specify % individual gamma levels for the red, green, and blue channels, or adjust % all three with the gamma parameter. Values typically range from 0.8 to 2.3. % % You can also reduce the influence of a particular channel with a gamma % value of 0. % % The format of the GammaImage method is: % % MagickBooleanType GammaImage(Image *image,const char *level) % MagickBooleanType GammaImageChannel(Image *image, % const ChannelType channel,const double gamma) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o level: the image gamma as a string (e.g. 1.6,1.2,1.0). % % o gamma: the image gamma. % */ static inline double gamma_pow(const double value,const double gamma) { return(value < 0.0 ? 
value : pow(value,gamma));
}  /* end of gamma_pow() (declared above): pow() that passes negatives through */

/*
  GammaImage() gamma-corrects the image.  The level string has the form
  "red[,green[,blue]]" (e.g. "1.6,1.2,1.0"); omitted values default to the
  red gamma.  Returns MagickFalse if level is NULL or on channel failure.
*/
MagickExport MagickBooleanType GammaImage(Image *image,const char *level)
{
  GeometryInfo
    geometry_info;

  MagickPixelPacket
    gamma;

  MagickStatusType
    flags,
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (level == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(level,&geometry_info);
  gamma.red=geometry_info.rho;
  gamma.green=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    gamma.green=gamma.red;  /* missing values inherit the red gamma */
  gamma.blue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    gamma.blue=gamma.red;
  if ((gamma.red == 1.0) && (gamma.green == 1.0) && (gamma.blue == 1.0))
    return(MagickTrue);  /* identity gamma: nothing to do */
  if ((gamma.red == gamma.green) && (gamma.green == gamma.blue))
    status=GammaImageChannel(image,(ChannelType) (RedChannel | GreenChannel |
      BlueChannel),(double) gamma.red);
  else
    {
      /* distinct per-channel gammas: three separate passes */
      status=GammaImageChannel(image,RedChannel,(double) gamma.red);
      status&=GammaImageChannel(image,GreenChannel,(double) gamma.green);
      status&=GammaImageChannel(image,BlueChannel,(double) gamma.blue);
    }
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
  GammaImageChannel() gamma-corrects the selected channels.  In the
  non-HDRI build a MaxMap-entry lookup table is precomputed; the HDRI
  build evaluates gamma_pow() per sample.  A gamma of 0.0 leaves the
  lookup table zeroed, mapping the channel to black (documented behavior).
*/
MagickExport MagickBooleanType GammaImageChannel(Image *image,
  const ChannelType channel,const double gamma)
{
#define GammaCorrectImageTag  "GammaCorrect/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    *gamma_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize gamma maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (gamma == 1.0)
    return(MagickTrue);  /* identity: no work */
  gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map));
  if (gamma_map == (Quantum *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) ResetMagickMemory(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map));
  if (gamma != 0.0)
    for (i=0; i <= (ssize_t) MaxMap; i++)
      gamma_map[i]=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
        MagickRealType) (MaxMap*pow((double) i/MaxMap,1.0/gamma))));
  if (image->storage_class == PseudoClass)
    {
      /*
        Gamma-correct colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
#if !defined(MAGICKCORE_HDRI_SUPPORT)
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=gamma_map[ScaleQuantumToMap(
            image->colormap[i].red)];
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=gamma_map[ScaleQuantumToMap(
            image->colormap[i].green)];
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=gamma_map[ScaleQuantumToMap(
            image->colormap[i].blue)];
        if ((channel & OpacityChannel) != 0)
          {
            if (image->matte == MagickFalse)
              image->colormap[i].opacity=gamma_map[ScaleQuantumToMap(
                image->colormap[i].opacity)];
            else
              /* matte images: correct alpha (QuantumRange-opacity), then
                 convert back to opacity */
              image->colormap[i].opacity=QuantumRange-gamma_map[
                ScaleQuantumToMap((Quantum) (QuantumRange-
                image->colormap[i].opacity))];
          }
#else
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=QuantumRange*gamma_pow(QuantumScale*
            image->colormap[i].red,1.0/gamma);
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=QuantumRange*gamma_pow(QuantumScale*
            image->colormap[i].green,1.0/gamma);
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=QuantumRange*gamma_pow(QuantumScale*
            image->colormap[i].blue,1.0/gamma);
        if ((channel & OpacityChannel) != 0)
          {
            if (image->matte == MagickFalse)
              image->colormap[i].opacity=QuantumRange*gamma_pow(QuantumScale*
                image->colormap[i].opacity,1.0/gamma);
            else
              image->colormap[i].opacity=QuantumRange-QuantumRange*gamma_pow(
                QuantumScale*(QuantumRange-image->colormap[i].opacity),1.0/
                gamma);
          }
#endif
      }
    }
  /*
    Gamma-correct image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
#if !defined(MAGICKCORE_HDRI_SUPPORT)
      if ((channel & SyncChannels) != 0)
        {
          /* all channels share the same table lookup */
          SetPixelRed(q,gamma_map[ScaleQuantumToMap(GetPixelRed(q))]);
          SetPixelGreen(q,gamma_map[ScaleQuantumToMap(GetPixelGreen(q))]);
          SetPixelBlue(q,gamma_map[ScaleQuantumToMap(GetPixelBlue(q))]);
        }
      else
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,gamma_map[ScaleQuantumToMap(GetPixelRed(q))]);
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,gamma_map[ScaleQuantumToMap(GetPixelGreen(q))]);
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,gamma_map[ScaleQuantumToMap(GetPixelBlue(q))]);
          if ((channel & OpacityChannel) != 0)
            {
              if (image->matte == MagickFalse)
                SetPixelOpacity(q,gamma_map[ScaleQuantumToMap(
                  GetPixelOpacity(q))]);
              else
                SetPixelAlpha(q,gamma_map[ScaleQuantumToMap((Quantum)
                  GetPixelAlpha(q))]);
            }
        }
#else
      if ((channel & SyncChannels) != 0)
        {
          SetPixelRed(q,QuantumRange*gamma_pow(QuantumScale*GetPixelRed(q),
            1.0/gamma));
          SetPixelGreen(q,QuantumRange*gamma_pow(QuantumScale*GetPixelGreen(q),
            1.0/gamma));
          SetPixelBlue(q,QuantumRange*gamma_pow(QuantumScale*GetPixelBlue(q),
            1.0/gamma));
        }
      else
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,QuantumRange*gamma_pow(QuantumScale*GetPixelRed(q),
              1.0/gamma));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,QuantumRange*gamma_pow(QuantumScale*
              GetPixelGreen(q),1.0/gamma));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,QuantumRange*gamma_pow(QuantumScale*GetPixelBlue(q),
              1.0/gamma));
          if ((channel & OpacityChannel) != 0)
            {
              if (image->matte == MagickFalse)
                SetPixelOpacity(q,QuantumRange*gamma_pow(QuantumScale*
                  GetPixelOpacity(q),1.0/gamma));
              else
                SetPixelAlpha(q,QuantumRange*gamma_pow(QuantumScale*
                  GetPixelAlpha(q),1.0/gamma));
            }
        }
#endif
      q++;
    }
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      for (x=0; x < (ssize_t) image->columns; x++)
        SetPixelIndex(indexes+x,gamma_map[ScaleQuantumToMap(
          GetPixelIndex(indexes+x))]);
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_GammaImageChannel)
#endif
        proceed=SetImageProgress(image,GammaCorrectImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
  if (image->gamma != 0.0)
    image->gamma*=gamma;  /* track cumulative gamma in the image metadata */
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%     G r a y s c a l e I m a g e                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GrayscaleImage() converts the colors in the reference image to gray.
%
%  The format of the GrayscaleImageChannel method is:
%
%      MagickBooleanType GrayscaleImage(Image *image,
%        const PixelIntensityMethod method)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
*/

/* Branch-based max of two reals (local helper; avoids macro double-eval). */
static inline MagickRealType MagickMax(const MagickRealType x,
  const MagickRealType y)
{
  if (x > y)
    return(x);
  return(y);
}

/* Branch-based min of two reals (local helper). */
static inline MagickRealType MagickMin(const MagickRealType x,
  const MagickRealType y)
{
  if (x < y)
    return(x);
  return(y);
}

/*
  GrayscaleImage() reduces each pixel to a single intensity computed by the
  requested PixelIntensityMethod, then switches the image to the GRAY
  colorspace.  Tries the OpenCL-accelerated path first and only falls back
  to the CPU loop when it declines.
*/
MagickExport MagickBooleanType GrayscaleImage(Image *image,
  const PixelIntensityMethod method)
{
#define GrayscaleImageTag  "Grayscale/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /* flatten the colormap so pixels can be rewritten directly */
      if (SyncImage(image) == MagickFalse)
        return(MagickFalse);
      if (SetImageStorageClass(image,DirectClass) == MagickFalse)
        return(MagickFalse);
    }
  /*
    Grayscale image.
  */
  /* call opencl version */
  status = AccelerateGrayscaleImage(image, method, &image->exception);
  if (status == MagickTrue)
    return status;
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        blue,
        green,
        intensity,
        red;

      red=(MagickRealType) q->red;
      green=(MagickRealType) q->green;
      blue=(MagickRealType) q->blue;
      intensity=0.0;
      switch (method)
      {
        case AveragePixelIntensityMethod:
        {
          intensity=(red+green+blue)/3.0;
          break;
        }
        case BrightnessPixelIntensityMethod:
        {
          /* HSB brightness: max component */
          intensity=MagickMax(MagickMax(red,green),blue);
          break;
        }
        case LightnessPixelIntensityMethod:
        {
          /* HSL lightness: midpoint of min and max components */
          intensity=(MagickMin(MagickMin(red,green),blue)+
            MagickMax(MagickMax(red,green),blue))/2.0;
          break;
        }
        case MSPixelIntensityMethod:
        {
          /* mean of squares, rescaled into quantum range */
          intensity=(MagickRealType) (((double) red*red+green*green+
            blue*blue)/(3.0*QuantumRange));
          break;
        }
        case Rec601LumaPixelIntensityMethod:
        {
          /* luma operates on gamma-encoded samples */
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec601LuminancePixelIntensityMethod:
        {
          /* luminance operates on linear samples */
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec709LumaPixelIntensityMethod:
        default:
        {
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case Rec709LuminancePixelIntensityMethod:
        {
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case RMSPixelIntensityMethod:
        {
          intensity=(MagickRealType) (sqrt((double) red*red+green*green+
            blue*blue)/sqrt(3.0));
          break;
        }
      }
      SetPixelGray(q,ClampToQuantum(intensity));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_GrayscaleImageChannel)
#endif
        proceed=SetImageProgress(image,GrayscaleImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  image->intensity=method;
  image->type=GrayscaleType;
  return(SetImageColorspace(image,GRAYColorspace));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % H a l d C l u t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % HaldClutImage() applies a Hald color lookup table to the image. A Hald % color lookup table is a 3-dimensional color cube mapped to 2 dimensions. % Create it with the HALD coder. You can apply any color transformation to % the Hald image and then use this method to apply the transform to the % image. % % The format of the HaldClutImage method is: % % MagickBooleanType HaldClutImage(Image *image,Image *hald_image) % MagickBooleanType HaldClutImageChannel(Image *image, % const ChannelType channel,Image *hald_image) % % A description of each parameter follows: % % o image: the image, which is replaced by indexed CLUT values % % o hald_image: the color lookup table image for replacement color values. % % o channel: the channel. % */ MagickExport MagickBooleanType HaldClutImage(Image *image, const Image *hald_image) { return(HaldClutImageChannel(image,DefaultChannels,hald_image)); } MagickExport MagickBooleanType HaldClutImageChannel(Image *image, const ChannelType channel,const Image *hald_image) { #define HaldClutImageTag "Clut/Image" typedef struct _HaldInfo { MagickRealType x, y, z; } HaldInfo; CacheView *hald_view, *image_view; double width; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; size_t cube_size, length, level; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(hald_image != (Image *) NULL); assert(hald_image->signature == MagickSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace); if 
(image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); /* Hald clut image. */ status=MagickTrue; progress=0; length=(size_t) MagickMin((MagickRealType) hald_image->columns, (MagickRealType) hald_image->rows); for (level=2; (level*level*level) < length; level++) ; level*=level; cube_size=level*level; width=(double) hald_image->columns; GetMagickPixelPacket(hald_image,&zero); exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); hald_view=AcquireAuthenticCacheView(hald_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,hald_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double offset; HaldInfo point; MagickPixelPacket pixel, pixel1, pixel2, pixel3, pixel4; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(hald_view); pixel=zero; pixel1=zero; pixel2=zero; pixel3=zero; pixel4=zero; for (x=0; x < (ssize_t) image->columns; x++) { point.x=QuantumScale*(level-1.0)*GetPixelRed(q); point.y=QuantumScale*(level-1.0)*GetPixelGreen(q); point.z=QuantumScale*(level-1.0)*GetPixelBlue(q); offset=(double) (point.x+level*floor(point.y)+cube_size*floor(point.z)); point.x-=floor(point.x); point.y-=floor(point.y); point.z-=floor(point.z); (void) InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width), &pixel1,exception); (void) InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset+level,width),floor((offset+level)/ width),&pixel2,exception); MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2, pixel2.opacity,point.y,&pixel3); offset+=cube_size; (void) 
InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width), &pixel1,exception); (void) InterpolateMagickPixelPacket(image,hald_view, UndefinedInterpolatePixel,fmod(offset+level,width),floor((offset+level)/ width),&pixel2,exception); MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2, pixel2.opacity,point.y,&pixel4); MagickPixelCompositeAreaBlend(&pixel3,pixel3.opacity,&pixel4, pixel4.opacity,point.z,&pixel); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(pixel.blue)); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum(pixel.index)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_HaldClutImageChannel) #endif proceed=SetImageProgress(image,HaldClutImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } hald_view=DestroyCacheView(hald_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelImage() adjusts the levels of a particular image channel by % scaling the colors falling between specified white and black points to % the full available quantum range. % % The parameters provided represent the black, and white points. The black % point specifies the darkest color in the image. 
Colors darker than the
%  black point are set to zero.  White point specifies the lightest color in
%  the image.  Colors brighter than the white point are set to the maximum
%  quantum value.
%
%  If a '!' flag is given, map black and white colors to the given levels
%  rather than mapping those levels to black and white.  See
%  LevelizeImageChannel() and LevelizeImageChannel(), below.
%
%  Gamma specifies a gamma correction to apply to the image.
%
%  The format of the LevelImage method is:
%
%      MagickBooleanType LevelImage(Image *image,const char *levels)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o levels: Specify the levels where the black and white points have the
%      range of 0-QuantumRange, and gamma has the range 0-10 (e.g. 10x90%+2).
%      A '!' flag inverts the re-mapping.
%
*/

/*
  LevelImage(): parse a "black[,white][,gamma][%][!]" levels string and
  dispatch to LevelImageChannel() (or LevelizeImage() when '!' is given).
  Returns MagickFalse if levels is NULL.
*/
MagickExport MagickBooleanType LevelImage(Image *image,const char *levels)
{
  double
    black_point,
    gamma,
    white_point;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Parse levels.
  */
  if (levels == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(levels,&geometry_info);
  black_point=geometry_info.rho;
  white_point=(double) QuantumRange;
  if ((flags & SigmaValue) != 0)
    white_point=geometry_info.sigma;
  gamma=1.0;
  if ((flags & XiValue) != 0)
    gamma=geometry_info.xi;
  if ((flags & PercentValue) != 0)
    {
      /*
        BUGFIX: level points are pixel values, so a '%' suffix means percent
        of the quantum range, not percent of the pixel count
        (columns*rows/100 belongs to histogram-based stretches).
      */
      black_point*=(double) QuantumRange/100.0;
      white_point*=(double) QuantumRange/100.0;
    }
  if ((flags & SigmaValue) == 0)
    white_point=(double) QuantumRange-black_point;  /* symmetric default */
  if ((flags & AspectValue) == 0)
    status=LevelImageChannel(image,DefaultChannels,black_point,white_point,
      gamma);
  else
    status=LevelizeImage(image,black_point,white_point,gamma);  /* '!' flag */
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%     L e v e l I m a g e                                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelImage() applies the normal level operation to the image, spreading
%  out the values between the black and white points over the entire range of
%  values.  Gamma correction is also applied after the values has been mapped.
%
%  It is typically used to improve image contrast, or to provide a controlled
%  linear threshold for the image.  If the black and white points are set to
%  the minimum and maximum values found in the image, the image can be
%  normalized.  or by swapping black and white values, negate the image.
%
%  The format of the LevelImage method is:
%
%      MagickBooleanType LevelImage(Image *image,const double black_point,
%        const double white_point,const double gamma)
%      MagickBooleanType LevelImageChannel(Image *image,
%        const ChannelType channel,const double black_point,
%        const double white_point,const double gamma)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o black_point: The level which is to be mapped to zero (black)
%
%    o white_point: The level which is to be mapped to QuantiumRange (white)
%
%    o gamma: adjust gamma by this factor before mapping values.
%      use 1.0 for purely linear stretching of image color values
%
*/

/*
  Map one sample: rescale [black_point,white_point] onto [0,QuantumRange]
  and apply gamma.  Equal black/white points degenerate to scale 1.0 to
  avoid division by zero; result is unclamped (callers clamp).
*/
static inline double LevelPixel(const double black_point,
  const double white_point,const double gamma,const MagickRealType pixel)
{
  double
    level_pixel,
    scale;

  scale=(white_point != black_point) ? 1.0/(white_point-black_point) : 1.0;
  level_pixel=QuantumRange*gamma_pow(scale*((double) pixel-black_point),1.0/
    gamma);
  return(level_pixel);
}

/*
  LevelImageChannel() stretches the selected channels so that black_point
  maps to 0 and white_point maps to QuantumRange, applying gamma after the
  mapping; PseudoClass colormaps are leveled in place first.
*/
MagickExport MagickBooleanType LevelImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point,
  const double gamma)
{
#define LevelImageTag  "Level/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
      */
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=(Quantum) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,(MagickRealType) image->colormap[i].red));
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=(Quantum) ClampToQuantum(LevelPixel(
          black_point,white_point,gamma,(MagickRealType)
          image->colormap[i].green));
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=(Quantum) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,(MagickRealType) image->colormap[i].blue));
      if ((channel & OpacityChannel) != 0)
        /* level in alpha space, then convert back to opacity */
        image->colormap[i].opacity=(Quantum) (QuantumRange-(Quantum)
          ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) (QuantumRange-image->colormap[i].opacity))));
    }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelRed(q))));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelGreen(q))));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelBlue(q))));
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        SetPixelAlpha(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelAlpha(q))));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,(MagickRealType) GetPixelIndex(indexes+x))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_LevelImageChannel)
#endif
        proceed=SetImageProgress(image,LevelImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  (void) ClampImage(image);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%     L e v e l i z e I m a g e C h a n n e l                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelizeImageChannel() applies the reversed LevelImage() operation to just
%  the specific channels specified.  It compresses the full range of color
%  values, so that they lie between the given black and white points.  Gamma is
%  applied before the values are mapped.
%
%  LevelizeImageChannel() can be called with by using a +level command line
%  API option, or using a '!' on a -level or LevelImage() geometry string.
%
%  It can be used for example de-contrast a greyscale image to the exact
%  levels specified.  Or by using specific levels for each channel of an image
%  you can convert a gray-scale image to any linear color gradient, according
%  to those levels.
%
%  The format of the LevelizeImageChannel method is:
%
%      MagickBooleanType LevelizeImageChannel(Image *image,
%        const ChannelType channel,const char *levels)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o black_point: The level to map zero (black) to.
%
%    o white_point: The level to map QuantumRange (white) to.
%
%    o gamma: adjust gamma by this factor before mapping values.
%
*/

/* Convenience wrapper: levelize all default channels of the image. */
MagickExport MagickBooleanType LevelizeImage(Image *image,
  const double black_point,const double white_point,const double gamma)
{
  MagickBooleanType
    status;

  status=LevelizeImageChannel(image,DefaultChannels,black_point,white_point,
    gamma);
  return(status);
}

/*
  Reverse of LevelImageChannel(): compress the full value range of the
  selected channels so it lies between black_point and white_point, applying
  gamma before the values are mapped.
*/
MagickExport MagickBooleanType LevelizeImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point,
  const double gamma)
{
#define LevelizeImageTag  "Levelize/Image"
/* Map x through gamma, then linearly into [black_point,white_point]. */
#define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \
  (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point)

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
      */
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=LevelizeValue(image->colormap[i].red);
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=LevelizeValue(image->colormap[i].green);
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=LevelizeValue(image->colormap[i].blue);
      /* Opacity is levelized in alpha space (QuantumRange-opacity). */
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=(Quantum) (QuantumRange-LevelizeValue(
          QuantumRange-image->colormap[i].opacity));
    }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,LevelizeValue(GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,LevelizeValue(GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,LevelizeValue(GetPixelBlue(q)));
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        SetPixelAlpha(q,LevelizeValue(GetPixelAlpha(q)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,LevelizeValue(GetPixelIndex(indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_LevelizeImageChannel)
#endif
        proceed=SetImageProgress(image,LevelizeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l I m a g e C o l o r s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelImageColor() maps the given color to "black" and "white" values,
%  linearly spreading out
%  the colors, and level values on a channel by channel
%  basis, as per LevelImage().  The given colors allows you to specify
%  different level ranges for each of the color channels separately.
%
%  If the boolean 'invert' is set true the image values will be modified in
%  the reverse direction. That is any existing "black" and "white" colors in
%  the image will become the color values given, with all other values
%  compressed appropriately.  This effectively maps a greyscale gradient into
%  the given color gradient.
%
%  The format of the LevelColorsImageChannel method is:
%
%    MagickBooleanType LevelColorsImage(Image *image,
%      const MagickPixelPacket *black_color,
%      const MagickPixelPacket *white_color,const MagickBooleanType invert)
%    MagickBooleanType LevelColorsImageChannel(Image *image,
%      const ChannelType channel,const MagickPixelPacket *black_color,
%      const MagickPixelPacket *white_color,const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o black_color: The color to map black to/from
%
%    o white_color: The color to map white to/from
%
%    o invert: if true map the colors (levelize), rather than from (level)
%
*/

/* Convenience wrapper: level/levelize colors on all default channels. */
MagickExport MagickBooleanType LevelColorsImage(Image *image,
  const MagickPixelPacket *black_color,const MagickPixelPacket *white_color,
  const MagickBooleanType invert)
{
  MagickBooleanType
    status;

  status=LevelColorsImageChannel(image,DefaultChannels,black_color,white_color,
    invert);
  return(status);
}

/*
  Per-channel level (invert == MagickFalse) or levelize (invert == MagickTrue)
  using the corresponding components of black_color/white_color as the black
  and white points; gamma is fixed at 1.0.
*/
MagickExport MagickBooleanType LevelColorsImageChannel(Image *image,
  const ChannelType channel,const MagickPixelPacket *black_color,
  const MagickPixelPacket *white_color,const MagickBooleanType invert)
{
  MagickStatusType
    status;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* A gray image with non-gray reference colors must be promoted to sRGB. */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsGrayColorspace(black_color->colorspace) == MagickFalse) ||
       (IsGrayColorspace(white_color->colorspace) == MagickFalse)))
    (void) SetImageColorspace(image,sRGBColorspace);
  status=MagickTrue;
  if (invert == MagickFalse)
    {
      if ((channel & RedChannel) != 0)
        status&=LevelImageChannel(image,RedChannel,black_color->red,
          white_color->red,(double) 1.0);
      if ((channel & GreenChannel) != 0)
        status&=LevelImageChannel(image,GreenChannel,black_color->green,
          white_color->green,(double) 1.0);
      if ((channel & BlueChannel) != 0)
        status&=LevelImageChannel(image,BlueChannel,black_color->blue,
          white_color->blue,(double) 1.0);
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        status&=LevelImageChannel(image,OpacityChannel,black_color->opacity,
          white_color->opacity,(double) 1.0);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        status&=LevelImageChannel(image,IndexChannel,black_color->index,
          white_color->index,(double) 1.0);
    }
  else
    {
      if ((channel & RedChannel) != 0)
        status&=LevelizeImageChannel(image,RedChannel,black_color->red,
          white_color->red,(double) 1.0);
      if ((channel & GreenChannel) != 0)
        status&=LevelizeImageChannel(image,GreenChannel,black_color->green,
          white_color->green,(double) 1.0);
      if ((channel & BlueChannel) != 0)
        status&=LevelizeImageChannel(image,BlueChannel,black_color->blue,
          white_color->blue,(double) 1.0);
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        status&=LevelizeImageChannel(image,OpacityChannel,black_color->opacity,
          white_color->opacity,(double) 1.0);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        status&=LevelizeImageChannel(image,IndexChannel,black_color->index,
          white_color->index,(double) 1.0);
    }
  return(status ==
0 ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L i n e a r S t r e t c h I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LinearStretchImage() discards any pixels below the black point and above
%  the white point and levels the remaining pixels.
%
%  The format of the LinearStretchImage method is:
%
%    MagickBooleanType LinearStretchImage(Image *image,
%      const double black_point,const double white_point)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o black_point: the black point.
%
%    o white_point: the white point.
%
*/
MagickExport MagickBooleanType LinearStretchImage(Image *image,
  const double black_point,const double white_point)
{
#define LinearStretchImageTag  "LinearStretch/Image"

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickRealType
    *histogram,
    intensity;

  ssize_t
    black,
    white,
    y;

  /*
    Allocate histogram and linear map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  histogram=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*histogram));
  if (histogram == (MagickRealType *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Form histogram of pixel intensities.
  */
  (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram));
  exception=(&image->exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=(ssize_t) image->columns-1; x >= 0; x--)
    {
      histogram[ScaleQuantumToMap(ClampToQuantum(GetPixelIntensity(image,
        p)))]++;
      p++;
    }
  }
  /*
    Find the histogram boundaries by locating the black and white point
    levels: walk inward from each end until the requested pixel count is
    accumulated.
  */
  intensity=0.0;
  for (black=0; black < (ssize_t) MaxMap; black++)
  {
    intensity+=histogram[black];
    if (intensity >= black_point)
      break;
  }
  intensity=0.0;
  for (white=(ssize_t) MaxMap; white != 0; white--)
  {
    intensity+=histogram[white];
    if (intensity >= white_point)
      break;
  }
  histogram=(MagickRealType *) RelinquishMagickMemory(histogram);
  status=LevelImageChannel(image,DefaultChannels,(double) black,(double) white,
    1.0);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o d u l a t e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ModulateImage() lets you control the brightness, saturation, and hue
%  of an image.  Modulate represents the brightness, saturation, and hue
%  as one parameter (e.g. 90,150,100).  If the image colorspace is HSL, the
%  modulation is lightness, saturation, and hue.  For HWB, use blackness,
%  whiteness, and hue.  And for HCL, use chrome, luma, and hue.
%
%  The format of the ModulateImage method is:
%
%    MagickBooleanType ModulateImage(Image *image,const char *modulate)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o modulate: Define the percent change in brightness, saturation, and
%      hue.
%
*/

/* Modulate one pixel in HCL space; percentages of 100 leave it unchanged. */
static inline void ModulateHCL(const double percent_hue,
  const double percent_chroma,const double percent_luma,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    hue,
    luma,
    chroma;

  /*
    Increase or decrease color luma, chroma, or hue.
  */
  ConvertRGBToHCL(*red,*green,*blue,&hue,&chroma,&luma);
  /* percent_hue is a 0..200 rotation centered on 100 (no change). */
  hue+=0.5*(0.01*percent_hue-1.0);
  while (hue < 0.0)
    hue+=1.0;
  while (hue > 1.0)
    hue-=1.0;
  chroma*=0.01*percent_chroma;
  luma*=0.01*percent_luma;
  ConvertHCLToRGB(hue,chroma,luma,red,green,blue);
}

/* Modulate one pixel in HCLp space. */
static inline void ModulateHCLp(const double percent_hue,
  const double percent_chroma,const double percent_luma,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    hue,
    luma,
    chroma;

  /*
    Increase or decrease color luma, chroma, or hue.
  */
  ConvertRGBToHCLp(*red,*green,*blue,&hue,&chroma,&luma);
  hue+=0.5*(0.01*percent_hue-1.0);
  while (hue < 0.0)
    hue+=1.0;
  while (hue > 1.0)
    hue-=1.0;
  chroma*=0.01*percent_chroma;
  luma*=0.01*percent_luma;
  ConvertHCLpToRGB(hue,chroma,luma,red,green,blue);
}

/* Modulate one pixel in HSB space. */
static inline void ModulateHSB(const double percent_hue,
  const double percent_saturation,const double percent_brightness,
  Quantum *red,Quantum *green,Quantum *blue)
{
  double
    brightness,
    hue,
    saturation;

  /*
    Increase or decrease color brightness, saturation, or hue.
  */
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  hue+=0.5*(0.01*percent_hue-1.0);
  while (hue < 0.0)
    hue+=1.0;
  while (hue > 1.0)
    hue-=1.0;
  saturation*=0.01*percent_saturation;
  brightness*=0.01*percent_brightness;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}

/* Modulate one pixel in HSI space. */
static inline void ModulateHSI(const double percent_hue,
  const double percent_saturation,const double percent_intensity,
  Quantum *red,Quantum *green,Quantum *blue)
{
  double
    intensity,
    hue,
    saturation;

  /*
    Increase or decrease color intensity, saturation, or hue.
  */
  ConvertRGBToHSI(*red,*green,*blue,&hue,&saturation,&intensity);
  hue+=0.5*(0.01*percent_hue-1.0);
  while (hue < 0.0)
    hue+=1.0;
  /* NOTE(review): the HCL/HCLp/HSB/HSI helpers wrap with `hue > 1.0` while
     the HSL/HSV/HWB/LCH helpers use `hue >= 1.0` — confirm whether the
     inclusive bound is intended here. */
  while (hue > 1.0)
    hue-=1.0;
  saturation*=0.01*percent_saturation;
  intensity*=0.01*percent_intensity;
  ConvertHSIToRGB(hue,saturation,intensity,red,green,blue);
}

/* Modulate one pixel in HSL space (the default for ModulateImage). */
static inline void ModulateHSL(const double percent_hue,
  const double percent_saturation,const double percent_lightness,
  Quantum *red,Quantum *green,Quantum *blue)
{
  double
    hue,
    lightness,
    saturation;

  /*
    Increase or decrease color lightness, saturation, or hue.
  */
  ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness);
  hue+=0.5*(0.01*percent_hue-1.0);
  while (hue < 0.0)
    hue+=1.0;
  while (hue >= 1.0)
    hue-=1.0;
  saturation*=0.01*percent_saturation;
  lightness*=0.01*percent_lightness;
  ConvertHSLToRGB(hue,saturation,lightness,red,green,blue);
}

/* Modulate one pixel in HSV space. */
static inline void ModulateHSV(const double percent_hue,
  const double percent_saturation,const double percent_value,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    hue,
    saturation,
    value;

  /*
    Increase or decrease color value, saturation, or hue.
  */
  ConvertRGBToHSV(*red,*green,*blue,&hue,&saturation,&value);
  hue+=0.5*(0.01*percent_hue-1.0);
  while (hue < 0.0)
    hue+=1.0;
  while (hue >= 1.0)
    hue-=1.0;
  saturation*=0.01*percent_saturation;
  value*=0.01*percent_value;
  ConvertHSVToRGB(hue,saturation,value,red,green,blue);
}

/* Modulate one pixel in HWB space. */
static inline void ModulateHWB(const double percent_hue,
  const double percent_whiteness,const double percent_blackness,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    blackness,
    hue,
    whiteness;

  /*
    Increase or decrease color blackness, whiteness, or hue.
  */
  ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness);
  hue+=0.5*(0.01*percent_hue-1.0);
  while (hue < 0.0)
    hue+=1.0;
  while (hue >= 1.0)
    hue-=1.0;
  blackness*=0.01*percent_blackness;
  whiteness*=0.01*percent_whiteness;
  ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue);
}

/* Modulate one pixel in LCHab space. */
static inline void ModulateLCHab(const double percent_luma,
  const double percent_chroma,const double percent_hue,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    hue,
    luma,
    chroma;

  /*
    Increase or decrease color luma, chroma, or hue.
  */
  ConvertRGBToLCHab(*red,*green,*blue,&luma,&chroma,&hue);
  luma*=0.01*percent_luma;
  chroma*=0.01*percent_chroma;
  hue+=0.5*(0.01*percent_hue-1.0);
  while (hue < 0.0)
    hue+=1.0;
  while (hue >= 1.0)
    hue-=1.0;
  ConvertLCHabToRGB(luma,chroma,hue,red,green,blue);
}

/* Modulate one pixel in LCHuv space. */
static inline void ModulateLCHuv(const double percent_luma,
  const double percent_chroma,const double percent_hue,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    hue,
    luma,
    chroma;

  /*
    Increase or decrease color luma, chroma, or hue.
  */
  ConvertRGBToLCHuv(*red,*green,*blue,&luma,&chroma,&hue);
  luma*=0.01*percent_luma;
  chroma*=0.01*percent_chroma;
  hue+=0.5*(0.01*percent_hue-1.0);
  while (hue < 0.0)
    hue+=1.0;
  while (hue >= 1.0)
    hue-=1.0;
  ConvertLCHuvToRGB(luma,chroma,hue,red,green,blue);
}

MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate)
{
#define ModulateImageTag  "Modulate/Image"

  CacheView
    *image_view;

  ColorspaceType
    colorspace;

  const char
    *artifact;

  double
    percent_brightness,
    percent_hue,
    percent_saturation;

  ExceptionInfo
    *exception;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickStatusType
    flags;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Initialize modulate table.
*/ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (modulate == (char *) NULL) return(MagickFalse); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) SetImageColorspace(image,sRGBColorspace); flags=ParseGeometry(modulate,&geometry_info); percent_brightness=geometry_info.rho; percent_saturation=geometry_info.sigma; if ((flags & SigmaValue) == 0) percent_saturation=100.0; percent_hue=geometry_info.xi; if ((flags & XiValue) == 0) percent_hue=100.0; colorspace=UndefinedColorspace; artifact=GetImageArtifact(image,"modulate:colorspace"); if (artifact != (const char *) NULL) colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions, MagickFalse,artifact); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { Quantum blue, green, red; /* Modulate image colormap. */ red=image->colormap[i].red; green=image->colormap[i].green; blue=image->colormap[i].blue; switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HCLpColorspace: { ModulateHCLp(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSIColorspace: { ModulateHSI(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSVColorspace: { ModulateHSV(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case LCHabColorspace: case LCHColorspace: { 
ModulateLCHab(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } case LCHuvColorspace: { ModulateLCHuv(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } } image->colormap[i].red=red; image->colormap[i].green=green; image->colormap[i].blue=blue; } /* Modulate image. */ /* call opencl version */ status = AccelerateModulateImage(image, percent_brightness, percent_hue, percent_saturation, colorspace, &image->exception); if (status != MagickFalse) return status; status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { Quantum blue, green, red; red=GetPixelRed(q); green=GetPixelGreen(q); blue=GetPixelBlue(q); switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HCLpColorspace: { ModulateHCLp(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSVColorspace: { ModulateHSV(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case LCHabColorspace: { 
ModulateLCHab(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } case LCHColorspace: case LCHuvColorspace: { ModulateLCHuv(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } } SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ModulateImage) #endif proceed=SetImageProgress(image,ModulateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e g a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NegateImage() negates the colors in the reference image. The grayscale % option means that only grayscale values within the image are negated. % % The format of the NegateImageChannel method is: % % MagickBooleanType NegateImage(Image *image, % const MagickBooleanType grayscale) % MagickBooleanType NegateImageChannel(Image *image, % const ChannelType channel,const MagickBooleanType grayscale) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o grayscale: If MagickTrue, only negate grayscale pixels within the image. 
% */ MagickExport MagickBooleanType NegateImage(Image *image, const MagickBooleanType grayscale) { MagickBooleanType status; status=NegateImageChannel(image,DefaultChannels,grayscale); return(status); } MagickExport MagickBooleanType NegateImageChannel(Image *image, const ChannelType channel,const MagickBooleanType grayscale) { #define NegateImageTag "Negate/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { /* Negate colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { if (grayscale != MagickFalse) if ((image->colormap[i].red != image->colormap[i].green) || (image->colormap[i].green != image->colormap[i].blue)) continue; if ((channel & RedChannel) != 0) image->colormap[i].red=QuantumRange-image->colormap[i].red; if ((channel & GreenChannel) != 0) image->colormap[i].green=QuantumRange-image->colormap[i].green; if ((channel & BlueChannel) != 0) image->colormap[i].blue=QuantumRange-image->colormap[i].blue; } } /* Negate image. 
*/ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); if (grayscale != MagickFalse) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((GetPixelRed(q) != GetPixelGreen(q)) || (GetPixelGreen(q) != GetPixelBlue(q))) { q++; continue; } if ((channel & RedChannel) != 0) SetPixelRed(q,QuantumRange-GetPixelRed(q)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,QuantumRange-GetPixelGreen(q)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,QuantumRange-GetPixelBlue(q)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,QuantumRange-GetPixelOpacity(q)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,QuantumRange-GetPixelIndex(indexes+x)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_NegateImageChannel) #endif proceed=SetImageProgress(image,NegateImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(MagickTrue); } /* Negate image. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); if (channel == DefaultChannels) for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q+x,QuantumRange-GetPixelRed(q+x)); SetPixelGreen(q+x,QuantumRange-GetPixelGreen(q+x)); SetPixelBlue(q+x,QuantumRange-GetPixelBlue(q+x)); } else for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q+x,QuantumRange-GetPixelRed(q+x)); if ((channel & GreenChannel) != 0) SetPixelGreen(q+x,QuantumRange-GetPixelGreen(q+x)); if ((channel & BlueChannel) != 0) SetPixelBlue(q+x,QuantumRange-GetPixelBlue(q+x)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q+x,QuantumRange-GetPixelOpacity(q+x)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,QuantumRange-GetPixelIndex(indexes+x)); if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_NegateImageChannel) #endif proceed=SetImageProgress(image,NegateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N o r m a l i z e I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  The NormalizeImage() method enhances the contrast of a color image by
%  mapping the darkest 2 percent of all pixel to black and the brightest
%  1 percent to white.
%
%  The format of the NormalizeImage method is:
%
%    MagickBooleanType NormalizeImage(Image *image)
%    MagickBooleanType NormalizeImageChannel(Image *image,
%      const ChannelType channel)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
*/

/* Convenience wrapper: normalize all default channels of the image. */
MagickExport MagickBooleanType NormalizeImage(Image *image)
{
  MagickBooleanType
    status;

  status=NormalizeImageChannel(image,DefaultChannels);
  return(status);
}

MagickExport MagickBooleanType NormalizeImageChannel(Image *image,
  const ChannelType channel)
{
  double
    black_point,
    white_point;

  /*
    NOTE(review): the factors 0.0015 and 0.9995 do not match the "darkest 2
    percent / brightest 1 percent" claim in the header comment above —
    confirm which is authoritative before changing either.
  */
  black_point=(double) image->columns*image->rows*0.0015;
  white_point=(double) image->columns*image->rows*0.9995;
  return(ContrastStretchImageChannel(image,channel,black_point,white_point));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S i g m o i d a l C o n t r a s t I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SigmoidalContrastImage() adjusts the contrast of an image with a non-linear
%  sigmoidal contrast algorithm.  Increase the contrast of the image using a
%  sigmoidal transfer function without saturating highlights or shadows.
%  Contrast indicates how much to increase the contrast (0 is none; 3 is
%  typical; 20 is pushing it); mid-point indicates where midtones fall in the
%  resultant image (0 is white; 50% is middle-gray; 100% is black).  Set
%  sharpen to MagickTrue to increase the image contrast otherwise the contrast
%  is reduced.
%
%  The format of the SigmoidalContrastImage method is:
%
%    MagickBooleanType SigmoidalContrastImage(Image *image,
%      const MagickBooleanType sharpen,const char *levels)
%    MagickBooleanType SigmoidalContrastImageChannel(Image *image,
%      const ChannelType channel,const MagickBooleanType sharpen,
%      const double contrast,const double midpoint)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o sharpen: Increase or decrease image contrast.
%
%    o contrast: strength of the contrast, the larger the number the more
%      'threshold-like' it becomes.
%
%    o midpoint: midpoint of the function as a color value 0 to QuantumRange.
%
*/

/* ImageMagick 7 has a version of this function which does not use LUTs. */

/*
  Sigmoidal function Sigmoidal with inflexion point moved to b and "slope
  constant" set to a.

  The first version, based on the hyperbolic tangent tanh, when combined with
  the scaling step, is an exact arithmetic clone of the sigmoid function
  based on the logistic curve.  The equivalence is based on the identity

    1/(1+exp(-t)) = (1+tanh(t/2))/2

  (http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the
  scaled sigmoidal derivation is invariant under affine transformations of
  the ordinate.

  The tanh version is almost certainly more accurate and cheaper.  The 0.5
  factor in the argument is to clone the legacy ImageMagick behavior.  The
  reason for making the define depend on atanh even though it only uses tanh
  has to do with the construction of the inverse of the scaled sigmoidal.
*/
#if defined(MAGICKCORE_HAVE_ATANH)
#define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) )
#else
#define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) )
#endif
/*
  Scaled sigmoidal function:

    ( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) /
    ( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) )

  See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and
  http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf.
  The limit of ScaledSigmoidal as a->0 is the identity, but a=0 gives a
  division by zero.  This is fixed below by exiting immediately when contrast
  is small, leaving the image (or colormap) unmodified.  This appears to be
  safe because the series expansion of the logistic sigmoidal function around
  x=b is 1/2-a*(b-x)/4+...  so that the key denominator s(1)-s(0) is about
  a/4 (a/2 with tanh).
*/
#define ScaledSigmoidal(a,b,x) (                    \
  (Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \
  (Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) )
/*
  Inverse of ScaledSigmoidal, used for +sigmoidal-contrast.  Because b may be
  0 or 1, the argument of the hyperbolic tangent (resp. logistic sigmoidal)
  may be outside of the interval (-1,1) (resp. (0,1)), even when creating a
  LUT from in gamut values, hence the branching.  In addition, HDRI may have
  out of gamut values.
  InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal:
  It is only a right inverse.  This is unavoidable.
*/
static inline double InverseScaledSigmoidal(const double a,const double b,
  const double x)
{
  const double sig0=Sigmoidal(a,b,0.0);
  const double sig1=Sigmoidal(a,b,1.0);
  const double argument=(sig1-sig0)*x+sig0;
  /* Clamp into the open domain of atanh (resp. log) before inverting. */
  const double clamped=
    (
#if defined(MAGICKCORE_HAVE_ATANH)
      argument < -1+MagickEpsilon
      ?
      -1+MagickEpsilon
      :
      ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
    );
  return(b+(2.0/a)*atanh(clamped));
#else
      argument < MagickEpsilon
      ?
      MagickEpsilon
      :
      ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
    );
  return(b-log(1.0/clamped-1.0)/a);
#endif
}

/* Parse a "contrast[,midpoint[%]]" geometry and apply to default channels. */
MagickExport MagickBooleanType SigmoidalContrastImage(Image *image,
  const MagickBooleanType sharpen,const char *levels)
{
  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  flags=ParseGeometry(levels,&geometry_info);
  if ((flags & SigmaValue) == 0)
    geometry_info.sigma=1.0*QuantumRange/2.0;
  if ((flags & PercentValue) != 0)
    geometry_info.sigma=1.0*QuantumRange*geometry_info.sigma/100.0;
  status=SigmoidalContrastImageChannel(image,DefaultChannels,sharpen,
    geometry_info.rho,geometry_info.sigma);
  return(status);
}

MagickExport MagickBooleanType SigmoidalContrastImageChannel(Image *image,
  const ChannelType channel,const MagickBooleanType sharpen,
  const double contrast,const double midpoint)
{
#define SigmoidalContrastImageTag  "SigmoidalContrast/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *sigmoidal_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Side effect: clamps values unless contrast<MagickEpsilon, in which case
    nothing is done.
  */
  if (contrast < MagickEpsilon)
    return(MagickTrue);
  /*
    Allocate and initialize sigmoidal maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  sigmoidal_map=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*sigmoidal_map));
  if (sigmoidal_map == (MagickRealType *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) ResetMagickMemory(sigmoidal_map,0,(MaxMap+1)*sizeof(*sigmoidal_map));
  /* Build the LUT: forward sigmoidal when sharpening, inverse otherwise. */
  if (sharpen != MagickFalse)
    for (i=0; i <= (ssize_t) MaxMap; i++)
      sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType)
        (MaxMap*ScaledSigmoidal(contrast,QuantumScale*midpoint,(double) i/
        MaxMap)));
  else
    for (i=0; i <= (ssize_t) MaxMap; i++)
      sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType) (
        MaxMap*InverseScaledSigmoidal(contrast,QuantumScale*midpoint,(double)
        i/MaxMap)));
  /*
    Sigmoidal-contrast enhance colormap.
  */
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=ClampToQuantum(sigmoidal_map[
          ScaleQuantumToMap(image->colormap[i].red)]);
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=ClampToQuantum(sigmoidal_map[
          ScaleQuantumToMap(image->colormap[i].green)]);
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=ClampToQuantum(sigmoidal_map[
          ScaleQuantumToMap(image->colormap[i].blue)]);
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=ClampToQuantum(sigmoidal_map[
          ScaleQuantumToMap(image->colormap[i].opacity)]);
    }
  /*
    Sigmoidal-contrast enhance image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
          GetPixelRed(q))]));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
          GetPixelGreen(q))]));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
          GetPixelBlue(q))]));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
          GetPixelOpacity(q))]));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap(
          GetPixelIndex(indexes+x))]));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SigmoidalContrastImageChannel)
#endif
        proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  sigmoidal_map=(MagickRealType *) RelinquishMagickMemory(sigmoidal_map);
  return(status);
}
c_md.c
/* ***********************************************************************
  This program is part of the OpenMP Source Code Repository

  http://www.pcg.ull.es/ompscr/
  e-mail: ompscr@etsii.ull.es

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  (LICENSE file) along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

  FILE:              c_md.c
  VERSION:           1.0
  DATE:              May 2004
  AUTHOR:            Bill Magro, Kuck and Associates, Inc. (KAI), 1998
  COMMENTS TO:       sande@csi.ull.es
  DESCRIPTION:       This program implements a simple molecular dynamics
                     simulation, using the velocity Verlet time integration
                     scheme. The particles interact with a central pair
                     potential.
  COMMENTS:
  REFERENCES:        W. C. Swope and H. C. Andersen and P. H. Berens and
                     K. R. Wilson,
                     A Computer Simulation Method for the Calculation of
                     Equilibrium Constants for the Formation of Physical
                     Clusters of Molecules: Application to Small Water
                     Clusters, Journal of Chemical Physics, 1982,
                     vol. 76, pg 637-649
  BASIC PRAGMAS:     parallel for
  USAGE:             ./c_md.par 8192 10
  INPUT:             Number of particles
                     Number of simulation steps
  OUTPUT:            -
  FILE FORMATS:      -
  RESTRICTIONS:      -
  REVISION HISTORY:
**************************************************************************/

#include "OmpSCR.h"
#include <math.h>

#ifndef RAND_MAX
#define RAND_MAX 0x7fff
#endif

#ifndef M_PI_2
#define M_PI_2 1.57079632679489661923 /* pi/2 */
#endif

#define NUM_ARGS 2
#define NUM_TIMERS 1
#define DEFAULT_NPARTS 8192
#define DEFAULT_NSTEPS 10
#define USAGE_STR "NPARTS NSTEPS"
#define NDIM 3

int NPARTS; /* No. of particles */
int NSTEPS; /* No. of simulation steps */

/* Cartesian vector with NDIM components. */
typedef double vnd_t[NDIM];

/* -----------------------------------------------------------------------
                          PROTOTYPES
 * ----------------------------------------------------------------------- */
double v(double x);
double dv(double x);
void initialize(int np, int nd, vnd_t box, vnd_t *pos, vnd_t *vel, vnd_t *acc);
double dist(int nd, vnd_t r1, vnd_t r2, vnd_t dr);
double dot_prod(int n, vnd_t x, vnd_t y);
void compute(int np, int nd, vnd_t *pos, vnd_t *vel, double mass, vnd_t *f,
             double *pot_p, double *kin_p);
void update(int np, int nd, vnd_t *pos, vnd_t *vel, vnd_t *f, vnd_t *a,
             double mass, double dt);
int main(int argc, char **argv);

/* -----------------------------------------------------------------------
   Statement function for the pair potential.
   This potential is a harmonic well which smoothly saturates to a
   maximum value at PI/2.
 * ----------------------------------------------------------------------- */
double v(double x) {
  if (x < M_PI_2)
    return pow(sin(x), 2.0);
  else
    return 1.0;
}

/* -----------------------------------------------------------------------
   Statement function for the derivative of the pair potential.
 * ----------------------------------------------------------------------- */
double dv(double x) {
  if (x < M_PI_2)
    return 2.0 * sin(x) * cos(x);
  else
    return 0.0;
}

/* -----------------------------------------------------------------------
   Initialize the positions (uniform pseudo-random inside 'box'),
   velocities (zero), and accelerations (zero).
   Uses a fixed seed so runs are reproducible.
 * ----------------------------------------------------------------------- */
void initialize(int np, int nd, vnd_t box, vnd_t *pos, vnd_t *vel,
                vnd_t *acc) {
  int i, j;
  double x;

  srand(4711L);
  for (i = 0; i < np; i++) {
    for (j = 0; j < nd; j++) {
      x = rand() % 10000 / (double)10000.0;
      pos[i][j] = box[j] * x;
      vel[i][j] = 0.0;
      acc[i][j] = 0.0;
    }
  }
}

/* -----------------------------------------------------------------------
   Compute the displacement vector 'dr' (and return its norm) between
   two particles at r1 and r2.
 * ----------------------------------------------------------------------- */
double dist(int nd, vnd_t r1, vnd_t r2, vnd_t dr) {
  int i;
  double d;

  d = 0.0;
  for (i = 0; i < nd; i++) {
    dr[i] = r1[i] - r2[i];
    d += dr[i] * dr[i];
  }
  return sqrt(d);
}

/* -----------------------------------------------------------------------
   Return the dot product between two vectors of type double and length n.
 * ----------------------------------------------------------------------- */
double dot_prod(int n, vnd_t x, vnd_t y) {
  int i;
  double t = 0.0;

  for (i = 0; i < n; i++) {
    t += x[i] * y[i];
  }
  return t;
}

/* -----------------------------------------------------------------------
   Compute the forces and energies, given positions, masses, and velocities.
   On return, *pot_p holds the total potential energy and *kin_p the
   total kinetic energy.
 * ----------------------------------------------------------------------- */
void compute(int np, int nd, vnd_t *pos, vnd_t *vel, double mass, vnd_t *f,
             double *pot_p, double *kin_p) {
  int i, j, k;
  vnd_t rij;
  double d;
  double pot, kin;

  pot = 0.0;
  kin = 0.0;

  /* The computation of forces and energies is fully parallel. */
#pragma omp parallel for default(shared) private(i, j, k, rij, d) reduction(+ : pot, kin)
  for (i = 0; i < np; i++) {
    /* compute potential energy and forces */
    for (j = 0; j < nd; j++)
      f[i][j] = 0.0;

    for (j = 0; j < np; j++) {
      if (i != j) {
        d = dist(nd, pos[i], pos[j], rij);
        /* attribute half of the potential energy to particle 'j' */
        pot = pot + 0.5 * v(d);
        for (k = 0; k < nd; k++) {
          f[i][k] = f[i][k] - rij[k] * dv(d) / d;
        }
      }
    }

    /* compute kinetic energy.
       BUG FIX: this previously read dot_prod(nd, vel[i], vel[j]); at this
       point the inner loop has finished, so j == np and vel[j] indexed one
       element past the end of the velocity array (undefined behavior).
       The kinetic energy of particle i is (1/2) m (v_i . v_i). */
    kin = kin + dot_prod(nd, vel[i], vel[i]);
  }

  kin = kin * 0.5 * mass;

  *pot_p = pot;
  *kin_p = kin;
}

/* -----------------------------------------------------------------------
   Perform the time integration, using a velocity Verlet algorithm.
 * ----------------------------------------------------------------------- */
void update(int np, int nd, vnd_t *pos, vnd_t *vel, vnd_t *f, vnd_t *a,
            double mass, double dt) {
  int i, j;
  double rmass;

  rmass = 1.0 / mass;

  /* The time integration is fully parallel */
#pragma omp parallel for default(shared) private(i, j) firstprivate(rmass, dt)
  for (i = 0; i < np; i++) {
    for (j = 0; j < nd; j++) {
      pos[i][j] = pos[i][j] + vel[i][j] * dt + 0.5 * dt * dt * a[i][j];
      vel[i][j] = vel[i][j] + 0.5 * dt * (f[i][j] * rmass + a[i][j]);
      a[i][j] = f[i][j] * rmass;
    }
  }
}

/* ----------------------------------------------------------------------- */
int main(int argc, char **argv) {
  /* simulation parameters */
  double mass = 1.0;
  double dt = 1.0e-4;
  vnd_t box;
  vnd_t *position;
  vnd_t *velocity;
  vnd_t *force;
  vnd_t *accel;
  double potential, kinetic, E0;
  int i;
  int NUMTHREADS;
  double total_time;
  char *PARAM_NAMES[NUM_ARGS] = {"Nparts", "Nsteps"};
  char *TIMERS_NAMES[NUM_TIMERS] = {"Total_time"};
  char *DEFAULT_VALUES[NUM_ARGS] = {"8192", "10"};

  NUMTHREADS = omp_get_max_threads();
  OSCR_init(NUMTHREADS, "Molecular dynamic simulation",
            "Use md <Nparts> <Nsteps>", NUM_ARGS, PARAM_NAMES, DEFAULT_VALUES,
            NUM_TIMERS, NUM_TIMERS, TIMERS_NAMES, argc, argv);

  NPARTS = OSCR_getarg_int(1);
  NSTEPS = OSCR_getarg_int(2);
  /* Default: DEFAULT_NPARTS, DEFAULT_NSTEPS */

  /* Memory allocation */
  position = OSCR_calloc(NPARTS, sizeof(vnd_t));
  velocity = OSCR_calloc(NPARTS, sizeof(vnd_t));
  force = OSCR_calloc(NPARTS, sizeof(vnd_t));
  accel = OSCR_calloc(NPARTS, sizeof(vnd_t));

  for (i = 0; i < NDIM; i++)
    box[i] = 10.0;

  /* set initial positions, velocities, and accelerations */
  initialize(NPARTS, NDIM, box, position, velocity, accel);

  OSCR_timer_start(0);

  /* compute the forces and energies */
  compute(NPARTS, NDIM, position, velocity, mass, force, &potential,
          &kinetic);
  E0 = potential + kinetic;

  /* This is the main time stepping loop */
  for (i = 0; i < NSTEPS; i++) {
    printf("step: %d\n", i);
    compute(NPARTS, NDIM, position, velocity, mass, force, &potential,
            &kinetic);
#if 0
    printf("%17.9e %17.9e %17.9e\n", potential, kinetic,
           (potential + kinetic - E0) / E0);
#endif
    update(NPARTS, NDIM, position, velocity, force, accel, mass, dt);
  }

  OSCR_timer_stop(0);
  total_time = OSCR_timer_read(0);

  OSCR_report(1, TIMERS_NAMES);
  printf("\n \t# THREADS \tTIME (secs.) \n");
  printf("\t %d \t\t%14.6lf\n", NUMTHREADS, total_time);

  return 0;
}

/*
 * vim:ts=2:sw=2:
 */
DRB092-threadprivatemissing2-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* A file-scope variable used within a function called by a parallel region. No threadprivate is used to avoid data races. This is the case for a variable referenced within a construct. Data race pairs sum0@68:7 vs. sum0@68:12 sum0@68:7 vs. sum0@68:7 */ #include "omprace.h" #include <omp.h> #include <stdio.h> #include <assert.h> int sum0=0, sum1=0; //#pragma omp threadprivate(sum0) int main() { omprace_init(); int i, sum=0; #pragma omp parallel { #pragma omp for for (i=1;i<=1000;i++) { sum0=sum0+i; } #pragma omp critical { sum= sum+sum0; } } /* reference calculation */ for (i=1;i<=1000;i++) { sum1=sum1+i; } printf("sum=%d; sum1=%d\n",sum,sum1); // assert(sum==sum1); omprace_fini(); return 0; }
1386.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4096x4096. */ #include "convolution-2d.h" /* Array initialization. */ static void init_array (int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj)) { // printf("Initializing Array\n"); int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) (i + j) / nj); } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]); if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; #pragma scop #pragma omp target teams distribute parallel for schedule(dynamic) private(j) for (i = 1; i < _PB_NI - 1; ++i) { #pragma omp target teams distribute parallel for schedule(dynamic) for (j = 1; j < _PB_NJ - 1; ++j) { B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1] + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1] + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1]; } } #pragma endscop // printf("Kernal computation complete !!\n"); } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. 
*/ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj); /* Initialize array(s). */ init_array (ni, nj, POLYBENCH_ARRAY(A)); /* Start timer. */ //polybench_start_instruments; polybench_timer_start(); /* Run kernel. */ kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_timer_stop(); polybench_timer_print(); //polybench_stop_instruments; //polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
GB_unaryop__abs_uint64_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_uint64_fp32 // op(A') function: GB_tran__abs_uint64_fp32 // C type: uint64_t // A type: float // cast: uint64_t cij ; GB_CAST_UNSIGNED(cij,aij,64) // unaryop: cij = aij #define GB_ATYPE \ float #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ uint64_t z ; GB_CAST_UNSIGNED(z,x,64) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_UINT64 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_uint64_fp32 ( uint64_t *restrict Cx, const float *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { 
GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_uint64_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_binop__isge_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__isge_uint8 // A.*B function (eWiseMult): GB_AemultB__isge_uint8 // A*D function (colscale): GB_AxD__isge_uint8 // D*A function (rowscale): GB_DxB__isge_uint8 // C+=B function (dense accum): GB_Cdense_accumB__isge_uint8 // C+=b function (dense accum): GB_Cdense_accumb__isge_uint8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isge_uint8 // C=scalar+B GB_bind1st__isge_uint8 // C=scalar+B' GB_bind1st_tran__isge_uint8 // C=A+scalar GB_bind2nd__isge_uint8 // C=A'+scalar GB_bind2nd_tran__isge_uint8 // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] 
#define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x >= y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGE || GxB_NO_UINT8 || GxB_NO_ISGE_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__isge_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__isge_uint8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__isge_uint8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__isge_uint8 ( GrB_Matrix C, const GrB_Matrix A, 
bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__isge_uint8 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__isge_uint8 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, 
*klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__isge_uint8 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__isge_uint8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = Bx [p] ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd 
//------------------------------------------------------------------------------ GrB_Info GB_bind2nd__isge_uint8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = Ax [p] ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB_bind1st_tran__isge_uint8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB_bind2nd_tran__isge_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
neutral.c
#include "neutral.h"
#include "../../comms.h"
#include "../../params.h"
#include "../../shared.h"
#include "../../shared_data.h"
#include "../neutral_interface.h"
#include <assert.h>
#include <float.h>
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

#ifdef MPI
#include "mpi.h"
#endif

// Performs a solve of dependent variables for particle transport
void solve_transport_2d(
    const int nx, const int ny, const int global_nx, const int global_ny,
    const uint64_t master_key, const int pad, const int x_off, const int y_off,
    const double dt, const int ntotal_particles, int* nparticles,
    const int* neighbours, Particle* particles, const double* density,
    const double* edgex, const double* edgey, const double* edgedx,
    const double* edgedy, CrossSection* cs_scatter_table,
    CrossSection* cs_absorb_table, double* energy_deposition_tally,
    uint64_t* reduce_array0, uint64_t* reduce_array1, uint64_t* reduce_array2,
    uint64_t* facet_events, uint64_t* collision_events) {

  // Nothing left to transport on this rank
  if (!(*nparticles)) {
    printf("Out of particles\n");
    return;
  }

  handle_particles(global_nx, global_ny, nx, ny, master_key, pad, x_off, y_off,
                   1, dt, neighbours, density, edgex, edgey, edgedx, edgedy,
                   facet_events, collision_events, ntotal_particles,
                   *nparticles, particles, cs_scatter_table, cs_absorb_table,
                   energy_deposition_tally);
}

// Handles the current active batch of particles.  Each OpenMP thread walks a
// contiguous slice of the particle array; per-thread event counters are
// combined with an OpenMP reduction.
void handle_particles(const int global_nx, const int global_ny, const int nx,
                      const int ny, const uint64_t master_key, const int pad,
                      const int x_off, const int y_off, const int initial,
                      const double dt, const int* neighbours,
                      const double* density, const double* edgex,
                      const double* edgey, const double* edgedx,
                      const double* edgedy, uint64_t* facets,
                      uint64_t* collisions, const int ntotal_particles,
                      const int nparticles_to_process,
                      Particle* particles_start, CrossSection* cs_scatter_table,
                      CrossSection* cs_absorb_table,
                      double* energy_deposition_tally) {

  int nthreads = 0;
#pragma omp parallel
  { nthreads = omp_get_num_threads(); }

  uint64_t nfacets = 0;
  uint64_t ncollisions = 0;
  uint64_t nparticles = 0;
  const int np_per_thread = nparticles_to_process / nthreads;
  const int np_remainder = nparticles_to_process % nthreads;

  // The main particle loop
#pragma omp parallel reduction(+ : nfacets, ncollisions, nparticles)
  {
    const int tid = omp_get_thread_num();

    // Calculate the particles offset, accounting for some remainder
    const int rem = (tid < np_remainder);
    const int particles_off = tid * np_per_thread + min(tid, np_remainder);

    int result = PARTICLE_CONTINUE;
    for (int pp = 0; pp < np_per_thread + rem; ++pp) {
      // (1) particle can stream and reach census
      // (2) particle can collide and either
      //      - the particle will be absorbed
      //      - the particle will scatter (this means the energy changes)
      // (3) particle encounters boundary region, transports to another cell

      // Current particle
      const int pid = particles_off + pp;
      Particle* particle = &particles_start[pid];
      // The particle index doubles as the per-particle RNG key
      const uint64_t pkey = pid;

      if (particle->dead) {
        continue;
      }
      nparticles++;

      int x_facet = 0;
      int absorb_cs_index = -1;
      int scatter_cs_index = -1;
      double cell_mfp = 0.0;

      // Determine the current cell (indices include the mesh halo pad)
      int cellx = particle->cellx - x_off + pad;
      int celly = particle->celly - y_off + pad;
      double local_density = density[celly * (nx + 2 * pad) + cellx];

      // Fetch the cross sections and prepare related quantities
      double microscopic_cs_scatter = microscopic_cs_for_energy(
          cs_scatter_table, particle->energy, &scatter_cs_index);
      double microscopic_cs_absorb = microscopic_cs_for_energy(
          cs_absorb_table, particle->energy, &absorb_cs_index);
      double number_density = (local_density * AVOGADROS / MOLAR_MASS);
      double macroscopic_cs_scatter =
          number_density * microscopic_cs_scatter * BARNS;
      double macroscopic_cs_absorb =
          number_density * microscopic_cs_absorb * BARNS;
      double speed = sqrt((2.0 * particle->energy * eV_TO_J) / PARTICLE_MASS);
      double energy_deposition = 0.0;
      const double inv_ntotal_particles = 1.0 / (double)ntotal_particles;

      uint64_t counter = 0;
      double rn[NRANDOM_NUMBERS];

      // Set time to census and MFPs until collision, unless travelled
      // particle
      if (initial) {
        particle->dt_to_census = dt;
        generate_random_numbers(pkey, master_key, counter++, &rn[0], &rn[1]);
        particle->mfp_to_collision = -log(rn[0]) / macroscopic_cs_scatter;
      }

      // Loop until we have reached census
      while (particle->dt_to_census > 0.0) {
        cell_mfp = 1.0 / (macroscopic_cs_scatter + macroscopic_cs_absorb);

        // Work out the distance until the particle hits a facet
        double distance_to_facet = 0.0;
        calc_distance_to_facet(global_nx, particle->x, particle->y, pad, x_off,
                               y_off, particle->omega_x, particle->omega_y,
                               speed, particle->cellx, particle->celly,
                               &distance_to_facet, &x_facet, edgex, edgey);

        const double distance_to_collision =
            particle->mfp_to_collision * cell_mfp;
        const double distance_to_census = speed * particle->dt_to_census;

        // Check if our next event is a collision
        if (distance_to_collision < distance_to_facet &&
            distance_to_collision < distance_to_census) {
          // Track the total number of collisions
          ncollisions++;

          // Handles a collision event
          result = collision_event(
              global_nx, nx, x_off, y_off, pid, master_key,
              inv_ntotal_particles, distance_to_collision, local_density,
              cs_scatter_table, cs_absorb_table, particle, &counter,
              &energy_deposition, &number_density, &microscopic_cs_scatter,
              &microscopic_cs_absorb, &macroscopic_cs_scatter,
              &macroscopic_cs_absorb, energy_deposition_tally,
              &scatter_cs_index, &absorb_cs_index, rn, &speed);

          if (result != PARTICLE_CONTINUE) {
            break;
          }
        }
        // Check if we have reached facet
        else if (distance_to_facet < distance_to_census) {
          // Track the number of facet encounters
          nfacets++;

          result = facet_event(
              global_nx, global_ny, nx, ny, x_off, y_off,
              inv_ntotal_particles, distance_to_facet, speed, cell_mfp,
              x_facet, density, neighbours, particle, &energy_deposition,
              &number_density, &microscopic_cs_scatter, &microscopic_cs_absorb,
              &macroscopic_cs_scatter, &macroscopic_cs_absorb,
              energy_deposition_tally, &cellx, &celly, &local_density);

          if (result != PARTICLE_CONTINUE) {
            break;
          }
        } else {
          census_event(global_nx, nx, x_off, y_off, inv_ntotal_particles,
                       distance_to_census, cell_mfp, particle,
                       &energy_deposition, &number_density,
                       &microscopic_cs_scatter, &microscopic_cs_absorb,
                       energy_deposition_tally);
          break;
        }
      }
    }
  }

  // Store a total number of facets and collisions
  *facets += nfacets;
  *collisions += ncollisions;

  // Fix: nparticles is uint64_t, which is not necessarily unsigned long long
  // (on LP64 it is unsigned long), so cast explicitly to match %llu.
  printf("Particles %llu\n", (unsigned long long)nparticles);
}

// Handles a collision event: deposits energy at the collision site, then
// either absorbs (reducing the weight, possibly killing the particle) or
// elastically scatters (changing energy and direction), and finally
// re-samples the cross sections and the MFPs to the next collision.
inline int collision_event(
    const int global_nx, const int nx, const int x_off, const int y_off,
    const uint64_t pkey, const uint64_t master_key,
    const double inv_ntotal_particles, const double distance_to_collision,
    const double local_density, const CrossSection* cs_scatter_table,
    const CrossSection* cs_absorb_table, Particle* particle, uint64_t* counter,
    double* energy_deposition, double* number_density,
    double* microscopic_cs_scatter, double* microscopic_cs_absorb,
    double* macroscopic_cs_scatter, double* macroscopic_cs_absorb,
    double* energy_deposition_tally, int* scatter_cs_index,
    int* absorb_cs_index, double rn[NRANDOM_NUMBERS], double* speed) {

  // Energy deposition stored locally for collision, not in tally mesh
  *energy_deposition += calculate_energy_deposition(
      global_nx, nx, x_off, y_off, particle, inv_ntotal_particles,
      distance_to_collision, *number_density, *microscopic_cs_absorb,
      *microscopic_cs_scatter + *microscopic_cs_absorb);

  // Moves the particle to the collision site
  particle->x += distance_to_collision * particle->omega_x;
  particle->y += distance_to_collision * particle->omega_y;

  const double p_absorb = *macroscopic_cs_absorb /
                          (*macroscopic_cs_scatter + *macroscopic_cs_absorb);

  double rn1[NRANDOM_NUMBERS];
  generate_random_numbers(pkey, master_key, (*counter)++, &rn1[0], &rn1[1]);

  if (rn1[0] < p_absorb) {
    /* Model particle absorption */

    // Find the new particle weight after absorption, saving the energy change
    particle->weight *= (1.0 - p_absorb);

    if (particle->energy < MIN_ENERGY_OF_INTEREST) {
      // Energy is too low, so mark the particle for deletion
      particle->dead = 1;

      // Need to store tally information as finished with particle
      update_tallies(nx, x_off, y_off, particle, inv_ntotal_particles,
                     *energy_deposition, energy_deposition_tally);

      *energy_deposition = 0.0;
      return PARTICLE_DEAD;
    }
  } else {
    /* Model elastic particle scattering */

    // The following assumes that all particles reside within a two-dimensional
    // plane, which solves a different equation. Change so that we consider
    // the full set of directional cosines, allowing scattering between planes.

    // Choose a random scattering angle between -1 and 1
    const double mu_cm = 1.0 - 2.0 * rn1[1];

    // Calculate the new energy based on the relation to angle of incidence
    const double e_new = particle->energy *
                         (MASS_NO * MASS_NO + 2.0 * MASS_NO * mu_cm + 1.0) /
                         ((MASS_NO + 1.0) * (MASS_NO + 1.0));

    // Convert the angle into the laboratory frame of reference
    double cos_theta = 0.5 * ((MASS_NO + 1.0) * sqrt(e_new / particle->energy) -
                              (MASS_NO - 1.0) * sqrt(particle->energy / e_new));

    // Alter the direction of the velocities
    const double sin_theta = sqrt(1.0 - cos_theta * cos_theta);
    const double omega_x_new =
        (particle->omega_x * cos_theta - particle->omega_y * sin_theta);
    const double omega_y_new =
        (particle->omega_x * sin_theta + particle->omega_y * cos_theta);
    particle->omega_x = omega_x_new;
    particle->omega_y = omega_y_new;
    particle->energy = e_new;
  }

  // Energy has changed so update the cross-sections
  *microscopic_cs_scatter = microscopic_cs_for_energy(
      cs_scatter_table, particle->energy, scatter_cs_index);
  *microscopic_cs_absorb = microscopic_cs_for_energy(
      cs_absorb_table, particle->energy, absorb_cs_index);
  *number_density = (local_density * AVOGADROS / MOLAR_MASS);
  *macroscopic_cs_scatter = *number_density * (*microscopic_cs_scatter) * BARNS;
  *macroscopic_cs_absorb = *number_density * (*microscopic_cs_absorb) * BARNS;

  // Re-sample number of mean free paths to collision
  generate_random_numbers(pkey, master_key, (*counter)++, &rn[0], &rn[1]);
  particle->mfp_to_collision = -log(rn[0]) / *macroscopic_cs_scatter;
  particle->dt_to_census -= distance_to_collision / *speed;
  *speed = sqrt((2.0 * particle->energy * eV_TO_J) / PARTICLE_MASS);

  return PARTICLE_CONTINUE;
}

// Handle facet event: streams the particle to the facet, flushes the tally
// for the cell being left, then either reflects at the global boundary or
// moves the particle to the neighbouring cell, updating cached cell data.
inline int facet_event(const int global_nx, const int global_ny, const int nx,
                       const int ny, const int x_off, const int y_off,
                       const double inv_ntotal_particles,
                       const double distance_to_facet, const double speed,
                       const double cell_mfp, const int x_facet,
                       const double* density, const int* neighbours,
                       Particle* particle, double* energy_deposition,
                       double* number_density, double* microscopic_cs_scatter,
                       double* microscopic_cs_absorb,
                       double* macroscopic_cs_scatter,
                       double* macroscopic_cs_absorb,
                       double* energy_deposition_tally, int* cellx, int* celly,
                       double* local_density) {

  // Update the mean free paths until collision
  particle->mfp_to_collision -= (distance_to_facet / cell_mfp);
  particle->dt_to_census -= (distance_to_facet / speed);

  *energy_deposition += calculate_energy_deposition(
      global_nx, nx, x_off, y_off, particle, inv_ntotal_particles,
      distance_to_facet, *number_density, *microscopic_cs_absorb,
      *microscopic_cs_scatter + *microscopic_cs_absorb);

  // Update tallies as we leave a cell
  update_tallies(nx, x_off, y_off, particle, inv_ntotal_particles,
                 *energy_deposition, energy_deposition_tally);
  *energy_deposition = 0.0;

  // Move the particle to the facet
  particle->x += distance_to_facet * particle->omega_x;
  particle->y += distance_to_facet * particle->omega_y;

  if (x_facet) {
    if (particle->omega_x > 0.0) {
      // Reflect at the boundary
      if (particle->cellx >= (global_nx - 1)) {
        particle->omega_x = -(particle->omega_x);
      } else {
        // Moving to right cell
        particle->cellx++;
      }
    } else if (particle->omega_x < 0.0) {
      if (particle->cellx <= 0) {
        // Reflect at the boundary
        particle->omega_x = -(particle->omega_x);
      } else {
        // Moving to left cell
        particle->cellx--;
      }
    }
  } else {
    if (particle->omega_y > 0.0) {
      // Reflect at the boundary
      if (particle->celly >= (global_ny - 1)) {
        particle->omega_y = -(particle->omega_y);
      } else {
        // Moving to north cell
        particle->celly++;
      }
    } else if (particle->omega_y < 0.0) {
      // Reflect at the boundary
      if (particle->celly <= 0) {
        particle->omega_y = -(particle->omega_y);
      } else {
        // Moving to south cell
        particle->celly--;
      }
    }
  }

  // Update the data based on new cell
  // FIXME(review): this lookup ignores the mesh halo, unlike
  // handle_particles() which indexes density with
  // [celly * (nx + 2 * pad) + cellx] and offsets both indices by pad.
  // If `density` is the padded array, this reads the wrong cell; a proper
  // fix needs `pad` threaded through this routine's signature, which is
  // declared in the interface header — confirm there before changing.
  *cellx = particle->cellx - x_off;
  *celly = particle->celly - y_off;
  *local_density = density[*celly * nx + *cellx];
  *number_density = (*local_density * AVOGADROS / MOLAR_MASS);
  *macroscopic_cs_scatter = *number_density * *microscopic_cs_scatter * BARNS;
  *macroscopic_cs_absorb = *number_density * *microscopic_cs_absorb * BARNS;

  return PARTICLE_CONTINUE;
}

// Handles the census event: streams the particle for the remaining time step
// and flushes its energy deposition into the tally mesh.
inline void census_event(const int global_nx, const int nx, const int x_off,
                         const int y_off, const double inv_ntotal_particles,
                         const double distance_to_census, const double cell_mfp,
                         Particle* particle, double* energy_deposition,
                         double* number_density, double* microscopic_cs_scatter,
                         double* microscopic_cs_absorb,
                         double* energy_deposition_tally) {

  // We have not changed cell or energy level at this stage
  particle->x += distance_to_census * particle->omega_x;
  particle->y += distance_to_census * particle->omega_y;
  particle->mfp_to_collision -= (distance_to_census / cell_mfp);
  *energy_deposition += calculate_energy_deposition(
      global_nx, nx, x_off, y_off, particle, inv_ntotal_particles,
      distance_to_census, *number_density, *microscopic_cs_absorb,
      *microscopic_cs_scatter + *microscopic_cs_absorb);

  // Need to store tally information as finished with particle
  update_tallies(nx, x_off, y_off, particle, inv_ntotal_particles,
                 *energy_deposition, energy_deposition_tally);

  particle->dt_to_census = 0.0;
}

// Tallies the energy deposition in the cell.  The tally mesh is unpadded
// (nx * ny), so no halo offset is applied here.  The atomic update makes the
// accumulation safe across the particle-loop threads.
inline void update_tallies(const int nx, const int x_off, const int y_off,
                           Particle* particle,
                           const double inv_ntotal_particles,
                           const double energy_deposition,
                           double* energy_deposition_tally) {

  const int cellx = particle->cellx - x_off;
  const int celly = particle->celly - y_off;

#pragma omp atomic update
  energy_deposition_tally[celly * nx + cellx] +=
      energy_deposition * inv_ntotal_particles;
}

// Calculate the distance to the next facet along the current direction of
// travel, and report whether that facet is an x facet (vertical edge).
inline void calc_distance_to_facet(const int global_nx, const double x,
                                   const double y, const int pad,
                                   const int x_off, const int y_off,
                                   const double omega_x, const double omega_y,
                                   const double speed, const int particle_cellx,
                                   const int particle_celly,
                                   double* distance_to_facet, int* x_facet,
                                   const double* edgex, const double* edgey) {

  // Check the master_key required to move the particle along a single axis
  // If the velocity is positive then the top or right boundary will be hit
  const int cellx = particle_cellx - x_off + pad;
  const int celly = particle_celly - y_off + pad;
  double u_x_inv = 1.0 / (omega_x * speed);
  double u_y_inv = 1.0 / (omega_y * speed);

  // The bound is open on the left and bottom so we have to correct for this
  // and required the movement to the facet to go slightly further than the edge
  // in the calculated values, using OPEN_BOUND_CORRECTION, which is the
  // smallest possible distance from the closed bound e.g. 1.0e-14.
  double dt_x = (omega_x >= 0.0)
                    ? ((edgex[cellx + 1]) - x) * u_x_inv
                    : ((edgex[cellx] - OPEN_BOUND_CORRECTION) - x) * u_x_inv;
  double dt_y = (omega_y >= 0.0)
                    ? ((edgey[celly + 1]) - y) * u_y_inv
                    : ((edgey[celly] - OPEN_BOUND_CORRECTION) - y) * u_y_inv;
  *x_facet = (dt_x < dt_y) ? 1 : 0;

  // Calculated the projection to be
  // a = vector on first edge to be hit
  // u = velocity vector

  double mag_u0 = speed;

  if (*x_facet) {
    // We are centered on the origin, so the y component is 0 after travelling
    // along the x axis to the edge (ax, 0).(x, y)
    *distance_to_facet =
        (omega_x >= 0.0)
            ? ((edgex[cellx + 1]) - x) * mag_u0 * u_x_inv
            : ((edgex[cellx] - OPEN_BOUND_CORRECTION) - x) * mag_u0 * u_x_inv;
  } else {
    // We are centered on the origin, so the x component is 0 after travelling
    // along the y axis to the edge (0, ay).(x, y)
    *distance_to_facet =
        (omega_y >= 0.0)
            ? ((edgey[celly + 1]) - y) * mag_u0 * u_y_inv
            : ((edgey[celly] - OPEN_BOUND_CORRECTION) - y) * mag_u0 * u_y_inv;
  }
}

// Calculate the energy deposition in the cell for the given path length,
// combining the heating responses of absorption and elastic scattering.
inline double calculate_energy_deposition(
    const int global_nx, const int nx, const int x_off, const int y_off,
    Particle* particle, const double inv_ntotal_particles,
    const double path_length, const double number_density,
    const double microscopic_cs_absorb, const double microscopic_cs_total) {

  // Calculate the energy deposition based on the path length
  const double average_exit_energy_absorb = 0.0;
  const double absorption_heating =
      (microscopic_cs_absorb / microscopic_cs_total) *
      average_exit_energy_absorb;
  const double average_exit_energy_scatter =
      particle->energy *
      ((MASS_NO * MASS_NO + MASS_NO + 1) / ((MASS_NO + 1) * (MASS_NO + 1)));
  const double scattering_heating =
      (1.0 - (microscopic_cs_absorb / microscopic_cs_total)) *
      average_exit_energy_scatter;
  const double heating_response =
      (particle->energy - scattering_heating - absorption_heating);

  return particle->weight * path_length * (microscopic_cs_total * BARNS) *
         heating_response * number_density;
}

// Fetch the cross section for a particular energy value, interpolating
// linearly between the bracketing table entries.  cs_index is unused here but
// kept for interface compatibility with callers that cache the last index.
inline double microscopic_cs_for_energy(const CrossSection* cs,
                                        const double energy, int* cs_index) {

  double* keys = cs->keys;
  double* values = cs->values;

  // Use a simple binary search to find the energy group
  int ind = cs->nentries / 2;
  int width = ind / 2;
  while (energy < keys[ind] || energy >= keys[ind + 1]) {
    ind += (energy < keys[ind]) ? -width : width;
    width = max(1, width / 2); // To handle odd cases, allows one extra walk
  }

  // Return the value linearly interpolated
  return values[ind] +
         ((energy - keys[ind]) / (keys[ind + 1] - keys[ind])) *
             (values[ind + 1] - values[ind]);
}

// Validates the results of the simulation against the expected value stored
// in the parameters file (only the MASTER rank performs the comparison).
void validate(const int nx, const int ny, const char* params_filename,
              const int rank, double* energy_deposition_tally) {

  // Reduce the entire energy deposition tally locally
  double local_energy_tally = 0.0;
  for (int ii = 0; ii < nx * ny; ++ii) {
    local_energy_tally += energy_deposition_tally[ii];
  }

  // Finalise the reduction globally
  double global_energy_tally = reduce_all_sum(local_energy_tally);

  if (rank != MASTER) {
    return;
  }

  printf("\nFinal global_energy_tally %.15e\n", global_energy_tally);

  int nresults = 0;
  char* keys = (char*)malloc(sizeof(char) * MAX_KEYS * (MAX_STR_LEN + 1));
  double* values = (double*)malloc(sizeof(double) * MAX_KEYS);
  // Fix: the original never checked these allocations
  if (!keys || !values) {
    TERMINATE("Could not allocate validation buffers.\n");
  }
  if (!get_key_value_parameter(params_filename, NEUTRAL_TESTS, keys, values,
                               &nresults)) {
    printf("Warning. Test entry was not found, could NOT validate.\n");
    // Fix: the original leaked keys and values on this early-return path
    free(keys);
    free(values);
    return;
  }

  // Check the result is within tolerance
  printf("Expected %.12e, result was %.12e.\n", values[0], global_energy_tally);
  if (within_tolerance(values[0], global_energy_tally, VALIDATE_TOLERANCE)) {
    printf("PASSED validation.\n");
  } else {
    printf("FAILED validation.\n");
  }

  free(keys);
  free(values);
}

// Initialises a new particle population ready for tracking; returns the number
// of bytes allocated for the (double-capacity) particle array.
size_t inject_particles(const int nparticles, const int global_nx,
                        const int local_nx, const int local_ny, const int pad,
                        const double local_particle_left_off,
                        const double local_particle_bottom_off,
                        const double local_particle_width,
                        const double local_particle_height, const int x_off,
                        const int y_off, const double dt, const double* edgex,
                        const double* edgey, const double initial_energy,
                        Particle** particles) {

  *particles = (Particle*)malloc(sizeof(Particle) * nparticles * 2);
  if (!*particles) {
    TERMINATE("Could not allocate particle array.\n");
  }

  START_PROFILING(&compute_profile);
#pragma omp parallel for
  for (int kk = 0; kk < nparticles; ++kk) {
    Particle* particle = &(*particles)[kk];

    double rn[NRANDOM_NUMBERS];
    generate_random_numbers(kk, 0, 0, &rn[0], &rn[1]);

    // Set the initial random location of the particle inside the source
    // region
    particle->x = local_particle_left_off + rn[0] * local_particle_width;
    particle->y = local_particle_bottom_off + rn[1] * local_particle_height;

    // Check the location of the specific cell that the particle sits within.
    // We have to check this explicitly because the mesh might be non-uniform.
    int cellx = 0;
    int celly = 0;
    for (int ii = 0; ii < local_nx; ++ii) {
      if (particle->x >= edgex[ii + pad] && particle->x < edgex[ii + pad + 1]) {
        cellx = x_off + ii;
        break;
      }
    }
    for (int ii = 0; ii < local_ny; ++ii) {
      if (particle->y >= edgey[ii + pad] && particle->y < edgey[ii + pad + 1]) {
        celly = y_off + ii;
        break;
      }
    }

    particle->cellx = cellx;
    particle->celly = celly;

    // Generating theta has uniform density, however 0.0 and 1.0 produce the
    // same
    // value which introduces very very very small bias...
    generate_random_numbers(kk, 0, 1, &rn[0], &rn[1]);
    const double theta = 2.0 * M_PI * rn[0];
    particle->omega_x = cos(theta);
    particle->omega_y = sin(theta);

    // This approximation sets mono-energetic initial state for source
    // particles
    particle->energy = initial_energy;

    // Set a weight for the particle to track absorption
    particle->weight = 1.0;
    particle->dt_to_census = dt;
    particle->mfp_to_collision = 0.0;
    particle->dead = 0;
  }
  STOP_PROFILING(&compute_profile, "initialising particles");

  return (sizeof(Particle) * nparticles * 2);
}

// Generates two uniform (0,1) random numbers from the threefry2x64
// counter-based RNG: deterministic, keyed by (pkey, master_key), indexed by
// counter, and safe to call concurrently from many threads.
void generate_random_numbers(const uint64_t pkey, const uint64_t master_key,
                             const uint64_t counter, double* rn0, double* rn1) {

  threefry2x64_ctr_t ctr;
  threefry2x64_ctr_t key;
  ctr.v[0] = counter;
  ctr.v[1] = 0;
  key.v[0] = pkey;
  key.v[1] = master_key;

  // Generate the random numbers (local renamed from `rand` to avoid
  // shadowing the C standard library function of the same name)
  threefry2x64_ctr_t r123 = threefry2x64(ctr, key);

  // Turn our random numbers from integrals to double precision, with a
  // half-step offset so the result is strictly inside (0, 1) — important
  // because log(rn) is taken when sampling mean free paths.
  uint64_t max_uint64 = UINT64_C(0xFFFFFFFFFFFFFFFF);
  const double factor = 1.0 / (max_uint64 + 1.0);
  const double half_factor = 0.5 * factor;
  *rn0 = r123.v[0] * factor + half_factor;
  *rn1 = r123.v[1] * factor + half_factor;
}
ncwa.c
/* $Header$ */ /* ncwa -- netCDF weighted averager */ /* Purpose: Compute averages of specified hyperslabs of specfied variables in a single input netCDF file and output them to a single file. */ /* Copyright (C) 1995--present Charlie Zender This file is part of NCO, the netCDF Operators. NCO is free software. You may redistribute and/or modify NCO under the terms of the 3-Clause BSD License. You are permitted to link NCO with the HDF, netCDF, OPeNDAP, and UDUnits libraries and to distribute the resulting executables under the terms of the BSD, but in addition obeying the extra stipulations of the HDF, netCDF, OPeNDAP, and UDUnits licenses. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 3-Clause BSD License for more details. The original author of this software, Charlie Zender, seeks to improve it with your suggestions, contributions, bug-reports, and patches. Please contact the NCO project at http://nco.sf.net or write to Charlie Zender Department of Earth System Science University of California, Irvine Irvine, CA 92697-3100 */ /* fxm: 19981202 deactivated -n and -W switches and code left in place to rethink normalization switches */ /* Usage: ncwa -O -a lon ~/nco/data/in.nc ~/foo.nc ncwa -O -R -p /ZENDER/tmp -l ~/nco/data in.nc ~/foo.nc ncwa -O -C -a lat,lon,time -w gw -v PS -p /fs/cgd/csm/input/atm SEP1.T42.0596.nc ~/foo.nc;ncks -H foo.nc scp ~/nco/src/nco/ncwa.c esmf.ess.uci.edu:nco/src/nco */ #ifdef HAVE_CONFIG_H # include <config.h> /* Autotools tokens */ #endif /* !HAVE_CONFIG_H */ /* Standard C headers */ #include <math.h> /* sin cos cos sin 3.14159 */ #include <stdio.h> /* stderr, FILE, NULL, etc. 
*/ #include <stdlib.h> /* atof, atoi, malloc, getopt */ #include <string.h> /* strcmp() */ #include <sys/stat.h> /* stat() */ #include <time.h> /* machine time */ #ifndef _MSC_VER # if !defined(HAVE_BISON_FLEX) # define HAVE_BISON_FLEX /* 21070906 pvn add this definition to automake, currently in CMake */ # endif /* HAVE_BISON_FLEX */ # include <unistd.h> /* POSIX stuff */ #endif /* _MSC_VER */ #ifndef HAVE_GETOPT_LONG # include "nco_getopt.h" #else /* HAVE_GETOPT_LONG */ # ifdef HAVE_GETOPT_H # include <getopt.h> # endif /* !HAVE_GETOPT_H */ #endif /* HAVE_GETOPT_LONG */ #ifdef I18N # include <langinfo.h> /* nl_langinfo() */ # include <libintl.h> /* Internationalization i18n */ # include <locale.h> /* Locale setlocale() */ # define _(sng) gettext (sng) # define gettext_noop(sng) (sng) # define N_(sng) gettext_noop(sng) #endif /* I18N */ /* Supply stub gettext() function in case i18n failed */ #ifndef _LIBINTL_H # define gettext(foo) foo #endif /* _LIBINTL_H */ /* 3rd party vendors */ #include <netcdf.h> /* netCDF definitions and C library */ #ifdef ENABLE_MPI # include <mpi.h> /* MPI definitions */ # include <netcdf_par.h> /* Parallel netCDF definitions */ # include "nco_mpi.h" /* MPI utilities */ #endif /* !ENABLE_MPI */ /* #define MAIN_PROGRAM_FILE MUST precede #include libnco.h */ #define MAIN_PROGRAM_FILE #ifdef HAVE_BISON_FLEX # include "ncap_utl.h" /* netCDF arithmetic processor-specific definitions (symbol table, ...) 
*/ #endif /* !HAVE_BISON_FLEX */ #include "libnco.h" /* netCDF Operator (NCO) library */ #ifdef HAVE_BISON_FLEX /* Global variables (keep consistent with global variables declared in ncap.c) */ size_t ncap_ncl_dpt_crr=0UL; /* [nbr] Depth of current #include file (incremented in ncap.l) */ size_t *ncap_ln_nbr_crr; /* [cnt] Line number (incremented in ncap.l) */ char **ncap_fl_spt_glb; /* [fl] Script file */ #endif /* !HAVE_BISON_FLEX */ int main(int argc,char **argv) { char **dmn_avg_lst_in=NULL_CEWI; /* Option a */ char **fl_lst_abb=NULL; /* Option n */ char **fl_lst_in=NULL_CEWI; char **gaa_arg=NULL; /* [sng] Global attribute arguments */ char **var_lst_in=NULL_CEWI; char **grp_lst_in=NULL_CEWI; char *aux_arg[NC_MAX_DIMS]; char *cmd_ln; char *cnk_arg[NC_MAX_DIMS]; char *cnk_map_sng=NULL_CEWI; /* [sng] Chunking map */ char *cnk_plc_sng=NULL_CEWI; /* [sng] Chunking policy */ char *fl_in=NULL; char *fl_out=NULL; /* Option o */ char *fl_out_tmp=NULL_CEWI; char *fl_pth=NULL; /* Option p */ char *fl_pth_lcl=NULL; /* Option l */ char *lmt_arg[NC_MAX_DIMS]; char *msk_nm=NULL; char *msk_cnd_sng=NULL; /* Mask string to be "parsed" and values given to msk_nm, msk_val, op_typ_rlt */ char *nco_op_typ_sng; /* Operation type */ char *opt_crr=NULL; /* [sng] String representation of current long-option name */ char *optarg_lcl=NULL; /* [sng] Local copy of system optarg */ char *ppc_arg[NC_MAX_VARS]; /* [sng] PPC arguments */ char *sng_cnv_rcd=NULL_CEWI; /* [sng] strtol()/strtoul() return code */ char *wgt_nm=NULL; char trv_pth[]="/"; /* [sng] Root path of traversal tree */ const char * const CVS_Id="$Id$"; const char * const CVS_Revision="$Revision$"; const char * const opt_sht_lst="34567Aa:B:bCcD:d:Fg:G:hIL:l:M:m:nNOo:p:rRT:t:v:Ww:xy:-:"; cnk_sct cnk; /* [sct] Chunking structure */ #if defined(__cplusplus) || defined(PGI_CC) ddra_info_sct ddra_info; ddra_info.flg_ddra=False; #else /* !__cplusplus */ ddra_info_sct 
ddra_info={.MRV_flg=False,.flg_ddra=False,.lmn_nbr=0LL,.lmn_nbr_avg=0LL,.lmn_nbr_wgt=0LL,.nco_op_typ=nco_op_nil,.rnk_avg=0,.rnk_var=0,.rnk_wgt=0,.tmr_flg=nco_tmr_srt,.var_idx=0,.wgt_brd_flg=False,.wrd_sz=0}; #endif /* !__cplusplus */ dmn_sct **dim=NULL_CEWI; dmn_sct **dmn_out=NULL_CEWI; dmn_sct **dmn_avg=NULL_CEWI; double msk_val=1.0; /* Option M */ extern char *optarg; extern int optind; /* Using naked stdin/stdout/stderr in parallel region generates warning Copy appropriate filehandle to variable scoped shared in parallel clause */ FILE * const fp_stderr=stderr; /* [fl] stderr filehandle CEWI */ FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */ int *in_id_arr; int aux_nbr=0; /* [nbr] Number of auxiliary coordinate hyperslabs specified */ int abb_arg_nbr=0; int cnk_map=nco_cnk_map_nil; /* [enm] Chunking map */ int cnk_nbr=0; /* [nbr] Number of chunk sizes */ int cnk_plc=nco_cnk_plc_nil; /* [enm] Chunking policy */ int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int dmn_avg_nbr=0; int fl_idx=int_CEWI; int fl_nbr=0; int fl_in_fmt; /* [enm] Input file format */ int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */ int fll_md_old; /* [enm] Old fill mode */ int gaa_nbr=0; /* [nbr] Number of global attributes to add */ int grp_lst_in_nbr=0; /* [nbr] Number of groups explicitly specified by user */ int idx=int_CEWI; int in_id; int lmt_nbr=0; /* Option d. 
NB: lmt_nbr gets incremented */ int log_lvl=0; /* [enm] netCDF library debugging verbosity [0..5] */ int md_open; /* [enm] Mode flag for nc_open() call */ int nbr_dmn_fl; int nbr_dmn_out=0; int nbr_dmn_xtr; int nbr_var_fix; /* nbr_var_fix gets incremented */ int nbr_var_fl; int nbr_var_prc; /* nbr_var_prc gets incremented */ int xtr_nbr=0; /* xtr_nbr won't otherwise be set for -c with no -v */ int nco_op_typ=nco_op_avg; /* Operation type */ int op_typ_rlt=0; /* Option o */ int opt; int out_id; int ppc_nbr=0; /* [nbr] Number of PPC arguments */ int rcd=NC_NOERR; /* [rcd] Return code */ int thr_idx; /* [idx] Index of current thread */ int thr_nbr=int_CEWI; /* [nbr] Thread number Option t */ int var_lst_in_nbr=0; md5_sct *md5=NULL; /* [sct] MD5 configuration */ cnv_sct *cnv; /* [sct] Convention structure */ nco_bool DO_CONFORM_MSK=False; /* Did nco_var_cnf_dmn() find truly conforming mask? */ nco_bool DO_CONFORM_WGT=False; /* Did nco_var_cnf_dmn() find truly conforming weight? */ nco_bool EXCLUDE_INPUT_LIST=False; /* Option c */ nco_bool EXTRACT_ALL_COORDINATES=False; /* Option c */ nco_bool EXTRACT_ASSOCIATED_COORDINATES=True; /* Option C */ nco_bool EXTRACT_CLL_MSR=True; /* [flg] Extract cell_measures variables */ nco_bool EXTRACT_FRM_TRM=True; /* [flg] Extract formula_terms variables */ nco_bool FL_RTR_RMT_LCN; nco_bool FL_LST_IN_FROM_STDIN=False; /* [flg] fl_lst_in comes from stdin */ nco_bool FORCE_APPEND=False; /* Option A */ nco_bool FORCE_OVERWRITE=False; /* Option O */ nco_bool FORTRAN_IDX_CNV=False; /* Option F */ nco_bool GRP_VAR_UNN=False; /* [flg] Select union of specified groups and variables */ nco_bool HISTORY_APPEND=True; /* Option h */ nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */ nco_bool MSA_USR_RDR=False; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */ nco_bool MULTIPLY_BY_TALLY=False; /* Not currently implemented */ nco_bool MUST_CONFORM=False; /* [flg] Must nco_var_cnf_dmn() find truly conforming 
variables? */ nco_bool NORMALIZE_BY_TALLY=True; /* Not currently implemented */ nco_bool NORMALIZE_BY_WEIGHT=True; /* Not currently implemented */ nco_bool NRM_BY_DNM=True; /* Option N Normalize by denominator */ nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ nco_bool SHARE_CREATE=False; /* [flg] Create (netCDF3-only) file(s) with unbuffered I/O */ nco_bool SHARE_OPEN=False; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */ nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */ nco_bool WGT_MSK_CRD_VAR=True; /* [flg] Weight and/or mask coordinate variables */ nco_bool WRT_TMP_FL=True; /* [flg] Write output to temporary file */ nco_bool flg_cll_mth=True; /* [flg] Add/modify cell_methods attributes */ nco_bool flg_mmr_cln=True; /* [flg] Clean memory prior to exit */ nco_bool flg_dmn_prc_usr_spc=False; /* [flg] Processed dimensions specified on command line */ nco_bool flg_ddra=False; /* [flg] DDRA diagnostics */ nco_bool flg_rdd=False; /* [flg] Retain degenerate dimensions */ size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ size_t cnk_csh_byt=NCO_CNK_CSH_BYT_DFL; /* [B] Chunk cache size */ size_t cnk_min_byt=NCO_CNK_SZ_MIN_BYT_DFL; /* [B] Minimize size of variable to chunk */ size_t cnk_sz_byt=0UL; /* [B] Chunk size in bytes */ size_t cnk_sz_scl=0UL; /* [nbr] Chunk size scalar */ size_t hdr_pad=0UL; /* [B] Pad at end of header section */ var_sct **var; var_sct **var_fix; var_sct **var_fix_out; var_sct **var_out; var_sct **var_prc; var_sct **var_prc_out; var_sct *wgt_avg=NULL; trv_tbl_sct *trv_tbl=NULL; /* [lst] Traversal table */ nco_dmn_dne_t *flg_dne=NULL; /* [lst] Flag to check if input dimension -d "does not exist" */ gpe_sct *gpe=NULL; /* [sng] Group Path Editing (GPE) structure */ #ifdef HAVE_BISON_FLEX prs_sct prs_arg; /* I/O [sct] Global information required in ncwa parser */ #endif /* !HAVE_BISON_FLEX */ #ifdef ENABLE_MPI /* Declare all MPI-specific 
variables here */ MPI_Comm mpi_cmm=MPI_COMM_WORLD; /* [prc] Communicator */ int prc_rnk; /* [idx] Process rank */ int prc_nbr=0; /* [nbr] Number of MPI processes */ #endif /* !ENABLE_MPI */ static struct option opt_lng[]={ /* Structure ordered by short option key if possible */ /* Long options with no argument, no short option counterpart */ {"cll_msr",no_argument,0,0}, /* [flg] Extract cell_measures variables */ {"cell_measures",no_argument,0,0}, /* [flg] Extract cell_measures variables */ {"no_cll_msr",no_argument,0,0}, /* [flg] Do not extract cell_measures variables */ {"no_cell_measures",no_argument,0,0}, /* [flg] Do not extract cell_measures variables */ {"frm_trm",no_argument,0,0}, /* [flg] Extract formula_terms variables */ {"formula_terms",no_argument,0,0}, /* [flg] Extract formula_terms variables */ {"no_frm_trm",no_argument,0,0}, /* [flg] Do not extract formula_terms variables */ {"no_formula_terms",no_argument,0,0}, /* [flg] Do not extract formula_terms variables */ {"cll_mth",no_argument,0,0}, /* [flg] Add/modify cell_methods attributes */ {"cell_methods",no_argument,0,0}, /* [flg] Add/modify cell_methods attributes */ {"no_cll_mth",no_argument,0,0}, /* [flg] Do not add/modify cell_methods attributes */ {"no_cell_methods",no_argument,0,0}, /* [flg] Do not add/modify cell_methods attributes */ {"clean",no_argument,0,0}, /* [flg] Clean memory prior to exit */ {"mmr_cln",no_argument,0,0}, /* [flg] Clean memory prior to exit */ {"drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */ {"dirty",no_argument,0,0}, /* [flg] Allow dirty memory on exit */ {"mmr_drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */ {"ddra",no_argument,0,0}, /* [flg] DDRA diagnostics */ {"mdl_cmp",no_argument,0,0}, /* [flg] DDRA diagnostics */ {"dbl",no_argument,0,0}, /* [flg] Arithmetic convention: promote float to double */ {"flt",no_argument,0,0}, /* [flg] Arithmetic convention: keep single-precision */ {"rth_dbl",no_argument,0,0}, /* [flg] Arithmetic convention: 
promote float to double */ {"rth_flt",no_argument,0,0}, /* [flg] Arithmetic convention: keep single-precision */ {"hdf4",no_argument,0,0}, /* [flg] Treat file as HDF4 */ {"hdf_upk",no_argument,0,0}, /* [flg] HDF unpack convention: unpacked=scale_factor*(packed-add_offset) */ {"hdf_unpack",no_argument,0,0}, /* [flg] HDF unpack convention: unpacked=scale_factor*(packed-add_offset) */ {"help",no_argument,0,0}, {"hlp",no_argument,0,0}, {"hpss_try",no_argument,0,0}, /* [flg] Search HPSS for unfound files */ {"ram_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) in RAM */ {"create_ram",no_argument,0,0}, /* [flg] Create file in RAM */ {"open_ram",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) in RAM */ {"diskless_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) in RAM */ {"share_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */ {"create_share",no_argument,0,0}, /* [flg] Create (netCDF3) file(s) with unbuffered I/O */ {"open_share",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) with unbuffered I/O */ {"unbuffered_io",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */ {"uio",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */ {"wrt_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */ {"write_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */ {"no_tmp_fl",no_argument,0,0}, /* [flg] Do not write output to temporary file */ {"version",no_argument,0,0}, {"vrs",no_argument,0,0}, /* Long options with argument, no short option counterpart */ {"bfr_sz_hnt",required_argument,0,0}, /* [B] Buffer size hint */ {"buffer_size_hint",required_argument,0,0}, /* [B] Buffer size hint */ {"cnk_byt",required_argument,0,0}, /* [B] Chunk size in bytes */ {"chunk_byte",required_argument,0,0}, /* [B] Chunk size in bytes */ {"cnk_csh",required_argument,0,0}, /* [B] Chunk cache size in bytes */ 
{"chunk_cache",required_argument,0,0}, /* [B] Chunk cache size in bytes */ {"cnk_dmn",required_argument,0,0}, /* [nbr] Chunk size */ {"chunk_dimension",required_argument,0,0}, /* [nbr] Chunk size */ {"cnk_map",required_argument,0,0}, /* [nbr] Chunking map */ {"chunk_map",required_argument,0,0}, /* [nbr] Chunking map */ {"cnk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */ {"chunk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */ {"cnk_plc",required_argument,0,0}, /* [nbr] Chunking policy */ {"chunk_policy",required_argument,0,0}, /* [nbr] Chunking policy */ {"cnk_scl",required_argument,0,0}, /* [nbr] Chunk size scalar */ {"chunk_scalar",required_argument,0,0}, /* [nbr] Chunk size scalar */ {"fl_fmt",required_argument,0,0}, {"file_format",required_argument,0,0}, {"gaa",required_argument,0,0}, /* [sng] Global attribute add */ {"glb_att_add",required_argument,0,0}, /* [sng] Global attribute add */ {"hdr_pad",required_argument,0,0}, {"header_pad",required_argument,0,0}, {"log_lvl",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */ {"log_level",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */ {"ppc",required_argument,0,0}, /* [nbr] Precision-preserving compression, i.e., number of total or decimal significant digits */ {"precision_preserving_compression",required_argument,0,0}, /* [nbr] Precision-preserving compression, i.e., number of total or decimal significant digits */ {"quantize",required_argument,0,0}, /* [nbr] Precision-preserving compression, i.e., number of total or decimal significant digits */ /* Long options with short counterparts */ {"3",no_argument,0,'3'}, {"4",no_argument,0,'4'}, {"netcdf4",no_argument,0,'4'}, {"5",no_argument,0,'5'}, {"64bit_data",no_argument,0,'5'}, {"cdf5",no_argument,0,'5'}, {"pnetcdf",no_argument,0,'5'}, {"64bit_offset",no_argument,0,'6'}, {"7",no_argument,0,'7'}, {"append",no_argument,0,'A'}, 
{"average",required_argument,0,'a'}, {"avg",required_argument,0,'a'}, {"mask_condition",required_argument,0,'B'}, {"msk_cnd_sng",required_argument,0,'B'}, {"retain-degenerate-dimensions",no_argument,0,'b'}, /* [flg] Retain degenerate dimensions */ {"rdd",no_argument,0,'b'}, /* [flg] Retain degenerate dimensions */ {"xtr_ass_var",no_argument,0,'c'}, {"xcl_ass_var",no_argument,0,'C'}, {"no_coords",no_argument,0,'C'}, {"no_crd",no_argument,0,'C'}, {"coords",no_argument,0,'c'}, {"crd",no_argument,0,'c'}, {"dbg_lvl",required_argument,0,'D'}, {"debug",required_argument,0,'D'}, {"nco_dbg_lvl",required_argument,0,'D'}, {"dimension",required_argument,0,'d'}, {"dmn",required_argument,0,'d'}, {"fortran",no_argument,0,'F'}, {"ftn",no_argument,0,'F'}, {"history",no_argument,0,'h'}, {"hst",no_argument,0,'h'}, {"wgt_msk_crd_var",no_argument,0,'I'}, {"dfl_lvl",required_argument,0,'L'}, /* [enm] Deflate level */ {"deflate",required_argument,0,'L'}, /* [enm] Deflate level */ {"local",required_argument,0,'l'}, {"lcl",required_argument,0,'l'}, {"mask-variable",required_argument,0,'m'}, {"mask_variable",required_argument,0,'m'}, {"mask",required_argument,0,'m'}, {"msk_var",required_argument,0,'m'}, {"msk_nm",required_argument,0,'m'}, {"mask-value",required_argument,0,'M'}, {"mask_value",required_argument,0,'M'}, {"msk_val",required_argument,0,'M'}, {"nintap",required_argument,0,'n'}, {"nmr",no_argument,0,'N'}, {"numerator",no_argument,0,'N'}, {"overwrite",no_argument,0,'O'}, {"ovr",no_argument,0,'O'}, {"output",required_argument,0,'o'}, {"fl_out",required_argument,0,'o'}, {"path",required_argument,0,'p'}, {"retain",no_argument,0,'R'}, {"rtn",no_argument,0,'R'}, {"revision",no_argument,0,'r'}, {"mask_comparator",required_argument,0,'T'}, {"msk_cmp_typ",required_argument,0,'T'}, {"op_rlt",required_argument,0,'T'}, {"thr_nbr",required_argument,0,'t'}, {"threads",required_argument,0,'t'}, {"omp_num_threads",required_argument,0,'t'}, {"variable",required_argument,0,'v'}, 
{"normalize-by-tally",no_argument,0,'W',}, {"exclude",no_argument,0,'x'}, {"xcl",no_argument,0,'x'}, {"weight",required_argument,0,'w'}, {"wgt",required_argument,0,'w'}, {"wgt_var",required_argument,0,'w'}, {"operation",required_argument,0,'y'}, {"op_typ",required_argument,0,'y'}, {0,0,0,0} }; /* end opt_lng */ int opt_idx=0; /* Index of current long option into opt_lng array */ /* Start timer and save command line */ ddra_info.tmr_flg=nco_tmr_srt; rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info); ddra_info.tmr_flg=nco_tmr_mtd; cmd_ln=nco_cmd_ln_sng(argc,argv); NORMALIZE_BY_TALLY=NORMALIZE_BY_TALLY+0; /* CEWI: Avert compiler warning that variable is set but never used */ NORMALIZE_BY_WEIGHT=NORMALIZE_BY_WEIGHT+0; /* CEWI: Avert compiler warning that variable is set but never used */ /* Get program name and set program enum (e.g., nco_prg_id=ncra) */ nco_prg_nm=nco_prg_prs(argv[0],&nco_prg_id); #ifdef ENABLE_MPI /* MPI Initialization */ if(False) (void)fprintf(stdout,gettext("%s: WARNING Compiled with MPI\n"),nco_prg_nm); MPI_Init(&argc,&argv); MPI_Comm_size(mpi_cmm,&prc_nbr); MPI_Comm_rank(mpi_cmm,&prc_rnk); #endif /* !ENABLE_MPI */ /* Parse command line arguments */ while(1){ /* getopt_long_only() allows one dash to prefix long options */ opt=getopt_long(argc,argv,opt_sht_lst,opt_lng,&opt_idx); /* NB: access to opt_crr is only valid when long_opt is detected */ if(opt == EOF) break; /* Parse positional arguments once getopt_long() returns EOF */ opt_crr=(char *)strdup(opt_lng[opt_idx].name); /* Process long options without short option counterparts */ if(opt == 0){ if(!strcmp(opt_crr,"bfr_sz_hnt") || !strcmp(opt_crr,"buffer_size_hint")){ bfr_sz_hnt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif bfr_sz */ if(!strcmp(opt_crr,"cnk_byt") || !strcmp(opt_crr,"chunk_byte")){ cnk_sz_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) 
nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk_byt */ if(!strcmp(opt_crr,"cnk_csh") || !strcmp(opt_crr,"chunk_cache")){ cnk_csh_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk_csh_byt */ if(!strcmp(opt_crr,"cnk_min") || !strcmp(opt_crr,"chunk_min")){ cnk_min_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk_min */ if(!strcmp(opt_crr,"cnk_dmn") || !strcmp(opt_crr,"chunk_dimension")){ /* Copy limit argument for later processing */ cnk_arg[cnk_nbr]=(char *)strdup(optarg); cnk_nbr++; } /* endif cnk_dmn */ if(!strcmp(opt_crr,"cnk_scl") || !strcmp(opt_crr,"chunk_scalar")){ cnk_sz_scl=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk */ if(!strcmp(opt_crr,"cnk_map") || !strcmp(opt_crr,"chunk_map")){ /* Chunking map */ cnk_map_sng=(char *)strdup(optarg); cnk_map=nco_cnk_map_get(cnk_map_sng); } /* endif cnk */ if(!strcmp(opt_crr,"cnk_plc") || !strcmp(opt_crr,"chunk_policy")){ /* Chunking policy */ cnk_plc_sng=(char *)strdup(optarg); cnk_plc=nco_cnk_plc_get(cnk_plc_sng); } /* endif cnk */ if(!strcmp(opt_crr,"cll_msr") || !strcmp(opt_crr,"cell_measures")) EXTRACT_CLL_MSR=True; /* [flg] Extract cell_measures variables */ if(!strcmp(opt_crr,"no_cll_msr") || !strcmp(opt_crr,"no_cell_measures")) EXTRACT_CLL_MSR=False; /* [flg] Do not extract cell_measures variables */ if(!strcmp(opt_crr,"frm_trm") || !strcmp(opt_crr,"formula_terms")) EXTRACT_FRM_TRM=True; /* [flg] Extract formula_terms variables */ if(!strcmp(opt_crr,"no_frm_trm") || !strcmp(opt_crr,"no_formula_terms")) EXTRACT_FRM_TRM=False; /* [flg] Do not extract formula_terms variables */ if(!strcmp(opt_crr,"cll_mth") || !strcmp(opt_crr,"cell_methods")) flg_cll_mth=True; /* [flg] Add/modify cell_methods attributes */ if(!strcmp(opt_crr,"no_cll_mth") || 
!strcmp(opt_crr,"no_cell_methods")) flg_cll_mth=False; /* [flg] Add/modify cell_methods attributes */ if(!strcmp(opt_crr,"mmr_cln") || !strcmp(opt_crr,"clean")) flg_mmr_cln=True; /* [flg] Clean memory prior to exit */ if(!strcmp(opt_crr,"drt") || !strcmp(opt_crr,"mmr_drt") || !strcmp(opt_crr,"dirty")) flg_mmr_cln=False; /* [flg] Clean memory prior to exit */ if(!strcmp(opt_crr,"ddra") || !strcmp(opt_crr,"mdl_cmp")) ddra_info.flg_ddra=flg_ddra=True; /* [flg] DDRA diagnostics */ if(!strcmp(opt_crr,"fl_fmt") || !strcmp(opt_crr,"file_format")) rcd=nco_create_mode_prs(optarg,&fl_out_fmt); if(!strcmp(opt_crr,"dbl") || !strcmp(opt_crr,"rth_dbl")) nco_rth_cnv=nco_rth_flt_dbl; /* [flg] Arithmetic convention: promote float to double */ if(!strcmp(opt_crr,"flt") || !strcmp(opt_crr,"rth_flt")) nco_rth_cnv=nco_rth_flt_flt; /* [flg] Arithmetic convention: keep single-precision */ if(!strcmp(opt_crr,"gaa") || !strcmp(opt_crr,"glb_att_add")){ gaa_arg=(char **)nco_realloc(gaa_arg,(gaa_nbr+1)*sizeof(char *)); gaa_arg[gaa_nbr++]=(char *)strdup(optarg); } /* endif gaa */ if(!strcmp(opt_crr,"hdf4")) nco_fmt_xtn=nco_fmt_xtn_hdf4; /* [enm] Treat file as HDF4 */ if(!strcmp(opt_crr,"hdf_upk") || !strcmp(opt_crr,"hdf_unpack")) nco_upk_cnv=nco_upk_HDF_MOD10; /* [flg] HDF unpack convention: unpacked=scale_factor*(packed-add_offset) */ if(!strcmp(opt_crr,"hdr_pad") || !strcmp(opt_crr,"header_pad")){ hdr_pad=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif "hdr_pad" */ if(!strcmp(opt_crr,"help") || !strcmp(opt_crr,"hlp")){ (void)nco_usg_prn(); nco_exit(EXIT_SUCCESS); } /* endif "help" */ if(!strcmp(opt_crr,"hpss_try")) HPSS_TRY=True; /* [flg] Search HPSS for unfound files */ if(!strcmp(opt_crr,"log_lvl") || !strcmp(opt_crr,"log_level")){ log_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); nc_set_log_level(log_lvl); } /* !log_lvl */ 
if(!strcmp(opt_crr,"ppc") || !strcmp(opt_crr,"precision_preserving_compression") || !strcmp(opt_crr,"quantize")){ ppc_arg[ppc_nbr]=(char *)strdup(optarg); ppc_nbr++; } /* endif "ppc" */ if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"create_ram") || !strcmp(opt_crr,"diskless_all")) RAM_CREATE=True; /* [flg] Create (netCDF3) file(s) in RAM */ if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"open_ram") || !strcmp(opt_crr,"diskless_all")) RAM_OPEN=True; /* [flg] Open (netCDF3) file(s) in RAM */ if(!strcmp(opt_crr,"share_all") || !strcmp(opt_crr,"unbuffered_io") || !strcmp(opt_crr,"uio") || !strcmp(opt_crr,"create_share")) SHARE_CREATE=True; /* [flg] Create (netCDF3) file(s) with unbuffered I/O */ if(!strcmp(opt_crr,"share_all") || !strcmp(opt_crr,"unbuffered_io") || !strcmp(opt_crr,"uio") || !strcmp(opt_crr,"open_share")) SHARE_OPEN=True; /* [flg] Open (netCDF3) file(s) with unbuffered I/O */ if(!strcmp(opt_crr,"vrs") || !strcmp(opt_crr,"version")){ (void)nco_vrs_prn(CVS_Id,CVS_Revision); nco_exit(EXIT_SUCCESS); } /* endif "vrs" */ if(!strcmp(opt_crr,"wrt_tmp_fl") || !strcmp(opt_crr,"write_tmp_fl")) WRT_TMP_FL=True; if(!strcmp(opt_crr,"no_tmp_fl")) WRT_TMP_FL=False; } /* opt != 0 */ /* Process short options */ switch(opt){ case 0: /* Long options have already been processed, return */ break; case '3': /* Request netCDF3 output storage format */ fl_out_fmt=NC_FORMAT_CLASSIC; break; case '4': /* Request netCDF4 output storage format */ fl_out_fmt=NC_FORMAT_NETCDF4; break; case '5': /* Request netCDF3 64-bit offset+data storage (i.e., pnetCDF) format */ fl_out_fmt=NC_FORMAT_CDF5; break; case '6': /* Request netCDF3 64-bit offset output storage format */ fl_out_fmt=NC_FORMAT_64BIT_OFFSET; break; case '7': /* Request netCDF4-classic output storage format */ fl_out_fmt=NC_FORMAT_NETCDF4_CLASSIC; break; case 'A': /* Toggle FORCE_APPEND */ FORCE_APPEND=!FORCE_APPEND; break; case 'a': /* Dimensions over which to average hyperslab */ flg_dmn_prc_usr_spc=True; 
if(dmn_avg_lst_in){ (void)fprintf(fp_stdout,"%s: ERROR Option -a appears more than once\n",nco_prg_nm); (void)fprintf(fp_stdout,"%s: HINT Use -a dim1,dim2,... not -a dim1 -a dim2 ...\n",nco_prg_nm); (void)nco_usg_prn(); nco_exit(EXIT_FAILURE); } /* endif */ dmn_avg_lst_in=nco_lst_prs_2D(optarg,",",&dmn_avg_nbr); break; case 'B': /* Mask string to be parsed */ msk_cnd_sng=(char *)strdup(optarg); #ifndef HAVE_BISON_FLEX (void)fprintf(fp_stdout,"%s: ERROR -B and --mask_condition options unsupported because configuration could not find a parser (e.g., Bison) and lexer (e.g., Flex). HINT: Break condition into component -m -T -M switches, e.g., use -m ORO -T lt -M 1.0 instead of -B \"ORO < 1\"\n",nco_prg_nm); nco_exit(EXIT_FAILURE); #endif /* HAVE_BISON_FLEX */ break; case 'b': /* [flg] Retain degenerate dimensions */ flg_rdd=True; break; case 'C': /* Extract all coordinates associated with extracted variables? */ EXTRACT_ASSOCIATED_COORDINATES=False; break; case 'c': EXTRACT_ALL_COORDINATES=True; break; case 'D': /* Debugging level. Default is 0. */ nco_dbg_lvl=(unsigned short int)strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); break; case 'd': /* Copy limit argument for later processing */ lmt_arg[lmt_nbr]=(char *)strdup(optarg); lmt_nbr++; break; case 'F': /* Toggle index convention. Default is 0-based arrays (C-style). 
*/ FORTRAN_IDX_CNV=!FORTRAN_IDX_CNV; break; case 'G': /* Apply Group Path Editing (GPE) to output group */ /* NB: GNU getopt() optional argument syntax is ugly (requires "=" sign) so avoid it http://stackoverflow.com/questions/1052746/getopt-does-not-parse-optional-arguments-to-parameters */ gpe=nco_gpe_prs_arg(optarg); fl_out_fmt=NC_FORMAT_NETCDF4; break; case 'g': /* Copy group argument for later processing */ /* Replace commas with hashes when within braces (convert back later) */ optarg_lcl=(char *)strdup(optarg); (void)nco_rx_comma2hash(optarg_lcl); grp_lst_in=nco_lst_prs_2D(optarg_lcl,",",&grp_lst_in_nbr); optarg_lcl=(char *)nco_free(optarg_lcl); break; case 'h': /* Toggle appending to history global attribute */ HISTORY_APPEND=!HISTORY_APPEND; break; case 'I': /* [flg] Weight and/or mask coordinate variables */ WGT_MSK_CRD_VAR=!WGT_MSK_CRD_VAR; break; case 'L': /* [enm] Deflate level. Default is 0. */ dfl_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); break; case 'l': /* Local path prefix for files retrieved from remote file system */ fl_pth_lcl=(char *)strdup(optarg); break; case 'm': /* Name of variable to use as mask in reducing. Default is none */ msk_nm=(char *)strdup(optarg); break; case 'M': /* Good data defined by relation to mask value. Default is 1.0 */ msk_val=strtod(optarg,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtod",sng_cnv_rcd); break; case 'N': NRM_BY_DNM=False; NORMALIZE_BY_TALLY=False; NORMALIZE_BY_WEIGHT=False; break; case 'n': NORMALIZE_BY_WEIGHT=False; (void)fprintf(fp_stdout,"%s: ERROR This option has been disabled while I re-think its implementation. 
Sincerely, Charlie\n",nco_prg_nm); nco_exit(EXIT_FAILURE); break; case 'O': /* Toggle FORCE_OVERWRITE */ FORCE_OVERWRITE=!FORCE_OVERWRITE; break; case 'o': /* Name of output file */ fl_out=(char *)strdup(optarg); break; case 'p': /* Common file path */ fl_pth=(char *)strdup(optarg); break; case 'R': /* Toggle removal of remotely-retrieved-files. Default is True. */ RM_RMT_FL_PST_PRC=!RM_RMT_FL_PST_PRC; break; case 'r': /* Print CVS program information and copyright notice */ (void)nco_vrs_prn(CVS_Id,CVS_Revision); (void)nco_lbr_vrs_prn(); (void)nco_cpy_prn(); (void)nco_cnf_prn(); nco_exit(EXIT_SUCCESS); break; case 'T': /* Relational operator type. Default is 0, eq, equality */ op_typ_rlt=nco_op_prs_rlt(optarg); break; case 't': /* Thread number */ thr_nbr=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); break; case 'v': /* Variables to extract/exclude */ /* Replace commas with hashes when within braces (convert back later) */ optarg_lcl=(char *)strdup(optarg); (void)nco_rx_comma2hash(optarg_lcl); var_lst_in=nco_lst_prs_2D(optarg_lcl,",",&var_lst_in_nbr); optarg_lcl=(char *)nco_free(optarg_lcl); xtr_nbr=var_lst_in_nbr; break; case 'W': NORMALIZE_BY_TALLY=False; (void)fprintf(fp_stdout,"%s: ERROR This option has been disabled while I rethink its implementation\n",nco_prg_nm); nco_exit(EXIT_FAILURE); break; case 'w': /* Variable to use as weight in reducing. Default is none */ wgt_nm=(char *)strdup(optarg); break; case 'x': /* Exclude rather than extract variables specified with -v */ EXCLUDE_INPUT_LIST=True; break; case 'y': /* Operation type */ nco_op_typ_sng=(char *)strdup(optarg); nco_op_typ=nco_op_typ_get(nco_op_typ_sng); break; case '?': /* Question mark means unrecognized option, print proper usage then EXIT_FAILURE */ (void)fprintf(stdout,"%s: ERROR in command-line syntax/options. Missing or unrecognized option. 
Please reformulate command accordingly.\n",nco_prg_nm_get()); (void)nco_usg_prn(); nco_exit(EXIT_FAILURE); break; case '-': /* Long options are not allowed */ (void)fprintf(stderr,"%s: ERROR Long options are not available in this build. Use single letter options instead.\n",nco_prg_nm_get()); nco_exit(EXIT_FAILURE); break; default: /* Print proper usage */ (void)fprintf(stdout,"%s ERROR in command-line syntax/options. Please reformulate command accordingly.\n",nco_prg_nm_get()); (void)nco_usg_prn(); nco_exit(EXIT_FAILURE); break; } /* end switch */ if(opt_crr) opt_crr=(char *)nco_free(opt_crr); } /* end while loop */ /* Set/report global chunk cache */ rcd+=nco_cnk_csh_ini(cnk_csh_byt); /* Initialize traversal table */ trv_tbl_init(&trv_tbl); /* Parse mask string */ #ifdef HAVE_BISON_FLEX if(msk_cnd_sng){ int cst_zero=0; /* Set arguments for scan */ prs_arg.fl_in=NULL; /* [sng] Input data file */ prs_arg.in_id=0; /* [id] Input data file ID */ prs_arg.fl_out=NULL; /* [sng] Output data file */ prs_arg.out_id=0; /* [id] Output data file ID */ prs_arg.att_lst=NULL; /* [sct] Attributes in script */ prs_arg.nbr_att=&cst_zero; /* [nbr] Number of attributes in script */ prs_arg.dmn_in=NULL; /* [dmn_in] List of all dimensions in input */ prs_arg.nbr_dmn_in=0; /* [nbr] Number of dimensions in input */ prs_arg.dmn_out=NULL; /* [sct] Pointer to output dimension list */ prs_arg.nbr_dmn_out=&cst_zero; /* [nbr] Number of dimensions in output list */ prs_arg.sym_tbl=NULL; /* [fnc] Symbol table for functions */ prs_arg.sym_tbl_nbr=0; /* [nbr] Number of functions in table */ prs_arg.ntl_scn=False; /* [flg] Initial scan of script */ prs_arg.var_LHS=NULL; /* [var] LHS cast variable */ prs_arg.nco_op_typ=nco_op_nil; /* [enm] Operation type */ /* Initialize line counter */ ncap_ln_nbr_crr=(size_t *)nco_realloc(ncap_ln_nbr_crr,ncap_ncl_dpt_crr+1UL); ncap_ln_nbr_crr[ncap_ncl_dpt_crr]=1UL; /* [cnt] Line number incremented in ncap.l */ 
if(ncap_ncwa_scn(&prs_arg,msk_cnd_sng,&msk_nm,&msk_val,&op_typ_rlt) != NCO_NOERR) nco_exit(EXIT_FAILURE); } /* endif msk_cnd_sng */ #endif /* !HAVE_BISON_FLEX */ /* Ensure we do not attempt to normalize by non-existent weight */ if(!wgt_nm) NORMALIZE_BY_WEIGHT=False; /* Process positional arguments and fill-in filenames */ fl_lst_in=nco_fl_lst_mk(argv,argc,optind,&fl_nbr,&fl_out,&FL_LST_IN_FROM_STDIN,FORCE_OVERWRITE); /* Initialize thread information */ thr_nbr=nco_openmp_ini(thr_nbr); in_id_arr=(int *)nco_malloc(thr_nbr*sizeof(int)); /* Parse filename */ fl_in=nco_fl_nm_prs(fl_in,0,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth); /* Make sure file is on local system and is readable or die trying */ fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); /* Open file using appropriate buffer size hints and verbosity */ if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE; if(SHARE_OPEN) md_open=md_open|NC_SHARE; rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,&in_id); (void)nco_inq_format(in_id,&fl_in_fmt); /* Construct GTT, Group Traversal Table (groups,variables,dimensions, limits) */ (void)nco_bld_trv_tbl(in_id,trv_pth,lmt_nbr,lmt_arg,aux_nbr,aux_arg,MSA_USR_RDR,FORTRAN_IDX_CNV,grp_lst_in,grp_lst_in_nbr,var_lst_in,xtr_nbr,EXTRACT_ALL_COORDINATES,GRP_VAR_UNN,False,EXCLUDE_INPUT_LIST,EXTRACT_ASSOCIATED_COORDINATES,EXTRACT_CLL_MSR,EXTRACT_FRM_TRM,nco_pck_plc_nil,&flg_dne,trv_tbl); /* Get number of variables, dimensions, and global attributes in file, file format */ (void)trv_tbl_inq((int *)NULL,(int *)NULL,(int *)NULL,&nbr_dmn_fl,(int *)NULL,(int *)NULL,(int *)NULL,(int *)NULL,&nbr_var_fl,trv_tbl); /* Allocate array of dimensions associated with variables to be extracted with maximum possible size */ dim=(dmn_sct **)nco_malloc(nbr_dmn_fl*sizeof(dmn_sct *)); /* Find dimensions associated with variables to be extracted */ (void)nco_dmn_lst_ass_var_trv(in_id,trv_tbl,&nbr_dmn_xtr,&dim); /* Not specifying any dimensions is interpreted as specifying 
all dimensions */ if(dmn_avg_nbr == 0){ dmn_avg_nbr=nbr_dmn_xtr; dmn_avg_lst_in=(char **)nco_malloc(dmn_avg_nbr*sizeof(char *)); for(idx=0;idx<dmn_avg_nbr;idx++) dmn_avg_lst_in[idx]=(char *)strdup(dim[idx]->nm); if(nco_dbg_lvl >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO No dimensions specified with -a, therefore reducing (averaging, taking minimum, etc.) over all dimensions\n",nco_prg_nm); } /* end if dmn_avg_nbr == 0 */ /* Allocate array of dimensions to average with maximum possible size */ dmn_avg=(dmn_sct **)nco_malloc(nbr_dmn_fl*sizeof(dmn_sct *)); /* Allocate array of dimensions to keep on output with maximum possible size */ dmn_out=(dmn_sct **)nco_malloc(nbr_dmn_fl*sizeof(dmn_sct *)); /* Create list of dimensions to average */ (void)nco_dmn_avg_mk(in_id,dmn_avg_lst_in,dmn_avg_nbr,flg_dmn_prc_usr_spc,flg_rdd,trv_tbl,&dmn_avg,&dmn_avg_nbr); /* Create list of dimensions to keep on output */ (void)nco_dmn_out_mk(dim,nbr_dmn_xtr,trv_tbl,&dmn_out,&nbr_dmn_out); dmn_avg=(dmn_sct **)nco_realloc(dmn_avg,dmn_avg_nbr*sizeof(dmn_sct *)); dmn_out=(dmn_sct **)nco_realloc(dmn_out,nbr_dmn_out*sizeof(dmn_sct *)); /* Transfer degenerated dimensions information into GTT */ (void)nco_dmn_dgn_tbl(dmn_out,nbr_dmn_out,trv_tbl); /* Fill-in variable structure list for all extracted variables */ var=nco_fll_var_trv(in_id,&xtr_nbr,trv_tbl); /* Duplicate to output array */ var_out=(var_sct **)nco_malloc(xtr_nbr*sizeof(var_sct *)); for(idx=0;idx<xtr_nbr;idx++){ var_out[idx]=nco_var_dpl(var[idx]); (void)nco_xrf_var(var[idx],var_out[idx]); (void)nco_xrf_dmn(var_out[idx]); } /* end loop over var */ /* Determine conventions (ARM/CCM/CCSM/CF/MPAS) for treating file */ cnv=nco_cnv_ini(in_id); /* Divide variable lists into lists of fixed variables and variables to be processed */ (void)nco_var_lst_dvd(var,var_out,xtr_nbr,cnv,True,nco_pck_plc_nil,nco_pck_map_nil,dmn_avg,dmn_avg_nbr,&var_fix,&var_fix_out,&nbr_var_fix,&var_prc,&var_prc_out,&nbr_var_prc,trv_tbl); /* Store processed and fixed 
variables info into GTT */ (void)nco_var_prc_fix_trv(nbr_var_prc,var_prc,nbr_var_fix,var_fix,trv_tbl); /* We now have final list of variables to extract. Phew. */ if(nco_dbg_lvl >= nco_dbg_var && nco_dbg_lvl != nco_dbg_dev){ for(idx=0;idx<xtr_nbr;idx++) (void)fprintf(stderr,"var[%d]->nm = %s, ->id=[%d]\n",idx,var[idx]->nm,var[idx]->id); for(idx=0;idx<nbr_var_fix;idx++) (void)fprintf(stderr,"var_fix[%d]->nm = %s, ->id=[%d]\n",idx,var_fix[idx]->nm,var_fix[idx]->id); for(idx=0;idx<nbr_var_prc;idx++) (void)fprintf(stderr,"var_prc[%d]->nm = %s, ->id=[%d]\n",idx,var_prc[idx]->nm,var_prc[idx]->id); } /* end if */ /* Make output and input files consanguinous */ if(fl_out_fmt == NCO_FORMAT_UNDEFINED) fl_out_fmt=fl_in_fmt; /* Initialize, decode, and set PPC information */ if(ppc_nbr > 0) nco_ppc_ini(in_id,&dfl_lvl,fl_out_fmt,ppc_arg,ppc_nbr,trv_tbl); /* Verify output file format supports requested actions */ (void)nco_fl_fmt_vet(fl_out_fmt,cnk_nbr,dfl_lvl); /* Open output file */ fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id); /* Initialize chunking from user-specified inputs */ if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) rcd+=nco_cnk_ini(in_id,fl_out,cnk_arg,cnk_nbr,cnk_map,cnk_plc,cnk_csh_byt,cnk_min_byt,cnk_sz_byt,cnk_sz_scl,&cnk); /* Define dimensions, extracted groups, variables, and attributes in output file. 
*/ (void)nco_xtr_dfn(in_id,out_id,&cnk,dfl_lvl,gpe,md5,!FORCE_APPEND,True,False,nco_pck_plc_nil,(char *)NULL,trv_tbl); /* Catenate time-stamped command line to "history" global attribute */ if(HISTORY_APPEND) (void)nco_hst_att_cat(out_id,cmd_ln); if(HISTORY_APPEND && FORCE_APPEND) (void)nco_prv_att_cat(fl_in,in_id,out_id); if(gaa_nbr > 0) (void)nco_glb_att_add(out_id,gaa_arg,gaa_nbr); if(HISTORY_APPEND) (void)nco_vrs_att_cat(out_id); if(thr_nbr > 1 && HISTORY_APPEND) (void)nco_thr_att_cat(out_id,thr_nbr); /* Add new missing values to output file while in define mode */ if(msk_nm){ for(idx=0;idx<nbr_var_prc;idx++){ char *grp_out_fll=NULL; /* [sng] Group name */ int grp_out_id; /* [ID] Group ID (output) */ int var_out_id; /* [ID] Variable ID (output) */ trv_sct *var_trv; /* [sct] Variable GTT object */ /* Obtain variable GTT object using full variable name */ var_trv=trv_tbl_var_nm_fll(var_prc[idx]->nm_fll,trv_tbl); /* Edit group name for output */ if(gpe) grp_out_fll=nco_gpe_evl(gpe,var_trv->grp_nm_fll); else grp_out_fll=(char *)strdup(var_trv->grp_nm_fll); /* Obtain output group ID */ (void)nco_inq_grp_full_ncid(out_id,grp_out_fll,&grp_out_id); /* Memory management after current extracted group */ if(grp_out_fll) grp_out_fll=(char *)nco_free(grp_out_fll); /* Get variable ID */ (void)nco_inq_varid(grp_out_id,var_trv->nm,&var_out_id); /* Store the output variable ID */ var_prc_out[idx]->id=var_out_id; /* Define for var_prc_out because mss_val for var_prc will be overwritten in nco_var_mtd_refresh() */ if(!var_prc_out[idx]->has_mss_val){ var_prc_out[idx]->has_mss_val=True; var_prc_out[idx]->mss_val=nco_mss_val_mk(var_prc[idx]->type); (void)nco_put_att(grp_out_id,var_prc_out[idx]->id,nco_mss_val_sng_get(),var_prc_out[idx]->type,(long)1,var_prc_out[idx]->mss_val.vp); } /* end if */ } /* end for */ } /* end if */ /* Turn-off default filling behavior to enhance efficiency */ nco_set_fill(out_id,NC_NOFILL,&fll_md_old); /* Add cell_methods attributes (before exiting define 
mode) */ if(flg_cll_mth) rcd+=nco_cnv_cf_cll_mth_add(out_id,var_prc_out,nbr_var_prc,dmn_avg,dmn_avg_nbr,nco_op_typ,gpe,(clm_bnd_sct *)NULL,trv_tbl); /* Take output file out of define mode */ if(hdr_pad == 0UL){ (void)nco_enddef(out_id); }else{ (void)nco__enddef(out_id,hdr_pad); if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO Padding header with %lu extra bytes\n",nco_prg_nm_get(),(unsigned long)hdr_pad); } /* hdr_pad */ /* Assign zero to start and unity to stride vectors in output variables */ (void)nco_var_srd_srt_set(var_out,xtr_nbr); /* Copy variable data for non-processed variables */ (void)nco_cpy_fix_var_trv(in_id,out_id,gpe,trv_tbl); /* Close first input netCDF file */ nco_close(in_id); /* Loop over input files (not currently used, fl_nbr == 1) */ for(fl_idx=0;fl_idx<fl_nbr;fl_idx++){ /* Parse filename */ if(fl_idx != 0) fl_in=nco_fl_nm_prs(fl_in,fl_idx,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth); if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"%s: INFO Input file %d is %s",nco_prg_nm_get(),fl_idx,fl_in); /* Make sure file is on local system and is readable or die trying */ if(fl_idx != 0) fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); if(nco_dbg_lvl >= nco_dbg_fl && FL_RTR_RMT_LCN) (void)fprintf(stderr,", local file is %s",fl_in); if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"\n"); /* Open file once per thread to improve caching */ for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) rcd=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,in_id_arr+thr_idx); in_id=in_id_arr[0]; /* Perform various error-checks on input file */ if(False) (void)nco_fl_cmp_err_chk(); /* Timestamp end of metadata setup and disk layout */ rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info); ddra_info.tmr_flg=nco_tmr_rgl; #ifdef _OPENMP /* OpenMP notes: firstprivate(): rcd gets incremented, so keep initial value lastprivate(): retain rcd value from last thread private(): wgt_avg does not need initialization shared(): msk, wgt and lmt_nbr are not altered within 
loop */ #pragma omp parallel for firstprivate(DO_CONFORM_MSK,DO_CONFORM_WGT,ddra_info,rcd) lastprivate(rcd) private(idx,in_id,wgt_avg) shared(MULTIPLY_BY_TALLY,MUST_CONFORM,NRM_BY_DNM,WGT_MSK_CRD_VAR,dmn_avg,dmn_avg_nbr,flg_ddra,flg_rdd,gpe,in_id_arr,msk_nm,msk_val,nbr_var_prc,nco_dbg_lvl,nco_op_typ,nco_prg_nm,op_typ_rlt,out_id,trv_tbl,var_prc,var_prc_out,wgt_nm,lmt_nbr,lmt_arg,FORTRAN_IDX_CNV,MSA_USR_RDR) #endif /* !_OPENMP */ for(idx=0;idx<nbr_var_prc;idx++){ /* Process all variables in current file */ char *grp_out_fll=NULL; /* [sng] Group name */ int grp_id; /* [ID] Group ID */ int grp_out_id; /* [ID] Group ID (output) */ int var_out_id; /* [ID] Variable ID (output) */ trv_sct *var_trv; /* [sct] Variable GTT object */ var_sct *wgt=NULL; var_sct *wgt_out=NULL; var_sct *msk=NULL; var_sct *msk_out=NULL; in_id=in_id_arr[omp_get_thread_num()]; /* Obtain variable GTT object using full variable name */ var_trv=trv_tbl_var_nm_fll(var_prc[idx]->nm_fll,trv_tbl); /* Obtain group ID */ (void)nco_inq_grp_full_ncid(in_id,var_trv->grp_nm_fll,&grp_id); if(nco_dbg_lvl >= nco_dbg_var && nco_dbg_lvl < nco_dbg_nbr) rcd+=nco_var_prc_crr_prn(idx,var_prc[idx]->nm); if(nco_dbg_lvl >= nco_dbg_var && nco_dbg_lvl < nco_dbg_nbr) (void)fflush(fp_stderr); /* Allocate and, if necessary, initialize accumulation space for all processed variables */ var_prc_out[idx]->sz=var_prc[idx]->sz; /* fxm: verify that var_prc->tally is not needed */ if(!(var_prc_out[idx]->tally=(long *)nco_malloc_flg(var_prc_out[idx]->sz*sizeof(long)))){ (void)fprintf(fp_stdout,"%s: ERROR Unable to malloc() %ld*%ld bytes for tally buffer for variable %s in main()\n",nco_prg_nm_get(),var_prc_out[idx]->sz,(long)sizeof(long),var_prc_out[idx]->nm); nco_exit(EXIT_FAILURE); } /* end if err */ (void)nco_zero_long(var_prc_out[idx]->sz,var_prc_out[idx]->tally); if(!(var_prc_out[idx]->val.vp=(void *)nco_malloc_flg(var_prc_out[idx]->sz*nco_typ_lng(var_prc_out[idx]->type)))){ (void)fprintf(fp_stdout,"%s: ERROR Unable to malloc() 
%ld*%lu bytes for value buffer for variable %s in main()\n",nco_prg_nm_get(),var_prc_out[idx]->sz,(unsigned long)nco_typ_lng(var_prc_out[idx]->type),var_prc_out[idx]->nm); nco_exit(EXIT_FAILURE); } /* end if err */ (void)nco_var_zero(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->val); (void)nco_var_mtd_refresh(grp_id,var_prc[idx]); /* Find weighting variable that matches current variable */ if(wgt_nm) wgt=nco_var_get_wgt_trv(in_id,lmt_nbr,lmt_arg,MSA_USR_RDR,FORTRAN_IDX_CNV,wgt_nm,var_prc[idx],trv_tbl); /* Find mask variable that matches current variable */ if(msk_nm) msk=nco_var_get_wgt_trv(in_id,lmt_nbr,lmt_arg,MSA_USR_RDR,FORTRAN_IDX_CNV,msk_nm,var_prc[idx],trv_tbl); /* Retrieve variable from disk into memory */ (void)nco_msa_var_get_trv(in_id,var_prc[idx],trv_tbl); /* var_prc_out still has type = packed type for packed variables nco_typ_cnv_rth() fixes that for most operations, though not for minimization or maximization Following line is necessary only for packed variables subject to minimization or maximization */ if(var_prc[idx]->typ_dsk != var_prc[idx]->type && var_prc[idx]->typ_upk == var_prc[idx]->type) var_prc_out[idx]=nco_var_cnf_typ(var_prc[idx]->type,var_prc_out[idx]); /* Convert char, short, long, int, and float types to doubles before arithmetic */ var_prc[idx]=nco_typ_cnv_rth(var_prc[idx],nco_op_typ); var_prc_out[idx]=nco_typ_cnv_rth(var_prc_out[idx],nco_op_typ); /* Check mask found for this variable, using msk */ if(msk && (!var_prc[idx]->is_crd_var || WGT_MSK_CRD_VAR)){ msk_out=nco_var_cnf_dmn(var_prc[idx],msk,msk_out,MUST_CONFORM,&DO_CONFORM_MSK); /* Mask variable iff msk and var conform */ if(DO_CONFORM_MSK){ msk_out=nco_var_cnf_typ(var_prc[idx]->type,msk_out); /* mss_val for var_prc has been overwritten in nco_var_mtd_refresh() */ if(!var_prc[idx]->has_mss_val){ var_prc[idx]->has_mss_val=True; var_prc[idx]->mss_val=nco_mss_val_mk(var_prc[idx]->type); } /* end if */ /* Mask by changing variable to missing value where condition 
is false */ (void)nco_var_msk(var_prc[idx]->type,var_prc[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,msk_val,op_typ_rlt,msk_out->val,var_prc[idx]->val); } /* end if */ } /* end if */ /* Perform non-linear transformations before weighting */ if(!var_prc[idx]->is_crd_var){ switch(nco_op_typ){ case nco_op_mabs: /* Absolute value variable before weighting */ case nco_op_mebs: /* Absolute value variable before weighting */ case nco_op_mibs: /* Absolute value variable before weighting */ (void)nco_var_abs(var_prc[idx]->type,var_prc[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,var_prc[idx]->val); break; case nco_op_avgsqr: /* Square variable before weighting */ case nco_op_rms: /* Square variable before weighting */ case nco_op_rmssdn: /* Square variable before weighting */ (void)nco_var_mlt(var_prc[idx]->type,var_prc[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,var_prc[idx]->val,var_prc[idx]->val); break; default: /* All other operations are linear, do nothing to them yet */ break; } /* end case */ } /* var_prc[idx]->is_crd_var */ /* Check weight found for this variable, using wgt */ if(wgt && (!var_prc[idx]->is_crd_var || WGT_MSK_CRD_VAR)){ /* fxm: nco_var_cnf_dmn() has bug where it does not allocate tally array for weights that do already conform to var_prc. TODO #114. 
*/ wgt_out=nco_var_cnf_dmn(var_prc[idx],wgt,wgt_out,MUST_CONFORM,&DO_CONFORM_WGT); if(DO_CONFORM_WGT){ wgt_out=nco_var_cnf_typ(var_prc[idx]->type,wgt_out); /* Weight variable after any initial non-linear operation so, e.g., variable (not weights) is squared */ (void)nco_var_mlt(var_prc[idx]->type,var_prc[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,wgt_out->val,var_prc[idx]->val); } /* end if weights conformed */ } /* end if weight was specified and then tested for conformance */ /* Copy (masked) (weighted) values from var_prc to var_prc_out */ (void)memcpy((void *)(var_prc_out[idx]->val.vp),(void *)(var_prc[idx]->val.vp),var_prc_out[idx]->sz*nco_typ_lng(var_prc_out[idx]->type)); /* 20050516: fxm: destruction of var_prc_out in nco_var_avg() leaves dangling pointers in var_out? */ /* Reduce variable over specified dimensions (tally array is set here) NB: var_prc_out[idx] is new, so corresponding var_out[idx] is dangling nco_var_avg() will perform nco_op_typ on all variables except coordinate variables nco_var_avg() always averages coordinate variables */ var_prc_out[idx]=nco_var_avg(var_prc_out[idx],dmn_avg,dmn_avg_nbr,nco_op_typ,flg_rdd,&ddra_info); /* var_prc_out[idx]->val now holds numerator of averaging expression documented in NCO Users Guide Denominator is also tricky due to sundry normalization options These logical switches are tricky---modify them with care */ if(NRM_BY_DNM && DO_CONFORM_WGT && (!var_prc[idx]->is_crd_var || WGT_MSK_CRD_VAR)){ /* Duplicate wgt_out as wgt_avg so that wgt_out is not contaminated by any averaging operation and may be re-used on next variable. 
Free wgt_avg after each use but continue to re-use wgt_out */ wgt_avg=nco_var_dpl(wgt_out); if(var_prc[idx]->has_mss_val){ double mss_val_dbl=double_CEWI; /* Set denominator to missing value at all locations where variable is missing value If this is accomplished by setting weight to missing value wherever variable is missing value then weight must not be re-used by next variable (which may conform but have missing values in different locations) This is one good reason to copy wgt_out into disposable wgt_avg for each new variable */ /* First, make sure wgt_avg has same missing value as variable */ (void)nco_mss_val_cp(var_prc[idx],wgt_avg); /* Copy missing value into double precision variable */ switch(wgt_avg->type){ case NC_FLOAT: mss_val_dbl=wgt_avg->mss_val.fp[0]; break; case NC_DOUBLE: mss_val_dbl=wgt_avg->mss_val.dp[0]; break; case NC_INT: mss_val_dbl=wgt_avg->mss_val.ip[0]; break; case NC_SHORT: mss_val_dbl=wgt_avg->mss_val.sp[0]; break; case NC_USHORT: mss_val_dbl=wgt_avg->mss_val.usp[0]; break; case NC_UINT: mss_val_dbl=wgt_avg->mss_val.uip[0]; break; case NC_INT64: mss_val_dbl=(double)wgt_avg->mss_val.i64p[0]; break; /* CEWI for MSVC */ case NC_UINT64: mss_val_dbl=(double)wgt_avg->mss_val.ui64p[0]; break; /* CEWI for MSVC */ case NC_BYTE: mss_val_dbl=wgt_avg->mss_val.bp[0]; break; case NC_UBYTE: mss_val_dbl=wgt_avg->mss_val.ubp[0]; break; case NC_CHAR: mss_val_dbl=wgt_avg->mss_val.cp[0]; break; case NC_STRING: break; /* Do nothing */ default: nco_dfl_case_nc_type_err(); break; } /* end switch */ /* Second, mask wgt_avg where variable is missing value */ (void)nco_var_msk(wgt_avg->type,wgt_avg->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,mss_val_dbl,nco_op_ne,var_prc[idx]->val,wgt_avg->val); } /* endif weight must be checked for missing values */ /* Free current input buffer */ var_prc[idx]->val.vp=nco_free(var_prc[idx]->val.vp); if(msk && DO_CONFORM_MSK){ /* Must mask weight in same fashion as variable was masked If msk and var did not conform then 
do not mask wgt Ensure wgt_avg has a missing value */ if(!wgt_avg->has_mss_val){ wgt_avg->has_mss_val=True; wgt_avg->mss_val=nco_mss_val_mk(wgt_avg->type); } /* end if */ /* Mask by changing weight to missing value where condition is false */ (void)nco_var_msk(wgt_avg->type,wgt_avg->sz,wgt_avg->has_mss_val,wgt_avg->mss_val,msk_val,op_typ_rlt,msk_out->val,wgt_avg->val); } /* endif weight must be masked */ /* fxm: temporary kludge to make sure weight has tally space wgt_avg may lack valid tally array in ncwa because wgt_avg is created, sometimes, before the tally array for var_prc_out[idx] is created. When this occurs the nco_var_dpl() call in nco_var_cnf_dmn() does not copy tally array into wgt_avg. See related note about this above. TODO #114.*/ if(wgt_avg->sz > 0){ if(!(wgt_avg->tally=(long *)nco_realloc(wgt_avg->tally,wgt_avg->sz*sizeof(long)))){ (void)fprintf(fp_stdout,"%s: ERROR Unable to realloc() %ld*%ld bytes for tally buffer for weight %s in main()\n",nco_prg_nm_get(),wgt_avg->sz,(long)sizeof(long),wgt_avg->nm); nco_exit(EXIT_FAILURE); } /* end if */ } /* wgt_avg->sz */ /* Average weight over specified dimensions (tally array is set here) */ wgt_avg=nco_var_avg(wgt_avg,dmn_avg,dmn_avg_nbr,nco_op_avg,flg_rdd,&ddra_info); if(MULTIPLY_BY_TALLY){ /* NB: Currently this is not implemented */ /* Multiply numerator (weighted sum of variable) by tally We deviously accomplish this by dividing denominator by tally */ (void)nco_var_nrm(wgt_avg->type,wgt_avg->sz,wgt_avg->has_mss_val,wgt_avg->mss_val,wgt_avg->tally,wgt_avg->val); } /* endif */ /* Divide numerator by denominator */ /* Diagnose common PEBCAK before it causes core dump */ if(var_prc_out[idx]->sz == 1L && var_prc_out[idx]->type == NC_INT && var_prc_out[idx]->val.ip[0] == 0){ (void)fprintf(fp_stdout,"%s: ERROR Weight in denominator weight = 0.0, will cause SIGFPE\n%s: HINT Sum of masked, averaged weights must be non-zero\n%s: HINT A possible workaround is to remove variable \"%s\" from output file using \"%s 
-x -v %s ...\"\n%s: Expecting core dump...now!\n",nco_prg_nm,nco_prg_nm,nco_prg_nm,var_prc_out[idx]->nm,nco_prg_nm,var_prc_out[idx]->nm,nco_prg_nm); } /* end if */ /* Rather complex conditional statement is shorter than switch() */ if( /* Normalize by weighted tally if .... */ var_prc[idx]->is_crd_var || /* ...variable is a coordinate or ...*/ ((nco_op_typ != nco_op_min) && /* ...operation is not min() and... */ (nco_op_typ != nco_op_max) && /* ...operation is not max() and... */ (nco_op_typ != nco_op_mabs) && /* ...operation is not mabs() and... */ (nco_op_typ != nco_op_mibs) && /* ...operation is not mibs() and... */ (nco_op_typ != nco_op_tabs) && /* ...operation is not tabs() and... */ (nco_op_typ != nco_op_ttl)) /* ...operation is not ttl() and... */ ){ /* Divide numerator by masked, averaged, weights */ (void)nco_var_dvd(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,wgt_avg->val,var_prc_out[idx]->val); } /* endif */ /* Free wgt_avg, but keep wgt_out, after each use */ if(wgt_avg) wgt_avg=nco_var_free(wgt_avg); /* End of branch for normalization when weights were specified */ }else if(NRM_BY_DNM){ /* Branch for normalization when no weights were specified Normalization is just due to tally */ if(var_prc[idx]->is_crd_var){ /* Always return averages (never extrema or other statistics) of coordinates Prevent coordinate variables from encountering nco_var_nrm_sdn() */ (void)nco_var_nrm(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->tally,var_prc_out[idx]->val); }else{ /* !var_prc[idx]->is_crd_var */ switch(nco_op_typ){ case nco_op_mebs: /* Normalize sum by tally to create mean */ case nco_op_avg: /* Normalize sum by tally to create mean */ case nco_op_sqravg: /* Normalize sum by tally to create mean */ case nco_op_avgsqr: /* Normalize sum of squares by tally to create mean square */ case nco_op_rms: /* Normalize sum of squares by tally to 
create mean square */ case nco_op_sqrt: /* Normalize sum by tally to create mean */ (void)nco_var_nrm(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->tally,var_prc_out[idx]->val); break; case nco_op_rmssdn: /* Normalize sum of squares by tally-1 to create mean square for sdn */ (void)nco_var_nrm_sdn(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->tally,var_prc_out[idx]->val); break; case nco_op_min: /* Minimum is already in buffer, do nothing */ case nco_op_max: /* Maximum is already in buffer, do nothing */ case nco_op_mabs: /* Maximum absolute value is already in buffer, do nothing */ case nco_op_mibs: /* Minimum absolute value is already in buffer, do nothing */ case nco_op_tabs: /* Total absolute value is already in buffer, do nothing */ case nco_op_ttl: /* Total is already in buffer, do nothing */ break; default: (void)fprintf(fp_stdout,"%s: ERROR Illegal nco_op_typ in non-weighted normalization\n",nco_prg_nm); nco_exit(EXIT_FAILURE); break; } /* end switch */ } /* !var_prc[idx]->is_crd_var */ }else if(!NRM_BY_DNM){ /* User turned off normalization so we are done */ ; }else{ (void)fprintf(fp_stdout,"%s: ERROR Unforeseen logical branch in main()\n",nco_prg_nm); nco_exit(EXIT_FAILURE); } /* end if */ /* Some non-linear operations require additional processing */ if(!var_prc[idx]->is_crd_var){ switch(nco_op_typ){ case nco_op_sqravg: /* Square mean to create square of the mean (for sdn) */ (void)nco_var_mlt(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val,var_prc_out[idx]->val); break; case nco_op_sqrt: /* Take root of mean to create root mean */ case nco_op_rms: /* Take root of mean of sum of squares to create root mean square */ case nco_op_rmssdn: /* Take root of sdn mean of sum of squares to create root mean square for sdn */ 
(void)nco_var_sqrt(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->tally,var_prc_out[idx]->val,var_prc_out[idx]->val); break; default: break; } /* end switch */ } /* var_prc[idx]->is_crd_var */ /* Free tally buffer */ var_prc_out[idx]->tally=(long *)nco_free(var_prc_out[idx]->tally); /* Revert any arithmetic promotion but leave unpacked (for now) */ var_prc_out[idx]=nco_var_cnf_typ(var_prc_out[idx]->typ_upk,var_prc_out[idx]); /* Edit group name for output */ if(gpe) grp_out_fll=nco_gpe_evl(gpe,var_trv->grp_nm_fll); else grp_out_fll=(char *)strdup(var_trv->grp_nm_fll); /* Obtain output group ID */ (void)nco_inq_grp_full_ncid(out_id,grp_out_fll,&grp_out_id); /* Memory management after current extracted group */ if(grp_out_fll) grp_out_fll=(char *)nco_free(grp_out_fll); /* Get variable ID */ (void)nco_inq_varid(grp_out_id,var_trv->nm,&var_out_id); /* Store the output variable ID */ var_prc_out[idx]->id=var_out_id; if(var_trv->ppc != NC_MAX_INT){ if(var_trv->flg_nsd) (void)nco_ppc_bitmask(var_trv->ppc,var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val); else (void)nco_ppc_around(var_trv->ppc,var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val); } /* endif ppc */ if(nco_is_xcp(var_trv->nm)) nco_xcp_prc(var_trv->nm,var_prc_out[idx]->type,var_prc_out[idx]->sz,(char *)var_prc_out[idx]->val.vp); #ifdef _OPENMP #pragma omp critical #endif /* _OPENMP */ { /* begin OpenMP critical */ /* Copy average to output file then free averaging buffer */ if(var_prc_out[idx]->nbr_dim == 0){ (void)nco_put_var1(grp_out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type); }else{ /* end if variable is scalar */ 
(void)nco_put_vara(grp_out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type); } /* end if variable is array */ } /* end OpenMP critical */ if(flg_ddra){ /* DDRA diagnostics Usage: ncwa -O -C --mdl -a lat,lon,time -w lat ~/nco/data/in.nc ~/foo.nc ncwa -O -C --mdl -a lat,lon -w lat ${DATA}/nco_bm/stl_5km.nc ~/foo.nc ncwa -O -C --mdl -a lat,lon,time -w lat ${DATA}/nco_bm/gcm_T85.nc ~/foo.nc */ /* Assign remaining input for DDRA diagnostics */ ddra_info.lmn_nbr=var_prc[idx]->sz; /* [nbr] Variable size */ if(wgt) ddra_info.lmn_nbr_wgt=wgt->sz; /* [nbr] Weight size */ ddra_info.nco_op_typ=nco_op_typ; /* [enm] Operation type */ ddra_info.rnk_var=var_prc[idx]->nbr_dim; /* I [nbr] Variable rank (in input file) */ if(wgt) ddra_info.rnk_wgt=wgt->nbr_dim; /* [nbr] Rank of weight */ ddra_info.var_idx=idx; /* [enm] Index */ ddra_info.wrd_sz=nco_typ_lng(var_prc[idx]->type); /* [B] Bytes per element */ /* DDRA diagnostics */ rcd+=nco_ddra /* [fnc] Count operations */ (var_prc[idx]->nm, /* I [sng] Variable name */ wgt_nm, /* I [sng] Weight name */ &ddra_info); /* I [sct] DDRA information */ } /* !flg_ddra */ /* Free current output buffer */ var_prc_out[idx]->val.vp=nco_free(var_prc_out[idx]->val.vp); /* Free possible weight/mask found */ if(wgt) wgt=nco_var_free(wgt); if(wgt_out) wgt_out=nco_var_free(wgt_out); if(msk) msk=nco_var_free(msk); if(msk_out) msk_out=nco_var_free(msk_out); } /* end (OpenMP parallel for) loop over idx */ if(nco_dbg_lvl >= nco_dbg_var) (void)fprintf(stderr,"\n"); /* Close input netCDF file */ for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) nco_close(in_id_arr[thr_idx]); /* Remove local copy of file */ if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in); } /* end loop over fl_idx */ /* Close output file and move it from temporary to permanent location */ (void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id); /* Clean memory unless dirty memory allowed */ if(flg_mmr_cln){ /* ncwa-specific memory 
*/ if(dmn_avg_nbr > 0) dmn_avg=(dmn_sct **)nco_free(dmn_avg); if(msk_nm) msk_nm=(char *)nco_free(msk_nm); if(msk_cnd_sng) msk_cnd_sng=(char *)nco_free(msk_cnd_sng); if(wgt_avg) wgt_avg=nco_var_free(wgt_avg); if(wgt_nm) wgt_nm=(char *)nco_free(wgt_nm); /* NCO-generic clean-up */ /* Free individual strings/arrays */ if(cmd_ln) cmd_ln=(char *)nco_free(cmd_ln); if(cnk_map_sng) cnk_map_sng=(char *)nco_free(cnk_map_sng); if(cnk_plc_sng) cnk_plc_sng=(char *)nco_free(cnk_plc_sng); if(fl_in) fl_in=(char *)nco_free(fl_in); if(fl_out) fl_out=(char *)nco_free(fl_out); if(fl_out_tmp) fl_out_tmp=(char *)nco_free(fl_out_tmp); if(fl_pth) fl_pth=(char *)nco_free(fl_pth); if(fl_pth_lcl) fl_pth_lcl=(char *)nco_free(fl_pth_lcl); if(in_id_arr) in_id_arr=(int *)nco_free(in_id_arr); /* Free lists of strings */ if(fl_lst_in && fl_lst_abb == NULL) fl_lst_in=nco_sng_lst_free(fl_lst_in,fl_nbr); if(fl_lst_in && fl_lst_abb) fl_lst_in=nco_sng_lst_free(fl_lst_in,1); if(fl_lst_abb) fl_lst_abb=nco_sng_lst_free(fl_lst_abb,abb_arg_nbr); if(gaa_nbr > 0) gaa_arg=nco_sng_lst_free(gaa_arg,gaa_nbr); if(var_lst_in_nbr > 0) var_lst_in=nco_sng_lst_free(var_lst_in,var_lst_in_nbr); /* Free limits */ for(idx=0;idx<aux_nbr;idx++) aux_arg[idx]=(char *)nco_free(aux_arg[idx]); for(idx=0;idx<lmt_nbr;idx++) lmt_arg[idx]=(char *)nco_free(lmt_arg[idx]); for(idx=0;idx<ppc_nbr;idx++) ppc_arg[idx]=(char *)nco_free(ppc_arg[idx]); /* Free chunking information */ for(idx=0;idx<cnk_nbr;idx++) cnk_arg[idx]=(char *)nco_free(cnk_arg[idx]); if(cnk_nbr > 0 && (fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC)) cnk.cnk_dmn=(cnk_dmn_sct **)nco_cnk_lst_free(cnk.cnk_dmn,cnk_nbr); /* Free dimension lists */ if(nbr_dmn_xtr > 0) dim=nco_dmn_lst_free(dim,nbr_dmn_xtr); if(nbr_dmn_out > 0) dmn_out=nco_dmn_lst_free(dmn_out,nbr_dmn_out); /* Free variable lists */ if(xtr_nbr > 0) var=nco_var_lst_free(var,xtr_nbr); /* ncwa uses nco_var_lst_free() on var_prc_out because var_out has dangling pointers */ if(nbr_var_fix > 
0) var_fix_out=nco_var_lst_free(var_fix_out,nbr_var_fix); if(nbr_var_prc > 0) var_prc_out=nco_var_lst_free(var_prc_out,nbr_var_prc); var_prc=(var_sct **)nco_free(var_prc); var_fix=(var_sct **)nco_free(var_fix); var_out=(var_sct **)nco_free(var_out); for(idx=0;idx<lmt_nbr;idx++) flg_dne[idx].dim_nm=(char *)nco_free(flg_dne[idx].dim_nm); if(flg_dne) flg_dne=(nco_dmn_dne_t *)nco_free(flg_dne); /* Free traversal table */ trv_tbl_free(trv_tbl); } /* !flg_mmr_cln */ /* End timer */ ddra_info.tmr_flg=nco_tmr_end; /* [enm] Timer flag */ rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info); if(rcd != NC_NOERR) nco_err_exit(rcd,"main"); #ifdef ENABLE_MPI MPI_Finalize(); #endif /* !ENABLE_MPI */ nco_exit_gracefully(); return EXIT_SUCCESS; } /* end main() */
DES_bs_b.c
/* * This file is part of John the Ripper password cracker, * Copyright (c) 1996-2001,2003,2010-2013,2015 by Solar Designer * * Addition of single DES encryption with no salt by * Deepika Dutta Mishra <dipikadutta at gmail.com> in 2012, no * rights reserved. */ #ifdef _MSC_VER #undef _OPENMP #endif #include "arch.h" #include "common.h" #include "DES_bs.h" #include "memdbg.h" #if DES_BS_ASM && defined(_OPENMP) && defined(__GNUC__) #warning Assembly code and OpenMP are both requested - will provide the former, but not the latter (for DES-based hashes). This may likely be corrected by enabling SIMD intrinsics with the C compiler (try adding -msse2 to OMPFLAGS). #endif #if !DES_BS_ASM #define vzero (*(vtype *)&DES_bs_all.zero) #if DES_bs_mt #define vones (*(vtype *)&DES_bs_all_by_tnum(-1).ones) #else #define vones (*(vtype *)&DES_bs_all.ones) #endif #define DES_BS_VECTOR_LOOPS 0 #if defined(__ARM_NEON) && DES_BS_DEPTH == 64 #include <arm_neon.h> typedef uint32x2_t vtype; #define vst(dst, ofs, src) \ vst1_u32((uint32_t *)((DES_bs_vector *)&(dst) + (ofs)), (src)) #define vxorf(a, b) \ veor_u32((a), (b)) #define vnot(dst, a) \ (dst) = vmvn_u32((a)) #define vand(dst, a, b) \ (dst) = vand_u32((a), (b)) #define vor(dst, a, b) \ (dst) = vorr_u32((a), (b)) #define vandn(dst, a, b) \ (dst) = vbic_u32((a), (b)) #define vsel(dst, a, b, c) \ (dst) = vbsl_u32((c), (b), (a)) #if 0 #define vshl1(dst, src) \ (dst) = vadd_u32((src), (src)) #endif #define vshl(dst, src, shift) \ (dst) = vshl_n_u32((src), (shift)) #define vshr(dst, src, shift) \ (dst) = vshr_n_u32((src), (shift)) #elif defined(__ARM_NEON) && ARCH_BITS == 32 && DES_BS_DEPTH == 96 #include <arm_neon.h> typedef struct { uint32x2_t f; unsigned ARCH_WORD g; } vtype; #define vst(dst, ofs, src) \ vst1_u32( \ (uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \ (src).f); \ ((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g #define vxor(dst, a, b) \ (dst).f = veor_u32((a).f, (b).f); \ (dst).g = (a).g ^ (b).g 
#define vnot(dst, a) \ (dst).f = vmvn_u32((a).f); \ (dst).g = ~(a).g #define vand(dst, a, b) \ (dst).f = vand_u32((a).f, (b).f); \ (dst).g = (a).g & (b).g #define vor(dst, a, b) \ (dst).f = vorr_u32((a).f, (b).f); \ (dst).g = (a).g | (b).g #define vandn(dst, a, b) \ (dst).f = vbic_u32((a).f, (b).f); \ (dst).g = (a).g & ~(b).g #define vsel(dst, a, b, c) \ (dst).f = vbsl_u32((c).f, (b).f, (a).f); \ (dst).g = (((a).g & ~(c).g) ^ ((b).g & (c).g)) #elif defined(__ARM_NEON) && DES_BS_DEPTH == 128 && defined(DES_BS_2X64) #include <arm_neon.h> typedef struct { uint32x2_t f, g; } vtype; #define vst(dst, ofs, src) \ vst1_u32( \ (uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \ (src).f); \ vst1_u32( \ (uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g, \ (src).g) #define vxor(dst, a, b) \ (dst).f = veor_u32((a).f, (b).f); \ (dst).g = veor_u32((a).g, (b).g) #define vnot(dst, a) \ (dst).f = vmvn_u32((a).f); \ (dst).g = vmvn_u32((a).g) #define vand(dst, a, b) \ (dst).f = vand_u32((a).f, (b).f); \ (dst).g = vand_u32((a).g, (b).g) #define vor(dst, a, b) \ (dst).f = vorr_u32((a).f, (b).f); \ (dst).g = vorr_u32((a).g, (b).g) #define vandn(dst, a, b) \ (dst).f = vbic_u32((a).f, (b).f); \ (dst).g = vbic_u32((a).g, (b).g) #define vsel(dst, a, b, c) \ (dst).f = vbsl_u32((c).f, (b).f, (a).f); \ (dst).g = vbsl_u32((c).g, (b).g, (a).g) #elif defined(__ARM_NEON) && DES_BS_DEPTH == 128 #include <arm_neon.h> typedef uint32x4_t vtype; #define vst(dst, ofs, src) \ vst1q_u32((uint32_t *)((DES_bs_vector *)&(dst) + (ofs)), (src)) #define vxorf(a, b) \ veorq_u32((a), (b)) #define vnot(dst, a) \ (dst) = vmvnq_u32((a)) #define vand(dst, a, b) \ (dst) = vandq_u32((a), (b)) #define vor(dst, a, b) \ (dst) = vorrq_u32((a), (b)) #define vandn(dst, a, b) \ (dst) = vbicq_u32((a), (b)) #define vsel(dst, a, b, c) \ (dst) = vbslq_u32((c), (b), (a)) #if 0 #define vshl1(dst, src) \ (dst) = vaddq_u32((src), (src)) #endif #define vshl(dst, src, shift) \ (dst) = vshlq_n_u32((src), (shift)) 
#define vshr(dst, src, shift) \ (dst) = vshrq_n_u32((src), (shift)) #elif defined(__ARM_NEON) && \ ((ARCH_BITS == 64 && DES_BS_DEPTH == 192) || \ (ARCH_BITS == 32 && DES_BS_DEPTH == 160)) #include <arm_neon.h> typedef struct { uint32x4_t f; unsigned ARCH_WORD g; } vtype; #define vst(dst, ofs, src) \ vst1q_u32( \ (uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \ (src).f); \ ((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g #define vxor(dst, a, b) \ (dst).f = veorq_u32((a).f, (b).f); \ (dst).g = (a).g ^ (b).g #define vnot(dst, a) \ (dst).f = vmvnq_u32((a).f); \ (dst).g = ~(a).g #define vand(dst, a, b) \ (dst).f = vandq_u32((a).f, (b).f); \ (dst).g = (a).g & (b).g #define vor(dst, a, b) \ (dst).f = vorrq_u32((a).f, (b).f); \ (dst).g = (a).g | (b).g #define vandn(dst, a, b) \ (dst).f = vbicq_u32((a).f, (b).f); \ (dst).g = (a).g & ~(b).g #define vsel(dst, a, b, c) \ (dst).f = vbslq_u32((c).f, (b).f, (a).f); \ (dst).g = (((a).g & ~(c).g) ^ ((b).g & (c).g)) #elif defined(__ARM_NEON) && DES_BS_DEPTH == 256 #include <arm_neon.h> typedef struct { uint32x4_t f, g; } vtype; #define vst(dst, ofs, src) \ vst1q_u32( \ (uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \ (src).f); \ vst1q_u32( \ (uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g, \ (src).g) #define vxor(dst, a, b) \ (dst).f = veorq_u32((a).f, (b).f); \ (dst).g = veorq_u32((a).g, (b).g) #define vnot(dst, a) \ (dst).f = vmvnq_u32((a).f); \ (dst).g = vmvnq_u32((a).g) #define vand(dst, a, b) \ (dst).f = vandq_u32((a).f, (b).f); \ (dst).g = vandq_u32((a).g, (b).g) #define vor(dst, a, b) \ (dst).f = vorrq_u32((a).f, (b).f); \ (dst).g = vorrq_u32((a).g, (b).g) #define vandn(dst, a, b) \ (dst).f = vbicq_u32((a).f, (b).f); \ (dst).g = vbicq_u32((a).g, (b).g) #define vsel(dst, a, b, c) \ (dst).f = vbslq_u32((c).f, (b).f, (a).f); \ (dst).g = vbslq_u32((c).g, (b).g, (a).g) #elif defined(__ALTIVEC__) && DES_BS_DEPTH == 128 #ifdef __linux__ #include <altivec.h> #endif typedef vector 
signed int vtype; #define vst(dst, ofs, src) \ vec_st((src), (ofs) * sizeof(DES_bs_vector), (vtype *)(dst)) #define vxorf(a, b) \ vec_xor((a), (b)) #define vnot(dst, a) \ (dst) = vec_nor((a), (a)) #define vand(dst, a, b) \ (dst) = vec_and((a), (b)) #define vor(dst, a, b) \ (dst) = vec_or((a), (b)) #define vandn(dst, a, b) \ (dst) = vec_andc((a), (b)) #define vsel(dst, a, b, c) \ (dst) = vec_sel((a), (b), (vector bool int)(c)) #elif defined(__ALTIVEC__) && \ ((ARCH_BITS == 64 && DES_BS_DEPTH == 192) || \ (ARCH_BITS == 32 && DES_BS_DEPTH == 160)) #ifdef __linux__ #include <altivec.h> #endif typedef struct { vector signed int f; unsigned ARCH_WORD g; } vtype; #define vst(dst, ofs, src) \ vec_st((src).f, (ofs) * sizeof(DES_bs_vector), ((vtype *)&(dst))->f); \ ((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g #define vxor(dst, a, b) \ (dst).f = vec_xor((a).f, (b).f); \ (dst).g = (a).g ^ (b).g #define vnot(dst, a) \ (dst).f = vec_nor((a).f, (a).f); \ (dst).g = ~(a).g #define vand(dst, a, b) \ (dst).f = vec_and((a).f, (b).f); \ (dst).g = (a).g & (b).g #define vor(dst, a, b) \ (dst).f = vec_or((a).f, (b).f); \ (dst).g = (a).g | (b).g #define vandn(dst, a, b) \ (dst).f = vec_andc((a).f, (b).f); \ (dst).g = (a).g & ~(b).g #define vsel(dst, a, b, c) \ (dst).f = vec_sel((a).f, (b).f, (vector bool int)(c).f); \ (dst).g = (((a).g & ~(c).g) ^ ((b).g & (c).g)) #elif defined(__ALTIVEC__) && DES_BS_DEPTH == 256 #ifdef __linux__ #include <altivec.h> #endif typedef struct { vector signed int f, g; } vtype; #define vst(dst, ofs, src) \ vec_st((src).f, (ofs) * sizeof(DES_bs_vector), ((vtype *)&(dst))->f); \ vec_st((src).g, (ofs) * sizeof(DES_bs_vector), ((vtype *)&(dst))->g) #define vxor(dst, a, b) \ (dst).f = vec_xor((a).f, (b).f); \ (dst).g = vec_xor((a).g, (b).g) #define vnot(dst, a) \ (dst).f = vec_nor((a).f, (a).f); \ (dst).g = vec_nor((a).g, (a).g) #define vand(dst, a, b) \ (dst).f = vec_and((a).f, (b).f); \ (dst).g = vec_and((a).g, (b).g) #define vor(dst, a, b) \ (dst).f 
= vec_or((a).f, (b).f); \ (dst).g = vec_or((a).g, (b).g) #define vandn(dst, a, b) \ (dst).f = vec_andc((a).f, (b).f); \ (dst).g = vec_andc((a).g, (b).g) #define vsel(dst, a, b, c) \ (dst).f = vec_sel((a).f, (b).f, (vector bool int)(c).f); \ (dst).g = vec_sel((a).g, (b).g, (vector bool int)(c).g) #elif defined(__MIC__) && DES_BS_DEPTH == 512 #include <immintrin.h> typedef __m512i vtype; #define vst(dst, ofs, src) \ _mm512_store_epi32((vtype *)((DES_bs_vector *)&(dst) + (ofs)), (src)) #define vxorf(a, b) \ _mm512_xor_epi32((a), (b)) #define vand(dst, a, b) \ (dst) = _mm512_and_epi32((a), (b)) #define vor(dst, a, b) \ (dst) = _mm512_or_epi32((a), (b)) #define vandn(dst, a, b) \ (dst) = _mm512_andnot_epi32((b), (a)) #define vshl1(dst, src) \ (dst) = _mm512_add_epi32((src), (src)) #define vshl(dst, src, shift) \ (dst) = _mm512_slli_epi32((src), (shift)) #define vshr(dst, src, shift) \ (dst) = _mm512_srli_epi32((src), (shift)) #elif defined(__AVX__) && DES_BS_DEPTH == 256 && !defined(DES_BS_NO_AVX256) #include <immintrin.h> typedef __m256i vtype; #define vst(dst, ofs, src) \ _mm256_store_si256((vtype *)((DES_bs_vector *)&(dst) + (ofs)), (src)) #define vxorf(a, b) \ _mm256_xor_si256((a), (b)) #define vand(dst, a, b) \ (dst) = _mm256_and_si256((a), (b)) #define vor(dst, a, b) \ (dst) = _mm256_or_si256((a), (b)) #define vandn(dst, a, b) \ (dst) = _mm256_andnot_si256((b), (a)) #ifdef __XOP__ /* This could be _mm256_cmov_si256(), but it does not exist (yet?) 
*/ #define vsel(dst, a, b, c) \ (dst) = __builtin_ia32_vpcmov_v8sf256((b), (a), (c)) #endif #define vshl1(dst, src) \ (dst) = _mm256_add_epi8((src), (src)) #define vshl(dst, src, shift) \ (dst) = _mm256_slli_epi64((src), (shift)) #define vshr(dst, src, shift) \ (dst) = _mm256_srli_epi64((src), (shift)) #elif defined(__AVX__) && DES_BS_DEPTH == 384 && !defined(DES_BS_NO_AVX128) #include <immintrin.h> #ifdef __XOP__ #include <x86intrin.h> #endif typedef struct { __m256i f; __m128i g; } vtype; #define vst(dst, ofs, src) \ _mm256_store_si256(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \ (src).f); \ _mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g, \ (src).g) #define vxor(dst, a, b) \ (dst).f = _mm256_xor_si256((a).f, (b).f); \ (dst).g = _mm_xor_si128((a).g, (b).g) #define vand(dst, a, b) \ (dst).f = _mm256_and_si256((a).f, (b).f); \ (dst).g = _mm_and_si128((a).g, (b).g) #define vor(dst, a, b) \ (dst).f = _mm256_or_si256((a).f, (b).f); \ (dst).g = _mm_or_si128((a).g, (b).g) #define vandn(dst, a, b) \ (dst).f = _mm256_andnot_si256((b).f, (a).f); \ (dst).g = _mm_andnot_si128((b).g, (a).g) #ifdef __XOP__ /* This could be _mm256_cmov_ps(), but it does not exist (yet?) 
*/ #define vsel(dst, a, b, c) \ (dst).f = __builtin_ia32_vpcmov_v8sf256((b).f, (a).f, (c).f); \ (dst).g = _mm_cmov_si128((b).g, (a).g, (c).g) #endif #define vshl(dst, src, shift) \ (dst).f = _mm256_slli_epi64((src).f, (shift)); \ (dst).g = _mm_slli_epi64((src).g, (shift)) #define vshr(dst, src, shift) \ (dst).f = _mm256_srli_epi64((src).f, (shift)); \ (dst).g = _mm_srli_epi64((src).g, (shift)) #elif defined(__AVX__) && DES_BS_DEPTH == 512 #include <immintrin.h> typedef struct { __m256i f, g; } vtype; #define vst(dst, ofs, src) \ _mm256_store_si256(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \ (src).f); \ _mm256_store_si256(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g, \ (src).g) #define vxor(dst, a, b) \ (dst).f = _mm256_xor_si256((a).f, (b).f); \ (dst).g = _mm256_xor_si256((a).g, (b).g) #define vand(dst, a, b) \ (dst).f = _mm256_and_si256((a).f, (b).f); \ (dst).g = _mm256_and_si256((a).g, (b).g) #define vor(dst, a, b) \ (dst).f = _mm256_or_si256((a).f, (b).f); \ (dst).g = _mm256_or_si256((a).g, (b).g) #define vandn(dst, a, b) \ (dst).f = _mm256_andnot_si256((b).f, (a).f); \ (dst).g = _mm256_andnot_si256((b).g, (a).g) #ifdef __XOP__ /* This could be _mm256_cmov_ps(), but it does not exist (yet?) 
*/ #define vsel(dst, a, b, c) \ (dst).f = __builtin_ia32_vpcmov_v8sf256((b).f, (a).f, (c).f); \ (dst).g = __builtin_ia32_vpcmov_v8sf256((b).g, (a).g, (c).g) #endif #define vshl(dst, src, shift) \ (dst).f = _mm256_slli_epi64((src).f, (shift)); \ (dst).g = _mm256_slli_epi64((src).g, (shift)) #define vshr(dst, src, shift) \ (dst).f = _mm256_srli_epi64((src).f, (shift)); \ (dst).g = _mm256_srli_epi64((src).g, (shift)) #elif defined(__AVX__) && defined(__MMX__) && DES_BS_DEPTH == 320 && \ !defined(DES_BS_NO_MMX) #include <immintrin.h> #include <mmintrin.h> typedef struct { __m256i f; __m64 g; } vtype; #define vst(dst, ofs, src) \ _mm256_store_si256(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \ (src).f); \ ((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g #define vxor(dst, a, b) \ (dst).f = _mm256_xor_si256((a).f, (b).f); \ (dst).g = _mm_xor_si64((a).g, (b).g) #define vand(dst, a, b) \ (dst).f = _mm256_and_si256((a).f, (b).f); \ (dst).g = _mm_and_si64((a).g, (b).g) #define vor(dst, a, b) \ (dst).f = _mm256_or_si256((a).f, (b).f); \ (dst).g = _mm_or_si64((a).g, (b).g) #define vandn(dst, a, b) \ (dst).f = _mm256_andnot_si256((b).f, (a).f); \ (dst).g = _mm_andnot_si64((b).g, (a).g) #define vshl(dst, src, shift) \ (dst).f = _mm256_slli_epi64((src).f, (shift)); \ (dst).g = _mm_slli_si64((src).g, (shift)) #define vshr(dst, src, shift) \ (dst).f = _mm256_srli_epi64((src).f, (shift)); \ (dst).g = _mm_srli_si64((src).g, (shift)) #elif defined(__AVX__) && \ ((ARCH_BITS == 64 && DES_BS_DEPTH == 320) || \ (ARCH_BITS == 32 && DES_BS_DEPTH == 288)) #include <immintrin.h> #include <mmintrin.h> typedef struct { __m256i f; unsigned ARCH_WORD g; } vtype; #define vst(dst, ofs, src) \ _mm256_store_si256(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \ (src).f); \ ((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g #define vxor(dst, a, b) \ (dst).f = _mm256_xor_si256((a).f, (b).f); \ (dst).g = (a).g ^ (b).g #define vnot(dst, a) \ (dst).f = _mm256_xor_si256((a).f, 
vones.f); \ (dst).g = ~(a).g #define vand(dst, a, b) \ (dst).f = _mm256_and_si256((a).f, (b).f); \ (dst).g = (a).g & (b).g #define vor(dst, a, b) \ (dst).f = _mm256_or_si256((a).f, (b).f); \ (dst).g = (a).g | (b).g #define vandn(dst, a, b) \ (dst).f = _mm256_andnot_si256((b).f, (a).f); \ (dst).g = (a).g & ~(b).g #define vshl(dst, src, shift) \ (dst).f = _mm256_slli_epi64((src).f, (shift)); \ (dst).g = (src).g << (shift) #define vshr(dst, src, shift) \ (dst).f = _mm256_srli_epi64((src).f, (shift)); \ (dst).g = (src).g >> (shift) #elif defined(__AVX__) && defined(__MMX__) && \ ((ARCH_BITS == 64 && DES_BS_DEPTH == 384) || \ (ARCH_BITS == 32 && DES_BS_DEPTH == 352)) #include <immintrin.h> #include <mmintrin.h> typedef struct { __m256i f; __m64 g; unsigned ARCH_WORD h; } vtype; #define vst(dst, ofs, src) \ _mm256_store_si256(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \ (src).f); \ ((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g; \ ((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->h = (src).h #define vxor(dst, a, b) \ (dst).f = _mm256_xor_si256((a).f, (b).f); \ (dst).g = _mm_xor_si64((a).g, (b).g); \ (dst).h = (a).h ^ (b).h #define vnot(dst, a) \ (dst).f = _mm256_xor_si256((a).f, vones.f); \ (dst).g = _mm_xor_si64((a).g, vones.g); \ (dst).h = ~(a).h #define vand(dst, a, b) \ (dst).f = _mm256_and_si256((a).f, (b).f); \ (dst).g = _mm_and_si64((a).g, (b).g); \ (dst).h = (a).h & (b).h #define vor(dst, a, b) \ (dst).f = _mm256_or_si256((a).f, (b).f); \ (dst).g = _mm_or_si64((a).g, (b).g); \ (dst).h = (a).h | (b).h #define vandn(dst, a, b) \ (dst).f = _mm256_andnot_si256((b).f, (a).f); \ (dst).g = _mm_andnot_si64((b).g, (a).g); \ (dst).h = (a).h & ~(b).h #define vshl(dst, src, shift) \ (dst).f = _mm256_slli_epi64((src).f, (shift)); \ (dst).g = _mm_slli_si64((src).g, (shift)); \ (dst).h = (src).h << (shift) #define vshr(dst, src, shift) \ (dst).f = _mm256_srli_epi64((src).f, (shift)); \ (dst).g = _mm_srli_si64((src).g, (shift)); \ (dst).h = (src).h >> (shift) #elif 
defined(__SSE2__) && DES_BS_DEPTH == 128 #ifdef __AVX__ #include <immintrin.h> #ifdef __XOP__ #include <x86intrin.h> #endif #else #include <emmintrin.h> #endif typedef __m128i vtype; #define vst(dst, ofs, src) \ _mm_store_si128((vtype *)((DES_bs_vector *)&(dst) + (ofs)), (src)) #define vxorf(a, b) \ _mm_xor_si128((a), (b)) #define vand(dst, a, b) \ (dst) = _mm_and_si128((a), (b)) #define vor(dst, a, b) \ (dst) = _mm_or_si128((a), (b)) #define vandn(dst, a, b) \ (dst) = _mm_andnot_si128((b), (a)) #ifdef __XOP__ #define vsel(dst, a, b, c) \ (dst) = _mm_cmov_si128((b), (a), (c)) #else #define vsel(dst, a, b, c) \ (dst) = _mm_xor_si128(_mm_andnot_si128((c), (a)), \ _mm_and_si128((c), (b))) #endif #define vshl1(dst, src) \ (dst) = _mm_add_epi8((src), (src)) #define vshl(dst, src, shift) \ (dst) = _mm_slli_epi64((src), (shift)) #define vshr(dst, src, shift) \ (dst) = _mm_srli_epi64((src), (shift)) #elif defined(__SSE2__) && DES_BS_DEPTH == 256 && defined(DES_BS_NO_MMX) #ifdef __AVX__ #include <immintrin.h> #ifdef __XOP__ #include <x86intrin.h> #endif #else #include <emmintrin.h> #endif typedef struct { __m128i f, g; } vtype; #define vst(dst, ofs, src) \ _mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \ (src).f); \ _mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g, \ (src).g) #define vxor(dst, a, b) \ (dst).f = _mm_xor_si128((a).f, (b).f); \ (dst).g = _mm_xor_si128((a).g, (b).g) #define vand(dst, a, b) \ (dst).f = _mm_and_si128((a).f, (b).f); \ (dst).g = _mm_and_si128((a).g, (b).g) #define vor(dst, a, b) \ (dst).f = _mm_or_si128((a).f, (b).f); \ (dst).g = _mm_or_si128((a).g, (b).g) #define vandn(dst, a, b) \ (dst).f = _mm_andnot_si128((b).f, (a).f); \ (dst).g = _mm_andnot_si128((b).g, (a).g) #ifdef __XOP__ #define vsel(dst, a, b, c) \ (dst).f = _mm_cmov_si128((b).f, (a).f, (c).f); \ (dst).g = _mm_cmov_si128((b).g, (a).g, (c).g) #endif #define vshl1(dst, src) \ (dst).f = _mm_add_epi8((src).f, (src).f); \ (dst).g = _mm_add_epi8((src).g, 
(src).g) #define vshl(dst, src, shift) \ (dst).f = _mm_slli_epi64((src).f, (shift)); \ (dst).g = _mm_slli_epi64((src).g, (shift)) #define vshr(dst, src, shift) \ (dst).f = _mm_srli_epi64((src).f, (shift)); \ (dst).g = _mm_srli_epi64((src).g, (shift)) #elif defined(__SSE2__) && defined(__MMX__) && DES_BS_DEPTH == 192 && \ !defined(DES_BS_NO_MMX) #include <emmintrin.h> #include <mmintrin.h> typedef struct { __m128i f; __m64 g; } vtype; #define vst(dst, ofs, src) \ _mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \ (src).f); \ ((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g #define vxor(dst, a, b) \ (dst).f = _mm_xor_si128((a).f, (b).f); \ (dst).g = _mm_xor_si64((a).g, (b).g) #define vand(dst, a, b) \ (dst).f = _mm_and_si128((a).f, (b).f); \ (dst).g = _mm_and_si64((a).g, (b).g) #define vor(dst, a, b) \ (dst).f = _mm_or_si128((a).f, (b).f); \ (dst).g = _mm_or_si64((a).g, (b).g) #define vandn(dst, a, b) \ (dst).f = _mm_andnot_si128((b).f, (a).f); \ (dst).g = _mm_andnot_si64((b).g, (a).g) #define vshl1(dst, src) \ (dst).f = _mm_add_epi8((src).f, (src).f); \ (dst).g = _mm_add_pi8((src).g, (src).g) #define vshl(dst, src, shift) \ (dst).f = _mm_slli_epi64((src).f, (shift)); \ (dst).g = _mm_slli_si64((src).g, (shift)) #define vshr(dst, src, shift) \ (dst).f = _mm_srli_epi64((src).f, (shift)); \ (dst).g = _mm_srli_si64((src).g, (shift)) #elif defined(__SSE2__) && \ ((ARCH_BITS == 64 && DES_BS_DEPTH == 192) || \ (ARCH_BITS == 32 && DES_BS_DEPTH == 160)) #include <emmintrin.h> typedef struct { __m128i f; unsigned ARCH_WORD g; } vtype; #define vst(dst, ofs, src) \ _mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \ (src).f); \ ((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g #define vxor(dst, a, b) \ (dst).f = _mm_xor_si128((a).f, (b).f); \ (dst).g = (a).g ^ (b).g #define vnot(dst, a) \ (dst).f = _mm_xor_si128((a).f, vones.f); \ (dst).g = ~(a).g #define vand(dst, a, b) \ (dst).f = _mm_and_si128((a).f, (b).f); \ (dst).g = (a).g 
& (b).g #define vor(dst, a, b) \ (dst).f = _mm_or_si128((a).f, (b).f); \ (dst).g = (a).g | (b).g #define vandn(dst, a, b) \ (dst).f = _mm_andnot_si128((b).f, (a).f); \ (dst).g = (a).g & ~(b).g #define vshl1(dst, src) \ (dst).f = _mm_add_epi8((src).f, (src).f); \ (dst).g = (src).g << 1 #define vshl(dst, src, shift) \ (dst).f = _mm_slli_epi64((src).f, (shift)); \ (dst).g = (src).g << (shift) #define vshr(dst, src, shift) \ (dst).f = _mm_srli_epi64((src).f, (shift)); \ (dst).g = (src).g >> (shift) #elif defined(__SSE2__) && defined(__MMX__) && \ ((ARCH_BITS == 64 && DES_BS_DEPTH == 256) || \ (ARCH_BITS == 32 && DES_BS_DEPTH == 224)) #include <emmintrin.h> #include <mmintrin.h> typedef struct { __m128i f; __m64 g; unsigned ARCH_WORD h; } vtype; #define vst(dst, ofs, src) \ _mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \ (src).f); \ ((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g; \ ((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->h = (src).h #define vxor(dst, a, b) \ (dst).f = _mm_xor_si128((a).f, (b).f); \ (dst).g = _mm_xor_si64((a).g, (b).g); \ (dst).h = (a).h ^ (b).h #define vnot(dst, a) \ (dst).f = _mm_xor_si128((a).f, vones.f); \ (dst).g = _mm_xor_si64((a).g, vones.g); \ (dst).h = ~(a).h #define vand(dst, a, b) \ (dst).f = _mm_and_si128((a).f, (b).f); \ (dst).g = _mm_and_si64((a).g, (b).g); \ (dst).h = (a).h & (b).h #define vor(dst, a, b) \ (dst).f = _mm_or_si128((a).f, (b).f); \ (dst).g = _mm_or_si64((a).g, (b).g); \ (dst).h = (a).h | (b).h #define vandn(dst, a, b) \ (dst).f = _mm_andnot_si128((b).f, (a).f); \ (dst).g = _mm_andnot_si64((b).g, (a).g); \ (dst).h = (a).h & ~(b).h #define vshl1(dst, src) \ (dst).f = _mm_add_epi8((src).f, (src).f); \ (dst).g = _mm_add_pi8((src).g, (src).g); \ (dst).h = (src).h << 1 #define vshl(dst, src, shift) \ (dst).f = _mm_slli_epi64((src).f, (shift)); \ (dst).g = _mm_slli_si64((src).g, (shift)); \ (dst).h = (src).h << (shift) #define vshr(dst, src, shift) \ (dst).f = _mm_srli_epi64((src).f, (shift)); \ 
	(dst).g = _mm_srli_si64((src).g, (shift)); \
	(dst).h = (src).h >> (shift)
/* Plain MMX, 64-bit depth: one __m64 carries the whole bitslice vector */
#elif defined(__MMX__) && ARCH_BITS != 64 && DES_BS_DEPTH == 64
#include <mmintrin.h>
typedef __m64 vtype;
#define vxorf(a, b) \
	_mm_xor_si64((a), (b))
#define vand(dst, a, b) \
	(dst) = _mm_and_si64((a), (b))
#define vor(dst, a, b) \
	(dst) = _mm_or_si64((a), (b))
#define vandn(dst, a, b) \
	(dst) = _mm_andnot_si64((b), (a))
#define vshl1(dst, src) \
	(dst) = _mm_add_pi8((src), (src))
#define vshl(dst, src, shift) \
	(dst) = _mm_slli_si64((src), (shift))
#define vshr(dst, src, shift) \
	(dst) = _mm_srli_si64((src), (shift))
/* MMX + one machine word on 32-bit: 64 + 32 = 96-bit depth */
#elif defined(__MMX__) && ARCH_BITS == 32 && DES_BS_DEPTH == 96
#include <mmintrin.h>
typedef struct {
	__m64 f;
	unsigned ARCH_WORD g;
} vtype;
#define vst(dst, ofs, src) \
	((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f = (src).f; \
	((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
	(dst).f = _mm_xor_si64((a).f, (b).f); \
	(dst).g = (a).g ^ (b).g
#define vnot(dst, a) \
	(dst).f = _mm_xor_si64((a).f, vones.f); \
	(dst).g = ~(a).g
#define vand(dst, a, b) \
	(dst).f = _mm_and_si64((a).f, (b).f); \
	(dst).g = (a).g & (b).g
#define vor(dst, a, b) \
	(dst).f = _mm_or_si64((a).f, (b).f); \
	(dst).g = (a).g | (b).g
#define vandn(dst, a, b) \
	(dst).f = _mm_andnot_si64((b).f, (a).f); \
	(dst).g = (a).g & ~(b).g
#define vshl1(dst, src) \
	(dst).f = _mm_add_pi8((src).f, (src).f); \
	(dst).g = (src).g << 1
#define vshl(dst, src, shift) \
	(dst).f = _mm_slli_si64((src).f, (shift)); \
	(dst).g = (src).g << (shift)
#define vshr(dst, src, shift) \
	(dst).f = _mm_srli_si64((src).f, (shift)); \
	(dst).g = (src).g >> (shift)
#else
/* Scalar fallback: one machine word per vector; depth handled by loops */
#if DES_BS_VECTOR
#undef DES_BS_VECTOR_LOOPS
#define DES_BS_VECTOR_LOOPS 1
#endif
typedef unsigned ARCH_WORD vtype;
#define vxorf(a, b) \
	((a) ^ (b))
#define vnot(dst, a) \
	(dst) = ~(a)
#define vand(dst, a, b) \
	(dst) = (a) & (b)
#define vor(dst, a, b) \
	(dst) = (a) | (b)
#define vandn(dst, a, b) \
	(dst) = (a) & ~(b)
#define vsel(dst, a, b, c) \
	(dst) = (((a) & ~(c)) ^ ((b) & (c)))
#define vshl(dst, src, shift) \
	(dst) = (src) << (shift)
#define vshr(dst, src, shift) \
	(dst) = (src) >> (shift)
/* Assume that 0 always fits in one load immediate instruction */
#undef vzero
#define vzero 0
/* Archs friendly to use of immediate values */
#if defined(__x86_64__) || defined(__i386__)
#undef vones
#define vones (~(vtype)0)
#endif
#endif
/* Default definitions for primitives a particular layout did not supply */
#ifndef vst
#define vst(dst, ofs, src) \
	*((vtype *)((DES_bs_vector *)&(dst) + (ofs))) = (src)
#endif
#if !defined(vxor) && defined(vxorf)
#define vxor(dst, a, b) \
	(dst) = vxorf((a), (b))
#endif
#if !defined(vxorf) && defined(vxor)
/*
 * This requires gcc's "Statement Exprs" extension (also supported by a number
 * of other C compilers).
 */
#define vxorf(a, b) \
	({ vtype tmp; vxor(tmp, (a), (b)); tmp; })
#endif
#ifndef vnot
#define vnot(dst, a) \
	vxor((dst), (a), vones)
#endif
#ifndef vshl1
#define vshl1(dst, src) \
	vshl((dst), (src), 1)
#endif
/*
 * Key-setup ("kv") primitives: reuse the full-width vector ops when the
 * layout supports shifts without depth loops, otherwise fall back to
 * per-word scalar ops iterated by for_each_depth_k().
 */
#if !DES_BS_VECTOR_LOOPS && defined(vshl) && defined(vshr)
#define DES_BS_VECTOR_LOOPS_K 0
#define DEPTH_K
#define for_each_depth_k()
#define kvtype vtype
#define kvand vand
#define kvor vor
#define kvshl1 vshl1
#define kvshl vshl
#define kvshr vshr
#else
#if DES_BS_VECTOR
#define DES_BS_VECTOR_LOOPS_K 1
#define DEPTH_K [depth]
#define for_each_depth_k() \
	for (depth = 0; depth < DES_BS_VECTOR; depth++)
#else
#define DES_BS_VECTOR_LOOPS_K 0
#endif
typedef unsigned ARCH_WORD kvtype;
#define kvand(dst, a, b) \
	(dst) = (a) & (b)
#define kvor(dst, a, b) \
	(dst) = (a) | (b)
#define kvshl1(dst, src) \
	(dst) = (src) << 1
#define kvshl(dst, src, shift) \
	(dst) = (src) << (shift)
#define kvshr(dst, src, shift) \
	(dst) = (src) >> (shift)
#endif
/* Per-byte bit masks, as immediates where the arch makes that cheap */
#if !DES_BS_VECTOR || DES_BS_VECTOR_LOOPS_K
#ifdef __x86_64__
#define mask01 0x0101010101010101UL
#elif __i386__
#define mask01 0x01010101UL
#else
#undef mask01
#endif
#ifdef mask01
#define mask02 (mask01 << 1)
#define mask04 (mask01 << 2)
#define mask08 (mask01 << 3)
#define mask10 (mask01 << 4)
#define mask20 (mask01 << 5)
#define mask40 (mask01 << 6)
#define mask80 (mask01 << 7)
#endif
#endif
/* Otherwise read the masks from a pre-initialized table in DES_bs_all */
#ifndef mask01
#define mask01 (*(kvtype *)&DES_bs_all.masks[0])
#define mask02 (*(kvtype *)&DES_bs_all.masks[1])
#define mask04 (*(kvtype *)&DES_bs_all.masks[2])
#define mask08 (*(kvtype *)&DES_bs_all.masks[3])
#define mask10 (*(kvtype *)&DES_bs_all.masks[4])
#define mask20 (*(kvtype *)&DES_bs_all.masks[5])
#define mask40 (*(kvtype *)&DES_bs_all.masks[6])
#define mask80 (*(kvtype *)&DES_bs_all.masks[7])
#endif
#ifdef __i386__ /* register-starved */
/* Keep only v0/v4 in locals; re-load the rest through memory each use */
#define LOAD_V \
	kvtype v0 = *(kvtype *)&vp[0]; \
	kvtype v4 = *(kvtype *)&vp[4];
#define v1 *(kvtype *)&vp[1]
#define v2 *(kvtype *)&vp[2]
#define v3 *(kvtype *)&vp[3]
#define v5 *(kvtype *)&vp[5]
#define v6 *(kvtype *)&vp[6]
#define v7 *(kvtype *)&vp[7]
#else
#define LOAD_V \
	kvtype v0 = *(kvtype *)&vp[0]; \
	kvtype v1 = *(kvtype *)&vp[1]; \
	kvtype v2 = *(kvtype *)&vp[2]; \
	kvtype v3 = *(kvtype *)&vp[3]; \
	kvtype v4 = *(kvtype *)&vp[4]; \
	kvtype v5 = *(kvtype *)&vp[5]; \
	kvtype v6 = *(kvtype *)&vp[6]; \
	kvtype v7 = *(kvtype *)&vp[7];
#endif
/* mask+shift+merge helpers; all expect a local "tmp" in scope */
#define kvand_shl1_or(dst, src, mask) \
	kvand(tmp, src, mask); \
	kvshl1(tmp, tmp); \
	kvor(dst, dst, tmp)
#define kvand_shl_or(dst, src, mask, shift) \
	kvand(tmp, src, mask); \
	kvshl(tmp, tmp, shift); \
	kvor(dst, dst, tmp)
#define kvand_shl1(dst, src, mask) \
	kvand(tmp, src, mask); \
	kvshl1(dst, tmp)
#define kvand_or(dst, src, mask) \
	kvand(tmp, src, mask); \
	kvor(dst, dst, tmp)
#define kvand_shr_or(dst, src, mask, shift) \
	kvand(tmp, src, mask); \
	kvshr(tmp, tmp, shift); \
	kvor(dst, dst, tmp)
#define kvand_shr(dst, src, mask, shift) \
	kvand(tmp, src, mask); \
	kvshr(dst, tmp, shift)
/*
 * Each FINALIZE_NEXT_KEY_BIT_n gathers bit n of the eight key words v0..v7
 * into one packed output word at *kp, alternating accumulators va/vb to
 * expose instruction-level parallelism, then advances kp.
 */
#define FINALIZE_NEXT_KEY_BIT_0 { \
	kvtype m = mask01, va, vb, tmp; \
	kvand(va, v0, m); \
	kvand_shl1(vb, v1, m); \
	kvand_shl_or(va, v2, m, 2); \
	kvand_shl_or(vb, v3, m, 3); \
	kvand_shl_or(va, v4, m, 4); \
	kvand_shl_or(vb, v5, m, 5); \
	kvand_shl_or(va, v6, m, 6); \
	kvand_shl_or(vb, v7, m, 7); \
	kvor(*(kvtype *)kp, va, vb); \
	kp++; \
}
#define FINALIZE_NEXT_KEY_BIT_1 { \
	kvtype m = mask02, va, vb, tmp; \
	kvand_shr(va, v0, m, 1); \
	kvand(vb, v1, m); \
	kvand_shl1_or(va, v2, m); \
	kvand_shl_or(vb, v3, m, 2); \
	kvand_shl_or(va, v4, m, 3); \
	kvand_shl_or(vb, v5, m, 4); \
	kvand_shl_or(va, v6, m, 5); \
	kvand_shl_or(vb, v7, m, 6); \
	kvor(*(kvtype *)kp, va, vb); \
	kp++; \
}
#define FINALIZE_NEXT_KEY_BIT_2 { \
	kvtype m = mask04, va, vb, tmp; \
	kvand_shr(va, v0, m, 2); \
	kvand_shr(vb, v1, m, 1); \
	kvand_or(va, v2, m); \
	kvand_shl1_or(vb, v3, m); \
	kvand_shl_or(va, v4, m, 2); \
	kvand_shl_or(vb, v5, m, 3); \
	kvand_shl_or(va, v6, m, 4); \
	kvand_shl_or(vb, v7, m, 5); \
	kvor(*(kvtype *)kp, va, vb); \
	kp++; \
}
#define FINALIZE_NEXT_KEY_BIT_3 { \
	kvtype m = mask08, va, vb, tmp; \
	kvand_shr(va, v0, m, 3); \
	kvand_shr(vb, v1, m, 2); \
	kvand_shr_or(va, v2, m, 1); \
	kvand_or(vb, v3, m); \
	kvand_shl1_or(va, v4, m); \
	kvand_shl_or(vb, v5, m, 2); \
	kvand_shl_or(va, v6, m, 3); \
	kvand_shl_or(vb, v7, m, 4); \
	kvor(*(kvtype *)kp, va, vb); \
	kp++; \
}
#define FINALIZE_NEXT_KEY_BIT_4 { \
	kvtype m = mask10, va, vb, tmp; \
	kvand_shr(va, v0, m, 4); \
	kvand_shr(vb, v1, m, 3); \
	kvand_shr_or(va, v2, m, 2); \
	kvand_shr_or(vb, v3, m, 1); \
	kvand_or(va, v4, m); \
	kvand_shl1_or(vb, v5, m); \
	kvand_shl_or(va, v6, m, 2); \
	kvand_shl_or(vb, v7, m, 3); \
	kvor(*(kvtype *)kp, va, vb); \
	kp++; \
}
#define FINALIZE_NEXT_KEY_BIT_5 { \
	kvtype m = mask20, va, vb, tmp; \
	kvand_shr(va, v0, m, 5); \
	kvand_shr(vb, v1, m, 4); \
	kvand_shr_or(va, v2, m, 3); \
	kvand_shr_or(vb, v3, m, 2); \
	kvand_shr_or(va, v4, m, 1); \
	kvand_or(vb, v5, m); \
	kvand_shl1_or(va, v6, m); \
	kvand_shl_or(vb, v7, m, 2); \
	kvor(*(kvtype *)kp, va, vb); \
	kp++; \
}
#define FINALIZE_NEXT_KEY_BIT_6 { \
	kvtype m = mask40, va, vb, tmp; \
	kvand_shr(va, v0, m, 6); \
	kvand_shr(vb, v1, m, 5); \
	kvand_shr_or(va, v2, m, 4); \
	kvand_shr_or(vb, v3, m, 3); \
	kvand_shr_or(va, v4, m, 2); \
	kvand_shr_or(vb, v5, m, 1); \
	kvand_or(va, v6, m); \
	kvand_shl1_or(vb, v7, m); \
	kvor(*(kvtype *)kp, va, vb); \
	kp++; \
}
/* Bit 7 is only present in 8-bit-per-char key formats (used by LM below) */
#define FINALIZE_NEXT_KEY_BIT_7 { \
	kvtype m = mask80, va, vb, tmp; \
	kvand_shr(va, v0, m, 7); \
	kvand_shr(vb, v1, m, 6); \
	kvand_shr_or(va, v2, m, 5); \
	kvand_shr_or(vb, v3, m, 4); \
	kvand_shr_or(va, v4, m, 3); \
	kvand_shr_or(vb, v5, m, 2); \
	kvand_shr_or(va, v6, m, 1); \
	kvand_or(vb, v7, m); \
	kvor(*(kvtype *)kp, va, vb); \
	kp++; \
}

/*
 * Transposes the buffered key characters in DES_bs_all.xkeys into the
 * bitsliced key layout in DES_bs_all.K: for each of 8 key characters,
 * bits 0..6 (7 bits per character, as DES uses) are packed via the
 * FINALIZE_NEXT_KEY_BIT_n macros.  With DES_BS_EXPAND, the pointer-based
 * key schedule KSp is additionally expanded into the value-based KS.v.
 */
#if DES_bs_mt
static MAYBE_INLINE void DES_bs_finalize_keys(int t)
#else
static MAYBE_INLINE void DES_bs_finalize_keys(void)
#endif
{
#if DES_BS_VECTOR_LOOPS_K
	int depth;
#endif

	for_each_depth_k() {
		DES_bs_vector *kp = (DES_bs_vector *)&DES_bs_all.K[0] DEPTH_K;
		int ic;
		for (ic = 0; ic < 8; ic++) {
			DES_bs_vector *vp =
			    (DES_bs_vector *)&DES_bs_all.xkeys.v[ic][0] DEPTH_K;
			LOAD_V
			FINALIZE_NEXT_KEY_BIT_0
			FINALIZE_NEXT_KEY_BIT_1
			FINALIZE_NEXT_KEY_BIT_2
			FINALIZE_NEXT_KEY_BIT_3
			FINALIZE_NEXT_KEY_BIT_4
			FINALIZE_NEXT_KEY_BIT_5
			FINALIZE_NEXT_KEY_BIT_6
		}
	}

#if DES_BS_EXPAND
	{
		int index;
		for (index = 0; index < 0x300; index++)
		for_each_depth_k() {
#if DES_BS_VECTOR_LOOPS_K
			DES_bs_all.KS.v[index] DEPTH_K =
			    DES_bs_all.KSp[index] DEPTH_K;
#else
			vst(*(kvtype *)&DES_bs_all.KS.v[index], 0,
			    *(kvtype *)DES_bs_all.KSp[index]);
#endif
		}
	}
#endif
}
/* NOTE(review): closes a conditional opened before this chunk — verify */
#endif

/*
 * Records the new salt and updates the E-box pointer table E.E for every
 * salt bit that differs from the previously installed salt: a set bit
 * swaps a pair of expansion entries (dst and dst + 24), in both halves.
 * Stops early once the remaining bits of new and old salts agree.
 */
#if DES_bs_mt
MAYBE_INLINE void DES_bs_set_salt_for_thread(int t, unsigned int salt)
#else
void DES_bs_set_salt(ARCH_WORD salt)
#endif
{
	unsigned int new = salt;
	unsigned int old = DES_bs_all.salt;
	int dst;

	DES_bs_all.salt = new;

	for (dst = 0; dst < 24; dst++) {
		if ((new ^ old) & 1) {
			DES_bs_vector *sp1, *sp2;
			int src1 = dst;
			int src2 = dst + 24;
			if (new & 1) {
				src1 = src2;
				src2 = dst;
			}
			sp1 = DES_bs_all.Ens[src1];
			sp2 = DES_bs_all.Ens[src2];
			DES_bs_all.E.E[dst] = (ARCH_WORD *)sp1;
			DES_bs_all.E.E[dst + 24] = (ARCH_WORD *)sp2;
			DES_bs_all.E.E[dst + 48] = (ARCH_WORD *)(sp1 + 32);
			DES_bs_all.E.E[dst + 72] = (ARCH_WORD *)(sp2 + 32);
		}
		new >>= 1;
		old >>= 1;
		if (new == old)
			break;
	}
}

#if !DES_BS_ASM

/* Include the S-boxes here so that the compiler can inline them */
#if DES_BS == 3
#include "sboxes-s.c"
#elif DES_BS == 2
#include "sboxes.c"
#else
#undef andn
#include "nonstd.c"
#endif

/* Shorthands used by the x/y/z access macros below */
#define b DES_bs_all.B
#define e DES_bs_all.E.E

#if DES_BS_VECTOR_LOOPS
#define kd [depth]
#define bd [depth]
#define ed [depth]
#define DEPTH [depth]
#define for_each_depth() \
	for (depth = 0; depth < DES_BS_VECTOR; depth++)
#else
#if DES_BS_EXPAND
#define kd
#else
#define kd [0]
#endif
#define bd
#define ed [0]
#define DEPTH
#define for_each_depth()
#endif

#define DES_bs_clear_block_8(i) \
	for_each_depth() { \
		vst(b[i] bd, 0, zero); \
		vst(b[i] bd, 1, zero); \
		vst(b[i] bd, 2, zero); \
		vst(b[i] bd, 3, zero); \
		vst(b[i] bd, 4, zero); \
		vst(b[i] bd, 5, zero); \
		vst(b[i] bd, 6, zero); \
		vst(b[i] bd, 7, zero); \
	}

#define DES_bs_clear_block \
	DES_bs_clear_block_8(0); \
	DES_bs_clear_block_8(8); \
	DES_bs_clear_block_8(16); \
	DES_bs_clear_block_8(24); \
	DES_bs_clear_block_8(32); \
	DES_bs_clear_block_8(40); \
	DES_bs_clear_block_8(48); \
	DES_bs_clear_block_8(56);

#define DES_bs_set_block_8(i, v0, v1, v2, v3, v4, v5, v6, v7) \
	for_each_depth() { \
		vst(b[i] bd, 0, v0); \
		vst(b[i] bd, 1, v1); \
		vst(b[i] bd, 2, v2); \
		vst(b[i] bd, 3, v3); \
		vst(b[i] bd, 4, v4); \
		vst(b[i] bd, 5, v5); \
		vst(b[i] bd, 6, v6); \
		vst(b[i] bd, 7, v7); \
	}

/* x: E-box input XOR key; y: data block XOR key; z: S-box output slot */
#define x(p) vxorf(*(vtype *)&e[p] ed, *(vtype *)&k[p] kd)
#define y(p, q) vxorf(*(vtype *)&b[p] bd, *(vtype *)&k[q] kd)
#define z(r) ((vtype *)&b[r] bd)

/*
 * Bitslice DES with 25 iterations of the 16-round cipher (the iteration
 * count used by traditional DES-based crypt(3)).  The start/swap/next
 * goto structure interleaves the key schedule walk with the block halves;
 * rounds_and_swapped doubles as loop counter and phase marker (0x100/0x108
 * sentinel values).  Keys are re-finalized lazily when flagged changed.
 */
void DES_bs_crypt_25(int keys_count)
{
#if DES_bs_mt
	int t, n = (keys_count + (DES_BS_DEPTH - 1)) / DES_BS_DEPTH;
#endif

#ifdef _OPENMP
#pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, keys_count)
#endif
	for_each_t(n) {
#if DES_BS_EXPAND
		DES_bs_vector *k;
#else
		ARCH_WORD **k;
#endif
		int iterations, rounds_and_swapped;
#if DES_BS_VECTOR_LOOPS
		int depth;
#endif

		if (DES_bs_all.keys_changed)
			goto finalize_keys;

body:
#if DES_bs_mt
		DES_bs_set_salt_for_thread(t, DES_bs_all_by_tnum(-1).salt);
#endif

		{
			vtype zero = vzero;
			DES_bs_clear_block
		}

#if DES_BS_EXPAND
		k =
DES_bs_all.KS.v;
#else
		k = DES_bs_all.KS.p;
#endif
		rounds_and_swapped = 8;
		iterations = 25;

start:
		/* First half of a round pair: mixed x()/y() operand sources */
		for_each_depth()
		s1(x(0), x(1), x(2), x(3), x(4), x(5),
			z(40), z(48), z(54), z(62));
		for_each_depth()
		s2(x(6), x(7), x(8), x(9), x(10), x(11),
			z(44), z(59), z(33), z(49));
		for_each_depth()
		s3(y(7, 12), y(8, 13), y(9, 14), y(10, 15), y(11, 16), y(12, 17),
			z(55), z(47), z(61), z(37));
		for_each_depth()
		s4(y(11, 18), y(12, 19), y(13, 20), y(14, 21), y(15, 22), y(16, 23),
			z(57), z(51), z(41), z(32));
		for_each_depth()
		s5(x(24), x(25), x(26), x(27), x(28), x(29),
			z(39), z(45), z(56), z(34));
		for_each_depth()
		s6(x(30), x(31), x(32), x(33), x(34), x(35),
			z(35), z(60), z(42), z(50));
		for_each_depth()
		s7(y(23, 36), y(24, 37), y(25, 38), y(26, 39), y(27, 40), y(28, 41),
			z(63), z(43), z(53), z(38));
		for_each_depth()
		s8(y(27, 42), y(28, 43), y(29, 44), y(30, 45), y(31, 46), y(0, 47),
			z(36), z(58), z(46), z(52));

		if (rounds_and_swapped == 0x100)
			goto next;

swap:
		/* Second half: operates on the swapped block halves */
		for_each_depth()
		s1(x(48), x(49), x(50), x(51), x(52), x(53),
			z(8), z(16), z(22), z(30));
		for_each_depth()
		s2(x(54), x(55), x(56), x(57), x(58), x(59),
			z(12), z(27), z(1), z(17));
		for_each_depth()
		s3(y(39, 60), y(40, 61), y(41, 62), y(42, 63), y(43, 64), y(44, 65),
			z(23), z(15), z(29), z(5));
		for_each_depth()
		s4(y(43, 66), y(44, 67), y(45, 68), y(46, 69), y(47, 70), y(48, 71),
			z(25), z(19), z(9), z(0));
		for_each_depth()
		s5(x(72), x(73), x(74), x(75), x(76), x(77),
			z(7), z(13), z(24), z(2));
		for_each_depth()
		s6(x(78), x(79), x(80), x(81), x(82), x(83),
			z(3), z(28), z(10), z(18));
		for_each_depth()
		s7(y(55, 84), y(56, 85), y(57, 86), y(58, 87), y(59, 88), y(60, 89),
			z(31), z(11), z(21), z(6));
		for_each_depth()
		s8(y(59, 90), y(60, 91), y(61, 92), y(62, 93), y(63, 94), y(32, 95),
			z(4), z(26), z(14), z(20));

		k += 96;

		if (--rounds_and_swapped)
			goto start;
		/* Rewind the key schedule (0x300 entries) with a 48-entry bias */
		k -= (0x300 + 48);
		rounds_and_swapped = 0x108;
		if (--iterations)
			goto swap;
#if DES_bs_mt
		continue;
#else
		return;
#endif

next:
		k -= (0x300 - 48);
		rounds_and_swapped = 8;
		iterations--;
		goto start;

finalize_keys:
		DES_bs_all.keys_changed = 0;
#if DES_bs_mt
		DES_bs_finalize_keys(t);
#else
		DES_bs_finalize_keys();
#endif
		goto body;
	}
}

/*
 * Same round structure as DES_bs_crypt_25, but runs a caller-supplied
 * number of iterations ("count") and uses x() operands throughout.
 */
void DES_bs_crypt(int count, int keys_count)
{
#if DES_bs_mt
	int t, n = (keys_count + (DES_BS_DEPTH - 1)) / DES_BS_DEPTH;
#endif

#ifdef _OPENMP
#pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, count, keys_count)
#endif
	for_each_t(n) {
#if DES_BS_EXPAND
		DES_bs_vector *k;
#else
		ARCH_WORD **k;
#endif
		int iterations, rounds_and_swapped;
#if DES_BS_VECTOR_LOOPS
		int depth;
#endif

		if (DES_bs_all.keys_changed)
			goto finalize_keys;

body:
#if DES_bs_mt
		DES_bs_set_salt_for_thread(t, DES_bs_all_by_tnum(-1).salt);
#endif

		{
			vtype zero = vzero;
			DES_bs_clear_block
		}

#if DES_BS_EXPAND
		k = DES_bs_all.KS.v;
#else
		k = DES_bs_all.KS.p;
#endif
		rounds_and_swapped = 8;
		iterations = count;

start:
		for_each_depth()
		s1(x(0), x(1), x(2), x(3), x(4), x(5),
			z(40), z(48), z(54), z(62));
		for_each_depth()
		s2(x(6), x(7), x(8), x(9), x(10), x(11),
			z(44), z(59), z(33), z(49));
		for_each_depth()
		s3(x(12), x(13), x(14), x(15), x(16), x(17),
			z(55), z(47), z(61), z(37));
		for_each_depth()
		s4(x(18), x(19), x(20), x(21), x(22), x(23),
			z(57), z(51), z(41), z(32));
		for_each_depth()
		s5(x(24), x(25), x(26), x(27), x(28), x(29),
			z(39), z(45), z(56), z(34));
		for_each_depth()
		s6(x(30), x(31), x(32), x(33), x(34), x(35),
			z(35), z(60), z(42), z(50));
		for_each_depth()
		s7(x(36), x(37), x(38), x(39), x(40), x(41),
			z(63), z(43), z(53), z(38));
		for_each_depth()
		s8(x(42), x(43), x(44), x(45), x(46), x(47),
			z(36), z(58), z(46), z(52));

		if (rounds_and_swapped == 0x100)
			goto next;

swap:
		for_each_depth()
		s1(x(48), x(49), x(50), x(51), x(52), x(53),
			z(8), z(16), z(22), z(30));
		for_each_depth()
		s2(x(54), x(55), x(56), x(57), x(58), x(59),
			z(12), z(27), z(1), z(17));
		for_each_depth()
		s3(x(60), x(61), x(62), x(63), x(64), x(65),
			z(23), z(15), z(29), z(5));
		for_each_depth()
		s4(x(66), x(67), x(68), x(69), x(70), x(71),
			z(25), z(19), z(9), z(0));
		for_each_depth()
		s5(x(72), x(73), x(74), x(75), x(76), x(77),
			z(7), z(13), z(24), z(2));
		for_each_depth()
		s6(x(78), x(79), x(80), x(81), x(82), x(83),
			z(3), z(28), z(10), z(18));
		for_each_depth()
		s7(x(84), x(85), x(86), x(87), x(88), x(89),
			z(31), z(11), z(21), z(6));
		for_each_depth()
		s8(x(90), x(91), x(92), x(93), x(94), x(95),
			z(4), z(26), z(14), z(20));

		k += 96;

		if (--rounds_and_swapped)
			goto start;
		k -= (0x300 + 48);
		rounds_and_swapped = 0x108;
		if (--iterations)
			goto swap;
#if DES_bs_mt
		continue;
#else
		return;
#endif

next:
		k -= (0x300 - 48);
		rounds_and_swapped = 8;
		if (--iterations)
			goto start;
#if DES_bs_mt
		continue;
#else
		return;
#endif

finalize_keys:
		DES_bs_all.keys_changed = 0;
#if DES_bs_mt
		DES_bs_finalize_keys(t);
#else
		DES_bs_finalize_keys();
#endif
		goto body;
	}
}

#undef x

/*
 * Key finalization for LM hashes: 7 key characters of 8 bits each
 * (bits 0..7), rather than 8 characters of 7 bits as in DES crypt.
 */
#if DES_bs_mt
static MAYBE_INLINE void DES_bs_finalize_keys_LM(int t)
#else
static MAYBE_INLINE void DES_bs_finalize_keys_LM(void)
#endif
{
#if DES_BS_VECTOR_LOOPS_K
	int depth;
#endif

	for_each_depth_k() {
		DES_bs_vector *kp = (DES_bs_vector *)&DES_bs_all.K[0] DEPTH_K;
		int ic;
		for (ic = 0; ic < 7; ic++) {
			DES_bs_vector *vp =
			    (DES_bs_vector *)&DES_bs_all.xkeys.v[ic][0] DEPTH_K;
			LOAD_V
			FINALIZE_NEXT_KEY_BIT_0
			FINALIZE_NEXT_KEY_BIT_1
			FINALIZE_NEXT_KEY_BIT_2
			FINALIZE_NEXT_KEY_BIT_3
			FINALIZE_NEXT_KEY_BIT_4
			FINALIZE_NEXT_KEY_BIT_5
			FINALIZE_NEXT_KEY_BIT_6
			FINALIZE_NEXT_KEY_BIT_7
		}
	}
}

#undef kd
#if DES_BS_VECTOR_LOOPS
#define kd [depth]
#else
#define kd [0]
#endif

/*
 * Single unsalted DES encryption of a fixed plaintext (the constant block
 * pattern installed below — presumably the LM magic plaintext; verify),
 * keyed per candidate password.  8 loop iterations of 2 rounds each = 16
 * DES rounds.  The salt parameter is unused; returns keys_count unchanged.
 */
int DES_bs_crypt_LM(int *pcount, struct db_salt *salt)
{
	int keys_count = *pcount;
#if DES_bs_mt
	int t, n = (keys_count + (DES_BS_DEPTH - 1)) / DES_BS_DEPTH;
#endif

#ifdef _OPENMP
#pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, keys_count)
#endif
	for_each_t(n) {
		ARCH_WORD **k;
		int rounds;
#if DES_BS_VECTOR_LOOPS
		int depth;
#endif

		{
			vtype z = vzero, o = vones;
			DES_bs_set_block_8(0, z, z, z, z, z, z, z, z);
			DES_bs_set_block_8(8, o, o, o, z, o, z, z, z);
			DES_bs_set_block_8(16, z, z, z, z, z, z, z, o);
			DES_bs_set_block_8(24, z, z, o, z, z, o, o, o);
			DES_bs_set_block_8(32, z, z, z, o, z, o, o, o);
			DES_bs_set_block_8(40, z, z, z, z, z, o, z, z);
			DES_bs_set_block_8(48, o, o, z, z, z, z, o, z);
			DES_bs_set_block_8(56, o, z, o, z, o, o, o, o);
		}

#if DES_bs_mt
		DES_bs_finalize_keys_LM(t);
#else
		DES_bs_finalize_keys_LM();
#endif

		k = DES_bs_all.KS.p;
		rounds = 8;

		do {
			for_each_depth()
			s1(y(31, 0), y(0, 1), y(1, 2), y(2, 3), y(3, 4), y(4, 5),
				z(40), z(48), z(54), z(62));
			for_each_depth()
			s2(y(3, 6), y(4, 7), y(5, 8), y(6, 9), y(7, 10), y(8, 11),
				z(44), z(59), z(33), z(49));
			for_each_depth()
			s3(y(7, 12), y(8, 13), y(9, 14), y(10, 15), y(11, 16), y(12, 17),
				z(55), z(47), z(61), z(37));
			for_each_depth()
			s4(y(11, 18), y(12, 19), y(13, 20), y(14, 21), y(15, 22), y(16, 23),
				z(57), z(51), z(41), z(32));
			for_each_depth()
			s5(y(15, 24), y(16, 25), y(17, 26), y(18, 27), y(19, 28), y(20, 29),
				z(39), z(45), z(56), z(34));
			for_each_depth()
			s6(y(19, 30), y(20, 31), y(21, 32), y(22, 33), y(23, 34), y(24, 35),
				z(35), z(60), z(42), z(50));
			for_each_depth()
			s7(y(23, 36), y(24, 37), y(25, 38), y(26, 39), y(27, 40), y(28, 41),
				z(63), z(43), z(53), z(38));
			for_each_depth()
			s8(y(27, 42), y(28, 43), y(29, 44), y(30, 45), y(31, 46), y(0, 47),
				z(36), z(58), z(46), z(52));
			for_each_depth()
			s1(y(63, 48), y(32, 49), y(33, 50), y(34, 51), y(35, 52), y(36, 53),
				z(8), z(16), z(22), z(30));
			for_each_depth()
			s2(y(35, 54), y(36, 55), y(37, 56), y(38, 57), y(39, 58), y(40, 59),
				z(12), z(27), z(1), z(17));
			for_each_depth()
			s3(y(39, 60), y(40, 61), y(41, 62), y(42, 63), y(43, 64), y(44, 65),
				z(23), z(15), z(29), z(5));
			for_each_depth()
			s4(y(43, 66), y(44, 67), y(45, 68), y(46, 69), y(47, 70), y(48, 71),
				z(25), z(19), z(9), z(0));
			for_each_depth()
			s5(y(47, 72), y(48, 73), y(49, 74), y(50, 75), y(51, 76), y(52, 77),
				z(7), z(13), z(24), z(2));
			for_each_depth()
			s6(y(51, 78), y(52, 79), y(53, 80), y(54, 81), y(55, 82), y(56, 83),
				z(3), z(28), z(10), z(18));
			for_each_depth()
			s7(y(55, 84), y(56, 85), y(57, 86), y(58, 87), y(59, 88), y(60, 89),
				z(31), z(11), z(21), z(6));
			for_each_depth()
			s8(y(59, 90), y(60, 91), y(61, 92), y(62, 93), y(63, 94), y(32, 95),
				z(4), z(26), z(14), z(20));

			k += 96;
		} while (--rounds);
	}
	return keys_count;
}

/*
 * Key finalization for the plain (unsalted single-DES) mode: same layout
 * as DES_bs_finalize_keys — 8 characters, bits 0..6 — without the
 * DES_BS_EXPAND copy step.
 */
#if DES_bs_mt
static MAYBE_INLINE void DES_bs_finalize_keys_plain(int t)
#else
static MAYBE_INLINE void DES_bs_finalize_keys_plain(void)
#endif
{
#if DES_BS_VECTOR_LOOPS_K
	int depth;
#endif

	for_each_depth_k() {
		DES_bs_vector *kp = (DES_bs_vector *)&DES_bs_all.K[0] DEPTH_K;
		int ic;
		for (ic = 0; ic < 8; ic++) {
			DES_bs_vector *vp =
			    (DES_bs_vector *)&DES_bs_all.xkeys.v[ic][0] DEPTH_K;
			LOAD_V
			FINALIZE_NEXT_KEY_BIT_0
			FINALIZE_NEXT_KEY_BIT_1
			FINALIZE_NEXT_KEY_BIT_2
			FINALIZE_NEXT_KEY_BIT_3
			FINALIZE_NEXT_KEY_BIT_4
			FINALIZE_NEXT_KEY_BIT_5
			FINALIZE_NEXT_KEY_BIT_6
		}
	}
}

#undef v1
#undef v2
#undef v3
#undef v5
#undef v6
#undef v7

/* Single Des Encryption with no salt */
#undef kd
#if DES_BS_VECTOR_LOOPS
#define kd [depth]
#else
#define kd [0]
#endif

#if DES_BS_VECTOR
#define INDX [index]
#else
#define INDX
#endif

/*
 * Single unsalted DES encryption of the caller-provided plaintext bits in
 * DES_bs_P (copied into the work block B first); 8 x 2 = 16 rounds.
 */
void DES_bs_crypt_plain(int keys_count)
{
#if DES_bs_mt
	int t, n = (keys_count + (DES_BS_DEPTH - 1)) / DES_BS_DEPTH;
#endif

#ifdef _OPENMP
#pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, keys_count, DES_bs_P)
#endif
	for_each_t(n) {
		ARCH_WORD **k;
		int rounds;
#if DES_BS_VECTOR_LOOPS
		int depth;
#endif
		int i;
#if DES_BS_VECTOR
		int index;
#endif

		/* Load the plaintext bit vectors into the work block */
		for (i=0; i<64; i++) {
#if DES_BS_VECTOR
			for (index=0; index<DES_BS_VECTOR_SIZE; index++)
#endif
			DES_bs_all.B[i]INDX = DES_bs_P[i]INDX;
		}

#if DES_bs_mt
		DES_bs_finalize_keys_plain(t);
#else
		DES_bs_finalize_keys_plain();
#endif

		k = DES_bs_all.KS.p;
		rounds = 8;

		do {
			for_each_depth()
			s1(y(31, 0), y(0, 1), y(1, 2), y(2, 3), y(3, 4), y(4, 5),
				z(40), z(48), z(54), z(62));
			for_each_depth()
			s2(y(3, 6), y(4, 7), y(5, 8), y(6, 9), y(7, 10), y(8, 11),
				z(44), z(59), z(33), z(49));
			for_each_depth()
			s3(y(7, 12), y(8, 13), y(9, 14), y(10, 15), y(11, 16), y(12, 17),
				z(55), z(47), z(61), z(37));
for_each_depth() s4(y(11, 18), y(12, 19), y(13, 20), y(14, 21), y(15, 22), y(16, 23), z(57), z(51), z(41), z(32)); for_each_depth() s5(y(15, 24), y(16, 25), y(17, 26), y(18, 27), y(19, 28), y(20, 29), z(39), z(45), z(56), z(34)); for_each_depth() s6(y(19, 30), y(20, 31), y(21, 32), y(22, 33), y(23, 34), y(24, 35), z(35), z(60), z(42), z(50)); for_each_depth() s7(y(23, 36), y(24, 37), y(25, 38), y(26, 39), y(27, 40), y(28, 41), z(63), z(43), z(53), z(38)); for_each_depth() s8(y(27, 42), y(28, 43), y(29, 44), y(30, 45), y(31, 46), y(0, 47), z(36), z(58), z(46), z(52)); for_each_depth() s1(y(63, 48), y(32, 49), y(33, 50), y(34, 51), y(35, 52), y(36, 53), z(8), z(16), z(22), z(30)); for_each_depth() s2(y(35, 54), y(36, 55), y(37, 56), y(38, 57), y(39, 58), y(40, 59), z(12), z(27), z(1), z(17)); for_each_depth() s3(y(39, 60), y(40, 61), y(41, 62), y(42, 63), y(43, 64), y(44, 65), z(23), z(15), z(29), z(5)); for_each_depth() s4(y(43, 66), y(44, 67), y(45, 68), y(46, 69), y(47, 70), y(48, 71), z(25), z(19), z(9), z(0)); for_each_depth() s5(y(47, 72), y(48, 73), y(49, 74), y(50, 75), y(51, 76), y(52, 77), z(7), z(13), z(24), z(2)); for_each_depth() s6(y(51, 78), y(52, 79), y(53, 80), y(54, 81), y(55, 82), y(56, 83), z(3), z(28), z(10), z(18)); for_each_depth() s7(y(55, 84), y(56, 85), y(57, 86), y(58, 87), y(59, 88), y(60, 89), z(31), z(11), z(21), z(6)); for_each_depth() s8(y(59, 90), y(60, 91), y(61, 92), y(62, 93), y(63, 94), y(32, 95), z(4), z(26), z(14), z(20)); k += 96; } while (--rounds); }} #endif #ifdef INDX #undef INDX #endif #if DES_BS_VECTOR #define INDX [k] #else #define INDX #endif void DES_bs_generate_plaintext(unsigned char *plaintext) { int i, j; #if DES_BS_VECTOR int k; #endif /* Set same plaintext for all bit layers */ for (i = 0; i < 64; i++) { j = (int) (plaintext[i/8] >> (7-(i%8))) & 0x01; if (j==1) j = -1; #if DES_BS_VECTOR for (k=0; k<DES_BS_VECTOR_SIZE; k++) #endif DES_bs_P[i]INDX = j; } }
GrB_UnaryOp_wait.c
//------------------------------------------------------------------------------ // GrB_UnaryOp_wait: wait for a user-defined GrB_UnaryOp to complete //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // In SuiteSparse:GraphBLAS, a user-defined GrB_UnaryOp has no pending // operations to wait for. All this method does is verify that the op is // properly initialized, and then it does an OpenMP flush. #include "GB.h" GrB_Info GrB_UnaryOp_wait // no work, just check if the GrB_UnaryOp is valid ( GrB_UnaryOp op, GrB_WaitMode waitmode ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GB_WHERE1 ("GrB_UnaryOp_wait (op, waitmode)") ; GB_RETURN_IF_NULL_OR_FAULTY (op) ; //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- #pragma omp flush return (GrB_SUCCESS) ; }
tracking_auto.c
/*
 * ***** BEGIN GPL LICENSE BLOCK *****
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2011 Blender Foundation.
 * All rights reserved.
 *
 * Contributor(s): Blender Foundation,
 *                 Sergey Sharybin
 *                 Keir Mierle
 *
 * ***** END GPL LICENSE BLOCK *****
 */

/** \file blender/blenkernel/intern/tracking_auto.c
 *  \ingroup bke
 */

#include <stdlib.h>

#include "MEM_guardedalloc.h"

#include "DNA_movieclip_types.h"
#include "DNA_object_types.h"   /* SELECT */

#include "BLI_utildefines.h"
#include "BLI_listbase.h"
#include "BLI_threads.h"
#include "BLI_math.h"

#include "BKE_movieclip.h"
#include "BKE_tracking.h"

#include "libmv-capi.h"
#include "tracking_private.h"

/* Per-track options plus a bit of per-track tracking state. */
typedef struct AutoTrackOptions {
	int clip_index;  /** Index of the clip this track belongs to. */
	int track_index;  /* Index of the track in AutoTrack tracks structure. */
	MovieTrackingTrack *track;  /* Pointer to an original track. */
	libmv_TrackRegionOptions track_region_options;  /* Options for the region
	                                                 * tracker. */
	bool use_keyframe_match;  /* Keyframe pattern matching. */

	/* TODO(sergey): A bit awkward to keep it in here, only used to
	 * place a disabled marker once the tracking fails.
	 * Either find a more clear way to do it or call it track context
	 * or state, not options.
	 */
	bool is_failed;     /* Set once the region tracker fails for this track. */
	int failed_frame;   /* Clip-space frame at which tracking failed. */
} AutoTrackOptions;

/* Everything the auto-tracker needs across step/sync calls. */
typedef struct AutoTrackContext {
	MovieClip *clips[MAX_ACCESSOR_CLIP];
	int num_clips;

	MovieClipUser user;
	int frame_width, frame_height;

	struct libmv_AutoTrack *autotrack;
	TrackingImageAccessor *image_accessor;

	int num_tracks;            /* Number of tracks being tracked. */
	AutoTrackOptions *options; /* Per-tracking track options. */

	/* Array of all tracks, indexed by track_index. */
	MovieTrackingTrack **tracks;

	bool backwards;    /* Track towards smaller frame numbers. */
	bool sequence;     /* Track a whole sequence rather than a single step. */
	int first_frame;   /* Scene frame the tracking started at. */
	int sync_frame;    /* Last scene frame synced back into DNA markers. */
	bool first_sync;   /* True until the first BKE_autotrack_context_sync. */

	/* Guards the shared libmv AutoTrack storage and context->user.framenr. */
	SpinLock spin_lock;
} AutoTrackContext;

/* Normalized (0..1) coordinate -> libmv pixel coordinate.
 * The -0.5f offset matches libmv's pixel-center convention (presumably;
 * see libmv-capi for the authoritative definition). */
static void normalized_to_libmv_frame(const float normalized[2],
                                      const int frame_dimensions[2],
                                      float result[2])
{
	result[0] = normalized[0] * frame_dimensions[0] - 0.5f;
	result[1] = normalized[1] * frame_dimensions[1] - 0.5f;
}

/* Normalized coordinate relative to `origin` -> absolute libmv pixel
 * coordinate. */
static void normalized_relative_to_libmv_frame(const float normalized[2],
                                               const float origin[2],
                                               const int frame_dimensions[2],
                                               float result[2])
{
	result[0] = (normalized[0] + origin[0]) * frame_dimensions[0] - 0.5f;
	result[1] = (normalized[1] + origin[1]) * frame_dimensions[1] - 0.5f;
}

/* Inverse of normalized_to_libmv_frame. */
static void libmv_frame_to_normalized(const float frame_coord[2],
                                      const int frame_dimensions[2],
                                      float result[2])
{
	result[0] = (frame_coord[0] + 0.5f) / frame_dimensions[0];
	result[1] = (frame_coord[1] + 0.5f) / frame_dimensions[1];
}

/* Absolute libmv pixel coordinate -> normalized coordinate relative to
 * `origin` (note: no +0.5f here, unlike libmv_frame_to_normalized, because
 * the offset cancels when both ends are in libmv pixel space). */
static void libmv_frame_to_normalized_relative(const float frame_coord[2],
                                               const float origin[2],
                                               const int frame_dimensions[2],
                                               float result[2])
{
	result[0] = (frame_coord[0] - origin[0]) / frame_dimensions[0];
	result[1] = (frame_coord[1] - origin[1]) / frame_dimensions[1];
}

/* Fill a libmv_Marker from a DNA track/marker pair.
 * `clip`/`track_index` identify the marker inside the libmv AutoTrack;
 * `backwards` selects which neighbour/keyframe becomes the reference frame. */
static void dna_marker_to_libmv_marker(/*const*/ MovieTrackingTrack *track,
                                       /*const*/ MovieTrackingMarker *marker,
                                       int clip, int track_index,
                                       int frame_width, int frame_height,
                                       bool backwards,
                                       libmv_Marker *libmv_marker)
{
	const int frame_dimensions[2] = {frame_width, frame_height};
	int i;

	libmv_marker->clip = clip;
	libmv_marker->frame = marker->framenr;
	libmv_marker->track = track_index;

	normalized_to_libmv_frame(marker->pos,
	                          frame_dimensions,
	                          libmv_marker->center);
	for (i = 0; i < 4; ++i) {
		/* Pattern corners are stored relative to marker->pos. */
		normalized_relative_to_libmv_frame(marker->pattern_corners[i],
		                                   marker->pos,
		                                   frame_dimensions,
		                                   libmv_marker->patch[i]);
	}
	normalized_relative_to_libmv_frame(marker->search_min,
	                                   marker->pos,
	                                   frame_dimensions,
	                                   libmv_marker->search_region_min);
	normalized_relative_to_libmv_frame(marker->search_max,
	                                   marker->pos,
	                                   frame_dimensions,
	                                   libmv_marker->search_region_max);

	/* TODO(sergey): All the markers do have 1.0 weight. */
	libmv_marker->weight = 1.0f;

	if (marker->flag & MARKER_TRACKED) {
		libmv_marker->source = LIBMV_MARKER_SOURCE_TRACKED;
	}
	else {
		libmv_marker->source = LIBMV_MARKER_SOURCE_MANUAL;
	}
	libmv_marker->status = LIBMV_MARKER_STATUS_UNKNOWN;
	libmv_marker->model_type = LIBMV_MARKER_MODEL_TYPE_POINT;
	libmv_marker->model_id = 0;

	/* TODO(sergey): We currently don't support reference marker from
	 * different clip.
	 */
	libmv_marker->reference_clip = clip;
	if (track->pattern_match == TRACK_MATCH_KEYFRAME) {
		MovieTrackingMarker *keyframe_marker =
			tracking_get_keyframed_marker(track,
			                              marker->framenr,
			                              backwards);
		libmv_marker->reference_frame = keyframe_marker->framenr;
	}
	else {
		/* Previous-frame matching: reference is the adjacent frame in the
		 * direction we came from. */
		libmv_marker->reference_frame = backwards ?
		                                marker->framenr - 1 :
		                                marker->framenr;
	}

	libmv_marker->disabled_channels =
	        ((track->flag & TRACK_DISABLE_RED)   ? LIBMV_MARKER_CHANNEL_R : 0) |
	        ((track->flag & TRACK_DISABLE_GREEN) ? LIBMV_MARKER_CHANNEL_G : 0) |
	        ((track->flag & TRACK_DISABLE_BLUE)  ? LIBMV_MARKER_CHANNEL_B : 0);
}

/* Inverse conversion: fill a DNA marker from a libmv_Marker.
 * Note: marker->flag is reset, so any DNA-only flags are lost here. */
static void libmv_marker_to_dna_marker(libmv_Marker *libmv_marker,
                                       int frame_width, int frame_height,
                                       MovieTrackingMarker *marker)
{
	const int frame_dimensions[2] = {frame_width, frame_height};
	int i;

	marker->framenr = libmv_marker->frame;
	libmv_frame_to_normalized(libmv_marker->center,
	                          frame_dimensions,
	                          marker->pos);
	for (i = 0; i < 4; ++i) {
		libmv_frame_to_normalized_relative(libmv_marker->patch[i],
		                                   libmv_marker->center,
		                                   frame_dimensions,
		                                   marker->pattern_corners[i]);
	}
	libmv_frame_to_normalized_relative(libmv_marker->search_region_min,
	                                   libmv_marker->center,
	                                   frame_dimensions,
	                                   marker->search_min);
	libmv_frame_to_normalized_relative(libmv_marker->search_region_max,
	                                   libmv_marker->center,
	                                   frame_dimensions,
	                                   marker->search_max);

	marker->flag = 0;
	if (libmv_marker->source == LIBMV_MARKER_SOURCE_TRACKED) {
		marker->flag |= MARKER_TRACKED;
	}
	else {
		marker->flag &= ~MARKER_TRACKED;
	}
}

/* A track takes part in auto-tracking when it is selected, neither locked
 * nor hidden, and has an enabled marker at the current user frame. */
static bool check_track_trackable(MovieClip *clip,
                                  MovieTrackingTrack *track,
                                  MovieClipUser *user)
{
	if (TRACK_SELECTED(track) &&
	    (track->flag & (TRACK_LOCKED | TRACK_HIDDEN)) == 0)
	{
		MovieTrackingMarker *marker;
		int frame;
		frame = BKE_movieclip_remap_scene_to_clip_frame(clip, user->framenr);
		marker = BKE_tracking_marker_get(track, frame);
		return (marker->flag & MARKER_DISABLED) == 0;
	}
	return false;
}

/* Returns false if marker crossed margin area from frame bounds.
 */
static bool tracking_check_marker_margin(libmv_Marker *libmv_marker,
                                         int margin,
                                         int frame_width,
                                         int frame_height)
{
	float patch_min[2], patch_max[2];
	float margin_left, margin_top, margin_right, margin_bottom;

	/* Bounding box of the four pattern corners. */
	INIT_MINMAX2(patch_min, patch_max);
	minmax_v2v2_v2(patch_min, patch_max, libmv_marker->patch[0]);
	minmax_v2v2_v2(patch_min, patch_max, libmv_marker->patch[1]);
	minmax_v2v2_v2(patch_min, patch_max, libmv_marker->patch[2]);
	minmax_v2v2_v2(patch_min, patch_max, libmv_marker->patch[3]);

	/* Effective margin on each side: at least `margin` pixels, but no less
	 * than the pattern's own extent on that side. */
	margin_left = max_ff(libmv_marker->center[0] - patch_min[0], margin);
	margin_top = max_ff(patch_max[1] - libmv_marker->center[1], margin);
	margin_right = max_ff(patch_max[0] - libmv_marker->center[0], margin);
	margin_bottom = max_ff(libmv_marker->center[1] - patch_min[1], margin);

	if (libmv_marker->center[0] < margin_left ||
	    libmv_marker->center[0] > frame_width - margin_right ||
	    libmv_marker->center[1] < margin_bottom ||
	    libmv_marker->center[1] > frame_height - margin_top)
	{
		return false;
	}
	return true;
}

/* Create an auto-track context for the active tracks of `clip`.
 * Seeds the libmv AutoTrack with every enabled marker of every active track,
 * and builds per-track tracking options for the trackable ones.
 * Caller owns the result; release with BKE_autotrack_context_free(). */
AutoTrackContext *BKE_autotrack_context_new(MovieClip *clip,
                                            MovieClipUser *user,
                                            const bool backwards,
                                            const bool sequence)
{
	AutoTrackContext *context = MEM_callocN(sizeof(AutoTrackContext),
	                                        "autotrack context");
	MovieTracking *tracking = &clip->tracking;
	MovieTrackingTrack *track;
	ListBase *tracksbase = BKE_tracking_get_active_tracks(tracking);
	int i, track_index, frame_width, frame_height;

	BKE_movieclip_get_size(clip, user, &frame_width, &frame_height);

	/* TODO(sergey): Currently using only a single clip.
	 */
	context->clips[0] = clip;
	context->num_clips = 1;

	context->user = *user;
	/* Always track on the full-resolution, un-flagged footage. */
	context->user.render_size = MCLIP_PROXY_RENDER_SIZE_FULL;
	context->user.render_flag = 0;
	context->frame_width = frame_width;
	context->frame_height = frame_height;
	context->backwards = backwards;
	context->sequence = sequence;
	context->first_frame = user->framenr;
	context->sync_frame = user->framenr;
	context->first_sync = true;

	BLI_spin_init(&context->spin_lock);

	int num_total_tracks = BLI_listbase_count(tracksbase);
	context->tracks = MEM_callocN(sizeof(MovieTrackingTrack*) * num_total_tracks,
	                              "auto track pointers");

	context->image_accessor = tracking_image_accessor_new(context->clips, 1,
	                                                      context->tracks,
	                                                      num_total_tracks,
	                                                      user->framenr);
	context->autotrack =
		libmv_autoTrackNew(context->image_accessor->libmv_accessor);

	/* Fill in Autotrack with all markers we know. */
	track_index = 0;
	for (track = tracksbase->first; track; track = track->next) {
		if (check_track_trackable(clip, track, user)) {
			context->num_tracks++;
		}

		for (i = 0; i < track->markersnr; ++i) {
			MovieTrackingMarker *marker = track->markers + i;
			if ((marker->flag & MARKER_DISABLED) == 0) {
				libmv_Marker libmv_marker;
				dna_marker_to_libmv_marker(track, marker, 0, track_index,
				                           frame_width, frame_height,
				                           backwards,
				                           &libmv_marker);
				libmv_autoTrackAddMarker(context->autotrack, &libmv_marker);
			}
		}
		track_index++;
	}

	/* Create per-track tracking options. */
	context->options = MEM_callocN(sizeof(AutoTrackOptions) * context->num_tracks,
	                               "auto track options");
	i = track_index = 0;
	for (track = tracksbase->first; track; track = track->next) {
		if (check_track_trackable(clip, track, user)) {
			AutoTrackOptions *options = &context->options[i++];
			/* TODO(sergey): Single clip only for now.
			 */
			options->clip_index = 0;
			options->track_index = track_index;
			options->track = track;
			tracking_configure_tracker(track,
			                           NULL,
			                           &options->track_region_options);
			options->use_keyframe_match =
				track->pattern_match == TRACK_MATCH_KEYFRAME;
		}
		/* NOTE: tracks[] is indexed by the position in the full active-tracks
		 * list, not by the trackable subset. */
		context->tracks[track_index] = track;
		++track_index;
	}

	return context;
}

/* Advance all trackable markers by one frame (direction from context).
 * Returns true if at least one track produced (or attempted) a result.
 * NOTE(review): `ok = true` is written from multiple OpenMP threads without
 * synchronization — benign for a plain flag on most targets, but formally a
 * data race; confirm intent. */
bool BKE_autotrack_context_step(AutoTrackContext *context)
{
	int frame_delta = context->backwards ? -1 : 1;
	bool ok = false;
	int track;

#pragma omp parallel for if (context->num_tracks > 1)
	for (track = 0; track < context->num_tracks; ++track) {
		AutoTrackOptions *options = &context->options[track];
		if (options->is_failed) {
			continue;
		}
		libmv_Marker libmv_current_marker,
		             libmv_reference_marker,
		             libmv_tracked_marker;
		libmv_TrackRegionResult libmv_result;
		int frame = BKE_movieclip_remap_scene_to_clip_frame(
			context->clips[options->clip_index],
			context->user.framenr);
		bool has_marker;

		/* AutoTrack storage is shared across threads; guard reads/writes. */
		BLI_spin_lock(&context->spin_lock);
		has_marker = libmv_autoTrackGetMarker(context->autotrack,
		                                      options->clip_index,
		                                      frame,
		                                      options->track_index,
		                                      &libmv_current_marker);
		BLI_spin_unlock(&context->spin_lock);

		if (has_marker) {
			if (!tracking_check_marker_margin(&libmv_current_marker,
			                                  options->track->margin,
			                                  context->frame_width,
			                                  context->frame_height))
			{
				/* Marker drifted into the margin area: silently stop
				 * tracking it (no failure recorded). */
				continue;
			}

			libmv_tracked_marker = libmv_current_marker;
			libmv_tracked_marker.frame = frame + frame_delta;

			if (options->use_keyframe_match) {
				libmv_tracked_marker.reference_frame =
					libmv_current_marker.reference_frame;
				libmv_autoTrackGetMarker(context->autotrack,
				                         options->clip_index,
				                         libmv_tracked_marker.reference_frame,
				                         options->track_index,
				                         &libmv_reference_marker);
			}
			else {
				/* Previous-frame matching: the current marker is the
				 * reference. */
				libmv_tracked_marker.reference_frame = frame;
				libmv_reference_marker = libmv_current_marker;
			}

			if (libmv_autoTrackMarker(context->autotrack,
			                          &options->track_region_options,
			                          &libmv_tracked_marker,
			                          &libmv_result))
			{
				BLI_spin_lock(&context->spin_lock);
				libmv_autoTrackAddMarker(context->autotrack,
				                         &libmv_tracked_marker);
				BLI_spin_unlock(&context->spin_lock);
			}
			else {
				/* Tracking failed: remember where, so sync can place a
				 * disabled marker there. */
				options->is_failed = true;
				options->failed_frame = frame + frame_delta;
			}
			ok = true;
		}
	}

	BLI_spin_lock(&context->spin_lock);
	context->user.framenr += frame_delta;
	BLI_spin_unlock(&context->spin_lock);

	return ok;
}

/* Copy markers produced since the last sync from the libmv AutoTrack back
 * into the DNA tracks, inserting disabled boundary markers as needed. */
void BKE_autotrack_context_sync(AutoTrackContext *context)
{
	int newframe, frame_delta = context->backwards ? -1 : 1;
	int frame;

	BLI_spin_lock(&context->spin_lock);
	newframe = context->user.framenr;
	/* Walk every frame between the last synced frame and the current one,
	 * inclusive. */
	for (frame = context->sync_frame;
	     frame != (context->backwards ? newframe - 1 : newframe + 1);
	     frame += frame_delta)
	{
		MovieTrackingMarker marker;
		libmv_Marker libmv_marker;
		int clip = 0;
		int track;
		for (track = 0; track < context->num_tracks; ++track) {
			AutoTrackOptions *options = &context->options[track];
			int track_frame = BKE_movieclip_remap_scene_to_clip_frame(
				context->clips[options->clip_index], frame);
			if (options->is_failed) {
				if (options->failed_frame == track_frame) {
					/* Copy the last successful marker to the failed frame,
					 * disabled, so the failure is visible on the timeline. */
					MovieTrackingMarker *prev_marker =
						BKE_tracking_marker_get_exact(
							options->track,
							context->backwards ? frame + 1 : frame - 1);
					if (prev_marker) {
						marker = *prev_marker;
						marker.framenr = track_frame;
						marker.flag |= MARKER_DISABLED;
						BKE_tracking_marker_insert(options->track, &marker);
						continue;
					}
				}
				/* Skip frames past the failure point in the tracking
				 * direction. */
				if ((context->backwards &&
				     options->failed_frame > track_frame) ||
				    (!context->backwards &&
				     options->failed_frame < track_frame))
				{
					continue;
				}
			}
			if (libmv_autoTrackGetMarker(context->autotrack,
			                             clip,
			                             track_frame,
			                             options->track_index,
			                             &libmv_marker))
			{
				libmv_marker_to_dna_marker(&libmv_marker,
				                           context->frame_width,
				                           context->frame_height,
				                           &marker);
				if (context->first_sync && frame == context->sync_frame) {
					/* Bound the freshly tracked segment on the side we came
					 * from, once, on the very first sync. */
					tracking_marker_insert_disabled(options->track,
					                                &marker,
					                                !context->backwards,
					                                false);
				}
				BKE_tracking_marker_insert(options->track, &marker);
				tracking_marker_insert_disabled(options->track,
				                                &marker,
				                                context->backwards,
				                                false);
			}
		}
	}
	BLI_spin_unlock(&context->spin_lock);

	for (int clip = 0; clip < context->num_clips; ++clip) {
		MovieTracking *tracking = &context->clips[clip]->tracking;
		BKE_tracking_dopesheet_tag_update(tracking);
	}

	context->sync_frame = newframe;
	context->first_sync = false;
}

/* Move the clip user's current frame to the last synced frame. */
void BKE_autotrack_context_sync_user(AutoTrackContext *context,
                                     MovieClipUser *user)
{
	user->framenr = context->sync_frame;
}

/* After tracking finished: re-evaluate plane tracks that reference any of
 * the tracked point tracks (unless they use auto-keying). */
void BKE_autotrack_context_finish(AutoTrackContext *context)
{
	int clip_index;

	for (clip_index = 0; clip_index < context->num_clips; ++clip_index) {
		MovieClip *clip = context->clips[clip_index];
		ListBase *plane_tracks_base =
			BKE_tracking_get_active_plane_tracks(&clip->tracking);
		MovieTrackingPlaneTrack *plane_track;

		for (plane_track = plane_tracks_base->first;
		     plane_track;
		     plane_track = plane_track->next)
		{
			if ((plane_track->flag & PLANE_TRACK_AUTOKEY) == 0) {
				int track;
				for (track = 0; track < context->num_tracks; ++track) {
					if (BKE_tracking_plane_track_has_point_track(
						plane_track,
						context->options[track].track))
					{
						BKE_tracking_track_plane_from_existing_motion(
							plane_track,
							context->first_frame);
						break;
					}
				}
			}
		}
	}
}

/* Release everything owned by the context, including the context itself. */
void BKE_autotrack_context_free(AutoTrackContext *context)
{
	libmv_autoTrackDestroy(context->autotrack);
	tracking_image_accessor_destroy(context->image_accessor);
	MEM_freeN(context->options);
	MEM_freeN(context->tracks);
	BLI_spin_end(&context->spin_lock);
	MEM_freeN(context);
}
lud_omp.c
#include <stdio.h>
#include <omp.h>

extern int omp_num_threads;

/*
 * In-place LU decomposition (Doolittle form, no pivoting) of a dense
 * row-major size x size matrix `a`.  On return, the upper triangle of `a`
 * (including the diagonal) holds U and the strict lower triangle holds L
 * (L's unit diagonal is implicit).
 *
 * NOTE: there is no pivoting, so a zero (or tiny) pivot a[i*size+i] leads
 * to a division by zero / numerical blow-up; callers must supply matrices
 * for which plain Doolittle factorization is stable.
 *
 * Both inner sweeps of each outer iteration are parallelized with OpenMP
 * using `omp_num_threads` threads.
 */
void lud_omp(float *a, int size)
{
    int i, j, k;
    float sum;

    printf("num of threads = %d\n", omp_num_threads);

    /* The thread count is loop-invariant: set it once instead of on every
     * outer iteration (the original called this inside the i-loop). */
    omp_set_num_threads(omp_num_threads);

    for (i = 0; i < size; i++) {
        /* Compute row i of U: U(i,j) for j >= i. */
#pragma omp parallel for default(none) \
        private(j, k, sum) shared(size, i, a)
        for (j = i; j < size; j++) {
            sum = a[i * size + j];
            for (k = 0; k < i; k++)
                sum -= a[i * size + k] * a[k * size + j];
            a[i * size + j] = sum;
        }

        /* Compute column i of L: L(j,i) for j > i, scaled by the pivot. */
#pragma omp parallel for default(none) \
        private(j, k, sum) shared(size, i, a)
        for (j = i + 1; j < size; j++) {
            sum = a[j * size + i];
            for (k = 0; k < i; k++)
                sum -= a[j * size + k] * a[k * size + i];
            a[j * size + i] = sum / a[i * size + i];  /* no pivoting */
        }
    }
}
ktuple_pair.c
/* -*- mode: c; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /********************************************************************* * Clustal Omega - Multiple sequence alignment * * Copyright (C) 2010 University College Dublin * * Clustal-Omega is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This file is part of Clustal-Omega. * ********************************************************************/ /* * RCS $Id: ktuple_pair.c 305 2016-06-13 13:46:02Z fabian $ * * * K-Tuple code for pairwise alignment (Wilbur and Lipman, 1983; PMID * 6572363). Most code taken from showpair.c (Clustal 1.83) * DD: some functions now have lots of parameters as static variables * were removed to make code OpenMP-friendly * */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <string.h> #include <ctype.h> #include <stdlib.h> #include <math.h> #include <assert.h> #ifdef HAVE_OPENMP #include <omp.h> #endif #include "squid/squid.h" #include "util.h" #include "symmatrix.h" #include "ktuple_pair.h" #include "log.h" #include "progress.h" #define END_MARK -3 /* see interface.c in 1.83 */ #define NUMRES 32 /* max size of comparison matrix */ /* see notes below */ #undef SORT_LAST_ELEMENT_AS_WELL /* gap_pos1 = NUMRES-2; /@ code for gaps inserted by clustalw @/ */ static const int GAP_POS2 = NUMRES-1; /* code for gaps already in alignment */ static bool DNAFLAG = FALSE; static const char *AMINO_ACID_CODES = "ABCDEFGHIKLMNPQRSTUVWXYZ-"; static const char *NUCLEIC_ACID_CODES = "ACGTUN-"; /* As far as I understand the gap symbol should not be necessary here, * because we use isgap for testing later anyway. But changing this, * will affect max_res_code and max_nuc as well. So I leave it for now * as it is. 
 AW */
/* If TRUE, report k-tuple scores as percentages (presumably; confirm against
 * the scoring code later in this file). */
static bool percent = TRUE;

static void make_ptrs(int *tptr, int *pl, const int naseq, const int l,
                      const int ktup, const int max_res_code, char **seq_array);
static void put_frag(const int fs, const int v1, const int v2, const int flen,
                     const int curr_frag, int *next, int *maxsf, int **accum);
static bool frag_rel_pos(int a1, int b1, int a2, int b2, int ktup);
static void des_quick_sort(int *array1, int *array2, const int array_size);
static void pair_align(int seq_no, int l1, int l2, int max_res_code,
                       ktuple_param_t *aln_param, char **seq_array,
                       int *maxsf, int **accum, int max_aln_length,
                       int *zza, int *zzb, int *zzc, int *zzd);
static void encode(char *seq, char *naseq, int l, const char *res_codes);
static int res_index(const char *lookup, char c);

/* Simple pair of ints (used for fragment bookkeeping). */
typedef struct {
    int i1;
    int i2;
} two_ints_t;

/* default ktuple pairwise alignment parameters
 *
 */
/* protein */
/* designated initializer */
const ktuple_param_t default_protein_param = {
    .ktup = 1,
    .wind_gap = 3,
    .signif = 5,
    .window = 5,
};
/* dna */
/* designated initializer */
const ktuple_param_t default_dna_param = {
    .ktup = 2,
    .wind_gap = 5,
    .signif = 4,
    .window = 4,
};

/**
 * Encode a character sequence as small integer residue codes in naseq,
 * using the index into res_codes as the code; gaps become GAP_POS2 and
 * unknown characters become -1 (with a single warning per sequence).
 * The encoded sequence is terminated with END_MARK at position l+1.
 *
 * note: naseq should be unit-offset
 */
static void encode(char *seq, char *naseq, int l, const char *res_codes)
{
    /* code seq as ints ..
     * use GAP_POS2 for gap */
    register int i;
    bool seq_contains_unknown_char = FALSE;

    /*LOG_DEBUG("seq=%s naseq=%p l=%d", &(seq[1]), naseq, l); */
    for (i=1; i<=l; i++) {
        char res = toupper(seq[i]);
        if (isgap(res)) {
            naseq[i] = GAP_POS2; /* gap in input */
        } else {
            naseq[i] = res_index(res_codes, res);
        }
        /*LOG_DEBUG("Character '%c' at pos %d", res, i);*/
        if (-1 == naseq[i]) {
            seq_contains_unknown_char = TRUE;
            /*LOG_DEBUG("Unknown character '%c' at pos %d", res, i);*/
        }
        /*LOG_DEBUG("na_seq[%d]=%d", i, naseq[i]);*/
    }
    if (TRUE == seq_contains_unknown_char)
        Log(&rLog, LOG_WARN, "Unknown character in seq '%s'", &(seq[1]));
    naseq[i] = END_MARK;
    return;
}
/* end of encode */

/**
 * Return the index of character c in lookup string t, or -1 if c does not
 * occur in t.
 */
static int res_index(const char *t, char c)
{
    register int i;
    for (i=0; t[i] && t[i] != c; i++)
        ;
    if (t[i]) {
        return (i);
    } else {
        return -1;
    }
}
/* end of res_index */

/**
 * Build the k-tuple hash structures for encoded sequence `naseq` of length
 * `l`: pl[code] holds the last position of each ktup-word and tptr[] chains
 * earlier occurrences of the same word (0 terminates a chain).  Words
 * containing a gap/unknown residue code are skipped.
 *
 * NOTE(review): `a` is function-static, so concurrent calls from different
 * OpenMP threads would race on it even though the file header says statics
 * were removed for OpenMP-friendliness — it is recomputed identically per
 * call when ktup/max_res_code match, which may be why this is tolerated;
 * confirm with the callers.
 */
static void make_ptrs(int *tptr, int *pl, const int naseq, const int l,
                      const int ktup, const int max_res_code, char **seq_array)
{
    /* FIXME make 10 a constant and give it a nice name */
    static int a[10];   /* a[j] = (max_res_code+1)^(j-1): per-position weight */
    int i, j, code, flag;
    char residue;
    const int limit = (int) pow((double)(max_res_code+1),(double)ktup);

    for (i=1;i<=ktup;i++)
        a[i] = (int) pow((double)(max_res_code+1),(double)(i-1));

    for (i=1; i<=limit; ++i)
        pl[i]=0;
    for (i=1; i<=l; ++i)
        tptr[i]=0;

    for (i=1; i<=(l-ktup+1); ++i) {
        code=0;
        flag=FALSE;
        for (j=1; j<=ktup; ++j) {
            /* Log(&rLog, LOG_FORCED_DEBUG, "naseq=%d i=%d j=%d seq_array[naseq]=%p",
             * naseq, i, j, seq_array[naseq]); */
            residue = seq_array[naseq][i+j-1];
            /* Log(&rLog, LOG_FORCED_DEBUG, "residue = %d", residue); */
            if ((residue<0) || (residue > max_res_code)){
                /* gap or unknown residue inside the word: skip this word */
                flag=TRUE;
                break;
            }
            code += ((residue) * a[j]);
        }
        if (flag)
            continue;
        ++code;
        if (0 != pl[code])
            tptr[i] =pl[code];   /* chain to previous occurrence */
        pl[code] = i;
    }

    return;
}
/* end of make_ptrs */

/**
 *
 * FIXME Why hardcoding of 5?
*/ static void put_frag(const int fs, const int v1, const int v2, const int flen, const int curr_frag, int *next, int *maxsf, int **accum) { int end; accum[0][curr_frag]=fs; accum[1][curr_frag]=v1; accum[2][curr_frag]=v2; accum[3][curr_frag]=flen; if (!*maxsf) { *maxsf=1; accum[4][curr_frag]=0; return; } if (fs >= accum[0][*maxsf]) { accum[4][curr_frag]=*maxsf; *maxsf=curr_frag; return; } else { *next=*maxsf; while (TRUE) { end=*next; *next=accum[4][*next]; if (fs>=accum[0][*next]) break; } accum[4][curr_frag]=*next; accum[4][end]=curr_frag; } return; } /* end of put_frag */ /** * */ static bool frag_rel_pos(int a1, int b1, int a2, int b2, int ktup) { if (a1-b1 == a2-b2) { if (a2<a1) { return TRUE; } } else { if (a2+ktup-1<a1 && b2+ktup-1<b1) { return TRUE; } } return FALSE; } /* end of frag_rel_pos */ /** * * @note: This is together with des_quick_sort most time consuming * routine according to gprof on r110. Tried to replace it with qsort * and/or QSortAndTrackIndex(), which is always slower! So we keep the * original. * * Original doc: Quicksort routine, adapted from chapter 4, page 115 * of software tools by Kernighan and Plauger, (1986). Sort the * elements of array1 and sort the elements of array2 accordingly * * There might be a bug here. The original function apparently never * touches the last element and keeps it as is. Tried to fix this (see * SORT_LAST_ELEMENT_AS_WELL) which gives slightly worse performance * (-0.5% on BB). My fix might not be working or it's not a bug at * all... * * * */ static void des_quick_sort(int *array1, int *array2, const int array_size) { int temp1, temp2; int p, pivlin; int i, j; int lst[50], ust[50]; /* the maximum no. 
of elements must be*/ /* < log(base2) of 50 */ #if 0 for (i=1; i<=array_size; i++) { Log(&rLog, LOG_FORCED_DEBUG, "b4 sort array1[%d]=%d array2[%d]=%d", i, array1[i], i, array2[i]); } #endif lst[1] = 1; #ifdef SORT_LAST_ELEMENT_AS_WELL ust[1] = array_size; #else /* original */ ust[1] = array_size-1; #endif p = 1; while (p > 0) { if (lst[p] >= ust[p]) { p--; } else { i = lst[p] - 1; j = ust[p]; pivlin = array1[j]; while (i < j) { for (i=i+1; array1[i] < pivlin; i++) ; for (j=j-1; j > i; j--) if (array1[j] <= pivlin) break; if (i < j) { temp1 = array1[i]; array1[i] = array1[j]; array1[j] = temp1; temp2 = array2[i]; array2[i] = array2[j]; array2[j] = temp2; } } j = ust[p]; temp1 = array1[i]; array1[i] = array1[j]; array1[j] = temp1; temp2 = array2[i]; array2[i] = array2[j]; array2[j] = temp2; if (i-lst[p] < ust[p] - i) { lst[p+1] = lst[p]; ust[p+1] = i - 1; lst[p] = i + 1; } else { lst[p+1] = i + 1; ust[p+1] = ust[p]; ust[p] = i - 1; } p = p + 1; } } #if 0 for (i=1; i<=array_size; i++) { Log(&rLog, LOG_FORCED_DEBUG, "after sort array1[%d]=%d array2[%d]=%d", i, array1[i], i, array2[i]); } #endif return; } /* end of des_quick_sort */ /** * * FIXME together with des_quick_sort most time consuming routine * according to gprof on r110 * */ static void pair_align(int seq_no, int l1, int l2, int max_res_code, ktuple_param_t *aln_param, char **seq_array, int *maxsf, int **accum, int max_aln_length, int *zza, int *zzb, int *zzc, int *zzd) { int next; /* forrmerly static */ int pot[8],i, j, l, m, flag, limit, pos, vn1, vn2, flen, osptr, fs; int tv1, tv2, encrypt, subt1, subt2, rmndr; char residue; int *diag_index; int *displ; char *slopes; int curr_frag; const int tl1 = (l1+l2)-1; assert(NULL!=aln_param); /* Log(&rLog, LOG_FORCED_DEBUG, "DNAFLAG=%d seq_no=%d l1=%d l2=%d window=%d ktup=%d signif=%d wind_gap=%d", DNAFLAG, seq_no, l1, l2, window, ktup, signif, wind_gap); */ slopes = (char *) CKCALLOC(tl1+1, sizeof(char)); displ = (int *) CKCALLOC(tl1+1, sizeof(int)); diag_index = 
(int *) CKMALLOC((tl1+1) * sizeof(int)); for (i=1; i<=tl1; ++i) { /* unnecessary, because we calloced: slopes[i] = displ[i] = 0; */ diag_index[i] = i; } for (i=1;i<=aln_param->ktup;i++) pot[i] = (int) pow((double)(max_res_code+1),(double)(i-1)); limit = (int) pow((double)(max_res_code+1),(double)aln_param->ktup); /* increment diagonal score for each k_tuple match */ for (i=1; i<=limit; ++i) { vn1=zzc[i]; while (TRUE) { if (!vn1) break; vn2 = zzd[i]; while (0 != vn2) { osptr = vn1-vn2+l2; ++displ[osptr]; vn2 = zzb[vn2]; } vn1=zza[vn1]; } } /* choose the top SIGNIF diagonals */ #ifdef QSORT_REPLACEMENT /* This was an attempt to replace des_quick_sort with qsort(), * which turns out to be much slower, so don't use this */ /* FIXME: if we use this branch, we don't need to init diag_index * before, because that is done in QSortAndTrackIndex() * automatically. */ #if 0 for (i=1; i<=tl1; i++) { Log(&rLog, LOG_FORCED_DEBUG, "b4 sort disp[%d]=%d diag_index[%d]=%d", i, diag_index[i], i, displ[i]); } #endif QSortAndTrackIndex(&(diag_index[1]), &(displ[1]), tl1, 'a', TRUE); #if 0 for (i=1; i<=tl1; i++) { Log(&rLog, LOG_FORCED_DEBUG, "after sort disp[%d]=%d diag_index[%d]=%d", i, diag_index[i], i, displ[i]); } #endif #else des_quick_sort(displ, diag_index, tl1); #endif j = tl1 - aln_param->signif + 1; if (j < 1) { j = 1; } /* flag all diagonals within WINDOW of a top diagonal */ for (i=tl1; i>=j; i--) { if (displ[i] > 0) { pos = diag_index[i]; l = (1 > pos - aln_param->window) ? 1 : pos - aln_param->window; m = (tl1 < pos + aln_param->window) ? 
tl1 : pos + aln_param->window; for (; l <= m; l++) slopes[l] = 1; } } for (i=1; i<=tl1; i++) { displ[i] = 0; } curr_frag=*maxsf=0; for (i=1; i<=(l1-aln_param->ktup+1); ++i) { encrypt=flag=0; for (j=1; j<=aln_param->ktup; ++j) { residue = seq_array[seq_no][i+j-1]; if ((residue<0) || (residue>max_res_code)) { flag=TRUE; break; } encrypt += ((residue)*pot[j]); } if (flag) { continue; } ++encrypt; vn2=zzd[encrypt]; flag=FALSE; while (TRUE) { if (!vn2) { flag=TRUE; break; } osptr=i-vn2+l2; if (1 != slopes[osptr]) { vn2=zzb[vn2]; continue; } flen=0; fs=aln_param->ktup; next=*maxsf; /* * A-loop */ while (TRUE) { if (!next) { ++curr_frag; if (curr_frag >= 2*max_aln_length) { Log(&rLog, LOG_VERBOSE, "(Partial alignment)"); goto free_and_exit; /* Yesss! Always wanted to * use a goto (AW) */ } displ[osptr]=curr_frag; put_frag(fs, i, vn2, flen, curr_frag, &next, maxsf, accum); } else { tv1=accum[1][next]; tv2=accum[2][next]; if (frag_rel_pos(i, vn2, tv1, tv2, aln_param->ktup)) { if (i-vn2 == accum[1][next]-accum[2][next]) { if (i > accum[1][next]+(aln_param->ktup-1)) { fs = accum[0][next]+aln_param->ktup; } else { rmndr = i-accum[1][next]; fs = accum[0][next]+rmndr; } flen=next; next=0; continue; } else { if (0 == displ[osptr]) { subt1=aln_param->ktup; } else { if (i > accum[1][displ[osptr]]+(aln_param->ktup-1)) { subt1=accum[0][displ[osptr]]+aln_param->ktup; } else { rmndr=i-accum[1][displ[osptr]]; subt1=accum[0][displ[osptr]]+rmndr; } } subt2=accum[0][next] - aln_param->wind_gap + aln_param->ktup; if (subt2>subt1) { flen=next; fs=subt2; } else { flen=displ[osptr]; fs=subt1; } next=0; continue; } } else { next=accum[4][next]; continue; } } break; } /* * End of Aloop */ vn2=zzb[vn2]; } } free_and_exit: CKFREE(displ); CKFREE(slopes); CKFREE(diag_index); return; } /* end of pair_align */ /** * * Will compute ktuple scores and store in tmat * Following values will be set: tmat[i][j], where * istart <= i <iend * and * jstart <= j < jend * i.e. 
zero-offset * tmat data members have to be preallocated * * if ktuple_param_t *aln_param == NULL defaults will be used */ void KTuplePairDist(symmatrix_t *tmat, mseq_t *mseq, int istart, int iend, int jstart, int jend, ktuple_param_t *param_override, progress_t *prProgress, unsigned long int *ulStepNo, unsigned long int ulTotalStepNo) { /* this first group of variables were previously static and hence un-parallelisable */ char **seq_array; int maxsf; int **accum; int max_aln_length; /* divide score with length of smallest sequence */ int *zza, *zzb, *zzc, *zzd; int private_step_no = 0; int i, j, dsr; double calc_score; int max_res_code = -1; int max_seq_len; int *seqlen_array; /* progress_t *prProgress; */ /* int uStepNo, uTotalStepNo; */ ktuple_param_t aln_param = default_protein_param; bool bPrintCR = (rLog.iLogLevelEnabled<=LOG_VERBOSE) ? FALSE : TRUE; if(prProgress == NULL) { NewProgress(&prProgress, LogGetFP(&rLog, LOG_INFO), "Ktuple-distance calculation progress", bPrintCR); } /* conversion to old style data types follows * */ seqlen_array = (int*) CKMALLOC((mseq->nseqs+1) * sizeof(int)); for (i=0; i<mseq->nseqs; i++) { seqlen_array[i+1] = mseq->sqinfo[i].len; } /* setup alignment parameters */ if (SEQTYPE_PROTEIN == mseq->seqtype) { DNAFLAG = FALSE; max_res_code = strlen(AMINO_ACID_CODES)-2; aln_param = default_protein_param; } else if (SEQTYPE_RNA == mseq->seqtype || SEQTYPE_DNA == mseq->seqtype) { DNAFLAG = TRUE; max_res_code = strlen(NUCLEIC_ACID_CODES)-2; aln_param = default_dna_param; } else { Log(&rLog, LOG_FATAL, "Internal error in %s: Unknown sequence type.", __FUNCTION__); } if (NULL!=param_override) { aln_param.ktup = param_override->ktup; aln_param.wind_gap = param_override->wind_gap; aln_param.signif = param_override->signif; aln_param.window = param_override->window; } /*LOG_DEBUG("DNAFLAG = %d max_res_code = %d", DNAFLAG, max_res_code);*/ /* convert mseq to clustal's old-style int encoded sequences (unit-offset) */ max_aln_length = 0; 
max_seq_len = 0; seq_array = (char **) CKMALLOC((mseq->nseqs+1) * sizeof(char *)); seq_array[0] = NULL; /* FIXME check that non of the seqs is smaller than ktup (?). * Otherwise segfault occurs */ for (i=0; i<mseq->nseqs; i++) { seq_array[i+1] = (char *) CKMALLOC((seqlen_array[i+1]+2) * sizeof (char));; } for (i=0; i<mseq->nseqs; i++) { /*LOG_DEBUG("calling encode with seq_array[%d+1] len=%d and seq=%s", i, seqlen_array[i+1], mseq->seq[i]);*/ if (TRUE == DNAFLAG) { encode(&(mseq->seq[i][-1]), seq_array[i+1], seqlen_array[i+1], NUCLEIC_ACID_CODES); } else { encode(&(mseq->seq[i][-1]), seq_array[i+1], seqlen_array[i+1], AMINO_ACID_CODES); } if (seqlen_array[i+1]>max_seq_len) { max_seq_len = seqlen_array[i+1]; } } max_aln_length = max_seq_len * 2; /* see sequence.c in old source */ /* FIXME: short sequences can cause seg-fault * because max_aln_length can get shorter * than (max_res_code+1)^k * FS, r222->r223 */ max_aln_length = max_aln_length > pow((max_res_code+1), aln_param.ktup)+1 ? max_aln_length : pow((max_res_code+1), aln_param.ktup)+1; /* * * conversion to old style clustal done (in no time) */ accum = (int **) CKCALLOC(5, sizeof (int *)); for (i=0;i<5;i++) { accum[i] = (int *) CKCALLOC((2*max_aln_length+1), sizeof(int)); } zza = (int *) CKCALLOC( (max_aln_length+1), sizeof(int)); zzb = (int *) CKCALLOC( (max_aln_length+1), sizeof(int)); zzc = (int *) CKCALLOC( (max_aln_length+1), sizeof(int)); zzd = (int *) CKCALLOC( (max_aln_length+1), sizeof(int)); /* estimation of total number of steps (if istart and jstart are * both 0) (now handled in the calling routine) */ /* uTotalStepNo = iend*jend - iend*iend/2 + iend/2; uStepNo = 0; */ /*LOG_DEBUG("istart=%d iend=%d jstart=%d jend=%d", istart, iend, jstart, jend);*/ for (i=istart+1; i<=iend; ++i) { /* by definition a sequence compared to itself should give a score of 0. 
AW */ SymMatrixSetValue(tmat, i-1, i-1, 0.0); make_ptrs(zza, zzc, i, seqlen_array[i], aln_param.ktup, max_res_code, seq_array); #ifdef HAVE_OPENMP #pragma omp critical(ktuple) #endif { ProgressLog(prProgress, *ulStepNo, ulTotalStepNo, FALSE); } for (j=MAX(i+1, jstart+1); j<=jend; ++j) { (*ulStepNo)++; private_step_no++; /*LOG_DEBUG("comparing pair %d:%d", i, j);*/ make_ptrs(zzb, zzd, j, seqlen_array[j], aln_param.ktup, max_res_code, seq_array); pair_align(i, seqlen_array[i], seqlen_array[j], max_res_code, &aln_param, seq_array, &maxsf, accum, max_aln_length, zza, zzb, zzc, zzd); if (!maxsf) { calc_score=0.0; } else { calc_score=(double)accum[0][maxsf]; if (percent) { dsr=(seqlen_array[i]<seqlen_array[j]) ? seqlen_array[i] : seqlen_array[j]; calc_score = (calc_score/(double)dsr) * 100.0; } } /* printf("%d %d %d\n", i-1, j-1, (100.0 - calc_score)/100.0); */ SymMatrixSetValue(tmat, i-1, j-1, (100.0 - calc_score)/100.0); /* the function allows you not to compute the full matrix. * here we explicitely make the resulting matrix a * rectangle, i.e. we always set full rows. in other * words, if we don't complete the full matrix then we * don't have a full symmetry. so only use the defined * symmetric part. 
AW */ /*LOG_DEBUG("setting %d : %d = %f", j, i, tmat[i][j]);*/ /* not needed anymore since we use symmatrix_t if (j<=iend) { tmat[j][i] = tmat[i][j]; } */ #ifdef HAVE_OPENMP #pragma omp critical(ktuple) #endif { Log(&rLog, LOG_DEBUG, "K-tuple distance for sequence pair %d:%d = %lg", i, j, SymMatrixGetValue(tmat, i-1, j-1)); } } } /* Log(&rLog, LOG_FORCED_DEBUG, "uTotalStepNo=%d for istart=%d iend=%d jstart=%d jend=%d", uStepNo, istart, iend, jstart, jend); Log(&rLog, LOG_FORCED_DEBUG, "Fabian = %d", iend*jend - iend*iend/2 + iend/2); */ /* printf("\n\n%d\t%d\t%d\t%d\n\n", omp_get_thread_num(), uStepNo, istart, iend); */ for (i=0;i<5;i++) { CKFREE(accum[i]); } CKFREE(accum); #ifdef HAVE_OPENMP #pragma omp critical(ktuple) #if 0 { int tid; tid = omp_get_thread_num(); printf("%s:%d: tid %d: steps %d\n", __FILE__, __LINE__, tid, private_step_no); } #endif #endif CKFREE(zza); CKFREE(zzb); CKFREE(zzc); CKFREE(zzd); free(seqlen_array); for (i=1; i<=mseq->nseqs; i++) { CKFREE(seq_array[i]); } CKFREE(seq_array); } /* end of KTuplePairDist */
weightedNorm2.c
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ extern "C" void FUNC(weightedNorm2)(const dlong & Nblocks, const dlong & N, const dfloat * __restrict__ cpu_w, const dfloat * __restrict__ cpu_a, dfloat * __restrict__ cpu_wa){ dfloat wa2 = 0; #ifdef __NEKRS__OMP__ #pragma omp parallel for reduction(+:wa2) #endif for(int i=0;i<N;++i){ const dfloat ai = cpu_a[i]; const dfloat wi = cpu_w[i]; wa2 += ai*ai*wi; } cpu_wa[0] = wa2; }
seidel.base.pluto.par.c
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <math.h> double A[N][N+13]; void init_arrays() { int i, j; for (i=0; i<N; i++) for (j=0; j<N; j++) A[i][j] = i*i+j*j; } double rtclock() { struct timezone tzp; struct timeval tp; int stat; gettimeofday (&tp, &tzp); return (tp.tv_sec + tp.tv_usec*1.0e-6); } int main() { init_arrays(); double annot_t_start=0, annot_t_end=0, annot_t_total=0; int annot_i; for (annot_i=0; annot_i<REPS; annot_i++) { annot_t_start = rtclock(); #include <math.h> #include <assert.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) #define S1(zT0,zT1,zT2,t,i,j) {A[i][j]=(A[1+i][1+j]+A[1+i][j]+A[1+i][j-1]+A[i][1+j]+A[i][j]+A[i][j-1]+A[i-1][1+j]+A[i-1][j]+A[i-1][j-1])/9;} int c1, c2, c3, c4, c5, c6; register int lb, ub, lb1, ub1, lb2, ub2; register int lbv, ubv; /* Generated from PLuTo-produced CLooG file by CLooG v0.14.1 64 bits in 0.02s. 
*/ for (c1=-1;c1<=floord(2*T+N-4,32);c1++) { lb1=max(max(0,ceild(16*c1-15,32)),ceild(32*c1-T+1,32)); ub1=min(min(floord(T+N-3,32),floord(32*c1+31,32)),floord(32*c1+N+29,64)); #pragma omp parallel for shared(c1,lb1,ub1) private(c2,c3,c4,c5,c6) for (c2=lb1; c2<=ub1; c2++) { for (c3=max(max(max(max(ceild(64*c2-N-28,32),0),ceild(16*c2-15,16)),ceild(16*c1-15,16)),ceild(64*c1-64*c2-29,32));c3<=min(min(min(min(floord(32*c1-32*c2+N+29,16),floord(T+N-3,16)),floord(32*c2+T+N+28,32)),floord(64*c2+N+59,32)),floord(32*c1+N+60,32));c3++) { for (c4=max(max(max(max(-32*c2+32*c3-N-29,16*c3-N+2),32*c2-N+2),0),32*c1-32*c2);c4<=min(min(min(min(32*c1-32*c2+31,T-1),floord(32*c3+29,2)),32*c2+30),-32*c2+32*c3+30);c4++) { /*@ begin Loop( transform UnrollJam(ufactor=8) for (c5=max(max(32*c2,32*c3-c4-N+2),c4+1);c5<=min(min(c4+N-2,32*c2+31),32*c3-c4+30);c5++) transform Unroll(ufactor=8) for (c6=max(c4+c5+1,32*c3);c6<=min(c4+c5+N-2,32*c3+31);c6++) { S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4-c5+c6) ; } ) @*/{ for (c5 = max(max(32 * c2, 32 * c3 - c4 - N + 2), c4 + 1); c5 <= min(min(c4 + N - 2, 32 * c2 + 31), 32 * c3 - c4 + 30) - 7; c5 = c5 + 8) { for (c6 = max(c4 + c5 + 1, 32 * c3); c6 <= min(c4 + c5 + N - 2, 32 * c3 + 31) - 7; c6 = c6 + 8) { S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + c6); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + (c6 + 1)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + (c6 + 2)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + (c6 + 3)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + (c6 + 4)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + (c6 + 5)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + (c6 + 6)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + (c6 + 7)); } for (; c6 <= min(c4 + c5 + N - 2, 32 * c3 + 31); c6 = c6 + 1) S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + c6); for (c6 = max(c4 + (c5 + 1) + 1, 32 * c3); c6 <= min(c4 + (c5 + 1) + N 
- 2, 32 * c3 + 31) - 7; c6 = c6 + 8) { S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 1), -c4 - (c5 + 1) + c6); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 1), -c4 - (c5 + 1) + (c6 + 1)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 1), -c4 - (c5 + 1) + (c6 + 2)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 1), -c4 - (c5 + 1) + (c6 + 3)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 1), -c4 - (c5 + 1) + (c6 + 4)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 1), -c4 - (c5 + 1) + (c6 + 5)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 1), -c4 - (c5 + 1) + (c6 + 6)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 1), -c4 - (c5 + 1) + (c6 + 7)); } for (; c6 <= min(c4 + (c5 + 1) + N - 2, 32 * c3 + 31); c6 = c6 + 1) S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 1), -c4 - (c5 + 1) + c6); for (c6 = max(c4 + (c5 + 2) + 1, 32 * c3); c6 <= min(c4 + (c5 + 2) + N - 2, 32 * c3 + 31) - 7; c6 = c6 + 8) { S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 2), -c4 - (c5 + 2) + c6); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 2), -c4 - (c5 + 2) + (c6 + 1)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 2), -c4 - (c5 + 2) + (c6 + 2)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 2), -c4 - (c5 + 2) + (c6 + 3)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 2), -c4 - (c5 + 2) + (c6 + 4)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 2), -c4 - (c5 + 2) + (c6 + 5)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 2), -c4 - (c5 + 2) + (c6 + 6)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 2), -c4 - (c5 + 2) + (c6 + 7)); } for (; c6 <= min(c4 + (c5 + 2) + N - 2, 32 * c3 + 31); c6 = c6 + 1) S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 2), -c4 - (c5 + 2) + c6); for (c6 = max(c4 + (c5 + 3) + 1, 32 * c3); c6 <= min(c4 + (c5 + 3) + N - 2, 32 * c3 + 31) - 7; c6 = c6 + 8) { S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 3), -c4 - (c5 + 3) + c6); S1(c1 - c2, -c1 + 2 * c2, -c1 + 
c3, c4, -c4 + (c5 + 3), -c4 - (c5 + 3) + (c6 + 1)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 3), -c4 - (c5 + 3) + (c6 + 2)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 3), -c4 - (c5 + 3) + (c6 + 3)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 3), -c4 - (c5 + 3) + (c6 + 4)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 3), -c4 - (c5 + 3) + (c6 + 5)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 3), -c4 - (c5 + 3) + (c6 + 6)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 3), -c4 - (c5 + 3) + (c6 + 7)); } for (; c6 <= min(c4 + (c5 + 3) + N - 2, 32 * c3 + 31); c6 = c6 + 1) S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 3), -c4 - (c5 + 3) + c6); for (c6 = max(c4 + (c5 + 4) + 1, 32 * c3); c6 <= min(c4 + (c5 + 4) + N - 2, 32 * c3 + 31) - 7; c6 = c6 + 8) { S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 4), -c4 - (c5 + 4) + c6); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 4), -c4 - (c5 + 4) + (c6 + 1)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 4), -c4 - (c5 + 4) + (c6 + 2)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 4), -c4 - (c5 + 4) + (c6 + 3)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 4), -c4 - (c5 + 4) + (c6 + 4)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 4), -c4 - (c5 + 4) + (c6 + 5)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 4), -c4 - (c5 + 4) + (c6 + 6)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 4), -c4 - (c5 + 4) + (c6 + 7)); } for (; c6 <= min(c4 + (c5 + 4) + N - 2, 32 * c3 + 31); c6 = c6 + 1) S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 4), -c4 - (c5 + 4) + c6); for (c6 = max(c4 + (c5 + 5) + 1, 32 * c3); c6 <= min(c4 + (c5 + 5) + N - 2, 32 * c3 + 31) - 7; c6 = c6 + 8) { S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 5), -c4 - (c5 + 5) + c6); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 5), -c4 - (c5 + 5) + (c6 + 1)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 5), -c4 - (c5 + 5) + (c6 + 2)); S1(c1 - c2, -c1 
+ 2 * c2, -c1 + c3, c4, -c4 + (c5 + 5), -c4 - (c5 + 5) + (c6 + 3)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 5), -c4 - (c5 + 5) + (c6 + 4)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 5), -c4 - (c5 + 5) + (c6 + 5)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 5), -c4 - (c5 + 5) + (c6 + 6)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 5), -c4 - (c5 + 5) + (c6 + 7)); } for (; c6 <= min(c4 + (c5 + 5) + N - 2, 32 * c3 + 31); c6 = c6 + 1) S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 5), -c4 - (c5 + 5) + c6); for (c6 = max(c4 + (c5 + 6) + 1, 32 * c3); c6 <= min(c4 + (c5 + 6) + N - 2, 32 * c3 + 31) - 7; c6 = c6 + 8) { S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 6), -c4 - (c5 + 6) + c6); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 6), -c4 - (c5 + 6) + (c6 + 1)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 6), -c4 - (c5 + 6) + (c6 + 2)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 6), -c4 - (c5 + 6) + (c6 + 3)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 6), -c4 - (c5 + 6) + (c6 + 4)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 6), -c4 - (c5 + 6) + (c6 + 5)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 6), -c4 - (c5 + 6) + (c6 + 6)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 6), -c4 - (c5 + 6) + (c6 + 7)); } for (; c6 <= min(c4 + (c5 + 6) + N - 2, 32 * c3 + 31); c6 = c6 + 1) S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 6), -c4 - (c5 + 6) + c6); for (c6 = max(c4 + (c5 + 7) + 1, 32 * c3); c6 <= min(c4 + (c5 + 7) + N - 2, 32 * c3 + 31) - 7; c6 = c6 + 8) { S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 7), -c4 - (c5 + 7) + c6); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 7), -c4 - (c5 + 7) + (c6 + 1)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 7), -c4 - (c5 + 7) + (c6 + 2)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 7), -c4 - (c5 + 7) + (c6 + 3)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 7), -c4 - (c5 + 7) + (c6 + 4)); 
S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 7), -c4 - (c5 + 7) + (c6 + 5)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 7), -c4 - (c5 + 7) + (c6 + 6)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 7), -c4 - (c5 + 7) + (c6 + 7)); } for (; c6 <= min(c4 + (c5 + 7) + N - 2, 32 * c3 + 31); c6 = c6 + 1) S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 7), -c4 - (c5 + 7) + c6); } for (; c5 <= min(min(c4 + N - 2, 32 * c2 + 31), 32 * c3 - c4 + 30); c5 = c5 + 1) { for (c6 = max(c4 + c5 + 1, 32 * c3); c6 <= min(c4 + c5 + N - 2, 32 * c3 + 31) - 7; c6 = c6 + 8) { S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + c6); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + (c6 + 1)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + (c6 + 2)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + (c6 + 3)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + (c6 + 4)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + (c6 + 5)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + (c6 + 6)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + (c6 + 7)); } for (; c6 <= min(c4 + c5 + N - 2, 32 * c3 + 31); c6 = c6 + 1) S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + c6); } } /*@ end @*/ } } } } /* End of CLooG code */ annot_t_end = rtclock(); annot_t_total += annot_t_end - annot_t_start; } annot_t_total = annot_t_total / REPS; #ifndef TEST printf("%f\n", annot_t_total); #else { int i, j; for (i=0; i<N; i++) { for (j=0; j<N; j++) { if (j%100==0) printf("\n"); printf("%f ",A[i][j]); } printf("\n"); } } #endif return ((int) A[0][0]); }
cloud.c
#include <string> #include <iostream> #include <algorithm> #include <utility> #include <tfhe/tfhe.h> #include <tfhe/tfhe_io.h> #include <stdio.h> #include <time.h> #include <vector> #include <cassert> #include <sys/time.h> #include <omp.h> #include <fstream> using namespace std; ifstream read; #define T_FILE "averagestandard.txt" void add(LweSample *sum, LweSample *carryover, const LweSample *x, const LweSample *y, const LweSample *c, const int32_t nb_bits, const TFheGateBootstrappingCloudKeySet *keyset) { const LweParams *in_out_params = keyset->params->in_out_params; LweSample *carry = new_LweSample_array(1, in_out_params); LweSample *axc = new_LweSample_array(1, in_out_params); LweSample *bxc = new_LweSample_array(1, in_out_params); bootsCOPY(carry, c, keyset); for(int32_t i = 0; i < nb_bits; i++) { #pragma omp parallel sections num_threads(2) { #pragma omp section bootsXOR(axc, x + i, carry, keyset); #pragma omp section bootsXOR(bxc, y + i, carry, keyset); } #pragma omp parallel sections num_threads(2) { #pragma omp section bootsXOR(sum + i, x + i, bxc, keyset); #pragma omp section bootsAND(axc, axc, bxc, keyset); } bootsXOR(carry, carry, axc, keyset); } bootsCOPY(carryover, carry, keyset); delete_LweSample_array(1, carry); delete_LweSample_array(1, axc); delete_LweSample_array(1, bxc); } void zero(LweSample* result, const TFheGateBootstrappingCloudKeySet* keyset, const size_t size) { for(int i = 0; i < size; i++){ bootsCONSTANT(result + i, 0, keyset);} } void NOT(LweSample* result, const LweSample* x, const TFheGateBootstrappingCloudKeySet* keyset, const size_t size) { for(int i = 0; i < size; i++){ bootsNOT(result + i, x + i, keyset);} } void split(LweSample *finalresult, LweSample *finalresult2, LweSample *finalresult3, LweSample *a, LweSample *b, LweSample *c, LweSample *d,LweSample *e, const LweSample *carry, const int32_t nb_bits, TFheGateBootstrappingCloudKeySet *keyset) { const LweParams *in_out_params = keyset->params->in_out_params; LweSample *sum = 
new_LweSample_array(32, in_out_params); LweSample *sum2 = new_LweSample_array(32, in_out_params); LweSample *sum3 = new_LweSample_array(32, in_out_params); LweSample *carryover = new_LweSample_array(32, in_out_params); LweSample *carryover2 = new_LweSample_array(32, in_out_params); LweSample *carryover3 = new_LweSample_array(32, in_out_params); for (int32_t i = 0; i < nb_bits; ++i) { bootsCONSTANT(sum + i, 0, keyset); bootsCONSTANT(sum2 + i, 0, keyset); bootsCONSTANT(sum3 + i, 0, keyset); bootsCONSTANT(carryover + i, 0, keyset); bootsCONSTANT(carryover2 + i, 0, keyset); bootsCONSTANT(carryover3 + i, 0, keyset); } //adding the 2nd result with the carry add(sum, carryover, e, b, carry, nb_bits, keyset); add(sum2, carryover2, d, a, carryover, nb_bits, keyset); add(sum3, carryover3, c, carryover2,carry,nb_bits, keyset); for (int32_t i = 0; i < nb_bits; ++i) { bootsCOPY(finalresult + i, sum3 + i, keyset); } for (int32_t i = 0; i < nb_bits; ++i) { bootsCOPY(finalresult2 + i, sum2 + i, keyset); } for (int32_t i = 0; i < nb_bits; ++i) { bootsCOPY(finalresult3 + i, sum + i, keyset); } delete_LweSample_array(32, sum); delete_LweSample_array(32, sum2); delete_LweSample_array(32, sum3); delete_LweSample_array(32, carryover); delete_LweSample_array(32, carryover2); delete_LweSample_array(32, carryover3); } void mul32(LweSample *result, LweSample *result2, LweSample *a, LweSample *b,const LweSample *carry, const int32_t nb_bits, TFheGateBootstrappingCloudKeySet *keyset) { const LweParams *in_out_params = keyset->params->in_out_params; //sum of the output LweSample *sum3c1 = new_LweSample_array(32, in_out_params); LweSample *sum3c2 = new_LweSample_array(32, in_out_params); LweSample *tmp = new_LweSample_array(32, in_out_params); LweSample *tmp2 = new_LweSample_array(32, in_out_params); LweSample *tmp3c1 = new_LweSample_array(32, in_out_params); LweSample *tmp3c2 = new_LweSample_array(32, in_out_params); LweSample *carry1 = new_LweSample_array(32, in_out_params); LweSample *carry2 
= new_LweSample_array(32, in_out_params); //set all these to 0 for (int32_t i = 0; i < nb_bits; ++i) { bootsCONSTANT(sum3c1 + i, 0, keyset); bootsCONSTANT(sum3c2 + i, 0, keyset); bootsCONSTANT(tmp + i, 0, keyset); bootsCONSTANT(tmp2 + i, 0, keyset); bootsCONSTANT(tmp3c1 + i, 0, keyset); bootsCONSTANT(tmp3c2 + i, 0, keyset); bootsCONSTANT(carry1 + i, 0, keyset); bootsCONSTANT(carry2 + i, 0, keyset); } //multiply all the bits together with the other bits.. int round = 0; for (int32_t i = 0; i < nb_bits; ++i) { for (int32_t k = 0; k < nb_bits; ++k) { //this is basically multiplying as it is an AND gate //a(ciphertext1) should be the least significant bit #pragma omp parallel sections num_threads(2) { #pragma omp section bootsAND(tmp + k, a + k, b + i, keyset); } } if (round > 0) { for (int32_t i = 0; i < round; ++i) { //putting number of 0s infront bootsCONSTANT(tmp3c1 + i, 0, keyset); } } //copy all the bits that fit into a int32 with the 0s inside for (int32_t i = 0; i < 32 - round; ++i) { // +round cause infront has the 0s //tmp is the least significant bit #pragma omp parallel sections num_threads(2) { #pragma omp section bootsCOPY(tmp3c1 + i + round , tmp + i, keyset); } } //the rest of the bits that couldnt fit inside for (int32_t i = 0; i < round; ++i) { #pragma omp parallel sections num_threads(2) { #pragma omp section bootsCOPY(tmp3c2 + i, tmp + i + 32 - round, keyset); } } add(sum3c1, carry1, sum3c1, tmp3c1, carry, 32, keyset); add(sum3c2, carry2, sum3c2, tmp3c2, carry1, 32, keyset); round++; } for (int32_t i = 0; i < 32; ++i) { bootsCOPY(result + i, sum3c2 + i, keyset); bootsCOPY(result2 + i, sum3c1 + i, keyset); } delete_LweSample_array(32, sum3c1); delete_LweSample_array(32, sum3c2); delete_LweSample_array(32, tmp); delete_LweSample_array(32, tmp2); delete_LweSample_array(32, tmp3c1); delete_LweSample_array(32, tmp3c2); delete_LweSample_array(32, carry1); delete_LweSample_array(32, carry2); } void mul64(LweSample *result, LweSample *result2,LweSample 
*result3, LweSample *a, LweSample *b,LweSample *c,const LweSample *carry, const int32_t nb_bits, TFheGateBootstrappingCloudKeySet *keyset) { const LweParams *in_out_params = keyset->params->in_out_params; //sum of the output LweSample *sum3c1 = new_LweSample_array(32, in_out_params); LweSample *sum3c2 = new_LweSample_array(32, in_out_params); LweSample *sum3c3 = new_LweSample_array(32, in_out_params); LweSample *tmp = new_LweSample_array(32, in_out_params); LweSample *tmp2 = new_LweSample_array(32, in_out_params); LweSample *tmp3c1 = new_LweSample_array(32, in_out_params); LweSample *tmp3c2 = new_LweSample_array(32, in_out_params); LweSample *tmp3c3 = new_LweSample_array(32, in_out_params); LweSample *tmp3c4 = new_LweSample_array(32, in_out_params); LweSample *carry1 = new_LweSample_array(32, in_out_params); LweSample *carry2 = new_LweSample_array(32, in_out_params); LweSample *carry3 = new_LweSample_array(32, in_out_params); LweSample *carry4 = new_LweSample_array(32, in_out_params); //set all these to 0 for (int32_t i = 0; i < nb_bits; ++i) { bootsCONSTANT(sum3c1 + i, 0, keyset); bootsCONSTANT(sum3c2 + i, 0, keyset); bootsCONSTANT(sum3c3 + i, 0, keyset); bootsCONSTANT(tmp + i, 0, keyset); bootsCONSTANT(tmp2 + i, 0, keyset); bootsCONSTANT(tmp3c1 + i, 0, keyset); bootsCONSTANT(tmp3c2 + i, 0, keyset); bootsCONSTANT(tmp3c3 + i, 0, keyset); bootsCONSTANT(tmp3c4 + i, 0, keyset); bootsCONSTANT(carry1 + i, 0, keyset); bootsCONSTANT(carry2 + i, 0, keyset); bootsCONSTANT(carry3 + i, 0, keyset); bootsCONSTANT(carry4 + i, 0, keyset); } //multiply all the bits together with the other bits.. 
int round = 0; int counter1 = 0; int counter2 = 0; for (int32_t i = 0; i < nb_bits; ++i) { for (int32_t k = 0; k < nb_bits; ++k) { //this is basically multiplying as it is an AND gate //a(ciphertext1) should be the least significant bit #pragma omp parallel sections num_threads(2) { #pragma omp section bootsAND(tmp + k, a + k, c + i, keyset); #pragma omp section bootsAND(tmp2 + k, b + k, c + i, keyset); } } counter1 = 32 - round; counter2 = 32 - counter1; if (round > 0) { for (int32_t i = 0; i < round; ++i) { //putting number of 0s infront bootsCONSTANT(tmp3c1 + i, 0, keyset); } } //copy all the bits that fit into a int32 with the 0s inside //tmp to tmp3c1 for (int32_t i = 0; i < counter1; ++i) { // +round cause infront has the 0s //tmp is the least significant bit #pragma omp parallel sections num_threads(2) { #pragma omp section bootsCOPY(tmp3c1 + i + round , tmp + i, keyset); } } //remaining of tmp to tmp3c2 for (int32_t i = 0; i < counter2; ++i) { // +round cause infront has the 0s //tmp is the least significant bit #pragma omp parallel sections num_threads(2) { #pragma omp section bootsCOPY(tmp3c2 + i, tmp + i + counter1, keyset); } } //some of tmp2 to remaining of tmp3c2 //repeats 31 times for (int32_t i = 0; i < counter1; ++i) { // +round cause infront has the 0s //tmp is the least significant bit #pragma omp parallel sections num_threads(2) { #pragma omp section bootsCOPY(tmp3c2 + i + counter2, tmp2 + i, keyset); } } //the rest of tmp2 to tmp3c3 //repeats 1 time for (int32_t i = 0; i < counter2; ++i) { // +round cause infront has the 0s //tmp is the least significant bit #pragma omp parallel sections num_threads(2) { #pragma omp section bootsCOPY(tmp3c3 + i, tmp2 + i + counter1, keyset); } } add(sum3c1, carry1, sum3c1, tmp3c1, carry, 32, keyset); add(sum3c2, carry2, sum3c2, tmp3c2, carry1, 32, keyset); add(sum3c3, carry3, sum3c3, tmp3c3, carry2, 32, keyset); round++; } for (int32_t i = 0; i < 32; ++i) { bootsCOPY(result + i, sum3c3 + i, keyset); 
bootsCOPY(result2 + i, sum3c2 + i, keyset); bootsCOPY(result3 + i, sum3c1 + i, keyset); } delete_LweSample_array(32, sum3c1); delete_LweSample_array(32, sum3c2); delete_LweSample_array(32, sum3c3); delete_LweSample_array(32, tmp); delete_LweSample_array(32, tmp2); delete_LweSample_array(32, tmp3c1); delete_LweSample_array(32, tmp3c2); delete_LweSample_array(32, tmp3c3); delete_LweSample_array(32, tmp3c4); delete_LweSample_array(32, carry1); delete_LweSample_array(32, carry2); delete_LweSample_array(32, carry3); delete_LweSample_array(32, carry4); } void mul128(LweSample *result, LweSample *result2,LweSample *result3,LweSample *result4,LweSample *result5, LweSample *a, LweSample *b,LweSample *c,LweSample *d, LweSample *e,const LweSample *carry, const int32_t nb_bits, TFheGateBootstrappingCloudKeySet *keyset) { const LweParams *in_out_params = keyset->params->in_out_params; //sum of the output LweSample *sum3c1 = new_LweSample_array(32, in_out_params); LweSample *sum3c2 = new_LweSample_array(32, in_out_params); LweSample *sum3c3 = new_LweSample_array(32, in_out_params); LweSample *sum3c4 = new_LweSample_array(32, in_out_params); LweSample *sum3c5 = new_LweSample_array(32, in_out_params); LweSample *tmp = new_LweSample_array(32, in_out_params); LweSample *tmp2 = new_LweSample_array(32, in_out_params); LweSample *tmp3 = new_LweSample_array(32, in_out_params); LweSample *tmp4 = new_LweSample_array(32, in_out_params); LweSample *tmp3c1 = new_LweSample_array(32, in_out_params); LweSample *tmp3c2 = new_LweSample_array(32, in_out_params); LweSample *tmp3c3 = new_LweSample_array(32, in_out_params); LweSample *tmp3c4 = new_LweSample_array(32, in_out_params); LweSample *tmp3c5 = new_LweSample_array(32, in_out_params); LweSample *carry1 = new_LweSample_array(32, in_out_params); LweSample *carry2 = new_LweSample_array(32, in_out_params); LweSample *carry3 = new_LweSample_array(32, in_out_params); LweSample *carry4 = new_LweSample_array(32, in_out_params); LweSample *carry5 = 
new_LweSample_array(32, in_out_params); //set all these to 0 for (int32_t i = 0; i < nb_bits; ++i) { bootsCONSTANT(sum3c1 + i, 0, keyset); bootsCONSTANT(sum3c2 + i, 0, keyset); bootsCONSTANT(sum3c3 + i, 0, keyset); bootsCONSTANT(sum3c4 + i, 0, keyset); bootsCONSTANT(sum3c5 + i, 0, keyset); bootsCONSTANT(tmp + i, 0, keyset); bootsCONSTANT(tmp2 + i, 0, keyset); bootsCONSTANT(tmp3 + i, 0, keyset); bootsCONSTANT(tmp4 + i, 0, keyset); bootsCONSTANT(tmp3c1 + i, 0, keyset); bootsCONSTANT(tmp3c2 + i, 0, keyset); bootsCONSTANT(tmp3c3 + i, 0, keyset); bootsCONSTANT(tmp3c4 + i, 0, keyset); bootsCONSTANT(tmp3c5 + i, 0, keyset); bootsCONSTANT(carry1 + i, 0, keyset); bootsCONSTANT(carry2 + i, 0, keyset); bootsCONSTANT(carry3 + i, 0, keyset); bootsCONSTANT(carry4 + i, 0, keyset); bootsCONSTANT(carry5 + i, 0, keyset); } //multiply all the bits together with the other bits.. int round = 0; int counter1 = 0; int counter2 = 0; for (int32_t i = 0; i < nb_bits; ++i) { for (int32_t k = 0; k < nb_bits; ++k) { //this is basically multiplying as it is an AND gate //a(ciphertext1) should be the least significant bit #pragma omp parallel sections num_threads(4) { #pragma omp section bootsAND(tmp + k, a + k, e + i, keyset); #pragma omp section bootsAND(tmp2 + k, b + k, e + i, keyset); #pragma omp section bootsAND(tmp3 + k, c + k, e + i, keyset); #pragma omp section bootsAND(tmp4 + k, d + k, e + i, keyset); } } counter1 = 32 - round; counter2 = 32 - counter1; if (round > 0) { for (int32_t i = 0; i < round; ++i) { //putting number of 0s infront bootsCONSTANT(tmp3c1 + i, 0, keyset); } } //copy all the bits that fit into a int32 with the 0s inside //tmp to tmp3c1 for (int32_t i = 0; i < counter1; ++i) { // +round cause infront has the 0s //tmp is the least significant bit #pragma omp parallel sections num_threads(2) { #pragma omp section bootsCOPY(tmp3c1 + i + round , tmp + i, keyset); } } //remaining of tmp to tmp3c2 for (int32_t i = 0; i < counter2; ++i) { // +round cause infront has the 0s 
//tmp is the least significant bit #pragma omp parallel sections num_threads(2) { #pragma omp section bootsCOPY(tmp3c2 + i, tmp + i + counter1, keyset); } } //some of tmp2 to remaining of tmp3c2 for (int32_t i = 0; i < counter1; ++i) { // +round cause infront has the 0s //tmp is the least significant bit #pragma omp parallel sections num_threads(2) { #pragma omp section bootsCOPY(tmp3c2 + i + counter2, tmp2 + i, keyset); } } //remaining tmp2 to tmp3c3 for (int32_t i = 0; i < counter2; ++i) { // +round cause infront has the 0s //tmp is the least significant bit #pragma omp parallel sections num_threads(2) { #pragma omp section bootsCOPY(tmp3c3 + i, tmp2 + i + counter1, keyset); } } //some of tmp3 to remaining tmp3c3 for (int32_t i = 0; i < counter1; ++i) { #pragma omp parallel sections num_threads(2) { #pragma omp section bootsCOPY(tmp3c3 + i + counter2, tmp3 + i, keyset); } } //rest of tmp3 to tmp3c4 for (int32_t i = 0; i < counter2; ++i) { #pragma omp parallel sections num_threads(2) { #pragma omp section bootsCOPY(tmp3c4 + i, tmp3 + i + counter1, keyset); } } //some of tmp4 to remaining tmp3c4 for (int32_t i = 0; i < counter1; ++i) { #pragma omp parallel sections num_threads(2) { #pragma omp section bootsCOPY(tmp3c4 + i + counter2, tmp4 + i, keyset); } } //rest of tmp4 to tmp3c5 for (int32_t i = 0; i < counter2; ++i) { #pragma omp parallel sections num_threads(2) { #pragma omp section bootsCOPY(tmp3c5 + i, tmp4 + i + counter1, keyset); } } add(sum3c1, carry1, sum3c1, tmp3c1, carry, 32, keyset); add(sum3c2, carry2, sum3c2, tmp3c2, carry1, 32, keyset); add(sum3c3, carry3, sum3c3, tmp3c3, carry2, 32, keyset); add(sum3c4, carry4, sum3c4, tmp3c4, carry3, 32, keyset); add(sum3c5, carry5, sum3c5, tmp3c5, carry4, 32, keyset); round++; } for (int32_t i = 0; i < 32; ++i) { bootsCOPY(result + i, sum3c5 + i, keyset); bootsCOPY(result2 + i, sum3c4 + i, keyset); bootsCOPY(result3 + i, sum3c3 + i, keyset); bootsCOPY(result4 + i, sum3c2 + i, keyset); bootsCOPY(result5 + i, 
sum3c1 + i, keyset); } delete_LweSample_array(32, sum3c1); delete_LweSample_array(32, sum3c2); delete_LweSample_array(32, sum3c3); delete_LweSample_array(32, sum3c4); delete_LweSample_array(32, sum3c5); delete_LweSample_array(32, tmp); delete_LweSample_array(32, tmp2); delete_LweSample_array(32, tmp3); delete_LweSample_array(32, tmp4); delete_LweSample_array(32, tmp3c1); delete_LweSample_array(32, tmp3c2); delete_LweSample_array(32, tmp3c3); delete_LweSample_array(32, tmp3c4); delete_LweSample_array(32, tmp3c5); delete_LweSample_array(32, carry1); delete_LweSample_array(32, carry2); delete_LweSample_array(32, carry3); delete_LweSample_array(32, carry4); delete_LweSample_array(32, carry5); } int main() { // sidh_cipher_cloud should have already appended 2 cipherstreams into cloud.data printf("Reading the key...\n"); // reads the cloud key from file FILE* cloud_key = fopen("cloud.key", "rb"); TFheGateBootstrappingCloudKeySet* bk = new_tfheGateBootstrappingCloudKeySet_fromFile(cloud_key); fclose(cloud_key); // reads the nbit key from file FILE* nbit_key = fopen("nbit.key","rb"); TFheGateBootstrappingSecretKeySet* nbitkey = new_tfheGateBootstrappingSecretKeySet_fromFile(nbit_key); fclose(nbit_key); // if necessary, the params are inside the key const TFheGateBootstrappingParameterSet* params = bk->params; // if necessary, the params are inside the key const TFheGateBootstrappingParameterSet* nbitparams = nbitkey->params; // Create ciphertext blocks for negative1, bit1, negative2, bit2 and values LweSample* ciphertextbit = new_gate_bootstrapping_ciphertext_array(32, nbitparams); LweSample* ciphertextnegative1 = new_gate_bootstrapping_ciphertext_array(32, nbitparams); LweSample* ciphertextbit1 = new_gate_bootstrapping_ciphertext_array(32, nbitparams); LweSample* ciphertextnegative2 = new_gate_bootstrapping_ciphertext_array(32, nbitparams); LweSample* ciphertextbit2 = new_gate_bootstrapping_ciphertext_array(32, nbitparams); LweSample* ciphertext1 = 
new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext8 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext9 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext10 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext11 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext12 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext13 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext14 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext15 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext16 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertextcarry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertextcarry2 = new_gate_bootstrapping_ciphertext_array(32, params); printf("Reading input 1...\n"); // reads ciphertexts from cloud.data FILE* cloud_data = fopen("cloud.data", "rb"); for (int i = 0; i<32; i++) // line0 import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextnegative1[i], nbitparams); for (int i = 0; i<32; i++) // line1 import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextbit1[i], nbitparams); // Decrypts bit size1 int32_t int_bit1 = 0; for (int i=0; i<32; i++) { int ai = bootsSymDecrypt(&ciphertextbit1[i],nbitkey)>0; int_bit1 |= (ai<<i); } for (int i=0; i<32; i++) 
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext1[i], params); for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext2[i], params); for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext3[i], params); for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext4[i], params); for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext5[i], params); for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext6[i], params); for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext7[i], params); for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext8[i], params); for (int i = 0; i<32; i++) // line10 import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextcarry1[i], params); printf("Reading input 2...\n"); for (int i = 0; i<32; i++) // line11 import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextnegative2[i], nbitparams); for (int i = 0; i<32; i++) // line12 import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextbit2[i], nbitparams); // Decrypts bit size2 int32_t int_bit2 = 0; for (int i=0; i<32; i++) { int ai = bootsSymDecrypt(&ciphertextbit2[i],nbitkey)>0; int_bit2 |= (ai<<i); } for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext9[i], params); for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext10[i], params); for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext11[i], params); for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext12[i], params); for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext13[i], params); for (int i=0; i<32; i++) 
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext14[i], params); for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext15[i], params); for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext16[i], params); for (int i = 0; i<32; i++) // line21 import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextcarry2[i], params); printf("Reading operation code...\n"); // Get Operation Code from File int32_t int_op; read.open("operator.txt"); read >> int_op; // Homomorphic encryption to add negative1 and negative2 ciphertexts LweSample* ciphertextnegative = new_gate_bootstrapping_ciphertext_array(32, nbitparams); LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params); // add(ciphertextnegative, carry1, ciphertextnegative1, ciphertextnegative2, ciphertextcarry1, 32, nbitcloudkey); // NOTE // Decrypts Negative1 int32_t int_negative1 = 0; for (int i=0; i<32; i++) { int ai = bootsSymDecrypt(&ciphertextnegative1[i],nbitkey)>0; int_negative1 |= (ai<<i); } std::cout << int_negative1 << " => negative1" << "\n"; // convert first value negativity code from 2 to 1 if (int_negative1 == 2){ int_negative1 = 1;} // Decrypts Negative2 int32_t int_negative2 = 0; for (int i=0; i<32; i++) { int ai = bootsSymDecrypt(&ciphertextnegative2[i],nbitkey)>0; int_negative2 |= (ai<<i); } std::cout << int_negative2 << " => negative2" << "\n"; // Add Negatives. 
// If both v1 & v2 are positive, int_negative = 0 // If only v1 is negative, int_negative = 1 // If only v2 is negative, int_negative = 2 // If both v1 & v2 are negative, int_negative = 3 int32_t int_negative; int_negative = (int_negative1 + int_negative2); // std::cout << int_negative << " -> negatives" << "\n"; //export the negative and bit data for the verif FILE* answer_data = fopen("answer.data", "wb"); // Write negative to answer.data int32_t ciphernegative = 0; if (int_negative == 1){ ciphernegative = 1; } if (int_negative == 2){ ciphernegative = 2; } if (int_negative == 3){ ciphernegative = 4; } for (int i=0; i<32; i++) { bootsSymEncrypt(&ciphertextnegative[i], (ciphernegative>>i)&1, nbitkey); } for (int i = 0; i<32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextnegative[i], nbitparams); std::cout << ciphernegative << " => total negatives" << "\n"; delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative); // Compare bit sizes int32_t int_bit = 0; if (int_op == 4){ if (int_bit1 >= int_bit2){int_bit = (int_bit1 * 2);} else{int_bit = (int_bit2 * 2);} for (int i=0; i<32; i++) { bootsSymEncrypt(&ciphertextbit[i], (int_bit>>i)&1, nbitkey);} for (int i = 0; i<32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextbit[i], nbitparams); std::cout << int_bit << " written to answer.data" << "\n"; if (int_bit1 >= int_bit2){int_bit = int_bit1;} else{int_bit = int_bit2;} } else if (int_bit1 >= int_bit2) { int_bit = int_bit1; for (int i = 0; i<32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextbit1[i], nbitparams); std::cout << int_bit << " written to answer.data" << "\n"; } else{ int_bit = int_bit2; for (int i = 0; i<32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextbit2[i], nbitparams); std::cout << int_bit << " written to answer.data" << "\n"; } fclose(cloud_data); // If trying to multiply a 256 bit number if ((int_op == 4) && (int_bit >= 256)){ std::cout << 
"Cannot multiply 256 bit number!" << "\n"; fclose(answer_data); return 126; } // Addition //if (the operation is add AND (both numbers are positive OR both numbers are negative)) OR (the operation is subtract AND either number is negative) // A+B, [(-A)+(-B)], A-(-B), (-A)-(B) if ((int_op == 1 && (int_negative != 1 && int_negative != 2 )) || (int_op == 2 && (int_negative == 1 || int_negative == 2))) { if (int_op == 1){ std::cout << int_bit << " bit Addition computation" << "\n"; }else{ std::cout << int_bit << " bit Subtraction computation" << "\n"; } //32 Bit Addition if (int_bit == 32) { // Ciphertext to hold the result and carry LweSample* result = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params); struct timeval start, end; double get_time; gettimeofday(&start, NULL); printf("Doing the homomorphic computation...\n"); //Adding component add(result, carry1, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk); // Timings gettimeofday(&end, NULL); get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6; printf("Computation Time: %lf[sec]\n", get_time); // export the result ciphertexts to a file (for the cloud) for (int i=0; i<32; i++) // result1 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result[i], params); for (int i=0; i<32; i++) // 2 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 3 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 4 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 5 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 6 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 7 
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 8 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // carry export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); fclose(answer_data); printf("writing the answer to file...\n"); //Clean up delete_gate_bootstrapping_ciphertext_array(32, result); delete_gate_bootstrapping_ciphertext_array(32, carry1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext1); delete_gate_bootstrapping_ciphertext_array(32, ciphertext9); delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1); delete_gate_bootstrapping_cloud_keyset(bk); delete_gate_bootstrapping_secret_keyset(nbitkey); } //64 Bit Addition if (int_bit == 64) { //Ciphertext to hold the result and carry LweSample* result = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params); struct timeval start, end; double get_time; gettimeofday(&start, NULL); printf("Doing the homomorphic computation...\n"); //Adding component add(result, carry1, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk); add(result2, carry2, ciphertext2, ciphertext10, carry1, 32, bk); gettimeofday(&end, NULL); get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6; printf("Computation Time: %lf[sec]\n", get_time); printf("writing the answer to file...\n"); // export the result ciphertexts to a file (for the cloud) for (int i=0; i<32; 
i++) // result1 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result[i], params); for (int i=0; i<32; i++) // result2 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params); for (int i=0; i<32; i++) // 3 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 4 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 5 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 6 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 7 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 8 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // carry export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); fclose(answer_data); //Clean up delete_gate_bootstrapping_ciphertext_array(32, result); delete_gate_bootstrapping_ciphertext_array(32, result2); delete_gate_bootstrapping_ciphertext_array(32, carry1); delete_gate_bootstrapping_ciphertext_array(32, carry2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext1); delete_gate_bootstrapping_ciphertext_array(32, ciphertext2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext9); delete_gate_bootstrapping_ciphertext_array(32, ciphertext10); delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1); delete_gate_bootstrapping_cloud_keyset(bk); delete_gate_bootstrapping_secret_keyset(nbitkey); } //128 Bit Addition if (int_bit 
== 128) { //Ciphertext to hold the result and carry LweSample* result = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params); struct timeval start, end; double get_time; gettimeofday(&start, NULL); printf("Doing the homomorphic computation...\n"); //Adding component add(result, carry1, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk); add(result2, carry2, ciphertext2, ciphertext10, carry1, 32, bk); add(result3, carry3, ciphertext3, ciphertext11, carry2, 32, bk); add(result4, carry4, ciphertext4, ciphertext12, carry3, 32, bk); // Timing gettimeofday(&end, NULL); get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6; printf("Computation Time: %lf[sec]\n", get_time); printf("writing the answer to file...\n"); // export the result ciphertexts to a file (for the cloud) for (int i=0; i<32; i++) // result1 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result[i], params); for (int i=0; i<32; i++) // result2 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params); for (int i=0; i<32; i++) // result3 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params); for (int i=0; i<32; i++) // result4 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params); for (int i=0; i<32; i++) // 5 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 6 export_gate_bootstrapping_ciphertext_toFile(answer_data, 
&ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 7 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 8 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // carry export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); fclose(answer_data); //Clean up delete_gate_bootstrapping_ciphertext_array(32, result); delete_gate_bootstrapping_ciphertext_array(32, result2); delete_gate_bootstrapping_ciphertext_array(32, result3); delete_gate_bootstrapping_ciphertext_array(32, result4); delete_gate_bootstrapping_ciphertext_array(32, carry1); delete_gate_bootstrapping_ciphertext_array(32, carry2); delete_gate_bootstrapping_ciphertext_array(32, carry3); delete_gate_bootstrapping_ciphertext_array(32, carry4); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext1); delete_gate_bootstrapping_ciphertext_array(32, ciphertext2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext3); delete_gate_bootstrapping_ciphertext_array(32, ciphertext4); delete_gate_bootstrapping_ciphertext_array(32, ciphertext9); delete_gate_bootstrapping_ciphertext_array(32, ciphertext10); delete_gate_bootstrapping_ciphertext_array(32, ciphertext11); delete_gate_bootstrapping_ciphertext_array(32, ciphertext12); delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1); delete_gate_bootstrapping_cloud_keyset(bk); delete_gate_bootstrapping_secret_keyset(nbitkey); } //256 Bit Addition if (int_bit == 256) { // do some operations on the ciphertexts: here, we will compute the // addition of the two LweSample* result = new_gate_bootstrapping_ciphertext_array(32, params); 
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry8 = new_gate_bootstrapping_ciphertext_array(32, params); // Timing struct timeval start, end; double get_time; gettimeofday(&start, NULL); add(result, carry1, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk); add(result2, carry2, ciphertext2, ciphertext10, carry1, 32, bk); add(result3, carry3, ciphertext3, ciphertext11, carry2, 32, bk); add(result4, carry4, ciphertext4, ciphertext12, carry3, 32, bk); add(result5, carry5, ciphertext5, ciphertext13, carry4, 32, bk); add(result6, carry6, ciphertext6, ciphertext14, carry5, 32, bk); add(result7, carry7, ciphertext7, ciphertext15, carry6, 32, bk); add(result8, carry8, ciphertext8, ciphertext16, carry7, 32, bk); gettimeofday(&end, NULL); get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6; printf("Computation Time: %lf[sec]\n", get_time); printf("writing the answer to file...\n"); // export the 64 ciphertexts to a file (for the cloud) for (int i=0; i<32; i++) 
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result[i], params); for (int i=0; i<32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params); for (int i=0; i<32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params); for (int i=0; i<32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params); for (int i=0; i<32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result5[i], params); for (int i=0; i<32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result6[i], params); for (int i=0; i<32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result7[i], params); for (int i=0; i<32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result8[i], params); for (int i=0; i<32; i++) // carry export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); fclose(answer_data); // clean up all pointers delete_gate_bootstrapping_ciphertext_array(32, result); delete_gate_bootstrapping_ciphertext_array(32, result2); delete_gate_bootstrapping_ciphertext_array(32, result3); delete_gate_bootstrapping_ciphertext_array(32, result4); delete_gate_bootstrapping_ciphertext_array(32, result5); delete_gate_bootstrapping_ciphertext_array(32, result6); delete_gate_bootstrapping_ciphertext_array(32, result7); delete_gate_bootstrapping_ciphertext_array(32, result8); delete_gate_bootstrapping_ciphertext_array(32, carry1); delete_gate_bootstrapping_ciphertext_array(32, carry2); delete_gate_bootstrapping_ciphertext_array(32, carry3); delete_gate_bootstrapping_ciphertext_array(32, carry4); delete_gate_bootstrapping_ciphertext_array(32, carry5); delete_gate_bootstrapping_ciphertext_array(32, carry6); delete_gate_bootstrapping_ciphertext_array(32, carry7); delete_gate_bootstrapping_ciphertext_array(32, carry8); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1); delete_gate_bootstrapping_ciphertext_array(32, 
ciphertextnegative1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext1); delete_gate_bootstrapping_ciphertext_array(32, ciphertext2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext3); delete_gate_bootstrapping_ciphertext_array(32, ciphertext4); delete_gate_bootstrapping_ciphertext_array(32, ciphertext5); delete_gate_bootstrapping_ciphertext_array(32, ciphertext6); delete_gate_bootstrapping_ciphertext_array(32, ciphertext7); delete_gate_bootstrapping_ciphertext_array(32, ciphertext8); delete_gate_bootstrapping_ciphertext_array(32, ciphertext9); delete_gate_bootstrapping_ciphertext_array(32, ciphertext10); delete_gate_bootstrapping_ciphertext_array(32, ciphertext11); delete_gate_bootstrapping_ciphertext_array(32, ciphertext12); delete_gate_bootstrapping_ciphertext_array(32, ciphertext13); delete_gate_bootstrapping_ciphertext_array(32, ciphertext14); delete_gate_bootstrapping_ciphertext_array(32, ciphertext15); delete_gate_bootstrapping_ciphertext_array(32, ciphertext16); delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1); delete_gate_bootstrapping_cloud_keyset(bk); delete_gate_bootstrapping_secret_keyset(nbitkey); } } // Subtraction // If the operation is subtract OR (the operation is addition AND either one of the values are negative) A-B, A+(-B), (-A)+B else if (int_op == 2 || (int_op == 1 && (int_negative == 1 || int_negative == 2))){ // Normal Subtraction computation with no negative numbers A-B OR Addition with 2nd number negative A+(-B) if ((int_op == 2 && int_negative == 0) || (int_op == 1 && int_negative == 2)){ if (int_op == 2){ std::cout << int_bit << " bit Subtraction computation" << "\n"; }else { std::cout << int_bit << " bit Addition computation with 2nd value negative" << "\n"; } //32 Bit Subtraction if(int_bit == 32) { printf("Doing the homomorphic computation...\n"); LweSample* temp = 
new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params); struct timeval start, end; double get_time; gettimeofday(&start, NULL); //Subtraction Process //Step 1. Inverse the 32 bit chunks in the second input value NOT(inverse1, ciphertext9, bk, 32); //initialize temp and tempcarry to 0 zero(temp, bk, 32); zero(tempcarry1, bk, 32); //Assign temp to have a value of 1 for two's complement bootsCONSTANT(temp, 1, bk); //Add 1 to inverted add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk); LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params); //Do the addition, this is basically adding the first value to the inversed value of the second value, a + (-b) add(result1, carry1, ciphertext1, twosresult1, ciphertextcarry1, 32, bk); gettimeofday(&end, NULL); get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6; printf("Computation Time: %lf[sec]\n", get_time); printf("writing the answer to file...\n"); //export the 32 ciphertexts to a file (for the cloud) for (int i=0; i<32; i++) //result1 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params); for (int i=0; i<32; i++) // 2 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 3 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 4 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 5 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0;
i<32; i++) // 6 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 7 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 8 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // carry export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); fclose(answer_data); delete_gate_bootstrapping_ciphertext_array(32, temp); delete_gate_bootstrapping_ciphertext_array(32, inverse1); delete_gate_bootstrapping_ciphertext_array(32, tempcarry1); delete_gate_bootstrapping_ciphertext_array(32, twosresult1); delete_gate_bootstrapping_ciphertext_array(32, twoscarry1); delete_gate_bootstrapping_ciphertext_array(32, carry1); delete_gate_bootstrapping_ciphertext_array(32, result1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext1); delete_gate_bootstrapping_ciphertext_array(32, ciphertext9); delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1); delete_gate_bootstrapping_cloud_keyset(bk); delete_gate_bootstrapping_secret_keyset(nbitkey); } //64 Bit Subtraction if(int_bit == 64) { LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult2 = 
new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params); struct timeval start, end; double get_time; printf("Doing the homomorphic computation...\n"); gettimeofday(&start, NULL); //Subtraction Process //Step 1. Inverse the 32 bit chunks in the second input value NOT(inverse1, ciphertext9, bk, 32); NOT(inverse2, ciphertext10, bk, 32); //iniailize tempcarry and temp carry to 0 zero(temp, bk, 32); zero(tempcarry1, bk, 32); zero(tempcarry2, bk, 32); //Assign temp to have a value of 1 for 2nd complement bootsCONSTANT(temp, 1, bk); //Add 1 to inverted add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk); //Add the rest of the inverted add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk); LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params); //Do the addition, this is basically adding the first value to the inversed value of the second value, a + (-b) add(result1, carry1, ciphertext1, twosresult1, ciphertextcarry1, 32, bk); add(result2, carry2, ciphertext2, twosresult2, carry1, 32, bk); gettimeofday(&end, NULL); get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6; printf("Computation Time: %lf[sec]\n", get_time); printf("writing the answer to file...\n"); //export the 32 ciphertexts to a file (for the cloud) for (int i=0; i<32; i++) // result1 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params); for (int i=0; i<32; i++) // result2 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params); for (int i=0; i<32; i++) // 3 export_gate_bootstrapping_ciphertext_toFile(answer_data, 
&ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 4 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 5 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 6 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 7 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 8 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // carry export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); fclose(answer_data); delete_gate_bootstrapping_ciphertext_array(32, temp); delete_gate_bootstrapping_ciphertext_array(32, inverse1); delete_gate_bootstrapping_ciphertext_array(32, inverse2); delete_gate_bootstrapping_ciphertext_array(32, tempcarry1); delete_gate_bootstrapping_ciphertext_array(32, tempcarry2); delete_gate_bootstrapping_ciphertext_array(32, twosresult1); delete_gate_bootstrapping_ciphertext_array(32, twosresult2); delete_gate_bootstrapping_ciphertext_array(32, twoscarry1); delete_gate_bootstrapping_ciphertext_array(32, twoscarry2); delete_gate_bootstrapping_ciphertext_array(32, carry1); delete_gate_bootstrapping_ciphertext_array(32, carry2); delete_gate_bootstrapping_ciphertext_array(32, result1); delete_gate_bootstrapping_ciphertext_array(32, result2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext1); delete_gate_bootstrapping_ciphertext_array(32, ciphertext2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext9); 
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10); delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1); delete_gate_bootstrapping_cloud_keyset(bk); delete_gate_bootstrapping_secret_keyset(nbitkey); } //128 Bit Subtraction if(int_bit == 128) { // reads the 2x32 ciphertexts from the cloud file printf("Doing the homomorphic computation...\n"); //do some operations on the ciphertexts: here, we will compute the //difference of the two LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params); struct timeval start, end; double get_time; gettimeofday(&start, NULL); //Subtraction Process //Step 1. 
Inverse the 32 bit chunks in the second input value NOT(inverse1, ciphertext9, bk, 32); NOT(inverse2, ciphertext10, bk, 32); NOT(inverse3, ciphertext11, bk, 32); NOT(inverse4, ciphertext12, bk, 32); //initialize temp and tempcarry to 0 zero(temp, bk, 32); zero(tempcarry1, bk, 32); zero(tempcarry2, bk, 32); zero(tempcarry3, bk, 32); zero(tempcarry4, bk, 32); //Assign temp to have a value of 1 for two's complement bootsCONSTANT(temp, 1, bk); //Add 1 to inverted add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk); //Add the rest of the inverted add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk); add(twosresult3, twoscarry3, inverse3, tempcarry3, twoscarry2, 32, bk); add(twosresult4, twoscarry4, inverse4, tempcarry4, twoscarry3, 32, bk); LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params); //Do the addition, this is basically adding the first value to the inversed value of the second value, a + (-b) add(result1, carry1, ciphertext1, twosresult1, ciphertextcarry1, 32, bk); add(result2, carry2, ciphertext2, twosresult2, carry1, 32, bk); add(result3, carry3, ciphertext3, twosresult3, carry2, 32, bk); add(result4, carry4, ciphertext4, twosresult4, carry3, 32, bk); gettimeofday(&end, NULL); get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6; printf("Computation Time: %lf[sec]\n", get_time); printf("writing the answer to file...\n"); //export the 32 ciphertexts to a file (for the cloud)
for (int i=0; i<32; i++) // result1 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params); for (int i=0; i<32; i++) // result2 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params); for (int i=0; i<32; i++) // result3 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params); for (int i=0; i<32; i++) // result4 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params); for (int i=0; i<32; i++) // 5 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 6 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 7 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 8 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // carry export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); fclose(answer_data); //clean up all pointers delete_gate_bootstrapping_ciphertext_array(32, temp); delete_gate_bootstrapping_ciphertext_array(32, inverse1); delete_gate_bootstrapping_ciphertext_array(32, inverse2); delete_gate_bootstrapping_ciphertext_array(32, inverse3); delete_gate_bootstrapping_ciphertext_array(32, inverse4); delete_gate_bootstrapping_ciphertext_array(32, tempcarry1); delete_gate_bootstrapping_ciphertext_array(32, tempcarry2); delete_gate_bootstrapping_ciphertext_array(32, tempcarry3); delete_gate_bootstrapping_ciphertext_array(32, tempcarry4); delete_gate_bootstrapping_ciphertext_array(32, twosresult1); delete_gate_bootstrapping_ciphertext_array(32, twosresult2); delete_gate_bootstrapping_ciphertext_array(32, twosresult3); delete_gate_bootstrapping_ciphertext_array(32, twosresult4); delete_gate_bootstrapping_ciphertext_array(32, twoscarry1); delete_gate_bootstrapping_ciphertext_array(32, twoscarry2); 
delete_gate_bootstrapping_ciphertext_array(32, twoscarry3); delete_gate_bootstrapping_ciphertext_array(32, twoscarry4); delete_gate_bootstrapping_ciphertext_array(32, carry1); delete_gate_bootstrapping_ciphertext_array(32, carry2); delete_gate_bootstrapping_ciphertext_array(32, carry3); delete_gate_bootstrapping_ciphertext_array(32, carry4); delete_gate_bootstrapping_ciphertext_array(32, result1); delete_gate_bootstrapping_ciphertext_array(32, result2); delete_gate_bootstrapping_ciphertext_array(32, result3); delete_gate_bootstrapping_ciphertext_array(32, result4); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext1); delete_gate_bootstrapping_ciphertext_array(32, ciphertext2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext3); delete_gate_bootstrapping_ciphertext_array(32, ciphertext4); delete_gate_bootstrapping_ciphertext_array(32, ciphertext9); delete_gate_bootstrapping_ciphertext_array(32, ciphertext10); delete_gate_bootstrapping_ciphertext_array(32, ciphertext11); delete_gate_bootstrapping_ciphertext_array(32, ciphertext12); delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1); delete_gate_bootstrapping_cloud_keyset(bk); delete_gate_bootstrapping_secret_keyset(nbitkey); } //256 Bit Subtraction if (int_bit == 256) { // reads the 2x32 ciphertexts from the cloud file printf("Doing the homomorphic computation...\n"); //do some operations on the ciphertexts: here, we will compute the //difference of the two LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse3 = 
new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse8 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry8 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult8 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry5 = 
new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry8 = new_gate_bootstrapping_ciphertext_array(32, params); struct timeval start, end; double get_time; gettimeofday(&start, NULL); //Subtraction Process //Step 1. Inverse the 32 bit chunks in the second input value NOT(inverse1, ciphertext9, bk, 32); NOT(inverse2, ciphertext10, bk, 32); NOT(inverse3, ciphertext11, bk, 32); NOT(inverse4, ciphertext12, bk, 32); NOT(inverse5, ciphertext13, bk, 32); NOT(inverse6, ciphertext14, bk, 32); NOT(inverse7, ciphertext15, bk, 32); NOT(inverse8, ciphertext16, bk, 32); //iniailize tempcarry and temp carry to 0 zero(temp, bk, 32); zero(tempcarry1, bk, 32); zero(tempcarry2, bk, 32); zero(tempcarry3, bk, 32); zero(tempcarry4, bk, 32); zero(tempcarry5, bk, 32); zero(tempcarry6, bk, 32); zero(tempcarry7, bk, 32); zero(tempcarry8, bk, 32); //Assign temp to have a value of 1 for 2nd complement bootsCONSTANT(temp, 1, bk); //Add 1 to inverted add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk); //Add the rest of the inverted add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk); add(twosresult3, twoscarry3, inverse3, tempcarry3, twoscarry2, 32, bk); add(twosresult4, twoscarry4, inverse4, tempcarry4, twoscarry3, 32, bk); add(twosresult5, twoscarry5, inverse5, tempcarry5, twoscarry4, 32, bk); add(twosresult6, twoscarry6, inverse6, tempcarry6, twoscarry5, 32, bk); add(twosresult7, twoscarry7, inverse7, tempcarry7, twoscarry6, 32, bk); add(twosresult8, twoscarry8, inverse8, tempcarry8, twoscarry7, 32, bk); LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, 
params); LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry8 = new_gate_bootstrapping_ciphertext_array(32, params); //Do the addition, this is basically adding the first value to the inversed value of the second value, a + (-b) add(result1, carry1, ciphertext1, twosresult1, ciphertextcarry1, 32, bk); add(result2, carry2, ciphertext2, twosresult2, carry1, 32, bk); add(result3, carry3, ciphertext3, twosresult3, carry2, 32, bk); add(result4, carry4, ciphertext4, twosresult4, carry3, 32, bk); add(result5, carry5, ciphertext5, twosresult5, carry4, 32, bk); add(result6, carry6, ciphertext6, twosresult6, carry5, 32, bk); add(result7, carry7, ciphertext7, twosresult7, carry6, 32, bk); add(result8, carry8, ciphertext8, twosresult8, carry7, 32, bk); gettimeofday(&end, NULL); get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6; printf("Computation Time: %lf[sec]\n", get_time); printf("Writing the answer to file...\n"); //export the 32 ciphertexts to a file (for the cloud) for (int i=0; i<32; i++) //result1 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params); for (int i=0; i<32; i++) //result2 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], 
params); for (int i=0; i<32; i++) //result3 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params); for (int i=0; i<32; i++) //result4 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params); for (int i=0; i<32; i++) //result5 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result5[i], params); for (int i=0; i<32; i++) //result6 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result6[i], params); for (int i=0; i<32; i++) //result7 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result7[i], params); for (int i=0; i<32; i++) //result8 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result8[i], params); for (int i=0; i<32; i++) // carry export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); fclose(answer_data); //clean up all pointers delete_gate_bootstrapping_ciphertext_array(32, temp); delete_gate_bootstrapping_ciphertext_array(32, inverse1); delete_gate_bootstrapping_ciphertext_array(32, inverse2); delete_gate_bootstrapping_ciphertext_array(32, inverse3); delete_gate_bootstrapping_ciphertext_array(32, inverse4); delete_gate_bootstrapping_ciphertext_array(32, inverse5); delete_gate_bootstrapping_ciphertext_array(32, inverse6); delete_gate_bootstrapping_ciphertext_array(32, inverse7); delete_gate_bootstrapping_ciphertext_array(32, inverse8); delete_gate_bootstrapping_ciphertext_array(32, tempcarry1); delete_gate_bootstrapping_ciphertext_array(32, tempcarry2); delete_gate_bootstrapping_ciphertext_array(32, tempcarry3); delete_gate_bootstrapping_ciphertext_array(32, tempcarry4); delete_gate_bootstrapping_ciphertext_array(32, tempcarry5); delete_gate_bootstrapping_ciphertext_array(32, tempcarry6); delete_gate_bootstrapping_ciphertext_array(32, tempcarry7); delete_gate_bootstrapping_ciphertext_array(32, tempcarry8); delete_gate_bootstrapping_ciphertext_array(32, twosresult1); delete_gate_bootstrapping_ciphertext_array(32, twosresult2); 
delete_gate_bootstrapping_ciphertext_array(32, twosresult3); delete_gate_bootstrapping_ciphertext_array(32, twosresult4); delete_gate_bootstrapping_ciphertext_array(32, twosresult5); delete_gate_bootstrapping_ciphertext_array(32, twosresult6); delete_gate_bootstrapping_ciphertext_array(32, twosresult7); delete_gate_bootstrapping_ciphertext_array(32, twosresult8); delete_gate_bootstrapping_ciphertext_array(32, twoscarry1); delete_gate_bootstrapping_ciphertext_array(32, twoscarry2); delete_gate_bootstrapping_ciphertext_array(32, twoscarry3); delete_gate_bootstrapping_ciphertext_array(32, twoscarry4); delete_gate_bootstrapping_ciphertext_array(32, twoscarry5); delete_gate_bootstrapping_ciphertext_array(32, twoscarry6); delete_gate_bootstrapping_ciphertext_array(32, twoscarry7); delete_gate_bootstrapping_ciphertext_array(32, twoscarry8); delete_gate_bootstrapping_ciphertext_array(32, carry1); delete_gate_bootstrapping_ciphertext_array(32, carry2); delete_gate_bootstrapping_ciphertext_array(32, result1); delete_gate_bootstrapping_ciphertext_array(32, result2); delete_gate_bootstrapping_ciphertext_array(32, result3); delete_gate_bootstrapping_ciphertext_array(32, result4); delete_gate_bootstrapping_ciphertext_array(32, result5); delete_gate_bootstrapping_ciphertext_array(32, result6); delete_gate_bootstrapping_ciphertext_array(32, result7); delete_gate_bootstrapping_ciphertext_array(32, result8); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext1); delete_gate_bootstrapping_ciphertext_array(32, ciphertext2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext3); delete_gate_bootstrapping_ciphertext_array(32, ciphertext4); delete_gate_bootstrapping_ciphertext_array(32, ciphertext5); 
delete_gate_bootstrapping_ciphertext_array(32, ciphertext6); delete_gate_bootstrapping_ciphertext_array(32, ciphertext7); delete_gate_bootstrapping_ciphertext_array(32, ciphertext8); delete_gate_bootstrapping_ciphertext_array(32, ciphertext9); delete_gate_bootstrapping_ciphertext_array(32, ciphertext10); delete_gate_bootstrapping_ciphertext_array(32, ciphertext11); delete_gate_bootstrapping_ciphertext_array(32, ciphertext12); delete_gate_bootstrapping_ciphertext_array(32, ciphertext13); delete_gate_bootstrapping_ciphertext_array(32, ciphertext14); delete_gate_bootstrapping_ciphertext_array(32, ciphertext15); delete_gate_bootstrapping_ciphertext_array(32, ciphertext16); delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1); delete_gate_bootstrapping_cloud_keyset(bk); delete_gate_bootstrapping_secret_keyset(nbitkey); } } //Addition (for subtraction) with value 1 being a negative number (-A)+B else{ if (int_op == 2){ std::cout << int_bit << " bit Subtraction computation" << "\n"; }else { std::cout << int_bit << " bit Addition computation with 1st value negative" << "\n"; } if(int_bit == 32){ LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params); struct timeval start, end; double get_time; printf("Doing the homomorphic computation...\n"); gettimeofday(&start, NULL); //Subtraction Process //Step 1. 
Inverse the 32 bit chunks in the first input value NOT(inverse1, ciphertext1, bk, 32); //iniailize tempcarry and temp carry to 0 zero(temp, bk, 32); zero(tempcarry1, bk, 32); //Assign temp to have a value of 1 for 2nd complement bootsCONSTANT(temp, 1, bk); //Add 1 to inverted add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk); LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params); //Do the addition, this is basically adding the first value to the inversed value of the second value, a + (-b) add(result1, carry1, ciphertext9, twosresult1, ciphertextcarry1, 32, bk); gettimeofday(&end, NULL); get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6; printf("Computation Time: %lf[sec]\n", get_time); printf("writing the answer to file...\n"); //export the 32 ciphertexts to a file (for the cloud) for (int i=0; i<32; i++) // result1 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params); for (int i=0; i<32; i++) // 2 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 3 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 4 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 5 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 6 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 7 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 8 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // carry export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); 
fclose(answer_data); delete_gate_bootstrapping_ciphertext_array(32, temp); delete_gate_bootstrapping_ciphertext_array(32, inverse1); delete_gate_bootstrapping_ciphertext_array(32, tempcarry1); delete_gate_bootstrapping_ciphertext_array(32, twosresult1); delete_gate_bootstrapping_ciphertext_array(32, twoscarry1); delete_gate_bootstrapping_ciphertext_array(32, carry1); delete_gate_bootstrapping_ciphertext_array(32, result1); delete_gate_bootstrapping_ciphertext_array(32, ciphertext1); delete_gate_bootstrapping_ciphertext_array(32, ciphertext9); delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1); delete_gate_bootstrapping_cloud_keyset(bk); delete_gate_bootstrapping_secret_keyset(nbitkey); } else if (int_bit == 64){ LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params); struct timeval start, end; double get_time; printf("Doing the homomorphic computation...\n"); gettimeofday(&start, NULL); //Subtraction Process //Step 1. 
// (cont.) Step 1: invert every 32-bit chunk of the first operand (one's complement).
NOT(inverse1, ciphertext1, bk, 32);
NOT(inverse2, ciphertext2, bk, 32);
// Initialize temp and the temporary carries to encrypted zero.
zero(temp, bk, 32);
zero(tempcarry1, bk, 32);
zero(tempcarry2, bk, 32);
// Encode the constant 1 into temp for the two's-complement "+1" step.
bootsCONSTANT(temp, 1, bk);
// Add 1 to the inverted low chunk ...
add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
// ... then ripple the resulting carry through the remaining inverted chunk.
add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
// Final addition: b + (-a) -- add the two's complement of the first operand
// to the second operand, chaining the chunk carry between the two adds.
add(result1, carry1, ciphertext9, twosresult1, ciphertextcarry1, 32, bk);
add(result2, carry2, ciphertext10, twosresult2, carry1, 32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
printf("writing the answer to file...\n");
// Export the 32-sample ciphertext chunks to the answer file (for the cloud).
// NOTE(review): slots 3-8 and the trailing "carry" slot all re-export
// ciphertextcarry1 -- presumably padding to a fixed 9-slot record; confirm.
for (int i=0; i<32; i++) // result1
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
for (int i=0; i<32; i++) // result2
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i=0; i<32; i++) // 3
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 4
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 5
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 6
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 7
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 8
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // carry
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
// Free every ciphertext array and both keysets used by this 64-bit branch.
delete_gate_bootstrapping_ciphertext_array(32, temp);
delete_gate_bootstrapping_ciphertext_array(32, inverse1);
delete_gate_bootstrapping_ciphertext_array(32, inverse2);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, carry2);
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
} else if (int_bit == 128){
printf("Doing the homomorphic computation...\n");
// Homomorphic computation: the 128-bit (4 x 32-bit chunk) difference of the
// two operands, via two's complement then chunk-wise ripple addition.
LweSample* temp =
new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
// Subtraction process.
// Step 1. Invert every 32-bit chunk of the first operand (one's complement).
NOT(inverse1, ciphertext1, bk, 32);
NOT(inverse2, ciphertext2, bk, 32);
NOT(inverse3, ciphertext3, bk, 32);
NOT(inverse4, ciphertext4, bk, 32);
// Initialize temp and the temporary carries to encrypted zero.
zero(temp, bk, 32);
zero(tempcarry1, bk, 32);
zero(tempcarry2, bk, 32);
zero(tempcarry3, bk, 32);
zero(tempcarry4, bk, 32);
// Encode the constant 1 into temp for the two's-complement "+1" step.
bootsCONSTANT(temp, 1, bk);
// Add 1 to the inverted low chunk, then ripple the carry upward through
// the remaining inverted chunks.
add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
add(twosresult3, twoscarry3, inverse3, tempcarry3, twoscarry2, 32, bk);
add(twosresult4, twoscarry4, inverse4, tempcarry4, twoscarry3, 32, bk);
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);
// Final addition: b + (-a), chunk by chunk with chained carries.
add(result1, carry1, ciphertext9, twosresult1, ciphertextcarry1, 32, bk);
add(result2, carry2, ciphertext10, twosresult2, carry1, 32, bk);
add(result3, carry3, ciphertext11, twosresult3, carry2, 32, bk);
add(result4, carry4, ciphertext12, twosresult4, carry3, 32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
printf("writing the answer to file...\n");
// Export the result chunks to the answer file (for the cloud).
// NOTE(review): slots 5-8 and "carry" re-export ciphertextcarry1 --
// presumably padding to a fixed 9-slot record; confirm against the reader.
for (int i=0; i<32; i++) // result1
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
for (int i=0; i<32; i++) // result2
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i=0; i<32; i++) // result3
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
for (int i=0; i<32; i++) // result4
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params);
for (int i=0; i<32; i++) // 5
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 6
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 7
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 8
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // carry
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
// Clean up all pointers.
// NOTE(review): ciphertext9-12 (the second operand's chunks) are never freed
// in this branch -- looks like a leak; confirm against the allocation site.
delete_gate_bootstrapping_ciphertext_array(32, temp);
delete_gate_bootstrapping_ciphertext_array(32, inverse1);
delete_gate_bootstrapping_ciphertext_array(32, inverse2);
delete_gate_bootstrapping_ciphertext_array(32, inverse3);
delete_gate_bootstrapping_ciphertext_array(32, inverse4);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry3);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry4);
delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
delete_gate_bootstrapping_ciphertext_array(32, twosresult3);
delete_gate_bootstrapping_ciphertext_array(32, twosresult4);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry3);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry4);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, carry2);
delete_gate_bootstrapping_ciphertext_array(32, carry3);
delete_gate_bootstrapping_ciphertext_array(32, carry4);
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, result3);
delete_gate_bootstrapping_ciphertext_array(32, result4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext5);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext6);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext7);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext8);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
} else if (int_bit == 256){
printf("Doing the homomorphic computation...\n");
// Homomorphic computation: the 256-bit (8 x 32-bit chunk) difference of the
// two operands, via two's complement then chunk-wise ripple addition.
LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse4 =
new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry8 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
// Subtraction process.
// Step 1. Invert every 32-bit chunk of the first operand (one's complement).
NOT(inverse1, ciphertext1, bk, 32);
NOT(inverse2, ciphertext2, bk, 32);
NOT(inverse3, ciphertext3, bk, 32);
NOT(inverse4, ciphertext4, bk, 32);
NOT(inverse5, ciphertext5, bk, 32);
NOT(inverse6, ciphertext6, bk, 32);
NOT(inverse7, ciphertext7, bk, 32);
NOT(inverse8, ciphertext8, bk, 32);
// Initialize temp and the temporary carries to encrypted zero.
zero(temp, bk, 32);
zero(tempcarry1, bk, 32);
zero(tempcarry2, bk, 32);
zero(tempcarry3, bk, 32);
zero(tempcarry4, bk, 32);
zero(tempcarry5, bk, 32);
zero(tempcarry6, bk, 32);
zero(tempcarry7, bk, 32);
zero(tempcarry8, bk, 32);
// Encode the constant 1 into temp for the two's-complement "+1" step.
bootsCONSTANT(temp, 1, bk);
// Add 1 to the inverted low chunk, then ripple the carry upward through
// the remaining inverted chunks.
add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
add(twosresult3, twoscarry3, inverse3, tempcarry3, twoscarry2, 32, bk);
add(twosresult4, twoscarry4, inverse4, tempcarry4, twoscarry3, 32, bk);
add(twosresult5, twoscarry5, inverse5, tempcarry5, twoscarry4, 32, bk);
add(twosresult6, twoscarry6, inverse6, tempcarry6, twoscarry5, 32, bk);
add(twosresult7, twoscarry7, inverse7, tempcarry7, twoscarry6, 32, bk);
add(twosresult8, twoscarry8, inverse8, tempcarry8, twoscarry7, 32, bk);
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry8 = new_gate_bootstrapping_ciphertext_array(32, params);
// Final addition: b + (-a), chunk by chunk with chained carries.
add(result1, carry1, ciphertext9, twosresult1, ciphertextcarry1, 32, bk);
add(result2, carry2, ciphertext10, twosresult2, carry1, 32, bk);
add(result3, carry3, ciphertext11, twosresult3, carry2, 32, bk);
add(result4, carry4, ciphertext12, twosresult4, carry3, 32, bk);
add(result5, carry5, ciphertext13, twosresult5, carry4, 32, bk);
add(result6, carry6, ciphertext14, twosresult6, carry5, 32, bk);
add(result7, carry7, ciphertext15, twosresult7, carry6, 32, bk);
add(result8, carry8, ciphertext16, twosresult8, carry7, 32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
printf("Writing the answer to file...\n");
// Export all eight result chunks plus the carry slot to the answer file.
for (int i=0; i<32; i++) // 1
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
for (int i=0; i<32; i++) // 2
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i=0; i<32; i++) // 3
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
for (int i=0; i<32; i++) // 4
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params);
for (int i=0; i<32; i++) // 5
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result5[i], params);
for (int i=0; i<32; i++) // 6
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result6[i], params);
for (int i=0; i<32; i++) // 7
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result7[i], params);
for (int i=0; i<32; i++) // 8
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result8[i], params);
for (int i=0; i<32; i++) // carry
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
// Clean up all pointers.
// NOTE(review): only carry1 and carry2 are freed here; carry3-carry8 are
// never deleted -- looks like a leak; confirm against the 128-bit branch.
delete_gate_bootstrapping_ciphertext_array(32, temp);
delete_gate_bootstrapping_ciphertext_array(32, inverse1);
delete_gate_bootstrapping_ciphertext_array(32, inverse2);
delete_gate_bootstrapping_ciphertext_array(32, inverse3);
delete_gate_bootstrapping_ciphertext_array(32, inverse4);
delete_gate_bootstrapping_ciphertext_array(32, inverse5);
delete_gate_bootstrapping_ciphertext_array(32, inverse6);
delete_gate_bootstrapping_ciphertext_array(32, inverse7);
delete_gate_bootstrapping_ciphertext_array(32, inverse8);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry3);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry4);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry5);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry6);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry7);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry8);
delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
delete_gate_bootstrapping_ciphertext_array(32, twosresult3);
delete_gate_bootstrapping_ciphertext_array(32, twosresult4);
delete_gate_bootstrapping_ciphertext_array(32, twosresult5);
delete_gate_bootstrapping_ciphertext_array(32, twosresult6);
delete_gate_bootstrapping_ciphertext_array(32, twosresult7);
delete_gate_bootstrapping_ciphertext_array(32, twosresult8);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry3);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry4);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry5);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry6);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry7);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry8);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, carry2);
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, result3);
delete_gate_bootstrapping_ciphertext_array(32, result4);
delete_gate_bootstrapping_ciphertext_array(32, result5);
delete_gate_bootstrapping_ciphertext_array(32, result6);
delete_gate_bootstrapping_ciphertext_array(32, result7);
delete_gate_bootstrapping_ciphertext_array(32, result8);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext5);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext6);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext7);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext8);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext13);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext14);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext15);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext16);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
}
}
// Multiplication operation.
else if (int_op == 4){
std::cout << int_bit << " bit Multiplication computation" << "\n";
if (int_bit == 128){
printf("Doing the homomorphic computation...\n");
// Homomorphic computation: product of the two operands.
// result1-result20 hold the partial products of the four mul128 calls below.
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result9 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result10 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result11 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result12 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result13 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result14 =
new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result15 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result16 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result17 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result18 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result19 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result20 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum9 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum10 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum11 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum12 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum13 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum14 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum15 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover9 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover10 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover11 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover12 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover13 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover14 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover15 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
// Multiply the 128-bit first operand (ciphertext1-4) by each 32-bit chunk of
// the second operand (ciphertext9-12); each mul128 call produces one row of
// five 32-bit partial-product chunks.
// row 1 (x ciphertext9)
mul128(result1, result2, result3, result4, result5, ciphertext1, ciphertext2,ciphertext3,ciphertext4,ciphertext9,ciphertextcarry1, 32, bk);
// row 2 (x ciphertext10)
mul128(result6, result7, result8, result9, result10, ciphertext1, ciphertext2, ciphertext3, ciphertext4, ciphertext10,ciphertextcarry1, 32, bk);
// row 3 (x ciphertext11)
mul128(result11, result12, result13, result14, result15, ciphertext1, ciphertext2,ciphertext3,ciphertext4,ciphertext11,ciphertextcarry1, 32, bk);
// row 4 (x ciphertext12)
mul128(result16,result17, result18,result19,result20, ciphertext1, ciphertext2,ciphertext3,ciphertext4,ciphertext12,ciphertextcarry1, 32, bk);
// Accumulate the shifted partial-product rows, chaining the carries
// (NOTE(review): carry chaining across rows looks intentional but intricate;
// verify the column alignment against the decryptor's expected layout).
add(sum1, carryover1, result10, result4, ciphertextcarry1, 32, bk);
add(sum2, carryover2, result9, result3,carryover1,32, bk);
add(sum3, carryover3, result8, result2,carryover2,32, bk);
add(sum4, carryover4, result7, result1,carryover3,32, bk);
add(sum5, carryover5, result6, ciphertextcarry1,carryover4,32, bk);
add(sum6, carryover6, sum2, result15,carryover5,32, bk);
add(sum7, carryover7, sum3, result14,carryover6,32, bk);
add(sum8, carryover8, sum4, result13,carryover7,32, bk);
add(sum9, carryover9, sum5, result12,carryover8,32, bk);
add(sum10, carryover10, result11, ciphertextcarry1,carryover9,32, bk);
add(sum11, carryover11, sum7, result20,carryover10,32, bk);
add(sum12, carryover12, sum8, result19,carryover11,32, bk);
add(sum13, carryover13, sum9, result18,carryover12,32, bk);
add(sum14, carryover14, sum10, result17,carryover13,32, bk);
add(sum15, carryover15, result16 , ciphertextcarry1,carryover14,32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
// Append the computation time to the timing log file.
FILE *t_file;
t_file = fopen(T_FILE, "a");
fprintf(t_file, "%lf\n", get_time);
fclose(t_file);
printf("writing the answer to file...\n");
// Export the product chunks (lowest chunk first) plus the carry slot.
for (int i=0; i<32; i++) // result1
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result5[i], params);
for (int i=0; i<32; i++) // result2
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum1[i], params);
for (int i=0; i<32; i++) // result3
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum6[i], params);
for (int i=0; i<32; i++) // result4
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum11[i], params);
for (int i=0; i<32; i++) // result5
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum12[i], params);
for (int i=0; i<32; i++) // result6
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum13[i], params);
for (int i=0; i<32; i++) // result7
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum14[i], params);
for (int i=0; i<32; i++) // result8
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum15[i], params);
for (int i=0; i<32; i++) // carry
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
// Clean up all pointers.
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, result3);
delete_gate_bootstrapping_ciphertext_array(32, result4);
delete_gate_bootstrapping_ciphertext_array(32, result5);
delete_gate_bootstrapping_ciphertext_array(32, result6);
delete_gate_bootstrapping_ciphertext_array(32, result7);
delete_gate_bootstrapping_ciphertext_array(32, result8);
delete_gate_bootstrapping_ciphertext_array(32, result9);
delete_gate_bootstrapping_ciphertext_array(32, result10);
delete_gate_bootstrapping_ciphertext_array(32, result11);
delete_gate_bootstrapping_ciphertext_array(32, result12);
delete_gate_bootstrapping_ciphertext_array(32, result13);
delete_gate_bootstrapping_ciphertext_array(32, result14);
delete_gate_bootstrapping_ciphertext_array(32, result15);
delete_gate_bootstrapping_ciphertext_array(32, result16);
delete_gate_bootstrapping_ciphertext_array(32, result17);
delete_gate_bootstrapping_ciphertext_array(32, result18);
delete_gate_bootstrapping_ciphertext_array(32, result19);
delete_gate_bootstrapping_ciphertext_array(32, result20);
delete_gate_bootstrapping_ciphertext_array(32, sum1);
delete_gate_bootstrapping_ciphertext_array(32, sum2);
delete_gate_bootstrapping_ciphertext_array(32, sum3);
delete_gate_bootstrapping_ciphertext_array(32, sum4);
delete_gate_bootstrapping_ciphertext_array(32, sum5);
delete_gate_bootstrapping_ciphertext_array(32, sum6);
delete_gate_bootstrapping_ciphertext_array(32, sum7);
delete_gate_bootstrapping_ciphertext_array(32, sum8);
delete_gate_bootstrapping_ciphertext_array(32, sum9);
delete_gate_bootstrapping_ciphertext_array(32, sum10);
delete_gate_bootstrapping_ciphertext_array(32, sum11);
delete_gate_bootstrapping_ciphertext_array(32, sum12);
delete_gate_bootstrapping_ciphertext_array(32, sum13);
delete_gate_bootstrapping_ciphertext_array(32, sum14);
delete_gate_bootstrapping_ciphertext_array(32, sum15);
delete_gate_bootstrapping_ciphertext_array(32, carryover1);
delete_gate_bootstrapping_ciphertext_array(32, carryover2);
delete_gate_bootstrapping_ciphertext_array(32, carryover3);
delete_gate_bootstrapping_ciphertext_array(32, carryover4);
delete_gate_bootstrapping_ciphertext_array(32, carryover5);
delete_gate_bootstrapping_ciphertext_array(32, carryover6);
delete_gate_bootstrapping_ciphertext_array(32, carryover7);
delete_gate_bootstrapping_ciphertext_array(32, carryover8);
delete_gate_bootstrapping_ciphertext_array(32, carryover9);
delete_gate_bootstrapping_ciphertext_array(32, carryover10);
delete_gate_bootstrapping_ciphertext_array(32, carryover11);
delete_gate_bootstrapping_ciphertext_array(32, carryover12);
delete_gate_bootstrapping_ciphertext_array(32, carryover13);
delete_gate_bootstrapping_ciphertext_array(32, carryover14);
delete_gate_bootstrapping_ciphertext_array(32, carryover15);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
} else if (int_bit == 64){
printf("Doing the homomorphic computation...\n");
// Homomorphic computation: 64-bit product of the two operands.
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result3 =
new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* finalresult = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* finalresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* finalresult3 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
// Multiply the 64-bit first operand (ciphertext1, ciphertext2) by each
// 32-bit chunk of the second operand (ciphertext9, ciphertext10).
// row 1 (x ciphertext9)
mul64(result1,result2, result3, ciphertext1, ciphertext2,ciphertext9,ciphertextcarry1, 32, bk);
// row 2 (x ciphertext10)
mul64(result4,result5, result6, ciphertext1, ciphertext2,ciphertext10,ciphertextcarry1, 32, bk);
// Combine the two partial-product rows into the final chunks.
split(finalresult,finalresult2, finalresult3, result1, result2,result4,result5,result6,ciphertextcarry1,32,bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
// Append the computation time to the timing log file.
FILE *t_file;
t_file = fopen(T_FILE, "a");
fprintf(t_file, "%lf\n", get_time);
fclose(t_file);
printf("writing the answer to file...\n");
// Export the product chunks; slots 5-8 and "carry" re-export ciphertextcarry1
// (NOTE(review): presumably padding to a fixed 9-slot record; confirm).
for (int i=0; i<32; i++) // result1
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
for (int i=0; i<32; i++) // result2
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &finalresult3[i], params);
for (int i=0; i<32; i++) // result3
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &finalresult2[i], params);
for (int i=0; i<32; i++) // result4
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &finalresult[i], params);
for (int i=0; i<32; i++) // 5
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 6
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 7
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 8
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // carry
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
// Clean up all pointers.
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, result3);
delete_gate_bootstrapping_ciphertext_array(32, result4);
delete_gate_bootstrapping_ciphertext_array(32, result5);
delete_gate_bootstrapping_ciphertext_array(32, result6);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_ciphertext_array(32, finalresult);
delete_gate_bootstrapping_ciphertext_array(32, finalresult2);
delete_gate_bootstrapping_ciphertext_array(32, finalresult3);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
} else if (int_bit == 32){
printf("Doing the homomorphic computation...\n");
// Homomorphic computation: 32-bit product of the two operands.
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
// Single 32x32 multiply: result1/result2 receive the two product words.
mul32(result1,result2,ciphertext1, ciphertext9,ciphertextcarry1, 32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
// Append the computation time to the timing log file.
FILE *t_file;
t_file = fopen(T_FILE, "a");
fprintf(t_file, "%lf\n", get_time);
fclose(t_file);
printf("writing the answer to file...\n");
// Export the product words plus padding slots.
// NOTE(review): result2 is written into the "result1" slot and result1 into
// the "result2" slot -- verify the word order against the decryptor.
for (int i=0; i<32; i++) // result1
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i=0; i<32; i++) // result2
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
for (int i=0; i<32; i++) // 3
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 4
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 5
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 6
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 7
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 8
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // carry
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
// Clean up all pointers (continues past this chunk).
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32,
ciphertext1); delete_gate_bootstrapping_ciphertext_array(32, ciphertext9); delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1); delete_gate_bootstrapping_cloud_keyset(bk); delete_gate_bootstrapping_secret_keyset(nbitkey); } } }
fwi_core.c
/* * ============================================================================= * Copyright (c) 2016-2018, Barcelona Supercomputing Center (BSC) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the <organization> nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ============================================================================= */ #include "fwi/fwi_core.h" #include "fwi/fwi_sched.h" /* * In order to generate a source for injection, * /system/support/bscgeo/src/wavelet.c * functions can be used. 
*/ void kernel( propagator_t propagator, real waveletFreq, int shotid, char* outputfolder, char* shotfolder) { #if defined(USE_MPI) /* find ourselves into the MPI space */ int mpi_rank, mpi_size; MPI_Comm_rank( MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size( MPI_COMM_WORLD, &mpi_size); #endif /* USE_MPI */ /* local variables */ int stacki; double start_t, end_t; real dt,dz,dx,dy; integer dimmz, dimmx, dimmy, MaxYPlanesPerWorker, forw_steps, back_steps; load_shot_parameters( shotid, &stacki, &dt, &forw_steps, &back_steps, &dz, &dx, &dy, &dimmz, &dimmx, &dimmy, &MaxYPlanesPerWorker, outputfolder, waveletFreq ); #if defined(USE_MPI) /* aux variables, just to make it more readable */ const int FIRSTRANK = 0; const int LASTRANK = mpi_size - 1; /* Compute the integration limits in order to load the correct slice from the input * velocity model. These are not the limits for the wave propagator! (they are local, * i.e. starts at zero!) */ const integer y0 = (mpi_rank == FIRSTRANK) ? 0 : (MaxYPlanesPerWorker * mpi_rank) - HALO; const integer yf = (mpi_rank == LASTRANK ) ? dimmy : y0 + MaxYPlanesPerWorker; const integer edimmy = (yf - y0); #else const integer y0 = 0; const integer yf = dimmy; const integer edimmy = dimmy; #endif /* USE_MPI */ /* Compute integration limits for the wave propagator. 
* It assumes that the volume is local, so the indices start at zero */ const integer nz0 = 0; const integer ny0 = 0; const integer nx0 = 0; const integer nzf = dimmz; const integer nxf = dimmx; const integer nyf = edimmy; const integer numberOfCells = dimmz * dimmx * edimmy; real *rho; v_t v; s_t s; coeff_t coeffs; print_debug("The length of local arrays is " I " cells zxy[%d][%d][%d]", numberOfCells, nzf, nxf, nyf); /* allocate shot memory */ alloc_memory_shot ( dimmz, dimmx, (nyf - ny0), &coeffs, &s, &v, &rho); /* load initial model from a binary file */ load_local_velocity_model ( waveletFreq, dimmz, dimmx, y0, yf, &coeffs, &s, &v, rho); /* Allocate memory for IO buffer */ real* io_buffer = (real*) __malloc( ALIGN_REAL, numberOfCells * sizeof(real) * WRITTEN_FIELDS ); /* inspects every array positions for leaks. Enabled when DEBUG flag is defined */ check_memory_shot ( dimmz, dimmx, (nyf - ny0), &coeffs, &s, &v, rho); /* Perform forward, backward or test propagations */ switch( propagator ) { case( RTM_KERNEL ): { start_t = dtime(); propagate_shot ( FORWARD, v, s, coeffs, rho, forw_steps, back_steps -1, dt,dz,dx,dy, nz0, nzf, nx0, nxf, ny0, nyf, stacki, shotfolder, io_buffer, dimmz, dimmx, (nyf - ny0)); end_t = dtime(); print_stats("Forward propagation finished in %lf seconds", end_t - start_t ); start_t = dtime(); propagate_shot ( BACKWARD, v, s, coeffs, rho, forw_steps, back_steps -1, dt,dz,dx,dy, nz0, nzf, nx0, nxf, ny0, nyf, stacki, shotfolder, io_buffer, dimmz, dimmx, (nyf - ny0)); end_t = dtime(); print_stats("Backward propagation finished in %lf seconds", end_t - start_t ); #if defined(DO_NOT_PERFORM_IO) print_info("Warning: we are not creating gradient nor preconditioner " "fields, because IO is not enabled for this execution" ); #else #if defined(USE_MPI) if ( mpi_rank == 0 ) #endif /* USE_MPI */ { char fnameGradient[300]; char fnamePrecond[300]; sprintf( fnameGradient, "%s/gradient_%05d.dat", shotfolder, shotid ); sprintf( fnamePrecond , 
"%s/precond_%05d.dat" , shotfolder, shotid ); FILE* fgradient = safe_fopen( fnameGradient, "wb", __FILE__, __LINE__ ); FILE* fprecond = safe_fopen( fnamePrecond , "wb", __FILE__, __LINE__ ); print_info("Storing local preconditioner field in %s", fnameGradient ); safe_fwrite( io_buffer, sizeof(real), numberOfCells * 12, fgradient, __FILE__, __LINE__ ); print_info("Storing local gradient field in %s", fnamePrecond); safe_fwrite( io_buffer, sizeof(real), numberOfCells * 12, fprecond , __FILE__, __LINE__ ); safe_fclose( fnameGradient, fgradient, __FILE__, __LINE__ ); safe_fclose( fnamePrecond , fprecond , __FILE__, __LINE__ ); } #endif /* end DO_NOT_PERFORM_IO */ break; } case( FM_KERNEL ): { start_t = dtime(); propagate_shot ( FWMODEL, v, s, coeffs, rho, forw_steps, back_steps -1, dt,dz,dx,dy, nz0, nzf, nx0, nxf, ny0, nyf, stacki, shotfolder, io_buffer, dimmz, dimmx, dimmy); end_t = dtime(); print_stats("Forward Modelling finished in %lf seconds", end_t - start_t ); break; } default: { print_error("Invalid propagation identifier"); abort(); } } /* end case */ // liberamos la memoria alocatada en el shot free_memory_shot ( &coeffs, &s, &v, &rho); __free( io_buffer ); }; void gather_shots( char* outputfolder, const real waveletFreq, const int nshots, const int numberOfCells ) { #if defined(DO_NOT_PERFORM_IO) print_info("Warning: we are not gathering the results because the IO is disabled " "for this execution"); #else /* --------- GLOBAL PRECONDITIONER ACCUMULATION --------- */ print_info("Gathering local preconditioner fields"); /* variables for timming */ double start_t, end_t; /* buffers to read and accumulate the fields */ real* sumbuffer = (real*) __malloc( ALIGN_REAL, numberOfCells * sizeof(real) * WRITTEN_FIELDS ); real* readbuffer = (real*) __malloc( ALIGN_REAL, numberOfCells * sizeof(real) * WRITTEN_FIELDS ); start_t = dtime(); /* set buffer positions to zero */ memset ( sumbuffer, 0, numberOfCells * sizeof(real) * WRITTEN_FIELDS ); for( int shot=0; shot < 
nshots; shot++) { char readfilename[300]; sprintf( readfilename, "%s/shot.%2.1f.%05d/precond_%05d.dat", outputfolder, waveletFreq, shot, shot); print_info("Reading preconditioner file '%s'", readfilename ); FILE* freadfile = safe_fopen( readfilename, "rb", __FILE__, __LINE__ ); safe_fread ( readbuffer, sizeof(real), numberOfCells * WRITTEN_FIELDS, freadfile, __FILE__, __LINE__ ); #if defined(_OPENMP) #pragma omp parallel for #endif #if defined(__INTEL_COMPILER) #pragma simd #endif for( int i = 0; i < numberOfCells * WRITTEN_FIELDS; i++) sumbuffer[i] += readbuffer[i]; fclose (freadfile); } char precondfilename[300]; sprintf( precondfilename, "%s/Preconditioner.%2.1f", outputfolder, waveletFreq ); FILE* precondfile = safe_fopen( precondfilename, "wb", __FILE__, __LINE__ ); safe_fwrite ( sumbuffer, sizeof(real), numberOfCells * WRITTEN_FIELDS, precondfile, __FILE__, __LINE__ ); safe_fclose( precondfilename, precondfile, __FILE__, __LINE__ ); end_t = dtime(); print_stats("Gatering process for preconditioner %s (freq %2.1f) " "completed in: %lf seconds", precondfilename, waveletFreq, end_t - start_t ); /* --------- GLOBAL GRADIENT ACCUMULATION --------- */ print_info("Gathering local gradient fields"); start_t = dtime(); /* set buffer positions to zero */ memset ( sumbuffer, 0, numberOfCells * sizeof(real) * WRITTEN_FIELDS ); for( int shot=0; shot < nshots; shot++) { char readfilename[300]; sprintf( readfilename, "%s/shot.%2.1f.%05d/gradient_%05d.dat", outputfolder, waveletFreq, shot, shot); print_info("Reading gradient file %s", readfilename ); FILE* freadfile = safe_fopen( readfilename, "rb", __FILE__, __LINE__ ); safe_fread ( readbuffer, sizeof(real), numberOfCells * WRITTEN_FIELDS, freadfile, __FILE__, __LINE__ ); #if defined(_OPENMP) #pragma omp parallel for #endif #ifdef __INTEL_COMPILER #pragma simd #endif for( int i = 0; i < numberOfCells * WRITTEN_FIELDS; i++) sumbuffer[i] += readbuffer[i]; fclose (freadfile); } char gradientfilename[300]; sprintf( 
gradientfilename, "%s/Gradient.%2.1f", outputfolder, waveletFreq ); FILE* gradientfile = safe_fopen( gradientfilename, "wb", __FILE__, __LINE__ ); safe_fwrite ( sumbuffer, sizeof(real), numberOfCells * WRITTEN_FIELDS, gradientfile, __FILE__, __LINE__ ); safe_fclose( gradientfilename, gradientfile, __FILE__, __LINE__ ); end_t = dtime(); print_stats("Gatering process for gradient %s (freq %2.1f) " "completed in: %lf seconds", precondfilename, waveletFreq, end_t - start_t ); __free( sumbuffer); __free( readbuffer); #endif /* end DO_NOT_PERFORM_IO */ }; int execute_simulation( int argc, char* argv[] ) { #if defined(USE_MPI) MPI_Init ( &argc, &argv ); int mpi_rank; MPI_Comm_rank( MPI_COMM_WORLD, &mpi_rank); #elif !defined(USE_MPI) && defined(_OPENACC) //TODO: fix name int mpi_rank = 0; #endif #if defined(_OPENACC) acc_init(acc_device_default); int gpuid = mpi_rank % acc_get_num_devices( acc_device_default ); acc_set_device_num( gpuid, acc_device_default ); fprintf(stdout, "MPI rank %d with GPU %d (%d)\n", mpi_rank, acc_get_device_num(acc_device_default), acc_get_num_devices(acc_device_default)); #endif /*_OPENACC*/ /* Load parameters from schedule file */ schedule_t s = load_schedule(argv[1]); for(int i=0; i<s.nfreqs; i++) { /* Process one frequency at a time */ real waveletFreq = s.freq[i]; integer stacki = s.stacki[i]; real dt = s.dt[i]; integer forw_steps = s.forws[i]; integer back_steps = s.backs[i]; real dx = s.dx[i]; real dy = s.dy[i]; real dz = s.dz[i]; integer dimmz = s.dimmz[i]; integer dimmx = s.dimmx[i]; integer dimmy = s.dimmy[i]; //integer nworkers = s.nworkers[i]; integer MaxYPlanesPerWorker = s.ppd[i]; print_info("\n------ Computing %d-th frequency (%.2fHz). 
------\n", i, waveletFreq); const integer numberOfCells = dimmz * dimmx * dimmx; const size_t VolumeMemory = numberOfCells * sizeof(real) * 58; print_stats("Local domain size for freq %f [%d][%d][%d] is %lu bytes (%lf GB)", waveletFreq, dimmz, dimmx, dimmy, VolumeMemory, TOGB(VolumeMemory) ); for(int grad=0; grad<s.ngrads; grad++) /* backward iteration */ { print_info("Processing %d-gradient iteration", grad); for(int shot=0; shot<s.nshots; shot++) { char shotfolder[512]; sprintf(shotfolder, "%s/shot.%2.2fHz.%03d", s.outputfolder, waveletFreq, shot); #if defined(USE_MPI) if ( mpi_rank == 0 ) #endif { create_folder( shotfolder ); store_shot_parameters( shot, &stacki, &dt, &forw_steps, &back_steps, &dz, &dx, &dy, &dimmz, &dimmx, &dimmy, &MaxYPlanesPerWorker, s.outputfolder, waveletFreq ); } #if defined(USE_MPI) MPI_Barrier( MPI_COMM_WORLD ); #endif kernel( RTM_KERNEL, waveletFreq, shot, s.outputfolder, shotfolder); print_info("\tGradient loop processed for %d-th shot", shot); } #if defined(USE_MPI) MPI_Barrier( MPI_COMM_WORLD ); #endif for(int test=0; test<s.ntests; test++) { print_info("\tProcessing %d-th test iteration", test); for(int shot=0; shot<s.nshots; shot++) { char shotfolder[512]; sprintf(shotfolder, "%s/test.%05d.shot.%2.2fHz.%03d", s.outputfolder, test, waveletFreq, shot); #if defined(USE_MPI) if ( mpi_rank == 0) #endif { create_folder( shotfolder ); store_shot_parameters( shot, &stacki, &dt, &forw_steps, &back_steps, &dz, &dx, &dy, &dimmz, &dimmx, &dimmy, &MaxYPlanesPerWorker, s.outputfolder, waveletFreq ); } #if defined(USE_MPI) MPI_Barrier( MPI_COMM_WORLD ); #endif kernel( FM_KERNEL , waveletFreq, shot, s.outputfolder, shotfolder); print_info("\t\tTest loop processed for the %d-th shot", shot); } } /* end of test loop */ } /* end of gradient loop */ } /* end of frequency loop */ #if defined(USE_MPI) MPI_Barrier(MPI_COMM_WORLD); MPI_Finalize(); #endif return 0; }
Quadtree.c
#include "Morton.h"
#include "Quadtree.h"

#include <math.h>
#include <mpi.h>
#include <omp.h>

// Private auxiliary functions prototypes

// Compute the centers of mass of the i-th vertex and its children
static void computeCMrec(Quadtree *qt, int cmNo);

// Traverse the multipole tree and accumulate the forces on one target cell
static void computeForcesOnCell(Quadtree *qt, int cellNo, double farFieldLimit, WorkingVecs *wv);

// Public functions

// Initialise the cells of a quadtree of specified height, built on the 2D area [xMin, xMax]*[yMin, yMax]
// containing a total of particles picked randomly in [[nbPartMin, nbPartMax]],
// with masses picked randomly in [[mMin, mMax]]
void initQuadtree(Quadtree *qt, int height, int nbPartMin, int nbPartMax,
                  double mMin, double mMax,
                  double xMin, double xMax, double yMin, double yMax)
{
    qt->height = height;
    qt->xMin = xMin;
    qt->xMax = xMax;
    qt->yMin = yMin;
    qt->yMax = yMax;

    // leaves: 4^(height-1) cells; full tree: (4^height - 1)/3 multipoles
    qt->nbCells = powl(4, height-1);
    qt->cells = (Cell *) malloc(qt->nbCells * sizeof(Cell));
    qt->nbMultipoles = (powl(4, height) - 1) / 3;
    qt->multipoles = (Multipole *) malloc(qt->nbMultipoles * sizeof(Multipole));
    // index of the first leaf-level multipole
    qt->firstOuterCM = qt->nbMultipoles - qt->nbCells;

    long dim = powl(2, height-1);          // cells per side at leaf level
    double dX = (xMax - xMin) / (double)dim;
    double dY = (yMax - yMin) / (double)dim;

    int nbPartPerCellMin = nbPartMin / qt->nbCells;
    int nbPartPerCellMax = nbPartMax / qt->nbCells;

    // cells are stored in Morton (Z-curve) order
    for (int x = 0; x < dim; x++)
        for (int y = 0; y < dim; y++) {
            int cellNo = xy_to_morton(x, y);
            initCell(qt->cells + cellNo, nbPartPerCellMin, nbPartPerCellMax,
                     mMin, mMax, x*dX, (x+1)*dX, y*dY, (y+1)*dY);
        }
}

// Release the ressources associated with the specified quadtree
void freeQuadtree(Quadtree *qt)
{
    for (int i = 0; i < qt->nbCells; i++)
        freeCell(qt->cells + i);
    free(qt->cells);
    free(qt->multipoles);
}

// Compute all the centers of mass of the specified quadtree recursively,
// starting by the lower level ones
void computeMultipoles(Quadtree *qt)
{
    computeCMrec(qt, 0);
}

// Compute the gravitationnal force exerted on each particule of the quadtree
// Note: we consider that a center of mass is in the far field of a cell
// if d/l > farFieldLimit where d is the distance between the cm and the cell
// and l is the width of the area approximated by the cm
void computeForces(Quadtree *qt, double farFieldLimit)
{
    #pragma omp parallel
    {
        WorkingVecs wv;
        initWorkingVecs(&wv);

        #pragma omp for schedule(dynamic, 1)
        for (int cellNo = 0; cellNo < qt->nbCells; cellNo++)
            computeForcesOnCell(qt, cellNo, farFieldLimit, &wv);

        freeWorkingVecs(&wv);
    }
}

// Compute the gravitationnal force exerted on each particule of the quadtree,
// with the leaf cells partitioned evenly across the MPI ranks.
// Same far-field criterion as computeForces.
void computeForcesDistributed(Quadtree *qt, double farFieldLimit)
{
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int nbCellsPerNode = qt->nbCells / size;

    #pragma omp parallel
    {
        WorkingVecs wv;
        initWorkingVecs(&wv);

        // BUGFIX: the upper bound was written (rank+1 * nbCellsPerNode), which
        // evaluates as rank + nbCellsPerNode — each rank processed the wrong
        // slice. The intended bound is (rank+1) * nbCellsPerNode.
        #pragma omp for schedule(dynamic, 1)
        for (int cellNo = rank * nbCellsPerNode; cellNo < (rank + 1) * nbCellsPerNode; cellNo++)
            computeForcesOnCell(qt, cellNo, farFieldLimit, &wv);

        freeWorkingVecs(&wv);
    }
}

// Private auxiliary functions

// Breadth-first walk of the multipole tree for one target cell: far-away
// multipoles contribute via M2P, near inner nodes are expanded, near leaves
// interact particle-to-particle; finally the cell interacts with itself.
static void computeForcesOnCell(Quadtree *qt, int cellNo, double farFieldLimit, WorkingVecs *wv)
{
    int queue[qt->nbMultipoles];   // NOTE(review): large VLA — stack use grows with tree size
    int head = 0, tail = 0;
    queue[tail++] = 0;             // start at the root multipole

    while (head < tail) {
        int cmNo = queue[head++];
        double d = distance(qt->multipoles + cmNo, qt->cells + cellNo);
        double l = qt->multipoles[cmNo].xMax - qt->multipoles[cmNo].xMin;

        // NOTE(review): the header comment states the far-field test as
        // d/l > farFieldLimit, i.e. d > l * farFieldLimit; the code divides
        // instead — confirm which convention farFieldLimit follows.
        if (d > l / farFieldLimit) {
            M2P(qt->multipoles + cmNo, qt->cells + cellNo, wv);
        } else if (cmNo < qt->firstOuterCM) {
            // inner node too close: descend into its four children
            queue[tail++] = 4*cmNo+1;
            queue[tail++] = 4*cmNo+2;
            queue[tail++] = 4*cmNo+3;
            queue[tail++] = 4*cmNo+4;
        } else if (cmNo != qt->firstOuterCM + cellNo) {
            // near leaf (other than the target itself): direct interaction
            P2P_ext(qt->cells + (cmNo - qt->firstOuterCM), qt->cells + cellNo, wv);
        }
    }
    P2P_in(qt->cells + cellNo, wv);
}

// Post-order recursion: leaves take their multipole from the particles (P2M),
// inner nodes merge their four children (M2M).
void computeCMrec(Quadtree *qt, int cmNo)
{
    if (cmNo < qt->firstOuterCM) {
        computeCMrec(qt, 4*cmNo+1);
        computeCMrec(qt, 4*cmNo+2);
        computeCMrec(qt, 4*cmNo+3);
        computeCMrec(qt, 4*cmNo+4);
        M2M(qt->multipoles + cmNo, 4, qt->multipoles + 4*cmNo+1);
    } else {
        P2M(qt->multipoles + cmNo, qt->cells + (cmNo - qt->firstOuterCM));
    }
}
GB_unaryop__ainv_int8_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__ainv_int8_uint16
// op(A') function: GB_tran__ainv_int8_uint16

// C type:   int8_t
// A type:   uint16_t
// cast:     int8_t cij = (int8_t) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: additive inverse (negation)
#define GB_OP(z, x) \
    z = -x ;

// casting: the uint16_t input is first narrowed to int8_t, then negated
#define GB_CASTING(z, x) \
    int8_t z = (int8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT8 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = -((int8_t) aij) element-wise over anz entries, parallelized
// with a static OpenMP schedule across nthreads threads.
GrB_Info GB_unop__ainv_int8_uint16
(
    int8_t *restrict Cx,        // output array, anz entries
    const uint16_t *restrict Ax,// input array, anz entries
    int64_t anz,                // number of entries
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel itself lives in GB_unaryop_transpose.c, which expands
// using the GB_* macros defined above (phase 2 of the two-phase transpose).
GrB_Info GB_tran__ainv_int8_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
esac_derivative.h
/* Based on the DSAC++ code. Copyright (c) 2016, TU Dresden Copyright (c) 2019, Heidelberg University All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the TU Dresden, Heidelberg University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL TU DRESDEN OR HEIDELBERG UNIVERSITY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #define PROB_THRESH 0.001 // ignore hypotheses with low probability for expectations namespace esac { /** * @brief Calculates the Jacobean of the projection function w.r.t the given 3D point, ie. the function has the form 3 -> 1 * @param pt Ground truth 2D location. * @param obj 3D point. * @param rot Rotation in axis-angle format (OpenCV convention) * @param trans Translation vector (OpenCV convention). 
 * @param camMat Calibration matrix of the camera.
 * @param maxReproErr Reprojection errors are clamped to this maximum value.
 * @return 1x3 Jacobean matrix of partial derivatives.
 */
cv::Mat_<double> dProjectdObj(
    const cv::Point2f& pt,
    const cv::Point3f& obj,
    const cv::Mat& rot,
    const cv::Mat& trans,
    const cv::Mat& camMat,
    float maxReproErr)
{
    double f = camMat.at<float>(0, 0);
    double ppx = camMat.at<float>(0, 2);
    double ppy = camMat.at<float>(1, 2);

    //transform point into the camera frame
    cv::Mat objMat = cv::Mat(obj);
    objMat.convertTo(objMat, CV_64F);

    objMat = rot * objMat + trans;

    if(std::abs(objMat.at<double>(2, 0)) < EPS) // prevent division by zero
        return cv::Mat_<double>::zeros(1, 3);

    // project
    double px = f * objMat.at<double>(0, 0) / objMat.at<double>(2, 0) + ppx;
    double py = f * objMat.at<double>(1, 0) / objMat.at<double>(2, 0) + ppy;

    // calculate error
    double err = std::sqrt((pt.x - px) * (pt.x - px) + (pt.y - py) * (pt.y - py));

    // early out if projection error is above threshold
    if(err > maxReproErr)
        return cv::Mat_<double>::zeros(1, 3);

    err += EPS; // avoid dividing by zero

    // derivative in x direction of obj coordinate (quotient rule on the projection)
    double pxdx = f * rot.at<double>(0, 0) / objMat.at<double>(2, 0) - f * objMat.at<double>(0, 0) / objMat.at<double>(2, 0) / objMat.at<double>(2, 0) * rot.at<double>(2, 0);
    double pydx = f * rot.at<double>(1, 0) / objMat.at<double>(2, 0) - f * objMat.at<double>(1, 0) / objMat.at<double>(2, 0) / objMat.at<double>(2, 0) * rot.at<double>(2, 0);
    double dx = 0.5 / err * (2 * (pt.x - px) * -pxdx + 2 * (pt.y - py) * -pydx);

    // derivative in y direction of obj coordinate
    double pxdy = f * rot.at<double>(0, 1) / objMat.at<double>(2, 0) - f * objMat.at<double>(0, 0) / objMat.at<double>(2, 0) / objMat.at<double>(2, 0) * rot.at<double>(2, 1);
    double pydy = f * rot.at<double>(1, 1) / objMat.at<double>(2, 0) - f * objMat.at<double>(1, 0) / objMat.at<double>(2, 0) / objMat.at<double>(2, 0) * rot.at<double>(2, 1);
    double dy = 0.5 / err * (2 * (pt.x - px) * -pxdy + 2 * (pt.y - py) * -pydy);

    // derivative in z direction of obj coordinate
    double pxdz = f * rot.at<double>(0, 2) / objMat.at<double>(2, 0) - f * objMat.at<double>(0, 0) / objMat.at<double>(2, 0) / objMat.at<double>(2, 0) * rot.at<double>(2, 2);
    double pydz = f * rot.at<double>(1, 2) / objMat.at<double>(2, 0) - f * objMat.at<double>(1, 0) / objMat.at<double>(2, 0) / objMat.at<double>(2, 0) * rot.at<double>(2, 2);
    double dz = 0.5 / err * (2 * (pt.x - px) * -pxdz + 2 * (pt.y - py) * -pydz);

    cv::Mat_<double> jacobean(1, 3);
    jacobean(0, 0) = dx;
    jacobean(0, 1) = dy;
    jacobean(0, 2) = dz;

    return jacobean;
}

/**
 * @brief Checks whether the given matrix contains NaN entries.
 * @param m Input matrix.
 * @return True if m contrains NaN entries.
 */
inline bool containsNaNs(const cv::Mat& m)
{
    // NaN != NaN, so (m != m) marks exactly the NaN entries
    return cv::sum(cv::Mat(m != m))[0] > 0;
}

/**
 * @brief Calculates the Jacobean of the PNP function w.r.t. the object coordinate inputs.
 *
 * PNP is treated as a n x 3 -> 6 function, i.e. it takes n 3D coordinates and maps them to a 6D pose.
 * The Jacobean is therefore 6x3n.
 * The Jacobean is calculated using central differences, and hence only suitable for small point sets.
 * For gradients of large points sets, we use an analytical approximation, see the backward function in esac.cpp.
 *
 * @param imgPts List of 2D points.
 * @param objPts List of corresponding 3D points.
 * @param camMat Camera calibration matrix.
 * @param eps Step size for central differences.
 * @return 6x3n Jacobean matrix of partial derivatives.
 */
cv::Mat_<double> dPNP(
    const std::vector<cv::Point2f>& imgPts,
    std::vector<cv::Point3f> objPts,
    const cv::Mat& camMat,
    float eps = 0.001f)
{
    int pnpMethod = (imgPts.size() == 4) ? cv::SOLVEPNP_P3P : cv::SOLVEPNP_ITERATIVE;

    //in case of P3P the 4th point is needed to resolve ambiguities, its derivative is zero
    int effectiveObjPoints = (pnpMethod == cv::SOLVEPNP_P3P) ? 3 : objPts.size();

    cv::Mat_<double> jacobean = cv::Mat_<double>::zeros(6, objPts.size() * 3);
    bool success;

    // central differences: perturb each coordinate of each point by +/- eps
    for(int i = 0; i < effectiveObjPoints; i++)
    for(unsigned j = 0; j < 3; j++)
    {
        if(j == 0) objPts[i].x += eps;
        else if(j == 1) objPts[i].y += eps;
        else if(j == 2) objPts[i].z += eps;

        // forward step
        esac::pose_t fStep;
        success = safeSolvePnP(objPts, imgPts, camMat, cv::Mat(), fStep.first, fStep.second, false, pnpMethod);

        if(!success)
            return cv::Mat_<double>::zeros(6, objPts.size() * 3);

        if(j == 0) objPts[i].x -= 2 * eps;
        else if(j == 1) objPts[i].y -= 2 * eps;
        else if(j == 2) objPts[i].z -= 2 * eps;

        // backward step
        esac::pose_t bStep;
        success = safeSolvePnP(objPts, imgPts, camMat, cv::Mat(), bStep.first, bStep.second, false, pnpMethod);

        if(!success)
            return cv::Mat_<double>::zeros(6, objPts.size() * 3);

        // restore the original coordinate
        if(j == 0) objPts[i].x += eps;
        else if(j == 1) objPts[i].y += eps;
        else if(j == 2) objPts[i].z += eps;

        // gradient calculation
        fStep.first = (fStep.first - bStep.first) / (2 * eps);
        fStep.second = (fStep.second - bStep.second) / (2 * eps);

        fStep.first.copyTo(jacobean.col(i * 3 + j).rowRange(0, 3));
        fStep.second.copyTo(jacobean.col(i * 3 + j).rowRange(3, 6));

        if(containsNaNs(jacobean.col(i * 3 + j)))
            return cv::Mat_<double>::zeros(6, objPts.size() * 3);
    }

    return jacobean;
}

/**
 * @brief Calculates the Jacobean matrix of the function that maps n estimated object coordinates to a score, ie. the function has the form n x 3 -> 1. Returns one Jacobean matrix per hypothesis.
 * @param sceneCoordinates Scene coordinate prediction of each expert (Ex3xHxW).
 * @param hypAssignment 1D tensor specifying the responsible expert for each hypothesis.
 * @param sampling Contains original image coordinate for each scene coordinate predicted.
 * @param sampledPoints Corresponding minimal set for each hypotheses as scene coordinate indices.
 * @param jacobeansScore (output parameter) List of Jacobean matrices. One 1 x 3n matrix per pose hypothesis.
 * @param scoreOutputGradients Gradients w.r.t the score i.e. the gradients of the loss up to the soft inlier count.
 * @param hyps List of RANSAC hypotheses.
 * @param reproErrs Image of reprojection error for each pose hypothesis.
 * @param jacobeanHyps List of jacobean matrices with derivatives of the 6D pose wrt. the reprojection errors.
 * @param hypProbs Selection probabilities over all hypotheses.
 * @param camMat Camera calibration matrix.
 * @param inlierAlpha Alpha parameter for soft inlier counting.
 * @param inlierBeta Beta parameter for soft inlier counting.
 * @param inlierThreshold RANSAC inlier threshold.
 * @param maxReproj Reprojection errors are clamped to this maximum value.
 */
void dScore(
    esac::coord_t& sceneCoordinates,
    esac::hyp_assign_t& hypAssignment,
    const cv::Mat_<cv::Point2i>& sampling,
    const std::vector<std::vector<cv::Point2i>>& sampledPoints,
    std::vector<cv::Mat_<double>>& jacobeansScore,
    const std::vector<double>& scoreOutputGradients,
    const std::vector<esac::pose_t>& hyps,
    const std::vector<cv::Mat_<float>>& reproErrs,
    const std::vector<cv::Mat_<double>>& jacobeansHyps,
    const std::vector<double>& hypProbs,
    const cv::Mat& camMat,
    float inlierAlpha,
    float inlierBeta,
    float inlierThreshold,
    float maxReproErr)
{
    int hypCount = sampledPoints.size();

    // collect 2d-3D correspondences: for each hypothesis, look up the image
    // points of its minimal set and the corresponding predicted 3D scene
    // coordinates of the expert that generated the hypothesis
    std::vector<std::vector<cv::Point2f>> imgPts(hypCount);
    std::vector<std::vector<cv::Point3f>> objPts(hypCount);

    #pragma omp parallel for
    for(int h = 0; h < hypCount; h++)
    {
        // hypotheses with negligible selection probability contribute no gradient
        if(hypProbs[h] < PROB_THRESH) continue;
        int expert = hypAssignment[h];

        for(unsigned i = 0; i < sampledPoints[h].size(); i++)
        {
            int x = sampledPoints[h][i].x;
            int y = sampledPoints[h][i].y;

            imgPts[h].push_back(sampling(y, x));
            objPts[h].push_back(cv::Point3f(
                sceneCoordinates[expert][0][y][x],
                sceneCoordinates[expert][1][y][x],
                sceneCoordinates[expert][2][y][x]));
        }
    }

    // derivatives of the soft inlier scores w.r.t. the reprojection errors
    // (one dense image of derivatives per hypothesis)
    std::vector<cv::Mat_<double>> dReproErrs(reproErrs.size());

    #pragma omp parallel for
    for(int h = 0; h < hypCount; h++)
    {
        if(hypProbs[h] < PROB_THRESH) continue;

        dReproErrs[h] = cv::Mat_<double>::zeros(reproErrs[h].size());

        for(int x = 0; x < sampling.cols; x++)
        for(int y = 0; y < sampling.rows; y++)
        {
            // derivative of the sigmoid soft inlier function, chained with the
            // incoming gradient of the loss w.r.t. the score of hypothesis h
            double softThreshold = inlierBeta * (reproErrs[h](y, x) - inlierThreshold);
            softThreshold = 1 / (1+std::exp(-softThreshold));

            dReproErrs[h](y, x) = -softThreshold * (1 - softThreshold) * inlierBeta * scoreOutputGradients[h];
        }

        // normalization of the soft inlier count by image area, scaled by alpha
        dReproErrs[h] *= inlierAlpha / dReproErrs[h].cols / dReproErrs[h].rows;
    }

    jacobeansScore.resize(hypCount);

    // derivative of the loss wrt the score
    #pragma omp parallel for
    for(int h = 0; h < hypCount; h++)
    {
        // NOTE: cv::Mat_ uses shared data, so writes to jacobean below are
        // visible through jacobeansScore[h] as well
        cv::Mat_<double> jacobean = cv::Mat_<double>::zeros(1, sampling.cols * sampling.rows * 3);
        jacobeansScore[h] = jacobean;

        if(hypProbs[h] < PROB_THRESH) continue;
        int expert = hypAssignment[h];

        // accumulate derivative of score wrt the object coordinates that are used to calculate the pose
        cv::Mat_<double> supportPointGradients = cv::Mat_<double>::zeros(1, 12);

        cv::Mat_<double> dHdO = dPNP(imgPts[h], objPts[h], camMat); // 6x12
        if(esac::getMax(dHdO) > 10) dHdO = 0; // clamping for stability

        cv::Mat rot;
        cv::Rodrigues(hyps[h].first, rot);

        for(int x = 0; x < sampling.cols; x++)
        for(int y = 0; y < sampling.rows; y++)
        {
            // column-major linear index of this pixel within the jacobean
            int ptIdx = x * dReproErrs[h].rows + y;

            cv::Point2f pt(sampling(y, x).x, sampling(y, x).y);
            cv::Point3f obj = cv::Point3f(
                sceneCoordinates[expert][0][y][x],
                sceneCoordinates[expert][1][y][x],
                sceneCoordinates[expert][2][y][x]);

            // account for the direct influence of all object coordinates in the score
            cv::Mat_<double> dPdO = dProjectdObj(pt, obj, rot, hyps[h].second, camMat, maxReproErr);
            dPdO *= dReproErrs[h](y, x);
            dPdO.copyTo(jacobean.colRange(
                x * dReproErrs[h].rows * 3 + y * 3,
                x * dReproErrs[h].rows * 3 + y * 3 + 3));

            // account for the indirect influence of the object coordinates that are used to calculate the pose
            cv::Mat_<double> dPdH = jacobeansHyps[h].row(ptIdx);
            supportPointGradients += dReproErrs[h](y, x) * dPdH * dHdO;
        }

        // add the accumulated derivatives for the object coordinates that are used to calculate the pose
        for(unsigned i = 0; i < sampledPoints[h].size(); i++)
        {
            unsigned x = sampledPoints[h][i].x;
            unsigned y = sampledPoints[h][i].y;

            jacobean.colRange(x * dReproErrs[h].rows * 3 + y * 3, x * dReproErrs[h].rows * 3 + y * 3 + 3) += supportPointGradients.colRange(i * 3, i * 3 + 3);
        }
    }
}

/**
 * @brief Calculates the Jacobean matrix of the function that maps n estimated object coordinates to a soft max score, ie. the function has the form n x 3 -> 1. Returns one Jacobean matrix per hypothesis.
 *
 * This is the Soft maxed version of dScore (see above).
 *
 * @param sceneCoordinates Scene coordinate prediction of each expert (Ex3xHxW).
 * @param hypAssignment 1D tensor specifying the responsible expert for each hypothesis.
 * @param sampling Contains original image coordinate for each scene coordinate predicted.
 * @param sampledPoints Corresponding minimal set for each hypotheses as scene coordinate indices.
 * @param losses Loss value for each hypothesis.
 * @param hypProbs Selection probabilities over all hypotheses.
 * @param initHyps List of unrefined hypotheses.
 * @param initReproErrs List of reprojection error images of unrefined hypotheses.
 * @param jacobeanHyps List of jacobean matrices with derivatives of the 6D pose wrt. the reprojection errors.
 * @param camMat Camera calibration matrix.
 * @param inlierAlpha Alpha parameter for soft inlier counting.
 * @param inlierBeta Beta parameter for soft inlier counting.
 * @param inlierThreshold RANSAC inlier threshold.
 * @param maxReproj Reprojection errors are clamped to this maximum value.
 * @return List of Jacobean matrices. One 1 x 3n matrix per pose hypothesis.
 */
std::vector<cv::Mat_<double>> dSMScore(
    esac::coord_t& sceneCoordinates,
    esac::hyp_assign_t& hypAssignment,
    const cv::Mat_<cv::Point2i>& sampling,
    const std::vector<std::vector<cv::Point2i>>& sampledPoints,
    const std::vector<double>& losses,
    const std::vector<double>& hypProbs,
    const std::vector<esac::pose_t>& initHyps,
    const std::vector<cv::Mat_<float>>& initReproErrs,
    const std::vector<cv::Mat_<double>>& jacobeansHyps,
    const cv::Mat& camMat,
    float inlierAlpha,
    float inlierBeta,
    float inlierThreshold,
    float maxReproErr)
{
    // assemble the gradients wrt the scores, ie the gradients of soft max function
    // (standard softmax Jacobian: p_i * (loss_i - sum_j p_j * loss_j))
    std::vector<double> scoreOutputGradients(sampledPoints.size());

    #pragma omp parallel for
    for(unsigned i = 0; i < sampledPoints.size(); i++)
    {
        if(hypProbs[i] < PROB_THRESH) continue;

        scoreOutputGradients[i] = hypProbs[i] * losses[i];
        for(unsigned j = 0; j < sampledPoints.size(); j++)
            scoreOutputGradients[i] -= hypProbs[i] * hypProbs[j] * losses[j];
    }

    // calculate gradients of the score function
    std::vector<cv::Mat_<double>> jacobeansScore;
    dScore(sceneCoordinates, hypAssignment, sampling, sampledPoints, jacobeansScore, scoreOutputGradients, initHyps, initReproErrs, jacobeansHyps, hypProbs, camMat, inlierAlpha, inlierBeta, inlierThreshold, maxReproErr);

    // data conversion
    #pragma omp parallel for
    for(unsigned i = 0; i < jacobeansScore.size(); i++)
    {
        // reorder to points row first into rows (1 x 3n -> n x 3 layout)
        cv::Mat_<double> reformat = cv::Mat_<double>::zeros(sampling.cols * sampling.rows, 3);

        if(hypProbs[i] >= PROB_THRESH)
        {
            for(int x = 0; x < sampling.cols; x++)
            for(int y = 0; y < sampling.rows; y++)
            {
                cv::Mat_<double> patchGrad = jacobeansScore[i].colRange(
                    x * sampling.rows * 3 + y * 3,
                    x * sampling.rows * 3 + y * 3 + 3);

                patchGrad.copyTo(reformat.row(y * sampling.cols + x));
            }
        }

        jacobeansScore[i] = reformat;
    }

    return jacobeansScore;
}
} // end of enclosing namespace (opened before this chunk)
dgetrs.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgetrs.c, normal z -> d, Fri Sep 28 17:38:06 2018
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 * Solves a system of linear equations A*X = B using the LU factorization
 * with partial pivoting previously computed by dgetrf (pivots in ipiv).
 * LAPACK-layout entry point: converts to tile layout, runs the async tile
 * routine, and converts the solution back in place of pB.
 *
 * @param n     Order of the matrix A (n >= 0).
 * @param nrhs  Number of right-hand sides (nrhs >= 0).
 * @param pA    The factors L and U from dgetrf, LAPACK layout, lda-by-n.
 * @param lda   Leading dimension of pA (lda >= max(1, n)).
 * @param ipiv  Pivot indices from dgetrf.
 * @param pB    On entry the right-hand sides; on exit the solution X.
 * @param ldb   Leading dimension of pB (ldb >= max(1, n)).
 * @return PlasmaSuccess on success, a negative argument index or a PLASMA
 *         error code otherwise.
 ******************************************************************************/
int plasma_dgetrs(int n, int nrhs,
                  double *pA, int lda, int *ipiv,
                  double *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments (argument numbering matches the public signature).
    if (n < 0) {
        plasma_error("illegal value of n");
        return -1;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -2;
    }
    if (lda < imax(1, n)) {
        plasma_error("illegal value of lda");
        return -4;
    }
    if (ldb < imax(1, n)) {
        plasma_error("illegal value of ldb");
        return -7;
    }

    // quick return
    if (imin(n, nrhs) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_trsm(plasma, PlasmaRealDouble, n, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Initialize barrier.
    plasma_barrier_init(&plasma->barrier);

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        n, n, 0, 0, n, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        n, nrhs, 0, 0, n, nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        // Fix: A was successfully created above and must be released on this
        // error path; the original code leaked it.
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        // Fix: the return value was previously assigned but never checked.
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        // Fix: the return value was previously assigned but never checked.
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_dge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_dge2desc(pB, ldb, B, &sequence, &request);

        // Call the tile async function.
        plasma_omp_dgetrs(A, ipiv, B, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_ddesc2ge(B, pB, ldb, &sequence, &request);
    }

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 * Asynchronous tile-layout variant of dgetrs: applies the row interchanges
 * to B, then the L (unit lower) and U (non-unit upper) triangular solves.
 * Errors are reported through the sequence/request pair; the routine itself
 * returns no value. Non-blocking: completion is tied to the sequence.
 *
 * @param A        Tile descriptor holding the LU factors.
 * @param ipiv     Pivot indices from the factorization.
 * @param B        Tile descriptor; right-hand sides on entry, solution on exit.
 * @param sequence Sequence the task group is attached to.
 * @param request  Request handle identifying this call within the sequence.
 ******************************************************************************/
void plasma_omp_dgetrs(plasma_desc_t A, int *ipiv, plasma_desc_t B,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        plasma_error("invalid A");
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        plasma_error("invalid B");
        return;
    }
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.n == 0 || B.n == 0)
        return;

    // Call the parallel functions: pivot swaps, then L solve, then U solve.
    plasma_pdgeswp(PlasmaRowwise, B, ipiv, 1, sequence, request);

    plasma_pdtrsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaUnit,
                  1.0, A, B,
                  sequence, request);

    plasma_pdtrsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans, PlasmaNonUnit,
                  1.0, A, B,
                  sequence, request);
}
ten_tusscher_2004_epi_S3_17.c
// Original Ten Tusscher 2004 epicardial cell model, variant S3_17 with a
// tuned parameter set (see the `parameters` array in RHS_cpu).
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S3_17.h"

// Reports model metadata to the caller: resting potential and the number of
// ODE state variables, depending on which flags are set.
GET_CELL_MODEL_DATA(init_cell_model_data) {

    assert(cell_model);

    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;

    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;

}

//TODO: this should be called only once for the whole mesh, like in the GPU code
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {

    // Default initial conditions
/*
    sv[0] = INITIAL_V;  // V;       millivolt
    sv[1] = 0.f;        //M
    sv[2] = 0.75;       //H
    sv[3] = 0.75f;      //J
    sv[4] = 0.f;        //Xr1
    sv[5] = 1.f;        //Xr2
    sv[6] = 0.f;        //Xs
    sv[7] = 1.f;        //S
    sv[8] = 0.f;        //R
    sv[9] = 0.f;        //D
    sv[10] = 1.f;       //F
    sv[11] = 1.f;       //FCa
    sv[12] = 1.f;       //G
    sv[13] = 0.0002;    //Cai
    sv[14] = 0.2f;      //CaSR
    sv[15] = 11.6f;     //Nai
    sv[16] = 138.3f;    //Ki
*/

    // Elnaz's steady-state initial conditions
    real sv_sst[]={-86.2817677225133,0.00137140396437284,0.772587672944659,0.772430179046282,0.000182109741885854,0.482109136522644,0.00300090517076632,0.999998250712446,2.02804859373247e-08,1.96469800392109e-05,0.999772201420590,1.00677807083400,0.999988516545875,5.25655559527482e-05,0.711143243815226,10.8158384856210,138.647095599922};
    for (uint32_t i = 0; i < NEQ; i++)
        sv[i] = sv_sst[i];
}

// Advances every requested cell by num_steps explicit steps of size dt.
// Cells are independent, so the outer loop is parallelized with OpenMP;
// sv_id is private per thread.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {

    uint32_t sv_id;

    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {

        // cells_to_solve == NULL means "solve all cells in order"
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;

        for (int j = 0; j < num_steps; ++j) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// Performs one time step for a single cell: copies the state, evaluates the
// model update (RHS_cpu returns the NEW state values, not derivatives — the
// gates use a Rush-Larsen-style exponential update) and writes it back.
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {

    assert(sv);

    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// Evaluates one update step of the Ten Tusscher 2004 epicardial model.
// Inputs: sv = current state (17 variables), stim_current = applied stimulus,
// dt = step size. Output: rDY_ = updated state after one step.
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {

    // State variables
    real svolt = sv[0];   // membrane potential (mV)
    real sm    = sv[1];   // INa activation gate
    real sh    = sv[2];   // INa fast inactivation gate
    real sj    = sv[3];   // INa slow inactivation gate
    real sxr1  = sv[4];   // IKr activation gate
    real sxr2  = sv[5];   // IKr inactivation gate
    real sxs   = sv[6];   // IKs activation gate
    real ss    = sv[7];   // Ito inactivation gate
    real sr    = sv[8];   // Ito activation gate
    real sd    = sv[9];   // ICaL activation gate
    real sf    = sv[10];  // ICaL voltage inactivation gate
    real sfca  = sv[11];  // ICaL calcium inactivation gate
    real sg    = sv[12];  // CICR release gate
    real Cai   = sv[13];  // intracellular Ca2+
    real CaSR  = sv[14];  // SR Ca2+
    real Nai   = sv[15];  // intracellular Na+
    real Ki    = sv[16];  // intracellular K+

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    ///#ifdef EPI
    real Gks=0.245;
    ///#endif
    ///#ifdef ENDO
    ///    real Gks=0.245;
    ///#endif
    ///#ifdef MCELL
    ///    real Gks=0.062;
    ///#endif
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    //#ifdef EPI
    real Gto=0.294;
    //#endif
    //#ifdef ENDO
    //    real Gto=0.073;
    //#endif
    //#ifdef MCELL
    //    real Gto=0.294;
    ///#endif
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Tuned conductances/parameters for this S3_17 variant; they override
    // the published defaults above.
    real parameters []={14.8787679496037,0.000184974932693465,0.000147863814822398,0.000360245525368188,0.258799403388170,0.146960949455741,0.224671224629348,4.89938753922066,0.0140136722925207,1.16741637564006,1090.76619018721,0.000491609263870709,0.206306719137950,0.0184484367075688,0.00104985172607928,4.11251651262994e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    // Working variables: membrane currents, fluxes, reversal potentials and
    // gate steady states / time constants.
    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
    //real BufferFactorc;
    //real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    //real BufcKbufc=Bufc*Kbufc;
    //real Kbufcsquare=Kbufc*Kbufc;
    //real Kbufc2=2*Kbufc;
    //real BufsrKbufsr=Bufsr*Kbufsr;
    //const real Kbufsrsquare=Kbufsr*Kbufsr;
    //const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents: Nernst reversal potentials
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    ///Ileak=0.00008f*(CaSR-Cai);
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    // SR calcium with analytic buffering (solve the buffering quadratic)
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    // cytosolic calcium with analytic buffering
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;

    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;

    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    // h and j gates have different rate formulas above/below -40 mV
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // Ito gating differs by cell type; the build defines EPI/ENDO/MCELL
#ifdef EPI
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (Rush-Larsen exponential integration)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // fCa and g may only move toward their steady state while depolarized
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (forward Euler; rDY_[0] holds the NEW voltage)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;

}
mat.h
#pragma once #include <iostream> #include <iomanip> #include <string> #include <algorithm> #include <vector> #include <fstream> #include <sstream> #include <map> #include <set> #include <cstdio> #include <cstdlib> #include <cstring> #include <cmath> #include <cfloat> #include <cassert> #include "config.h" #include "utils.h" #include "timer.h" using namespace std; /* ------------------- Sparse and dense matrix and vector resources ---------------------- */ typedef vector<int> VecI; typedef vector<float> VecF; typedef vector<_double> VecD; typedef vector<pairII> VecII; typedef vector<pairIF> VecIF; typedef vector<bool> VecB; typedef vector<string> VecS; /* ------------------- Helper functions Begin -----------------------*/ template <typename T> T* getDeepCopy(T* arr, int size) { T* new_arr = new T[size]; for(int i = 0; i < size; ++i) new_arr[i] = arr[i]; return new_arr; } template <typename T> T* getDeepCopy( vector<T>& arr ) { int size = arr.size(); T* new_arr = new T[size]; for(int i = 0; i < size; ++i) new_arr[i] = arr[i]; return new_arr; } /* ------------------- Helper functions End -----------------------*/ template <typename T> class SMat // a column-major sparse matrix of type T { public: bool contiguous = false; int nc = 0; int nr = 0; vector<int> size; vector<pair<int,T>*> data; pair<int,T>* cdata = NULL; vector<int> col_indices; bool owns_data = true; vector<unordered_map<int, int>> index_hash; vector<pairII> index; bool indexed = false; SMat( bool contiguous = false ) : contiguous(contiguous) { } SMat( int nr, int nc, bool contiguous = false ) : contiguous(contiguous), nr(nr), nc(nc) { size.resize(nc, 0); data.resize(nc, NULL); } SMat( int nr, int nc, _ullint nnz, bool contiguous = false ) : contiguous(contiguous), nr(nr), nc(nc) { size.resize(nc, 0); data.resize(nc, NULL); if( contiguous ) cdata = new pair<int,T>[ nnz ]; else cdata = NULL; } SMat(SMat<T>* mat, bool deep_copy = true, bool mask = false, VecI active_cols = VecI(), bool shrink = true) { // 
assumption : active_cols is sorted // NOTE : if mask is true then only columns present in active_cols will be set in new matrix if(not mask) { active_cols.resize(mat->nc); iota(begin(active_cols), end(active_cols), 0); } nc = (shrink ? active_cols.size() : mat->nc); nr = mat->nr; size.resize(nc, 0); data.resize(nc, NULL); owns_data = deep_copy; for(int i = 0; i < active_cols.size(); ++i) { int col = active_cols[i]; int new_col = (shrink ? i : col); size[new_col] = mat->size[col]; if(deep_copy) data[new_col] = getDeepCopy(mat->data[col], mat->size[col]); else data[new_col] = mat->data[col]; } } friend istream& operator>>( istream& fin, SMat<T>& mat ) { typedef pair<int,T> pairIT; string line; getline(fin, line); std::istringstream iss(line); iss >> mat.nc >> mat.nr; mat.size.resize(mat.nc, 0); mat.data.resize(mat.nc, NULL); int col_no = 0; char colon; for( int lin=0; lin<mat.nc; lin++ ) { getline(fin, line); std::istringstream iss(line); int label_id; T label_score; vector<pairIT> scores; while(iss >> label_id >> colon >> label_score) scores.push_back(pairIF(label_id, label_score)); // sort to allow, mats with unsorted columns sort(scores.begin(), scores.end()); mat.size[col_no] = scores.size(); mat.data[col_no] = getDeepCopy(scores.data(), scores.size()); col_no++; if(col_no > mat.nc) break; } return fin; } SMat(string fname) { contiguous = false; this->load(fname); } // For reading in Scope/Aether SMat( string fname, int num_row ) { contiguous = false; check_valid_filename(fname, true); ifstream fin; fin.open(fname); int col_index; vector<int> inds; vector<T> vals; int max_row_index = -1; int capacity = 1; string line; int i = 0; size.resize(capacity); data.resize(capacity); col_indices.resize(capacity); while( getline( fin, line ) ) { line += "\n"; inds.clear(); vals.clear(); int pos = 0; int next_pos; next_pos = line.find_first_of( "\t", pos ); string indstr = line.substr( pos, next_pos-pos ); col_index = stoi( indstr ); pos = next_pos+1; 
while(next_pos=line.find_first_of(": \n",pos)) { if((size_t)next_pos==string::npos) break; string indstr = line.substr(pos,next_pos-pos); if( indstr=="" ) break; int ind = stoi(indstr); pos = next_pos+1; next_pos = line.find_first_of(": \n",pos); if((size_t)next_pos==string::npos) break; string valstr = line.substr(pos,next_pos-pos); float val = stof( valstr ); pos = next_pos+1; if( num_row != -1 ) { if( ind >= num_row ) continue; } else { max_row_index = ind>max_row_index ? ind : max_row_index; } inds.push_back( ind ); vals.push_back( val ); } assert(inds.size()==vals.size()); //assert(inds.size()==0 || inds[inds.size()-1]<nr); if( i == capacity-1 ) { int new_capacity = 2*capacity; size.resize(new_capacity, 0); data.resize(new_capacity, NULL); col_indices.resize(new_capacity, 0); capacity = new_capacity; } col_indices[i] = col_index; size[i] = inds.size(); data[i] = new pair<int,T>[inds.size()]; for(int j=0; j<size[i]; j++) { data[i][j].first = inds[j]; data[i][j].second = (T)vals[j]; } i++; } if( num_row == -1 ) nr = max_row_index+1; else nr = num_row; nc = i; size.resize(nc, 0); data.resize(nc, NULL); col_indices.resize(nc, 0); fin.close(); } /*------------- Indexed implementation ------------*/ // NOTE : only use for frozen matrix, updating matrix after building index will corrupt index void build_index() { if(not indexed) { reset_index(); sort_indices(); int cnt = 0; for(int i = 0; i < nc; ++i) { for(int j = 0; j < size[i]; ++j) { index[cnt] = pairII(i, j); index_hash[i][data[i][j].first] = cnt++; } } indexed = true; } } void clear_index() { index.clear(); index_hash.clear(); indexed = false; } void reset_index() { clear_index(); index.resize(get_nnz(), pairII(0, 0)); index_hash.resize(nc, unordered_map<int, int>()); } bool exists(int col, int row) { assertmesg(indexed, "exists() : index not built" ); return (index_hash[col].count(row) > 0); } T get_val(int col, int row, T default_val = T()) { if(exists(col, row)) return 
data[col][index[index_hash[col][row]].second].second; return default_val; } T get_index_val(int ind, T default_val = T()) { assertmesg((ind <= index.size()), "get_coo() : index doesn't exist" ); int col = index[ind].first; int val = data[col][index[ind].second].second; return val; } int get_index(int col, int row, int default_val = -1) { if(exists(col, row)) return index_hash[col][row]; return default_val; } pairII get_coo(int ind) { assertmesg((ind <= index.size()), "get_coo() : index doesn't exist" ); int col = index[ind].first; int row = data[col][index[ind].second].first; return pairII(col, row); } SMat<T>* slice_mat_index(const VecI& active_inds) { SMat<T>* new_mat = new SMat<T>(nr, nc); vector<vector<pair<int, T>>> vec(nc); for(auto ind : active_inds) { pairII coo = get_coo(ind); vec[coo.first].push_back(pair<int, T>(coo.second, get_index_val(ind))); } for(int i = 0; i < nc; ++i) { new_mat->size[i] = vec[i].size(); if(vec[i].size()) new_mat->data[i] = getDeepCopy(vec[i]); } new_mat->sort_indices(); return new_mat; } /*---------- END Indexed implementation ----------*/ vector<T> get_val() { vector<T> vals; for(int i = 0; i < nc; ++i) for(int j = 0; j < size[i]; ++j) vals.push_back(data[i][j].second); return vals; } void fill_col(int col, VecIF vec) { if(size[col] > 0) delete[] data[col]; size[col] = vec.size(); data[col] = getDeepCopy(vec); } int retain_topk_helper(pair<int, T>*& vec, int siz, int k) { if(siz > k) { sort(vec, vec+siz, comp_pair_by_second_desc<int, T>); Realloc(siz, k, vec); return k; } return siz; } void retain_topk(int k) { for(int i = 0; i < nc; ++i) { int siz = retain_topk_helper(data[i], size[i], k); size[i] = siz; } } void retain_randomk(int k) { for(int i = 0; i < nc; ++i) { random_shuffle(data[i], data[i]+size[i]); int siz = min(size[i], k); Realloc(size[i], siz, data[i]); size[i] = siz; } } void filter(SMat<T>* filter_mat) { DenseSVec temp(nr, 0); for(int x = 0; x < nc; ++x) { temp.add(filter_mat->data[x], filter_mat->size[x]); VecIF 
scores; for(int j = 0; j < size[x]; ++j) { if(not temp.touched[data[x][j].first]) scores.push_back(data[x][j]); } sort(scores.begin(), scores.end()); delete[] data[x]; data[x] = getDeepCopy(scores); size[x] = scores.size(); temp.reset(); } } void addCol(pair<int, T>* new_col, int new_col_size, bool deep_copy = true) { // TODO : write assumption size.push_back(new_col_size); nc += 1; data.push_back(NULL); if(deep_copy) { data[nc - 1] = getDeepCopy(new_col, new_col_size); } else { data[nc - 1] = new_col; owns_data = false; } } void addCol( vector< pair<int, T> >& new_col ) { int new_col_size = new_col.size(); // TODO : write assumption size.push_back(new_col_size); nc += 1; data.push_back(NULL); // only deep_copy allowed data[nc - 1] = getDeepCopy( new_col ); } SMat<T>* hstack(SMat<T>* mat) { SMat<T>* new_mat = new SMat<T>(this); for(int i = 0; i < mat->nc; ++i) new_mat->addCol(mat->data[i], mat->size[i]); return new_mat; } void reindex_rows(int _nr, VecI& rows ) { nr = _nr; for( int i=0; i < nc; i++ ) { for( int j=0; j < size[i]; j++ ) data[i][j].first = rows[ data[i][j].first ]; } } void reindex_cols(int _nc, VecI& cols ) { VecI new_size(_nc); vector<pairIF*> new_data(_nc, NULL); for( int i=0; i < nc; i++ ) { new_data[cols[i]] = data[i]; new_size[cols[i]] = size[i]; } size = new_size; data = new_data; nc = _nc; } bool empty() { for(int i = 0; i < nc; i++) if(size[i] > 0) return false; return true; } _ullint get_nnz() { _ullint nnz = 0; for( int i=0; i<nc; i++ ) nnz += size[i]; return nnz; } float get_ram() { // TODO : verify float ram = sizeof( SMat<T> ); ram += sizeof( int ) * nc; for( int i=0; i<nc; i++ ) ram += sizeof( pair<int,T> ) * size[i]; return ram; } SMat<T>* transpose() { SMat<T>* tmat = new SMat<T>(nc, nr); for(int i=0; i<nc; i++) { for(int j=0; j<size[i]; j++) { tmat->size[data[i][j].first]++; } } for(int i=0; i<tmat->nc; i++) { tmat->data[i] = new pair<int,T>[tmat->size[i]]; } vector<int> count(tmat->nc, 0); for(int i=0; i<nc; i++) { for(int j=0; 
j<size[i]; j++) { int ind = data[i][j].first; T val = data[i][j].second; tmat->data[ind][count[ind]].first = i; tmat->data[ind][count[ind]].second = val; count[ind]++; } } return tmat; } void in_place_transpose() { vector<VecIF> tdata(nr); for(int i = 0; i < nc; ++i) { for(int j = 0; j < size[i]; ++j) tdata[data[i][j].first].push_back(pairIF(i, data[i][j].second)); delete[] data[i]; } swap(nr, nc); size.clear(); size.resize(nc, 0); data.clear(); data.resize(nc, NULL); for(int i = 0; i < nc; ++i) { sort(tdata[i].begin(), tdata[i].end()); size[i] = tdata[i].size(); data[i] = getDeepCopy(tdata[i]); tdata[i].clear(); } tdata.clear(); } void threshold( float th, bool ignore_sign=true ) { for( int i=0; i<nc; i++ ) { int count = 0; for( int j=0; j < size[i]; j++ ) count += (fabs(data[i][j].second) > th); pair<int,T>* newvec = new pair<int,T>[count]; count = 0; for( int j=0; j<size[i]; j++ ) { int id = data[i][j].first; T val = data[i][j].second; T comp_val = ignore_sign ? fabs(val) : val; if( comp_val > th ) newvec[ count++ ] = make_pair( id, val ); } size[i] = count; delete [] data[i]; data[i] = newvec; } } void unit_normalize_columns( string norm_type="L2" ) { if( norm_type=="L2" ) { for(int i=0; i<nc; i++) { T normsq = 0; for(int j=0; j<size[i]; j++) normsq += SQ(data[i][j].second); normsq = sqrt(normsq); if(normsq==0) normsq = 1; for(int j=0; j<size[i]; j++) data[i][j].second /= normsq; } } else if( norm_type=="L1" ) { for(int i=0; i<nc; i++) { T norm = 0; for(int j=0; j<size[i]; j++) norm += fabs(data[i][j].second); if(norm==0) norm = 1; for(int j=0; j<size[i]; j++) data[i][j].second /= norm; } } else if( norm_type=="max" ) { for(int i=0; i<nc; i++) { T norm = -1e8; for(int j=0; j<size[i]; j++) norm = max(norm, data[i][j].second); if(abs(norm) < 1e-8) norm = 1; for(int j=0; j<size[i]; j++) data[i][j].second /= norm; } } } vector<T> column_norms() { vector<T> norms(nc,0); for(int i=0; i<nc; i++) { T normsq = 0; for(int j=0; j<size[i]; j++) normsq += 
SQ(data[i][j].second); norms[i] = sqrt(normsq); } return norms; } ~SMat() { if( contiguous ) { if(owns_data) if(cdata) delete [] cdata; } else { if(owns_data) for( int i=0; i<nc; i++ ) if(data[i]) delete [] data[i]; } } friend ostream& operator<<( ostream& fout, const SMat<T>& mat ) { int nc = mat.nc; int nr = mat.nr; fout << nc << " " << nr << endl; for(int i=0; i<nc; i++) { for(int j=0; j<mat.size[i]; j++) { if(j==0) fout << mat.data[i][j].first << ":" << mat.data[i][j].second; else fout << " " << mat.data[i][j].first << ":" << mat.data[i][j].second; } fout<<endl; } return fout; } void write( string fname, int precision=6 ) { check_valid_filename(fname,false); ofstream fout; fout.open(fname); fout << fixed << setprecision( precision ); fout << (*this); fout.close(); } void write_scope( string fname, int precision=3 ) { check_valid_filename(fname,false); ofstream fout; fout.open(fname); fout << fixed << setprecision( precision ); for( int i=0; i<nc; i++ ) { fout << col_indices[i] << "\t"; for( int j=0; j<size[i]; j++ ) if( j==0 ) fout << data[i][j].first << ":" << data[i][j].second; else fout << " " << data[i][j].first << ":" << data[i][j].second; fout << "\n"; } fout.close(); } void add(SMat<T>* smat, float coeff=1.0 ) { if(nc != smat->nc || nr != smat->nr) { cerr << "SMat::add : Matrix dimensions do not match" << endl; cerr << "Matrix 1: " << nc << " x " << nr <<endl; cerr << "Matrix 2: " << smat->nc << " x " << smat->nr << endl; exit(1); } vector<bool> ind_mask(nr, 0); vector<T> sum(nr, 0); for(int i=0; i < nc; i++) { vector<int> inds; for(int j=0; j < size[i]; j++) { int ind = data[i][j].first; T val = data[i][j].second; sum[ind] += val; if(!ind_mask[ind]) { ind_mask[ind] = true; inds.push_back(ind); } } for(int j=0; j < smat->size[i]; j++) { int ind = smat->data[i][j].first; T val = smat->data[i][j].second; sum[ind] += coeff*val; if(!ind_mask[ind]) { ind_mask[ind] = true; inds.push_back(ind); } } sort(inds.begin(), inds.end()); Realloc(size[i], inds.size(), 
data[i]); for(int j=0; j<inds.size(); j++) { int ind = inds[j]; data[i][j] = make_pair(ind,sum[ind]); ind_mask[ind] = false; sum[ind] = 0; } size[i] = inds.size(); } } void diff(SMat<T>* smat) { if(nc != smat->nc || nr != smat->nr) { cerr << "SMat::add : Matrix dimensions do not match" << endl; cerr << "Matrix 1: " << nc << " x " << nr <<endl; cerr << "Matrix 2: " << smat->nc << " x " << smat->nr << endl; exit(1); } vector<bool> ind_mask(nr, 0); vector<T> sum(nr, 0); for(int i=0; i < nc; i++) { vector<int> inds; for(int j=0; j < size[i]; j++) { int ind = data[i][j].first; T val = data[i][j].second; sum[ind] += val; if(!ind_mask[ind]) { ind_mask[ind] = true; inds.push_back(ind); } } for(int j=0; j < smat->size[i]; j++) { int ind = smat->data[i][j].first; T val = smat->data[i][j].second; sum[ind] -= val; if(!ind_mask[ind]) { ind_mask[ind] = true; inds.push_back(ind); } } sort(inds.begin(), inds.end()); Realloc(size[i], inds.size(), data[i]); for(int j=0; j<inds.size(); j++) { int ind = inds[j]; data[i][j] = make_pair(ind,sum[ind]); ind_mask[ind] = false; sum[ind] = 0; } size[i] = inds.size(); } } void prod_helper( int siz, pair<int,T>* dat, vector<int>& indices, vector<T>& sum, string agg_type = "sum" ) { for( int j=0; j<siz; j++ ) { int ind = dat[j].first; T prodval = dat[j].second; for(int k=0; k<size[ind]; k++) { int id = data[ind][k].first; T val = data[ind][k].second; if(sum[id]==0) indices.push_back(id); if( agg_type=="sum" ) sum[id] += val*prodval; else if( agg_type=="max" ) sum[id] = max( sum[id], val*prodval ); else if (agg_type=="mean") sum[id] += ((val*prodval)/siz); } } sort(indices.begin(), indices.end()); } // Returns sparse product matrix by retaining only top k highest scoring rows of (*this) for every column in mat2 if k > -1 else returns just the product SMat<T>* prod(SMat<T>* mat2, int k = -1, float th = -0.1, string agg_type = "sum", int num_thread = 1, bool pp = false, string ret_type = "val") { bool retain = ( k > -1 || th > -1e-5 ); bool 
use_k=false, use_th=false; if( k>-1 ) use_k = true; else if( th>-1e-5 ) use_th = true; int dim1 = nr; int dim2 = mat2->nc; string mesg = "ERROR: dimension mismatch in 'prod': " + to_string(nr) + "x" + to_string(nc) + " , " + to_string(mat2->nr) + "x" + to_string(mat2->nc); assertmesg(nc==mat2->nr, mesg); if(pp) { LOGN("prod called with k : " << k << " th : " << th << " num thread : " << num_thread << " lmat : (" << nr << ", " << nc << ") rmat : (" << mat2->nr << ", " << mat2->nc << ")" << ", agg type : " << agg_type); } SMat<T>* prodmat = new SMat<T>(dim1, dim2); TQDM tqdm(dim2, dim2/1000); #pragma omp parallel num_threads(num_thread) { vector<T> sum(dim1,0); #pragma omp for for(int i=0; i<dim2; i++) { if(pp) tqdm.step(); vector<int> indices; prod_helper( mat2->size[i], mat2->data[i], indices, sum, agg_type ); int siz = indices.size(); prodmat->size[i] = siz; prodmat->data[i] = new pair<int,T>[siz]; for(int j=0; j<indices.size(); j++) { int id = indices[j]; T val = sum[id]; prodmat->data[i][j] = make_pair(id,val); sum[id] = 0; } if(retain) { if(ret_type.compare("abs") == 0) { sort( prodmat->data[i], prodmat->data[i]+prodmat->size[i], [](const pair<int, T> & a, const pair<int, T> & b) -> bool { return abs(a.second) > abs(b.second); } ); } else { sort( prodmat->data[i], prodmat->data[i]+prodmat->size[i], comp_pair_by_second_desc<int, T> ); } int retk=0; if( use_k ) retk = min( k, prodmat->size[i] ); else if( use_th ) { float norm = 0; for( int j=0; j<prodmat->size[i]; j++ ) norm += SQ( prodmat->data[i][j].second ); norm = sqrt( norm ); for( int j=0; j<prodmat->size[i]; j++ ) { if( prodmat->data[i][j].second < th*norm ) break; retk++; } //cout << "\tretk: " << retk << endl; } Realloc( prodmat->size[i], retk, prodmat->data[i] ); sort( prodmat->data[i], prodmat->data[i]+retk, comp_pair_by_first<int, T>); prodmat->size[i] = retk; } } } return prodmat; } // Returns sparse product matrix by retaining only top k highest scoring rows of (*this) for every column in mat2 if k 
> -1 else returns just the product SMat<T>* irmprod(SMat<T>* mat2, int k = -1, float th = -0.1) { bool retain = ( k > -1 || th > -1e-5 ); bool use_k=false, use_th=false; if( k>-1 ) use_k = true; else if( th>-1e-5 ) use_th = true; int dim1 = nr; int dim2 = mat2->nc; cout << nc << " " << mat2->nr << endl; assertmesg(nc==mat2->nr, "ERROR: dimension mismatch in 'prod'"); mt19937 reng; reng.seed( 0 ); uniform_real_distribution<float> dist( -1e-5, 1e-5 ); SMat<T>* prodmat = new SMat<T>(dim1, dim2); vector<T> sum(dim1,0); Timer timer; timer.tic(); for(int i=0; i<dim2; i++) { if( i%10000==0 ) { cout << '\r' << i << flush; } vector<int> indices; prod_helper( mat2->size[i], mat2->data[i], indices, sum ); int siz = indices.size(); prodmat->size[i] = siz; prodmat->data[i] = new pair<int,T>[siz]; for(int j=0; j<indices.size(); j++) { int id = indices[j]; T val = sum[id]; prodmat->data[i][j] = make_pair(id,val); sum[id] = 0; } if(retain) { for( int j=0; j<prodmat->size[i]; j++ ) { float r = dist( reng ); prodmat->data[i][j].second += r; } sort( prodmat->data[i], prodmat->data[i]+prodmat->size[i], comp_pair_by_second_desc<int, T> ); int retk=0; if( use_k ) retk = min( k, prodmat->size[i] ); else if( use_th ) { float norm = 0; for( int j=0; j<prodmat->size[i]; j++ ) norm += SQ( prodmat->data[i][j].second ); norm = sqrt( norm ); for( int j=0; j<prodmat->size[i]; j++ ) { if( prodmat->data[i][j].second < th*norm ) break; retk++; } //cout << "\tretk: " << retk << endl; } Realloc( prodmat->size[i], retk, prodmat->data[i] ); sort( prodmat->data[i], prodmat->data[i]+retk, comp_pair_by_first<int, T> ); prodmat->size[i] = retk; } } cout << endl; timer.toc(); return prodmat; } // Returns sparse product matrix by retaining only top k highest scoring rows of (*this) for every column in mat2 SMat<T>* top_prod( SMat<T>* mat2, int k ) { return prod(mat2, k); } SMat<T>* prod_for_gradient( SMat<T>* mat2, VecS& active_item_per_row, vector<T>& afreq1, vector<T>& afreq2, float alpha, float reg, float 
threshold, bool pp = false, int num_thread = 1) { int dim1 = nr; int dim2 = mat2->nc; string mesg = "ERROR: dimension mismatch in 'prod_for_gradient': " + to_string(nr) + "x" + to_string(nc) + " , " + to_string(mat2->nr) + "x" + to_string(mat2->nc); assertmesg(nc==mat2->nr, mesg ); if(pp) { LOGN("prod_for_gradient called with alpha : " << alpha << " reg : " << reg << " threshold: " << threshold << " num thread : " << num_thread << " lmat : (" << nr << ", " << nc << ") rmat : (" << mat2->nr << ", " << mat2->nc << ")"); } SMat<T>* prodmat = new SMat<T>(dim1, dim2); TQDM tqdm(dim2, 10000); #pragma omp parallel num_threads(num_thread) { vector<T> sum(dim1,0); #pragma omp for for(int i=0; i<dim2; i++) { if(pp) tqdm.step(); vector<int> indices; prod_helper( mat2->size[i], mat2->data[i], indices, sum ); int siz = indices.size(); prodmat->size[i] = siz; prodmat->data[i] = new pair<int,T>[siz]; for(int j=0; j<indices.size(); j++) { int id = indices[j]; T val = sum[id]; float total = afreq1[id]*afreq2[i]; val = (val - alpha*total)/(total + reg); prodmat->data[i][j] = make_pair(id,val); } sort( prodmat->data[i], prodmat->data[i]+prodmat->size[i], comp_pair_by_second_desc<int,float> ); int retk = 0; unordered_map<string,bool> map_active_item_per_row; for( int j=0; j<prodmat->size[i]; j++ ) { int ind = prodmat->data[i][j].first; float val = prodmat->data[i][j].second; if( val < threshold ) break; if( map_active_item_per_row.find( active_item_per_row[ind] ) == map_active_item_per_row.end() ) { prodmat->data[i][retk++] = prodmat->data[i][j]; map_active_item_per_row[ active_item_per_row[ind] ] = true; } } Realloc( prodmat->size[i], retk, prodmat->data[i] ); sort( prodmat->data[i], prodmat->data[i]+retk, comp_pair_by_first<int,float> ); prodmat->size[i] = retk; for(int j=0; j<indices.size(); j++) { int id = indices[j]; sum[id] = 0; } } } return prodmat; } SMat<T>* prod_for_fscore( SMat<T>* mat2, VecS& active_item_per_row, vector<T>& freq1, vector<T>& freq2, int tot_domain, 
vector<T>& afreq1, vector<T>& afreq2, float alpha = 0.5, int k = -1, float th = -0.1, bool pp = false, int num_thread = 1) { bool retain = ( k > -1 || th > -1e-5 ); bool use_k=false, use_th=false; if( k>-1 ) use_k = true; else if( th>-1e-5 ) use_th = true; int dim1 = nr; int dim2 = mat2->nc; string mesg = "ERROR: dimension mismatch in 'prod': " + to_string(nr) + "x" + to_string(nc) + " , " + to_string(mat2->nr) + "x" + to_string(mat2->nc); assertmesg(nc==mat2->nr, mesg ); if(pp) { LOGN("fscore prod called with k : " << k << " th : " << th << " num thread : " << num_thread << " lmat : (" << nr << ", " << nc << ") rmat : (" << mat2->nr << ", " << mat2->nc << ")"); } SMat<T>* prodmat = new SMat<T>(dim1, dim2); TQDM tqdm(dim2, 10000); #pragma omp parallel num_threads(num_thread) { vector<T> sum(dim1,0); #pragma omp for for(int i=0; i<dim2; i++) { if(pp) tqdm.step(); vector<int> indices; prod_helper( mat2->size[i], mat2->data[i], indices, sum ); int siz = indices.size(); prodmat->size[i] = siz; prodmat->data[i] = new pair<int,T>[siz]; for(int j=0; j<indices.size(); j++) { int id = indices[j]; T val = sum[id]; // float A = 1.0; // val = A*log2( val/( freq2[i] + freq1[id] - val ) ) - 0.5*(1-A)*( log( (float)freq1[id] / (float)tot_domain ) - 0.5*log( (float)freq2[i] / (float)tot_domain ) ); // val = pow( 2, val ); val = val/( alpha*(freq2[i] + freq1[id] - val) + (1-alpha)*afreq1[id]*afreq2[i] ); // weighted F score combining precision and recall prodmat->data[i][j] = make_pair(id,val); //sum[id] = 0; } if( use_k && prodmat->size[i]>0 ) { sort( prodmat->data[i], prodmat->data[i]+prodmat->size[i], comp_pair_by_second_desc<int,float> ); int retk = 0; unordered_map<string,bool> map_active_item_per_row; for( int j=0; j<prodmat->size[i]; j++ ) { int ind = prodmat->data[i][j].first; float val = prodmat->data[i][j].second; if( map_active_item_per_row.find( active_item_per_row[ind] ) == map_active_item_per_row.end() ) { prodmat->data[i][retk++] = prodmat->data[i][j]; 
map_active_item_per_row[ active_item_per_row[ind] ] = true; if( retk >= k ) break; } } Realloc( prodmat->size[i], retk, prodmat->data[i] ); sort( prodmat->data[i], prodmat->data[i]+retk, comp_pair_by_first<int,float> ); prodmat->size[i] = retk; } /* if(retain && prodmat->size[i]>0 ) { sort( prodmat->data[i], prodmat->data[i]+prodmat->size[i], comp_pair_by_second_desc<int,float> ); int retk=0; if( use_k ) { retk = min( k, prodmat->size[i] ); float good_th = 100.0; int c = 0; for( int l=0; l<prodmat->size[i]; l++ ) if( prodmat->data[i][l].second > good_th ) c++; retk = max( retk, c ); } else if( use_th ) { float norm = 0; for( int j=0; j<prodmat->size[i]; j++ ) norm += SQ( prodmat->data[i][j].second ); norm = sqrt( norm ); for( int j=0; j<prodmat->size[i]; j++ ) { if( prodmat->data[i][j].second < th*norm ) break; retk++; } } Realloc( prodmat->size[i], retk, prodmat->data[i] ); sort( prodmat->data[i], prodmat->data[i]+retk, comp_pair_by_first<int,float> ); for( int j=0; j<retk; j++ ) { int id = prodmat->data[i][j].first; float val = sum[id]; float alpha = 0.0; prodmat->data[i][j].second = val/( alpha*(freq2[i] + freq1[id] - val) + (1-alpha)*afreq1[id]*afreq2[i] ); } prodmat->size[i] = retk; } */ for(int j=0; j<indices.size(); j++) { int id = indices[j]; sum[id] = 0; } } } return prodmat; } SMat<T>* sparse_prod( SMat<T>* mat2, SMat<T>* pat_mat, bool pp = false) { int dim1 = pat_mat->nr; int dim2 = pat_mat->nc; int dim = nr; assert( nr == mat2->nr ); assert( nc == dim1 ); assert( mat2->nc == dim2 ); SMat<T>* prod_mat = new SMat<T>( pat_mat ); vector<T> mask(nr,0); if(pp) { LOGN("sparse prod called with lmat : (" << nr << ", " << nc << ") rmat : (" << mat2->nr << ", " << mat2->nc << ")"); } TQDM tqdm(dim2, 1000); for( int i=0; i<dim2; i++ ) { if(pp) tqdm.step(); for( int j=0; j<mat2->size[i]; j++ ) mask[ mat2->data[i][j].first ] = mat2->data[i][j].second; for( int j=0; j<pat_mat->size[i]; j++ ) { int id = pat_mat->data[i][j].first; T prod = 0; for( int k=0; k<size[id]; 
k++ ) prod += mask[ data[id][k].first ] * data[id][k].second; prod_mat->data[i][j].second = prod; } for( int j=0; j<mat2->size[i]; j++ ) mask[ mat2->data[i][j].first ] = 0; } return prod_mat; } SMat<T>* get_rank_mat( string order ) { // order=="desc" or order=="asc" is the sorting order to use over nonzero elements. Zeros are ignored. Replaces the value of each nonzero element in *this matrix with its rank in its column SMat<T>* rmat = new SMat<T>( this ); if( order == "desc" ) for( int i=0; i<rmat->nc; i++ ) stable_sort( rmat->data[i], rmat->data[i]+rmat->size[i], comp_pair_by_second_desc<int,T> ); else if( order == "asc" ) for( int i=0; i<rmat->nc; i++ ) stable_sort( rmat->data[i], rmat->data[i]+rmat->size[i], comp_pair_by_second<int,T> ); for( int i=0; i<rmat->nc; i++ ) for( int j=0; j<rmat->size[i]; j++ ) rmat->data[i][j].second = (j+1); for( int i=0; i<rmat->nc; i++ ) sort( rmat->data[i], rmat->data[i]+rmat->size[i], comp_pair_by_first<int,T> ); return rmat; } void eliminate_zeros() { assert( !contiguous ); for( int i=0; i<nc; i++ ) { int siz = size[i]; int newsiz = 0; for( int j=0; j<siz; j++ ) { if( abs(data[i][j].second) > 1e-10 ) { data[i][newsiz] = data[i][j]; newsiz++; } } Realloc(size[i], newsiz, data[i]); size[i] = newsiz; } } void append_bias_feat( T bias_feat ) { if( contiguous ) { pair<int,T>* new_cdata = new pair<int,T>[ get_nnz()+nc ]; int ctr = 0; for( int i=0; i<nc; i++ ) { for( int j=0; j<size[i]; j++ ) new_cdata[ctr++] = data[i][j]; new_cdata[ctr++] = make_pair( nr, bias_feat ); size[i]++; } ctr = 0; for( int i=0; i<nc; i++ ) { data[i] = new_cdata+ctr; ctr += size[i]; } delete [] cdata; cdata = new_cdata; } else { for( int i=0; i<nc; i++ ) { int siz = size[i]; Realloc( siz, siz+1, data[i] ); data[i][siz] = make_pair( nr, bias_feat ); size[i]++; } } nr++; } void append_bias_feat( vector<T> biases ) { assertmesg(not contiguous, "append bias feat with vector input not implemented for contiguous smat"); assertmesg(nc == biases.size(), "lenght of 
biases vector should be same as number of columns of smat"); for( int i=0; i<nc; i++ ) { int siz = size[i]; Realloc( siz, siz+1, data[i] ); data[i][siz] = make_pair( nr, biases[i] ); size[i]++; } nr++; } void remove_bias_feat() { assertmesg(not contiguous, "remove bias feat not implemented for contiguous smat"); for( int i=0; i<nc; i++ ) { int siz = size[i]; assertmesg(data[i][siz-1].first == (nr-1), "assumption failed : data[i][siz-1].first == nr-1"); Realloc( siz, siz-1, data[i] ); size[i]--; } nr--; } void active_dims( VecI& cols, VecI& dims, VecI& counts, VecI& countmap ) { dims.clear(); counts.clear(); for( int i=0; i<cols.size(); i++ ) { int inst = cols[i]; for( int j=0; j<size[inst]; j++ ) { int dim = data[inst][j].first; if( countmap[ dim ]==0 ) dims.push_back(dim); countmap[ dim ]++; } } sort(dims.begin(),dims.end()); for( int i=0; i<dims.size(); i++ ) { counts.push_back( countmap[ dims[i] ] ); countmap[ dims[i] ] = 0; } } void in_place_shrink_mat(VecI& cols, SMat<T>*& s_mat, VecI& rows, VecI& countmap) { s_mat = new SMat<T>(this, false, true, cols); VecI counts; active_dims( cols, rows, counts, countmap ); } void shrink_mat( VecI& cols, SMat<T>*& s_mat, VecI& rows, VecI& countmap, bool transpose ) { int s_nc = cols.size(); VecI counts; active_dims( cols, rows, counts, countmap ); _ullint nnz = 0; for( int i=0; i<counts.size(); i++ ) nnz += counts[i]; int* maps = new int[ nr ]; for( int i=0; i<rows.size(); i++ ) maps[ rows[i] ] = i; int s_nr = rows.size(); if( transpose ) { s_mat = new SMat<T>( s_nc, s_nr, nnz, true ); int sumsize = 0; for( int i=0; i<s_nr; i++ ) { s_mat->size[i] = counts[i]; s_mat->data[i] = s_mat->cdata + sumsize; sumsize += counts[i]; } for( int i=0; i<s_nr; i++ ) counts[i] = 0; } else { s_mat = new SMat<T>( s_nr, s_nc, nnz, true ); int sumsize = 0; for( int i=0; i<s_nc; i++) { int col = cols[i]; s_mat->size[i] = size[ col ]; s_mat->data[i] = s_mat->cdata + sumsize; sumsize += size[ col ]; } } for( int i=0; i<s_nc; i++ ) { int col = 
cols[ i ]; for( int j=0; j<size[ col ]; j++ ) { int row = maps[ data[ col ][ j ].first ]; T val = data[ col ][ j ].second; if( transpose ) { s_mat->data[row][counts[row]] = make_pair( i, val ); counts[row]++; } else s_mat->data[i][j] = make_pair( row, val ); } } delete [] maps; } SMat<T>* filter_rows( VecI& retained_rows ) { VecI active_rows( nr, -1 ); for( int i=0; i<retained_rows.size(); i++ ) active_rows[ retained_rows[i] ] = i; SMat<T>* mat = new SMat<T>( 0, nc ); for( int i=0; i<nc; i++ ) { VecIF entries; for( int j=0; j<size[i]; j++ ) if( active_rows[ data[i][j].first ]>=0 ) entries.push_back( make_pair( active_rows[ data[i][j].first ], data[i][j].second ) ); mat->size[i] = entries.size(); mat->data[i] = new pairIF[ entries.size() ]; copy( entries.begin(), entries.end(), mat->data[i] ); } mat->nr = retained_rows.size(); return mat; } void split_mat( bool* split, SMat<T>*& mat1, SMat<T>*& mat2 ) { // split vector determines which columns are distributed to mat1 or mat2. If split[i]==false, ith column is given to mat1, else to mat2 int nc1 = 0, nc2 = 0; for( int i=0; i<nc; i++ ) { if( !split[i] ) nc1++; else nc2++; } mat1 = new SMat<T>( nr, nc1 ); mat2 = new SMat<T>( nr, nc2 ); int i1=0, i2=0; for( int i=0; i<nc; i++ ) { if( !split[i] ) { mat1->size[ i1 ] = size[ i ]; mat1->data[ i1 ] = new pair<int,T>[ size[ i ] ]; copy( data[ i ], data[ i ] + size[ i ], mat1->data[ i1 ] ); i1++; } else { mat2->size[ i2 ] = size[ i ]; mat2->data[ i2 ] = new pair<int,T>[ size[ i ] ]; copy( data[ i ], data[ i ] + size[ i ], mat2->data[ i2 ] ); i2++; } } } vector<T> get_max( VecI& inds, int axis ) // Only inds columns/rows are used for calculating max { assert( axis==0 || axis==1 ); // axis==0 => max along each column, axis==1 => max along each row if( axis==0 ) { cout << "Not yet implemented" << endl; exit(1); } else if( axis==1 ) { vector<T> maxval( nr, NEG_INF ); for( int i=0; i<inds.size(); i++ ) { int ind = inds[i]; for( int j=0; j<size[ind]; j++ ) { int colind = 
data[ind][j].first; T colval = data[ind][j].second; maxval[ colind ] = maxval[colind] > colval ? maxval[colind] : colval; } } for( int i=0; i<nr; i++ ) { if( maxval[i]==NEG_INF ) maxval[i] = 0; } return maxval; } } vector<T> get_min( VecI& inds, int axis ) // Only inds columns/rows are used for calculating min { assert( axis==0 || axis==1 ); // axis==0 => max along each column, axis==1 => max along each row if( axis==0 ) { cout << "Not yet implemented" << endl; exit(1); } else if( axis==1 ) { vector<T> minval( nr, INF ); for( int i=0; i<inds.size(); i++ ) { int ind = inds[i]; for( int j=0; j<size[ind]; j++ ) { int colind = data[ind][j].first; T colval = data[ind][j].second; minval[ colind ] = minval[colind] < colval ? minval[colind] : colval; } } for( int i=0; i<nr; i++ ) { if( minval[i]==INF ) minval[i] = 0; } return minval; } } VecI get_freq( int axis ) { assert( axis==0 || axis==1 ); // axis==0 => frequency along each column, axis==1 => frequency along each row if( axis==0 ) { VecI freq( nc, 0 ); for( int i=0; i<nc; i++ ) freq[i] = size[i]; return freq; } else if( axis==1 ) { VecI freq( nr, 0 ); for( int i=0; i<nc; i++ ) for( int j=0; j<size[i]; j++ ) freq[ data[i][j].first ]++; return freq; } } VecF get_sum( int axis ) { assert( axis==0 || axis==1 ); // axis==0 => sum along each column, axis==1 => sum along each row if( axis==0 ) { VecF sum( nc, 0 ); for( int i=0; i<nc; i++ ) for( int j=0; j<size[i]; j++ ) sum[i] += data[i][j].second; return sum; } else if( axis==1 ) { VecF sum( nr, 0 ); for( int i=0; i<nc; i++ ) for( int j=0; j<size[i]; j++ ) sum[data[i][j].first] += data[i][j].second; return sum; } } SMat<T>* chunk_mat( int start, int num ) { int end = start+num-1; assert( start>=0 && start<nc ); assert( end>=0 && end<nc ); assert( end>=start ); int chunk_nc = num; int chunk_nr = nr; _ullint chunk_nnz = 0; for( int i=start; i<=end; i++ ) chunk_nnz += size[i]; SMat<T>* chunk = new SMat<T>( chunk_nr, chunk_nc, chunk_nnz, true ); int ctr = 0; for( int i=0; 
i<num; i++ ) { chunk->size[i] = size[i+start]; chunk->data[i] = chunk->cdata + ctr; for( int j=0; j<size[i+start]; j++ ) chunk->data[i][j] = data[i+start][j]; ctr += size[i+start]; } return chunk; } void append_mat( SMat<T>* chunk ) { assert( nr == chunk->nr ); int chunk_nc = chunk->nc; int new_nc = nc + chunk_nc; size.resize(new_nc, 0); data.resize(new_nc, NULL); for( int i=0; i<chunk_nc; i++ ) { size[nc+i] = chunk->size[i]; data[nc+i] = new pair<int,T>[ chunk->size[i] ]; for( int j=0; j<chunk->size[i]; j++ ) { data[nc+i][j] = chunk->data[i][j]; } } nc = new_nc; } void read_legacy_mat(ifstream& fin) { // TODO : remove vector<int> inds; vector<T> vals; string line; getline( fin, line ); line += "\n"; int pos = 0; int next_pos=line.find_first_of(" \n",pos); string s = line.substr(pos,next_pos-pos); nc = stoi( s ); pos = next_pos+1; next_pos=line.find_first_of(" \n",pos); s = line.substr(pos,next_pos-pos); nr = stoi( s ); size.resize(nc, 0); data.resize(nc, NULL); for(int i=0; i<nc; i++) { inds.clear(); vals.clear(); string line; getline(fin,line); line += "\n"; int pos = 0; int next_pos; while(next_pos=line.find_first_of(": \n",pos)) { if((size_t)next_pos==string::npos) break; inds.push_back(stoi(line.substr(pos,next_pos-pos))); pos = next_pos+1; next_pos = line.find_first_of(": \n",pos); if((size_t)next_pos==string::npos) break; vals.push_back(stof(line.substr(pos,next_pos-pos))); pos = next_pos+1; } assert(inds.size()==vals.size()); assert(inds.size()==0 || inds[inds.size()-1]<nr); size[i] = inds.size(); data[i] = new pair<int,T>[inds.size()]; for(int j=0; j<size[i]; j++) { data[i][j].first = inds[j]; data[i][j].second = (T)vals[j]; } } } void readBin(std::ifstream& fin) { read_bin(nc, fin); read_bin(nr, fin); read_vec_bin(size, fin); data.resize(nc, NULL); for(int col = 0; col < nc; ++col) read_arr_bin(data[col], fin, size[col]); } void readPyBin(std::ifstream& fin) { read_bin(nc, fin); read_bin(nr, fin); read_vec_bin(size, fin); data.resize(nc, NULL); int nnz = 
accumulate(size.begin(), size.end(), 0); int* buf_inds = NULL; T* buf_data = NULL; read_arr_bin(buf_inds, fin, nnz); read_arr_bin(buf_data, fin, nnz); int ptr = 0; for(int col = 0; col < nc; ++col) { data[col] = new pairIF[size[col]]; for(int i = 0; i < size[col]; ++i) { data[col][i].first = buf_inds[ptr]; data[col][i].second = buf_data[ptr]; ptr++; } } delete[] buf_inds; delete[] buf_data; } void writeBin(std::ofstream& fout) { write_bin(nc, fout); write_bin(nr, fout); write_vec_bin(size, fout); for(int col = 0; col < nc; ++col) write_arr_bin(data[col], fout, size[col]); } void load(string fname) { check_valid_filename(fname, true); if(fname.substr(fname.size()-4, 4).compare(".bin") == 0) { LOG("reading binary format sparse matrix : " << fname); ifstream fin(fname, ios::in|ios::binary); this->readBin(fin); } else if (fname.substr(fname.size()-6, 6).compare(".pybin") == 0) { LOG("reading py binary format sparse matrix : " << fname); ifstream fin(fname, ios::in|ios::binary); this->readPyBin(fin); } else { LOG("reading text format sparse matrix : " << fname); ifstream fin; fin.open(fname); fin >> (*this); fin.close(); } } void dump(string fname) { check_valid_filename(fname, false); if(fname.substr(fname.size()-4, 4).compare(".bin") == 0) { LOG("writing binary format sparse matrix : " << fname); ofstream fout(fname, ios::binary | ios::out); writeBin(fout); fout.close(); } else { LOG("writing text format sparse matrix : " << fname); write(fname); } } void set_values( T value = T() ) { for( int i=0; i<nc; i++ ) for( int j=0; j<size[i]; j++ ) data[i][j].second = value; } void trunc_max_nnz_per_col( int max_nnz_per_col, string sort_type="desc" ) { for( int i=0; i<nc; i++ ) { if( sort_type=="desc" ) sort( data[i], data[i] + size[i], comp_pair_by_second_desc<int, T> ); else if( sort_type=="asc" ) sort( data[i], data[i] + size[i], comp_pair_by_second<int, T> ); int new_siz = min( size[i], max_nnz_per_col ); Realloc( size[i], new_siz, data[i] ); size[i] = new_siz; sort( 
data[i], data[i] + size[i], comp_pair_by_first<int, T> ); } } void visualize( VecS& X, VecS& Y, string file_name, string sort_type="SECONDDESC" ) { ofstream fout; fout.open( file_name ); int num_X = nc; int num_Y = nr; for( int i=0; i<num_X; i++ ) { fout << i << "\t" << X[i] << "\n"; VecIF probs; for( int j=0; j<size[i]; j++ ) probs.push_back( data[i][j] ); if( sort_type=="SECONDDESC" ) sort( probs.begin(), probs.end(), comp_pair_by_second_desc<int, T> ); else if( sort_type=="SECOND" ) sort( probs.begin(), probs.end(), comp_pair_by_second<int, T> ); for( int j=0; j<probs.size(); j++ ) { fout << "\t" << probs[j].first << "\t" << Y[probs[j].first] << "\t" << probs[j].second << "\n"; } fout << "\n"; } fout.close(); } void apply_log() { eliminate_zeros(); for( int i=0; i<nc; i++ ) for( int j=0; j<size[i]; j++ ) { T val = data[i][j].second; if( val < 0 ) { cerr << "ERROR: value cannot be -ve in apply_log" << endl; exit(1); } data[i][j].second = log( val ); } } void apply_exp() { for( int i=0; i<nc; i++ ) for( int j=0; j<size[i]; j++ ) { T val = data[i][j].second; data[i][j].second = exp( val ); } } vector<T> get_min( int axis ) // Only inds columns/rows are used for calculating min { assert( axis==0 || axis==1 ); // axis==0 => max along each column, axis==1 => max along each row if( axis==0 ) { vector<T> minval( nc, INF ); for( int i=0; i<nc; i++ ) { for( int j=0; j<size[i]; j++ ) { int colind = data[i][j].first; T colval = data[i][j].second; minval[ i ] = minval[i] < colval ? minval[i] : colval; } } for( int i=0; i<nc; i++ ) { if( minval[i]==INF ) minval[i] = 0; } return minval; } else if( axis==1 ) { vector<T> minval( nr, INF ); for( int i=0; i<nc; i++ ) { for( int j=0; j<size[i]; j++ ) { int colind = data[i][j].first; T colval = data[i][j].second; minval[ colind ] = minval[colind] < colval ? 
minval[colind] : colval; } } for( int i=0; i<nr; i++ ) { if( minval[i]==INF ) minval[i] = 0; } return minval; } } // element-wise multiples the "this" matrix and "mat" matrix SMat<T>* emultiply( SMat<T>* mat ) { assertmesg( nc == mat->nc && nr == mat->nr, "ERROR: dimensions of 2 matrices much match in emultiply" ); vector<T> values( nr, 0 ); SMat<T>* prod_mat = new SMat<T>( mat ); for( int i=0; i<nc; i++ ) { for( int j=0; j<size[i]; j++ ) values[ data[i][j].first ] = data[i][j].second; for( int j=0; j<prod_mat->size[i]; j++ ) prod_mat->data[i][j].second *= values[ prod_mat->data[i][j].first ]; for( int j=0; j<size[i]; j++ ) values[ data[i][j].first ] = 0; } prod_mat->eliminate_zeros(); return prod_mat; } void scalar_multiply(T val) { for(int col = 0; col < nc; ++col) for(int i = 0; i < size[col]; ++i) data[col][i].second *= val; } SMat<T>* emax( SMat<T>* mat ) { assertmesg( nc == mat->nc && nr == mat->nr, "ERROR: dimensions of 2 matrices much match in emultiply" ); vector<T> values( nr, 0 ); SMat<T>* max_mat = new SMat<T>( nr, nc ); for( int i=0; i<nc; i++ ) { vector<int> indices; for( int j=0; j<size[i]; j++ ) { values[ data[i][j].first ] = data[i][j].second; indices.push_back( data[i][j].first ); } for( int j=0; j<mat->size[i]; j++ ) { values[ mat->data[i][j].first ] = max( values[ mat->data[i][j].first ], mat->data[i][j].second ); indices.push_back( mat->data[i][j].first ); } sort( indices.begin(), indices.end() ); indices.resize( distance( indices.begin(), unique( indices.begin(), indices.end() ) ) ); max_mat->size[i] = indices.size(); max_mat->data[i] = new pair<int,T> [ indices.size() ]; for( int j=0; j<indices.size(); j++ ) { int ind = indices[j]; max_mat->data[i][j].first = ind; max_mat->data[i][j].second = values[ ind ]; values[ ind ] = 0; } } max_mat->eliminate_zeros(); return max_mat; } void diag_multiply_before( vector<T>& w ) { assertmesg( w.size()==nr, "ERROR: dimension mismatch in 'diag_multiply_before'" ); for( int i=0; i<nc; i++ ) for( int j=0; 
j<size[i]; j++ ) data[i][j].second *= w[ data[i][j].first ]; } void diag_multiply_after( vector<T>& w ) { assertmesg( w.size()==nc, "ERROR: dimension mismatch in 'diag_multiply_after'" ); for( int i=0; i<nc; i++ ) for( int j=0; j<size[i]; j++ ) data[i][j].second *= w[ i ]; } void sparse_add_value( T val ) { for( int i=0; i<nc; i++ ) for( int j=0; j<size[i]; j++ ) data[i][j].second += val; } void print_shape() { cout << "shape : (" << nr << ", " << nc << ")" << endl; } void sort_indices() { for( int i=0; i<nc; i++ ) sort( data[i], data[i]+size[i], comp_pair_by_first<int,T> ); } void idf_transform() { VecI freq = get_freq( 1 ); VecF idf( nr, 0 ); for( int i=0; i<nr; i++ ) idf[i] = log( (float)(nc+1) / (float)(freq[i]+1) ); for( int i=0; i<nc; i++ ) for( int j=0; j<size[i]; j++ ) data[i][j].second *= idf[ data[i][j].first ]; } float get_recall(SMat<T>* true_mat) { SMat<T>* intersection = this->emultiply(true_mat); float recall = intersection->get_nnz()*100.0/true_mat->get_nnz(); delete intersection; return recall; } SMat<T>* prod_for_jaccard( SMat<T>* mat2, vector<T>& freq1, vector<T>& freq2, int tot_domain, vector<T>& afreq1, vector<T>& afreq2, float alpha = 0.5, int k = -1, float th = -0.1, bool pp = false, int num_thread = 1) { bool retain = ( k > -1 || th > -1e-5 ); bool use_k=false, use_th=false; if( k > 0 ) use_k = true; else if( th>-1e-5 ) use_th = true; int dim1 = nr; int dim2 = mat2->nc; string mesg = "ERROR: dimension mismatch in 'prod': " + to_string(nr) + "x" + to_string(nc) + " , " + to_string(mat2->nr) + "x" + to_string(mat2->nc); assertmesg(nc==mat2->nr, mesg ); if(pp) { LOGN("jaccard prod called with k : " << k << " th : " << th << " num thread : " << num_thread << " lmat : (" << nr << ", " << nc << ") rmat : (" << mat2->nr << ", " << mat2->nc << ")"); } SMat<T>* prodmat = new SMat<T>(dim1, dim2); TQDM tqdm(dim2, 1000); #pragma omp parallel num_threads(num_thread) { vector<T> sum(dim1,0); #pragma omp for for(int i=0; i<dim2; i++) { if(pp) tqdm.step(); 
vector<int> indices; prod_helper( mat2->size[i], mat2->data[i], indices, sum ); int siz = indices.size(); prodmat->size[i] = siz; prodmat->data[i] = new pair<int,T>[siz]; for(int j=0; j<indices.size(); j++) { int id = indices[j]; T val = sum[id]; // float A = 1.0; // val = A*log2( val/( freq2[i] + freq1[id] - val ) ) - 0.5*(1-A)*( log( (float)freq1[id] / (float)tot_domain ) - 0.5*log( (float)freq2[i] / (float)tot_domain ) ); // val = pow( 2, val ); val = val/( alpha*(freq2[i] + freq1[id] - val) + (1-alpha)*afreq1[id]*afreq2[i] ); // weighted F score combining precision and recall prodmat->data[i][j] = make_pair(id,val); sum[id] = 0; } if(retain && prodmat->size[i]>0 ) { sort( prodmat->data[i], prodmat->data[i]+prodmat->size[i], comp_pair_by_second_desc<int,float> ); int retk=0; if( use_k ) { retk = min( k, prodmat->size[i] ); float good_th = 100.0; int c = 0; for( int l=0; l<prodmat->size[i]; l++ ) if( prodmat->data[i][l].second > good_th ) c++; retk = max( retk, c ); } else if( use_th ) { float norm = 0; for( int j=0; j<prodmat->size[i]; j++ ) norm += SQ( prodmat->data[i][j].second ); norm = sqrt( norm ); for( int j=0; j<prodmat->size[i]; j++ ) { if( prodmat->data[i][j].second < th*norm ) break; retk++; } } Realloc( prodmat->size[i], retk, prodmat->data[i] ); sort( prodmat->data[i], prodmat->data[i]+retk, comp_pair_by_first<int,float> ); prodmat->size[i] = retk; } } } return prodmat; } // SMat<T>* prod_for_jaccard( SMat<T>* mat2, vector<T>& freq1, vector<T>& freq2, int tot_domain, vector<T>& afreq1, vector<T>& afreq2, float alpha = 0.5, int k = -1, float th = -0.1, bool pp = false, int num_thread = 1) // { // bool retain = ( k > -1 || th > -1e-5 ); // bool use_k=false, use_th=false; // if( k>-1 ) // use_k = true; // else if( th>-1e-5 ) // use_th = true; // int dim1 = nr; // int dim2 = mat2->nc; // string mesg = "ERROR: dimension mismatch in 'prod': " + to_string(nr) + "x" + to_string(nc) + " , " + to_string(mat2->nr) + "x" + to_string(mat2->nc); // 
assertmesg(nc==mat2->nr, mesg); // if(pp) // { // LOGN("jaccard prod called with k : " << k << " th : " << th << " num thread : " // << num_thread << " lmat : (" << nr << ", " << nc << ") rmat : (" << mat2->nr // << ", " << mat2->nc << ")"); // } // SMat<T>* prodmat = new SMat<T>(dim1, dim2); // TQDM tqdm(dim2, 1000); // #pragma omp parallel num_threads(num_thread) // { // vector<T> sum(dim1,0); // #pragma omp for // for(int i=0; i<dim2; i++) // { // if(pp) tqdm.step(); // vector<int> indices; // prod_helper( mat2->size[i], mat2->data[i], indices, sum ); // int siz = indices.size(); // prodmat->size[i] = siz; // prodmat->data[i] = new pair<int,T>[siz]; // for(int j=0; j<indices.size(); j++) // { // int id = indices[j]; // T val = sum[id]; // //assert( freq2[i] + freq1[id] - val > 0 ); // val = val/( freq2[i] + freq1[id] - val ); // //val = log( (float)val / (float)tot_domain ) - log( (float)freq1[id] / (float)tot_domain ) - log( (float)freq2[i] / (float)tot_domain ); // prodmat->data[i][j] = make_pair(id,val); // sum[id] = 0; // } // if(retain && prodmat->size[i]>0 ) // { // sort( prodmat->data[i], prodmat->data[i]+prodmat->size[i], comp_pair_by_second_desc<int, T> ); // int retk=0; // if( use_k ) // { // retk = min( k, prodmat->size[i] ); // float good_th = 1.0; // int c = 0; // for( int l=0; l<prodmat->size[i]; l++ ) // if( prodmat->data[i][l].second > good_th ) // c++; // retk = max( retk, c ); // } // else if( use_th ) // { // float norm = 0; // for( int j=0; j<prodmat->size[i]; j++ ) // norm += SQ( prodmat->data[i][j].second ); // norm = sqrt( norm ); // for( int j=0; j<prodmat->size[i]; j++ ) // { // if( prodmat->data[i][j].second < th*norm ) // break; // retk++; // } // } // Realloc( prodmat->size[i], retk, prodmat->data[i] ); // sort( prodmat->data[i], prodmat->data[i]+retk, comp_pair_by_first<int, T> ); // prodmat->size[i] = retk; // } // } // } // return prodmat; // } }; template <typename T> class DMat // a column-major dense matrix of type T { public: 
int nc; int nr; T** data; DMat() { nc = 0; nr = 0; data = NULL; } DMat(int nc, int nr) { this->nc = nc; this->nr = nr; data = new T*[nc]; for(int i=0; i<nc; i++) data[i] = new T[nr](); } DMat(int nc, int nr, float val ) { this->nc = nc; this->nr = nr; data = new T*[nc]; for(int i=0; i<nc; i++) { data[i] = new T[nr](); fill( data[i], data[i]+nr, val ); } } friend istream& operator>>( istream& fin, DMat<T>& mat ) { fin >> mat.nc >> mat.nr; mat.data = new T*[mat.nc]; for(int i=0; i<mat.nc; i++) mat.data[i] = new T[mat.nr]; for( int i=0; i<mat.nc; i++ ) for( int j=0; j<mat.nr; j++ ) fin >> mat.data[i][j]; return fin; } DMat(string fname) { check_valid_filename(fname,true); ifstream fin; fin.open(fname); fin >> (*this); fin.close(); } void reset() { for( int i=0; i<nc; i++ ) fill( data[i], data[i]+nr, 0.0 ); } DMat(SMat<T>* mat) { nc = mat->nc; nr = mat->nr; data = new T*[nc]; for(int i=0; i<nc; i++) data[i] = new T[nr](); for(int i=0; i<mat->nc; i++) { pair<int,T>* vec = mat->data[i]; for(int j=0; j<mat->size[i]; j++) { data[i][vec[j].first] = vec[j].second; } } } ~DMat() { for(int i=0; i<nc; i++) delete [] data[i]; delete [] data; } friend ostream& operator<<( ostream& fout, const DMat<T>& mat ) { int nc = mat.nc; int nr = mat.nr; fout << nc << " " << nr << "\n"; for(int i=0; i<nc; i++) { for(int j=0; j<nr; j++) { if(j==0) fout << mat.data[i][j]; else fout << " " << mat.data[i][j]; } fout<<"\n"; } return fout; } void write( string fname, int precision=6 ) { check_valid_filename(fname,false); ofstream fout; fout.open(fname); fout << fixed << setprecision( precision ); fout << (*this); fout.close(); } void append_bias_feat( T bias_feat ) { for( int i=0; i<nc; i++ ) { Realloc( nr, nr+1, data[i] ); data[i][nr] = bias_feat; } nr++; } }; typedef SMat<float> SMatF; typedef SMat<int> SMatI; typedef SMat<int> SMatI; typedef DMat<float> DMatF; typedef DMat<int> DMatI; void reindex_VecIF( VecIF& vec, VecI& index ); template <typename T> inline T* read_vec( string fname ) { 
check_valid_filename( fname, true ); ifstream fin; fin.open( fname ); vector< T > vinp; T inp; while( fin >> inp ) { vinp.push_back( inp ); } fin.close(); T* vptr = new T[ vinp.size() ]; for( int i=0; i<vinp.size(); i++ ) vptr[i] = vinp[i]; return vptr; } inline pairII get_pos_neg_count( VecI& pos_or_neg ) { pairII counts = make_pair(0,0); for( int i=0; i<pos_or_neg.size(); i++ ) { if(pos_or_neg[i]==+1) counts.first++; else counts.second++; } return counts; } inline void reset_d_with_s( pairIF* svec, int siz, float* dvec ) { for( int i=0; i<siz; i++ ) dvec[ svec[i].first ] = 0; } inline void set_d_with_s( pairIF* svec, int siz, float* dvec ) { for( int i=0; i<siz; i++ ) dvec[ svec[i].first ] = svec[i].second; } inline void init_2d_float( int dim1, int dim2, float**& mat ) { mat = new float*[ dim1 ]; for( int i=0; i<dim1; i++ ) mat[i] = new float[ dim2 ]; } inline void delete_2d_float( int dim1, int dim2, float**& mat ) { for( int i=0; i<dim1; i++ ) delete [] mat[i]; delete [] mat; mat = NULL; } inline void reset_2d_float( int dim1, int dim2, float**& mat ) { for( int i=0; i<dim1; i++ ) for( int j=0; j<dim2; j++ ) mat[i][j] = 0; } inline float mult_d_s_vec( float* dvec, pairIF* svec, int siz ) { float prod = 0; for( int i=0; i<siz; i++ ) { int id = svec[i].first; float val = svec[i].second; prod += dvec[ id ] * val; } return prod; } inline void add_s_to_d_vec( pairIF* svec, int siz, float* dvec ) { for( int i=0; i<siz; i++ ) { int id = svec[i].first; float val = svec[i].second; dvec[ id ] += val; } } inline void add_sa_to_d_vec( float a, pairIF* svec, int siz, float* dvec ) { for( int i=0; i<siz; i++ ) { int id = svec[i].first; float val = svec[i].second; dvec[ id ] += a*val; } } inline float get_norm_d_vec( float* dvec, int siz ) { float norm = 0; for( int i=0; i<siz; i++ ) norm += SQ( dvec[i] ); norm = sqrt( norm ); return norm; } inline void div_d_vec_by_scalar( float* dvec, int siz, float s ) { for( int i=0; i<siz; i++) dvec[i] /= s; } inline void 
normalize_d_vec( float* dvec, int siz ) { float norm = get_norm_d_vec( dvec, siz ); if( norm>0 ) div_d_vec_by_scalar( dvec, siz, norm ); } /* Replicating these SMat<T> template functions to enable compatibility with cython code */ SMatF* p_copy( SMatF* inmat ); void p_add( SMatF* mat1, SMatF* mat2 ); void p_shrink_mat( SMatF* refmat, vector<int>& cols, SMatF*& s_mat, vector<int>& rows, vector<int>& countmap, bool transpose ); SMatF* p_get_rank_mat( SMatF* refmat, string order ); SMatF* p_transpose( SMatF* refmat ); SMatF* p_prod( SMatF* refmat, SMatF* mat2 ); SMatF* p_sparse_prod( SMatF* refmat, SMatF* mat2, SMatF* pat_mat ); void p_visualize( SMatF* mat, VecS& X, VecS& Y, string file_name, string sort_type="SECONDDESC" );
Example_target_reduction.2.c
/* * @@name: target_reduction.2.c * @@type: C * @@compilable: yes * @@linkable: yes * @@expect: success * @@version: omp_5.0 */ #include <stdio.h> int f(int); int g(int); int main() { int sum1=0, sum2=0; int i; const int n = 100; #pragma omp target data map(sum1,sum2) { #pragma omp target teams distribute reduction(+:sum1) for (int i = 0; i < n; i++) { sum1 += f(i); } #pragma omp target teams distribute map(sum1) reduction(+:sum2) for (int i = 0; i < n; i++) { sum2 += g(i) * sum1; } } printf( "sum1 = %d, sum2 = %d\n", sum1, sum2); //OUTPUT: sum1 = 9900, sum2 = 147015000 return 0; } int f(int res){ return res*2; } int g(int res){ return res*3; }
ft.c
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 2.3 OpenMP C versions - FT This benchmark is an OpenMP C version of the NPB FT code. The OpenMP C versions are developed by RWCP and derived from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Authors: D. Bailey W. Saphir OpenMP C version: S. Satoh --------------------------------------------------------------------*/ #include "npb-C.h" /* global variables */ #include "global.h" /* function declarations */ static void evolve(dcomplex u0[NZ][NY][NX], dcomplex u1[NZ][NY][NX], int t, int indexmap[NZ][NY][NX], int d[3]); static void compute_initial_conditions(dcomplex u0[NZ][NY][NX], int d[3]); static void ipow46(double a, int exponent, double *result); static void setup(void); static void compute_indexmap(int indexmap[NZ][NY][NX], int d[3]); static void print_timers(void); static void fft(int dir, dcomplex x1[NZ][NY][NX], dcomplex x2[NZ][NY][NX]); static void cffts1(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]); static void cffts2(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]); static void cffts3(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex 
y1[NX][FFTBLOCKPAD]); static void fft_init (int n); static void cfftz (int is, int m, int n, dcomplex x[NX][FFTBLOCKPAD], dcomplex y[NX][FFTBLOCKPAD]); static void fftz2 (int is, int l, int m, int n, int ny, int ny1, dcomplex u[NX], dcomplex x[NX][FFTBLOCKPAD], dcomplex y[NX][FFTBLOCKPAD]); static int ilog2(int n); static void checksum(int i, dcomplex u1[NZ][NY][NX], int d[3]); static void verify (int d1, int d2, int d3, int nt, boolean *verified, char *cclass); /*-------------------------------------------------------------------- c FT benchmark c-------------------------------------------------------------------*/ int main(int argc, char **argv) { /*c------------------------------------------------------------------- c-------------------------------------------------------------------*/ int i, ierr; /*------------------------------------------------------------------ c u0, u1, u2 are the main arrays in the problem. c Depending on the decomposition, these arrays will have different c dimensions. To accomodate all possibilities, we allocate them as c one-dimensional arrays and pass them to subroutines for different c views c - u0 contains the initial (transformed) initial condition c - u1 and u2 are working arrays c - indexmap maps i,j,k of u0 to the correct i^2+j^2+k^2 for the c time evolution operator. c-----------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Large arrays are in common so that they are allocated on the c heap rather than the stack. This common block is not c referenced directly anywhere else. Padding is to avoid accidental c cache problems, since all array sizes are powers of two. 
c-------------------------------------------------------------------*/ static dcomplex u0[NZ][NY][NX]; static dcomplex pad1[3]; static dcomplex u1[NZ][NY][NX]; static dcomplex pad2[3]; static dcomplex u2[NZ][NY][NX]; static dcomplex pad3[3]; static int indexmap[NZ][NY][NX]; int iter; int nthreads = 1; double total_time, mflops; boolean verified; char cclass; /*-------------------------------------------------------------------- c Run the entire problem once to make sure all data is touched. c This reduces variable startup costs, which is important for such a c short benchmark. The other NPB 2 implementations are similar. c-------------------------------------------------------------------*/ for (i = 0; i < T_MAX; i++) { timer_clear(i); } setup(); #pragma omp parallel { compute_indexmap(indexmap, dims[2]); #pragma omp single { compute_initial_conditions(u1, dims[0]); fft_init (dims[0][0]); } fft(1, u1, u0); } /* end parallel */ /*-------------------------------------------------------------------- c Start over from the beginning. Note that all operations must c be timed, in contrast to other benchmarks. 
c-------------------------------------------------------------------*/ for (i = 0; i < T_MAX; i++) { timer_clear(i); } timer_start(T_TOTAL); if (TIMERS_ENABLED == TRUE) timer_start(T_SETUP); #pragma omp parallel private(iter) firstprivate(niter) { compute_indexmap(indexmap, dims[2]); #pragma omp single { compute_initial_conditions(u1, dims[0]); fft_init (dims[0][0]); } if (TIMERS_ENABLED == TRUE) { #pragma omp master timer_stop(T_SETUP); } if (TIMERS_ENABLED == TRUE) { #pragma omp master timer_start(T_FFT); } fft(1, u1, u0); if (TIMERS_ENABLED == TRUE) { #pragma omp master timer_stop(T_FFT); } for (iter = 1; iter <= niter; iter++) { if (TIMERS_ENABLED == TRUE) { #pragma omp master timer_start(T_EVOLVE); } evolve(u0, u1, iter, indexmap, dims[0]); if (TIMERS_ENABLED == TRUE) { #pragma omp master timer_stop(T_EVOLVE); } if (TIMERS_ENABLED == TRUE) { #pragma omp master timer_start(T_FFT); } fft(-1, u1, u2); if (TIMERS_ENABLED == TRUE) { #pragma omp master timer_stop(T_FFT); } if (TIMERS_ENABLED == TRUE) { #pragma omp master timer_start(T_CHECKSUM); } checksum(iter, u2, dims[0]); if (TIMERS_ENABLED == TRUE) { #pragma omp master timer_stop(T_CHECKSUM); } } #pragma omp single verify(NX, NY, NZ, niter, &verified, &cclass); #if defined(_OPENMP) #pragma omp master nthreads = omp_get_num_threads(); #endif /* _OPENMP */ } /* end parallel */ timer_stop(T_TOTAL); total_time = timer_read(T_TOTAL); if( total_time != 0.0) { mflops = 1.0e-6*(double)(NTOTAL) * (14.8157+7.19641*log((double)(NTOTAL)) + (5.23518+7.21113*log((double)(NTOTAL)))*niter) /total_time; } else { mflops = 0.0; } c_print_results("FT", cclass, NX, NY, NZ, niter, nthreads, total_time, mflops, " floating point", verified, NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6, CS7); if (TIMERS_ENABLED == TRUE) print_timers(); } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void evolve(dcomplex u0[NZ][NY][NX], 
dcomplex u1[NZ][NY][NX], int t, int indexmap[NZ][NY][NX], int d[3]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c evolve u0 -> u1 (t time steps) in fourier space c-------------------------------------------------------------------*/ int i, j, k; #pragma omp for for (k = 0; k < d[2]; k++) { for (j = 0; j < d[1]; j++) { for (i = 0; i < d[0]; i++) { crmul(u1[k][j][i], u0[k][j][i], ex[t*indexmap[k][j][i]]); } } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void compute_initial_conditions(dcomplex u0[NZ][NY][NX], int d[3]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Fill in array u0 with initial conditions from c random number generator c-------------------------------------------------------------------*/ int k; double x0, start, an, dummy; static double tmp[NX*2*MAXDIM+1]; int i,j,t; start = SEED; /*-------------------------------------------------------------------- c Jump to the starting element for our first plane. c-------------------------------------------------------------------*/ ipow46(A, (zstart[0]-1)*2*NX*NY + (ystart[0]-1)*2*NX, &an); dummy = randlc(&start, an); ipow46(A, 2*NX*NY, &an); /*-------------------------------------------------------------------- c Go through by z planes filling in one square at a time. 
c-------------------------------------------------------------------*/ for (k = 0; k < dims[0][2]; k++) { x0 = start; vranlc(2*NX*dims[0][1], &x0, A, tmp); t = 1; for (j = 0; j < dims[0][1]; j++) for (i = 0; i < NX; i++) { u0[k][j][i].real = tmp[t++]; u0[k][j][i].imag = tmp[t++]; } if (k != dims[0][2]) dummy = randlc(&start, an); } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void ipow46(double a, int exponent, double *result) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c compute a^exponent mod 2^46 c-------------------------------------------------------------------*/ double dummy, q, r; int n, n2; /*-------------------------------------------------------------------- c Use c a^n = a^(n/2)*a^(n/2) if n even else c a^n = a*a^(n-1) if n odd c-------------------------------------------------------------------*/ *result = 1; if (exponent == 0) return; q = a; r = 1; n = exponent; while (n > 1) { n2 = n/2; if (n2 * 2 == n) { dummy = randlc(&q, q); n = n2; } else { dummy = randlc(&r, q); n = n-1; } } dummy = randlc(&r, q); *result = r; } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void setup(void) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int ierr, i, j, fstatus; printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version" " - FT Benchmark\n\n"); niter = NITER_DEFAULT; printf(" Size : %3dx%3dx%3d\n", NX, NY, NZ); printf(" Iterations : %7d\n", niter); /* 1004 format(' Number of processes : ', i7) 1005 format(' Processor array : ', i3, 'x', i3) 1006 format(' WARNING: compiled for ', i5, ' processes. 
', > ' Will not verify. ')*/ for (i = 0;i < 3 ; i++) { dims[i][0] = NX; dims[i][1] = NY; dims[i][2] = NZ; } for (i = 0; i < 3; i++) { xstart[i] = 1; xend[i] = NX; ystart[i] = 1; yend[i] = NY; zstart[i] = 1; zend[i] = NZ; } /*-------------------------------------------------------------------- c Set up info for blocking of ffts and transposes. This improves c performance on cache-based systems. Blocking involves c working on a chunk of the problem at a time, taking chunks c along the first, second, or third dimension. c c - In cffts1 blocking is on 2nd dimension (with fft on 1st dim) c - In cffts2/3 blocking is on 1st dimension (with fft on 2nd and 3rd dims) c Since 1st dim is always in processor, we'll assume it's long enough c (default blocking factor is 16 so min size for 1st dim is 16) c The only case we have to worry about is cffts1 in a 2d decomposition. c so the blocking factor should not be larger than the 2nd dimension. c-------------------------------------------------------------------*/ fftblock = FFTBLOCK_DEFAULT; fftblockpad = FFTBLOCKPAD_DEFAULT; if (fftblock != FFTBLOCK_DEFAULT) fftblockpad = fftblock+3; } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void compute_indexmap(int indexmap[NZ][NY][NX], int d[3]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c compute function from local (i,j,k) to ibar^2+jbar^2+kbar^2 c for time evolution exponent. 
c-------------------------------------------------------------------*/ int i, j, k, ii, ii2, jj, ij2, kk; double ap; /*-------------------------------------------------------------------- c basically we want to convert the fortran indices c 1 2 3 4 5 6 7 8 c to c 0 1 2 3 -4 -3 -2 -1 c The following magic formula does the trick: c mod(i-1+n/2, n) - n/2 c-------------------------------------------------------------------*/ #pragma omp for for (i = 0; i < dims[2][0]; i++) { ii = (i+1+xstart[2]-2+NX/2)%NX - NX/2; ii2 = ii*ii; for (j = 0; j < dims[2][1]; j++) { jj = (j+1+ystart[2]-2+NY/2)%NY - NY/2; ij2 = jj*jj+ii2; for (k = 0; k < dims[2][2]; k++) { kk = (k+1+zstart[2]-2+NZ/2)%NZ - NZ/2; indexmap[k][j][i] = kk*kk+ij2; } } } /*-------------------------------------------------------------------- c compute array of exponentials for time evolution. c-------------------------------------------------------------------*/ #pragma omp single { ap = - 4.0 * ALPHA * PI * PI; ex[0] = 1.0; ex[1] = exp(ap); for (i = 2; i <= EXPMAX; i++) { ex[i] = ex[i-1]*ex[1]; } } /* end single */ } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void print_timers(void) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int i; char *tstrings[] = { " total ", " setup ", " fft ", " evolve ", " checksum ", " fftlow ", " fftcopy " }; for (i = 0; i < T_MAX; i++) { if (timer_read(i) != 0.0) { printf("timer %2d(%16s( :%10.6f\n", i, tstrings[i], timer_read(i)); } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void fft(int dir, dcomplex x1[NZ][NY][NX], dcomplex x2[NZ][NY][NX]) { /*-------------------------------------------------------------------- 
c-------------------------------------------------------------------*/ dcomplex y0[NX][FFTBLOCKPAD]; dcomplex y1[NX][FFTBLOCKPAD]; /*-------------------------------------------------------------------- c note: args x1, x2 must be different arrays c note: args for cfftsx are (direction, layout, xin, xout, scratch) c xin/xout may be the same and it can be somewhat faster c if they are c-------------------------------------------------------------------*/ if (dir == 1) { cffts1(1, dims[0], x1, x1, y0, y1); /* x1 -> x1 */ cffts2(1, dims[1], x1, x1, y0, y1); /* x1 -> x1 */ cffts3(1, dims[2], x1, x2, y0, y1); /* x1 -> x2 */ } else { cffts3(-1, dims[2], x1, x1, y0, y1); /* x1 -> x1 */ cffts2(-1, dims[1], x1, x1, y0, y1); /* x1 -> x1 */ cffts1(-1, dims[0], x1, x2, y0, y1); /* x1 -> x2 */ } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void cffts1(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int logd[3]; int i, j, k, jj; for (i = 0; i < 3; i++) { logd[i] = ilog2(d[i]); } #pragma omp for for (k = 0; k < d[2]; k++) { for (jj = 0; jj <= d[1] - fftblock; jj+=fftblock) { /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ for (j = 0; j < fftblock; j++) { for (i = 0; i < d[0]; i++) { y0[i][j].real = x[k][j+jj][i].real; y0[i][j].imag = x[k][j+jj][i].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */ cfftz (is, logd[0], d[0], y0, y1); /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ for (j = 0; j < fftblock; j++) { for (i = 0; i < d[0]; i++) { xout[k][j+jj][i].real = y0[i][j].real; xout[k][j+jj][i].imag = 
y0[i][j].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void cffts2(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int logd[3]; int i, j, k, ii; for (i = 0; i < 3; i++) { logd[i] = ilog2(d[i]); } #pragma omp for for (k = 0; k < d[2]; k++) { for (ii = 0; ii <= d[0] - fftblock; ii+=fftblock) { /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ for (j = 0; j < d[1]; j++) { for (i = 0; i < fftblock; i++) { y0[j][i].real = x[k][j][i+ii].real; y0[j][i].imag = x[k][j][i+ii].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */ cfftz (is, logd[1], d[1], y0, y1); /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ for (j = 0; j < d[1]; j++) { for (i = 0; i < fftblock; i++) { xout[k][j][i+ii].real = y0[j][i].real; xout[k][j][i+ii].imag = y0[j][i].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void cffts3(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int logd[3]; int i, j, k, ii; for (i = 0;i < 3; i++) { logd[i] = ilog2(d[i]); } #pragma omp for for (j = 0; j < d[1]; j++) { for (ii = 0; ii <= d[0] - fftblock; ii+=fftblock) { /* if (TIMERS_ENABLED == 
TRUE) timer_start(T_FFTCOPY); */ for (k = 0; k < d[2]; k++) { for (i = 0; i < fftblock; i++) { y0[k][i].real = x[k][j][i+ii].real; y0[k][i].imag = x[k][j][i+ii].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */ cfftz (is, logd[2], d[2], y0, y1); /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ for (k = 0; k < d[2]; k++) { for (i = 0; i < fftblock; i++) { xout[k][j][i+ii].real = y0[k][i].real; xout[k][j][i+ii].imag = y0[k][i].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void fft_init (int n) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c compute the roots-of-unity array that will be used for subsequent FFTs. c-------------------------------------------------------------------*/ int m,nu,ku,i,j,ln; double t, ti; /*-------------------------------------------------------------------- c Initialize the U array with sines and cosines in a manner that permits c stride one access at each FFT iteration. 
c-------------------------------------------------------------------*/ nu = n; m = ilog2(n); u[0].real = (double)m; u[0].imag = 0.0; ku = 1; ln = 1; for (j = 1; j <= m; j++) { t = PI / ln; for (i = 0; i <= ln - 1; i++) { ti = i * t; u[i+ku].real = cos(ti); u[i+ku].imag = sin(ti); } ku = ku + ln; ln = 2 * ln; } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void cfftz (int is, int m, int n, dcomplex x[NX][FFTBLOCKPAD], dcomplex y[NX][FFTBLOCKPAD]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Computes NY N-point complex-to-complex FFTs of X using an algorithm due c to Swarztrauber. X is both the input and the output array, while Y is a c scratch array. It is assumed that N = 2^M. Before calling CFFTZ to c perform FFTs, the array U must be initialized by calling CFFTZ with IS c set to 0 and M set to MX, where MX is the maximum value of M for any c subsequent call. c-------------------------------------------------------------------*/ int i,j,l,mx; /*-------------------------------------------------------------------- c Check if input parameters are invalid. c-------------------------------------------------------------------*/ mx = (int)(u[0].real); if ((is != 1 && is != -1) || m < 1 || m > mx) { printf("CFFTZ: Either U has not been initialized, or else\n" "one of the input parameters is invalid%5d%5d%5d\n", is, m, mx); exit(1); } /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. 
c-------------------------------------------------------------------*/ for (l = 1; l <= m; l+=2) { fftz2 (is, l, m, n, fftblock, fftblockpad, u, x, y); if (l == m) break; fftz2 (is, l + 1, m, n, fftblock, fftblockpad, u, y, x); } /*-------------------------------------------------------------------- c Copy Y to X. c-------------------------------------------------------------------*/ if (m % 2 == 1) { for (j = 0; j < n; j++) { for (i = 0; i < fftblock; i++) { x[j][i].real = y[j][i].real; x[j][i].imag = y[j][i].imag; } } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void fftz2 (int is, int l, int m, int n, int ny, int ny1, dcomplex u[NX], dcomplex x[NX][FFTBLOCKPAD], dcomplex y[NX][FFTBLOCKPAD]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Performs the L-th iteration of the second variant of the Stockham FFT. c-------------------------------------------------------------------*/ int k,n1,li,lj,lk,ku,i,j,i11,i12,i21,i22; dcomplex u1,x11,x21; /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ n1 = n / 2; if (l-1 == 0) { lk = 1; } else { lk = 2 << ((l - 1)-1); } if (m-l == 0) { li = 1; } else { li = 2 << ((m - l)-1); } lj = 2 * lk; ku = li; for (i = 0; i < li; i++) { i11 = i * lk; i12 = i11 + n1; i21 = i * lj; i22 = i21 + lk; if (is >= 1) { u1.real = u[ku+i].real; u1.imag = u[ku+i].imag; } else { u1.real = u[ku+i].real; u1.imag = -u[ku+i].imag; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
c-------------------------------------------------------------------*/
    /* Radix-2 butterfly: sum of the two half-blocks goes to the even
       output block, the twiddled difference to the odd one. */
    for (k = 0; k < lk; k++) {
	for (j = 0; j < ny; j++) {
	    double x11real, x11imag;
	    double x21real, x21imag;
	    x11real = x[i11+k][j].real;
	    x11imag = x[i11+k][j].imag;
	    x21real = x[i12+k][j].real;
	    x21imag = x[i12+k][j].imag;
	    y[i21+k][j].real = x11real + x21real;
	    y[i21+k][j].imag = x11imag + x21imag;
	    y[i22+k][j].real = u1.real * (x11real - x21real)
		- u1.imag * (x11imag - x21imag);
	    y[i22+k][j].imag = u1.real * (x11imag - x21imag)
		+ u1.imag * (x11real - x21real);
	}
    }
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

/* Smallest lg with 2^lg >= n, i.e. ceil(log2(n)); returns 0 for n==1. */
static int ilog2(int n) {

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

    int nn, lg;

    if (n == 1) {
	return 0;
    }
    lg = 1;
    nn = 2;
    while (nn < n) {
	nn = nn << 1;
	lg++;
    }
    return lg;
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

/* Samples 1024 pseudo-randomly chosen entries of u1, accumulates them
   into the global sums[i], normalizes by NTOTAL and prints the
   checksum for iteration i. */
static void checksum(int i, dcomplex u1[NZ][NY][NX],
		     int d[3]) {

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

    int j, q,r,s, ierr;
    dcomplex chk,allchk;

    chk.real = 0.0;
    chk.imag = 0.0;

    /* Each thread sums the sampled entries of its share of j that lie
       inside this process's subgrid (x/y/z start..end bounds). */
#pragma omp for nowait
    for (j = 1; j <= 1024; j++) {
	q = j%NX+1;
	if (q >= xstart[0] && q <= xend[0]) {
	    r = (3*j)%NY+1;
	    if (r >= ystart[0] && r <= yend[0]) {
		s = (5*j)%NZ+1;
		if (s >= zstart[0] && s <= zend[0]) {
		    cadd(chk,chk,u1[s-zstart[0]][r-ystart[0]][q-xstart[0]]);
		}
	    }
	}
    }

    /* Fold the per-thread partial sums into the shared total. */
#pragma omp critical
    {
	sums[i].real += chk.real;
	sums[i].imag += chk.imag;
    }
#pragma omp barrier
    /* After all threads have contributed, normalize and report once. */
#pragma omp single
    {
	/* complex % real */
	sums[i].real = sums[i].real/(double)(NTOTAL);
	sums[i].imag = sums[i].imag/(double)(NTOTAL);
	printf("T = %5d Checksum = %22.12e %22.12e\n",
	       i, sums[i].real, sums[i].imag);
    }
}
/*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void verify (int d1, int d2, int d3, int nt, boolean *verified, char *cclass) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int ierr, size, i; double err, epsilon; /*-------------------------------------------------------------------- c Sample size reference checksums c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Class S size reference checksums c-------------------------------------------------------------------*/ double vdata_real_s[6+1] = { 0.0, 5.546087004964e+02, 5.546385409189e+02, 5.546148406171e+02, 5.545423607415e+02, 5.544255039624e+02, 5.542683411902e+02 }; double vdata_imag_s[6+1] = { 0.0, 4.845363331978e+02, 4.865304269511e+02, 4.883910722336e+02, 4.901273169046e+02, 4.917475857993e+02, 4.932597244941e+02 }; /*-------------------------------------------------------------------- c Class W size reference checksums c-------------------------------------------------------------------*/ double vdata_real_w[6+1] = { 0.0, 5.673612178944e+02, 5.631436885271e+02, 5.594024089970e+02, 5.560698047020e+02, 5.530898991250e+02, 5.504159734538e+02 }; double vdata_imag_w[6+1] = { 0.0, 5.293246849175e+02, 5.282149986629e+02, 5.270996558037e+02, 5.260027904925e+02, 5.249400845633e+02, 5.239212247086e+02 }; /*-------------------------------------------------------------------- c Class A size reference checksums c-------------------------------------------------------------------*/ double vdata_real_a[6+1] = { 0.0, 5.046735008193e+02, 5.059412319734e+02, 5.069376896287e+02, 5.077892868474e+02, 5.085233095391e+02, 5.091487099959e+02 }; double vdata_imag_a[6+1] = { 0.0, 5.114047905510e+02, 5.098809666433e+02, 5.098144042213e+02, 
5.101336130759e+02, 5.104914655194e+02, 5.107917842803e+02 }; /*-------------------------------------------------------------------- c Class B size reference checksums c-------------------------------------------------------------------*/ double vdata_real_b[20+1] = { 0.0, 5.177643571579e+02, 5.154521291263e+02, 5.146409228649e+02, 5.142378756213e+02, 5.139626667737e+02, 5.137423460082e+02, 5.135547056878e+02, 5.133910925466e+02, 5.132470705390e+02, 5.131197729984e+02, 5.130070319283e+02, 5.129070537032e+02, 5.128182883502e+02, 5.127393733383e+02, 5.126691062020e+02, 5.126064276004e+02, 5.125504076570e+02, 5.125002331720e+02, 5.124551951846e+02, 5.124146770029e+02 }; double vdata_imag_b[20+1] = { 0.0, 5.077803458597e+02, 5.088249431599e+02, 5.096208912659e+02, 5.101023387619e+02, 5.103976610617e+02, 5.105948019802e+02, 5.107404165783e+02, 5.108576573661e+02, 5.109577278523e+02, 5.110460304483e+02, 5.111252433800e+02, 5.111968077718e+02, 5.112616233064e+02, 5.113203605551e+02, 5.113735928093e+02, 5.114218460548e+02, 5.114656139760e+02, 5.115053595966e+02, 5.115415130407e+02, 5.115744692211e+02 }; /*-------------------------------------------------------------------- c Class C size reference checksums c-------------------------------------------------------------------*/ double vdata_real_c[20+1] = { 0.0, 5.195078707457e+02, 5.155422171134e+02, 5.144678022222e+02, 5.140150594328e+02, 5.137550426810e+02, 5.135811056728e+02, 5.134569343165e+02, 5.133651975661e+02, 5.132955192805e+02, 5.132410471738e+02, 5.131971141679e+02, 5.131605205716e+02, 5.131290734194e+02, 5.131012720314e+02, 5.130760908195e+02, 5.130528295923e+02, 5.130310107773e+02, 5.130103090133e+02, 5.129905029333e+02, 5.129714421109e+02 }; double vdata_imag_c[20+1] = { 0.0, 5.149019699238e+02, 5.127578201997e+02, 5.122251847514e+02, 5.121090289018e+02, 5.121143685824e+02, 5.121496764568e+02, 5.121870921893e+02, 5.122193250322e+02, 5.122454735794e+02, 5.122663649603e+02, 5.122830879827e+02, 
5.122965869718e+02, 5.123075927445e+02, 5.123166486553e+02, 5.123241541685e+02, 5.123304037599e+02, 5.123356167976e+02, 5.123399592211e+02, 5.123435588985e+02, 5.123465164008e+02 }; epsilon = 1.0e-12; *verified = TRUE; *cclass = 'U'; if (d1 == 64 && d2 == 64 && d3 == 64 && nt == 6) { *cclass = 'S'; for (i = 1; i <= nt; i++) { err = (get_real(sums[i]) - vdata_real_s[i]) / vdata_real_s[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } err = (get_imag(sums[i]) - vdata_imag_s[i]) / vdata_imag_s[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } else if (d1 == 128 && d2 == 128 && d3 == 32 && nt == 6) { *cclass = 'W'; for (i = 1; i <= nt; i++) { err = (get_real(sums[i]) - vdata_real_w[i]) / vdata_real_w[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } err = (get_imag(sums[i]) - vdata_imag_w[i]) / vdata_imag_w[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } else if (d1 == 256 && d2 == 256 && d3 == 128 && nt == 6) { *cclass = 'A'; for (i = 1; i <= nt; i++) { err = (get_real(sums[i]) - vdata_real_a[i]) / vdata_real_a[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } err = (get_imag(sums[i]) - vdata_imag_a[i]) / vdata_imag_a[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } else if (d1 == 512 && d2 == 256 && d3 == 256 && nt == 20) { *cclass = 'B'; for (i = 1; i <= nt; i++) { err = (get_real(sums[i]) - vdata_real_b[i]) / vdata_real_b[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } err = (get_imag(sums[i]) - vdata_imag_b[i]) / vdata_imag_b[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } else if (d1 == 512 && d2 == 512 && d3 == 512 && nt == 20) { *cclass = 'C'; for (i = 1; i <= nt; i++) { err = (get_real(sums[i]) - vdata_real_c[i]) / vdata_real_c[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } err = (get_imag(sums[i]) - vdata_imag_c[i]) / vdata_imag_c[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } if (*cclass != 'U') { printf("Result verification 
successful\n"); } else { printf("Result verification failed\n"); } printf("cclass = %1c\n", *cclass); }
demos.h
//------------------------------------------------------------------------------ // GraphBLAS/Demo/Include/demos.h: include file for all demo programs //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ #ifndef GRAPHBLAS_DEMOS_H #define GRAPHBLAS_DEMOS_H #include "GraphBLAS.h" #include "simple_rand.h" #include "simple_timer.h" #include "usercomplex.h" #ifdef MATLAB_MEX_FILE #include "mex.h" #include "matrix.h" #define malloc mxMalloc #define free mxFree #define calloc mxCalloc #define realloc mxRealloc #endif //------------------------------------------------------------------------------ // manage compiler warnings //------------------------------------------------------------------------------ #if defined __INTEL_COMPILER #pragma warning (disable: 58 167 144 177 181 186 188 589 593 869 981 1418 1419 1572 1599 2259 2282 2557 2547 3280 ) #elif defined __GNUC__ #pragma GCC diagnostic ignored "-Wunknown-pragmas" #pragma GCC diagnostic ignored "-Wunknown-warning-option" #pragma GCC diagnostic ignored "-Wformat-truncation=" #pragma GCC diagnostic ignored "-Wunused-variable" #pragma GCC diagnostic ignored "-Wunused-result" #pragma GCC diagnostic ignored "-Wint-in-bool-context" #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wsign-compare" #pragma GCC diagnostic ignored "-Wtype-limits" #pragma GCC diagnostic ignored "-Wincompatible-pointer-types" // enable these warnings as errors #pragma GCC diagnostic error "-Wmisleading-indentation" #pragma GCC diagnostic error "-Wswitch-default" #endif #undef MIN #undef MAX #define MIN(a,b) (((a) < (b)) ? (a) : (b)) #define MAX(a,b) (((a) > (b)) ? 
(a) : (b)) GrB_Info bfs5m // BFS of a graph (using vector assign & reduce) ( GrB_Vector *v_output, // v [i] is the BFS level of node i in the graph GrB_Matrix A, // input graph, treated as if boolean in semiring GrB_Index s // starting node of the BFS ) ; GrB_Info bfs5m_check // BFS of a graph (using vector assign & reduce) ( GrB_Vector *v_output, // v [i] is the BFS level of node i in the graph GrB_Matrix A, // input graph, treated as if boolean in semiring GrB_Index s // starting node of the BFS ) ; GrB_Info bfs6 // BFS of a graph (using apply) ( GrB_Vector *v_output, // v [i] is the BFS level of node i in the graph const GrB_Matrix A, // input graph, treated as if boolean in semiring GrB_Index s // starting node of the BFS ) ; GrB_Info bfs6_check // BFS of a graph (using apply) ( GrB_Vector *v_output, // v [i] is the BFS level of node i in the graph const GrB_Matrix A, // input graph, treated as if boolean in semiring GrB_Index s // starting node of the BFS ) ; GrB_Info read_matrix // read a double-precision matrix ( GrB_Matrix *A, // handle of matrix to create FILE *f, // file to read the tuples from bool make_symmetric, // if true, return A as symmetric bool no_self_edges, // if true, then remove self edges from A bool one_based, // if true, input matrix is 1-based bool boolean, // if true, input is GrB_BOOL, otherwise GrB_FP64 bool printstuff // if true, print status to stdout ) ; GrB_Info mis // compute a maximal independent set ( GrB_Vector *iset_output, // iset(i) = true if i is in the set const GrB_Matrix A // symmetric Boolean matrix ) ; GrB_Info mis_check // compute a maximal independent set ( GrB_Vector *iset_output, // iset(i) = true if i is in the set const GrB_Matrix A // symmetric Boolean matrix ) ; void mis_score (double *result, uint32_t *degree) ; extern int32_t level ; #pragma omp threadprivate(level) void bfs_level (int32_t *result, bool *element) ; GrB_Info random_matrix // create a random double-precision matrix ( GrB_Matrix *A_output, // 
handle of matrix to create bool make_symmetric, // if true, return A as symmetric bool no_self_edges, // if true, then do not create self edges int64_t nrows, // number of rows int64_t ncols, // number of columns int64_t ntuples, // number of entries (x2 if made symmetric) int method, // method to use: 0:setElement, 1:build bool A_complex // if true, create a Complex matrix ) ; GrB_Info get_matrix // get a matrix from stdin, or create random one ( GrB_Matrix *A_output, // matrix to create int argc, // command-line arguments char **argv, bool no_self_edges, // if true, ensure the matrix has no self-edges bool boolean // if true, file is read as GrB_BOOL, else GrB_FP64 ) ; GrB_Info wathen // construct a random Wathen matrix ( GrB_Matrix *A_output, // output matrix int64_t nx, // grid dimension nx int64_t ny, // grid dimension ny bool scale, // if true, scale the rows int method, // 0 to 3 double *rho_given // nx-by-ny dense matrix, if NULL use random rho ) ; GrB_Info triu // C = triu (A,1) ( GrB_Matrix *C_output, // output matrix const GrB_Matrix A // input matrix, boolean or double ) ; GrB_Info tricount // count # of triangles ( int64_t *ntri, // # of triangles in the graph const int method, // 0 to 4, see above const GrB_Matrix A, // adjacency matrix for methods 0, 1, and 2 const GrB_Matrix E, // edge incidence matrix for method 0 const GrB_Matrix L, // L=tril(A) for methods 2, 4, and 4 const GrB_Matrix U, // U=triu(A) for methods 2, 3, and 5 double t [2] // t [0]: multiply time, t [1]: reduce time ) ; GrB_Info isequal_type // return GrB_SUCCESS if successful ( bool *result, // true if A == B, false if A != B or error GrB_Matrix A, GrB_Matrix B, GrB_BinaryOp op // should be GrB_EQ_<type>, for the type of A and B ) ; GrB_Info isequal // return GrB_SUCCESS if successful ( bool *result, // true if A == B, false if A != B or error GrB_Matrix A, GrB_Matrix B, GrB_BinaryOp userop // for A and B with user-defined types. 
ignored // if A and B are of built-in types ) ; //------------------------------------------------------------------------------ // page rank //------------------------------------------------------------------------------ // dpagerank computes an array of structs for its result typedef struct { double pagerank ; // the pagerank of a node GrB_Index page ; // the node number itself } PageRank ; // ipagerank computes an array of structs for its result typedef struct { uint64_t pagerank ; // the pagerank of a node GrB_Index page ; // the node number itself } iPageRank ; // using a standard semiring and FP64 arithmetic GrB_Info dpagerank // GrB_SUCCESS or error condition ( PageRank **Phandle, // output: pointer to array of PageRank structs GrB_Matrix A ) ; // like dpagerank but with user-defined type, operators, and semiring; // also a stopping critirion GrB_Info dpagerank2 // GrB_SUCCESS or error condition ( PageRank **Phandle, // output: pointer to array of PageRank structs GrB_Matrix A, // input graph, not modified int itermax, // max number of iterations double tol, // stop when norm (r-rnew,2) < tol int *iters, // number of iterations taken GrB_Desc_Value method // method to use for GrB_vxm (for testing only) ) ; GrB_Info drowscale // GrB_SUCCESS or error condition ( GrB_Matrix *Chandle, // output matrix C = rowscale (A) GrB_Matrix A // input matrix, not modified ) ; GrB_Info ipagerank // GrB_SUCCESS or error condition ( iPageRank **Phandle, // output: pointer to array of iPageRank structs GrB_Matrix A // input graph, not modified ) ; GrB_Info irowscale // GrB_SUCCESS or error condition ( GrB_Matrix *Chandle, // output matrix C = rowscale (A) GrB_Matrix A // input matrix, not modified ) ; // multiplicative scaling factor for ipagerank, ZSCALE = 2^30 #define ZSCALE ((uint64_t) 1073741824) //------------------------------------------------------------------------------ // import/export test 
//------------------------------------------------------------------------------ GrB_Info import_test (GrB_Matrix *C_handle, int format, bool dump) ; //------------------------------------------------------------------------------ // CHECK: expr must be true; if not, return an error condition //------------------------------------------------------------------------------ // the #include'ing file must define the FREE_ALL macro #define CHECK(expr,info) \ { \ if (! (expr)) \ { \ /* free the result and all workspace, and return NULL */ \ FREE_ALL ; \ printf ("Failure: line %d file %s\n", __LINE__, __FILE__) ; \ return (info) ; \ } \ } //------------------------------------------------------------------------------ // OK: call a GraphBLAS method and check the result //------------------------------------------------------------------------------ // OK(method) is a macro that calls a GraphBLAS method and checks the status; // if a failure occurs, it handles the error via the CHECK macro above, and // returns the error status to the caller. #define OK(method) \ { \ info = method ; \ if (info != GrB_SUCCESS) \ { \ printf ("GraphBLAS error:\n%s\n", GrB_error ( )) ; \ CHECK (false, info) ; \ } \ } #endif
e3.c
#include <omp.h> #include <stdio.h> #include <stdlib.h> #define N 50 int main () { int i, nthreads, tid, section; float a[N], b[N], c[N]; void print_results(float array[N], int tid, int section); /* Some initializations */ for (i=0; i<N; i++) a[i] = b[i] = i * 1.0; #pragma omp parallel shared(a, b) private(c, i,tid,section) { tid = omp_get_thread_num(); // return thread id if (tid == 0) { nthreads = omp_get_num_threads(); // returns number of threads printf("Number of threads = %d\n", nthreads); } /*** Use barriers for clean output ***/ #pragma omp barrier printf("Thread %d starting...\n",tid); #pragma omp barrier #pragma omp sections nowait { #pragma omp section { section = 1; for (i=0; i<N; i++) c[i] = a[i] * b[i]; print_results(c, tid, section); } #pragma omp section { section = 2; for (i=0; i<N; i++) c[i] = a[i] + b[i]; print_results(c, tid, section); } } /* end of sections */ /*** Use barrier for clean output ***/ #pragma omp barrier printf("Thread %d done and synchronized.\n", tid); #pragma omp barrier printf("Thread %d exiting...\n",tid); } /* end of parallel section */ return 0; } void print_results(float array[N], int tid, int section) { int i,j; j = 1; /*** use critical for clean output ***/ #pragma omp critical { printf("\nThread %d did section %d. The results are:\n", tid, section); for (i=0; i<N; i++) { printf("%e ",array[i]); j++; if (j == 6) { printf("\n"); j = 1; } } printf("\n"); } /*** end of critical ***/ }
GB_unop__identity_uint16_uint16.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__(none)) // op(A') function: GB (_unop_tran__identity_uint16_uint16) // C type: uint16_t // A type: uint16_t // cast: uint16_t cij = aij // unaryop: cij = aij #define GB_ATYPE \ uint16_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint16_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint16_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint16_t z = aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ #if 0 GrB_Info GB (_unop_apply__(none)) ( uint16_t *Cx, // Cx and Ax may be aliased const uint16_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 
; p < anz ; p++) { uint16_t aij = Ax [p] ; uint16_t z = aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint16_t aij = Ax [p] ; uint16_t z = aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint16_uint16) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
binary-trees.c
// The Computer Language Benchmarks Game // http://benchmarksgame.alioth.debian.org/ // // Contributed by Jeremy Zerfas // Based on the C++ program from Jon Harrop, Alex Mizrahi, and Bruno Coutinho. // *reset* // This controls the width of lines that are output by this program. #define MAXIMUM_LINE_WIDTH 60 #include <stdint.h> #include <stdlib.h> #include <stdio.h> typedef off_t off64_t; // This is needed to keep APR happy on 32 bit systems. #include <apr_pools.h> // intptr_t should be the native integer type on most sane systems. typedef intptr_t intnative_t; typedef struct tree_node{ struct tree_node * left_Node, * right_Node; } tree_node; // Create a binary tree of depth tree_Depth in memory_Pool, set the root node's // value to root_Node_Value, and finally return a pointer to the created binary // tree. static inline tree_node * create_Tree(const intnative_t tree_Depth, apr_pool_t * const memory_Pool){ tree_node * const root_Node=apr_palloc(memory_Pool, sizeof(tree_node)); // If tree_Depth is one or more then recursively call create_Tree() in order // to create the left and right subtrees using 2*root_Node_Value-1 and // 2*root_Node_Value respectively as the root values for those subtrees. if(tree_Depth>0){ root_Node->left_Node=create_Tree(tree_Depth-1, memory_Pool); root_Node->right_Node=create_Tree(tree_Depth-1, memory_Pool); }else root_Node->left_Node=root_Node->right_Node=NULL; return root_Node; } // Compute and return the checksum for the binary tree that has root_Node as the // root node. static inline intnative_t compute_Tree_Checksum( const tree_node * const root_Node){ // If there are subtrees then recursively call compute_Tree_Checksum() on // them and factor their values into the checksum, otherwise just return // the value of root_Node. 
if(root_Node->left_Node) return compute_Tree_Checksum(root_Node->left_Node)+ compute_Tree_Checksum(root_Node->right_Node)+1; else return 1; } int main(int argc, char ** argv){ // Set minimum_Tree_Depth to 4 and maximum_Tree_Depth to the maximum of what // was specified as the argument to the program and minimum_Tree_Depth+2. const intnative_t minimum_Tree_Depth=4; intnative_t maximum_Tree_Depth=atoi(argv[1]); if(maximum_Tree_Depth < minimum_Tree_Depth+2) maximum_Tree_Depth=minimum_Tree_Depth+2; apr_initialize(); apr_pool_t * memory_Pool; // Create a memory pool, create a binary tree of depth maximum_Tree_Depth+1, // compute the checksum of the binary tree, print the statistics, and then // delete the memory pool. apr_pool_create_unmanaged(&memory_Pool); tree_node * stretch_Tree=create_Tree(maximum_Tree_Depth+1, memory_Pool); printf("stretch tree of depth %jd\t check: %jd\n", (intmax_t)maximum_Tree_Depth+1, (intmax_t)compute_Tree_Checksum(stretch_Tree)); apr_pool_destroy(memory_Pool); // Create a memory pool and then create a long-lived binary tree of depth // maximum_Tree_Depth which will be left alone for a while while // more binary trees get allocated and deallocaited as required by the // rules. We'll finish working with this later. apr_pool_create_unmanaged(&memory_Pool); tree_node * long_Lived_Tree=create_Tree(maximum_Tree_Depth, memory_Pool); // Create a lot of binary trees in parallel of depths ranging from // minimum_Tree_Depth to maximum_Tree_Depth, compute and tally up all their // checksums, destroy the trees, and then record the statistics to // output_Buffer[] so they can be displayed in order later. 
char output_Buffer[maximum_Tree_Depth+1][MAXIMUM_LINE_WIDTH+1]; intnative_t current_Tree_Depth; #pragma omp parallel for for(current_Tree_Depth=minimum_Tree_Depth; current_Tree_Depth<=maximum_Tree_Depth; current_Tree_Depth+=2){ intnative_t iterations=1<<(maximum_Tree_Depth-current_Tree_Depth+ minimum_Tree_Depth); // Create a memory pool for this thread to use. apr_pool_t * thread_Memory_Pool; apr_pool_create_unmanaged(&thread_Memory_Pool); intnative_t i=1, total_Trees_Checksum=0; for(; i<=iterations; ++i){ // Create a binary tree of depth current_Tree_Depth tree_node * const tree_1=create_Tree(current_Tree_Depth, thread_Memory_Pool); total_Trees_Checksum+=compute_Tree_Checksum(tree_1); apr_pool_clear(thread_Memory_Pool); } apr_pool_destroy(thread_Memory_Pool); // Record the statistics for the trees of depth current_Tree_Depth. sprintf(output_Buffer[current_Tree_Depth], "%jd\t trees of depth %jd\t check: %jd\n", (intmax_t)iterations, (intmax_t)current_Tree_Depth, (intmax_t)total_Trees_Checksum); } // Print the statistics for all of the various tree depths. for(current_Tree_Depth=minimum_Tree_Depth; current_Tree_Depth<=maximum_Tree_Depth; current_Tree_Depth+=2) printf("%s", output_Buffer[current_Tree_Depth]); // Compute the checksum of the long-lived binary tree that we created // earlier, print the statistics, and then delete the memory pool. 
printf("long lived tree of depth %jd\t check: %jd\n", (intmax_t)maximum_Tree_Depth, (intmax_t)compute_Tree_Checksum(long_Lived_Tree)); apr_pool_destroy(memory_Pool); apr_terminate(); return 0; } /* notes, command-line, and program output NOTES: 64-bit Ubuntu quad core gcc (Ubuntu 6.3.0-12ubuntu2) 6.3.0 20170406 Fri, 14 Apr 2017 17:22:32 GMT MAKE: /usr/bin/gcc -pipe -Wall -O3 -fomit-frame-pointer -march=native -fopenmp -D_FILE_OFFSET_BITS=64 -I/usr/include/apr-1.0 binarytrees.gcc-3.c -o binarytrees.gcc-3.gcc_run -lapr-1 -lgomp -lm rm binarytrees.gcc-3.c 0.50s to complete and log all make actions COMMAND LINE: ./binarytrees.gcc-3.gcc_run 21 PROGRAM OUTPUT: stretch tree of depth 22 check: 8388607 2097152 trees of depth 4 check: 65011712 524288 trees of depth 6 check: 66584576 131072 trees of depth 8 check: 66977792 32768 trees of depth 10 check: 67076096 8192 trees of depth 12 check: 67100672 2048 trees of depth 14 check: 67106816 512 trees of depth 16 check: 67108352 128 trees of depth 18 check: 67108736 32 trees of depth 20 check: 67108832 long lived tree of depth 21 check: 4194303 */
ike_fmt_plug.c
/* PSK cracker patch for JtR. Hacked together during March of 2012 by * Dhiru Kholia <dhiru.kholia at gmail.com> . * * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com> * and it is hereby released to the general public under GPL * * The IKE Scanner (ike-scan) is Copyright (C) 2003-2007 Roy Hills, * NTA Monitor Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * In addition, as a special exception, the copyright holders give * permission to link the code of portions of this program with the * OpenSSL library, and distribute linked combinations including the two. * * You must obey the GNU General Public License in all respects * for all of the code used other than OpenSSL. If you modify * file(s) with this exception, you may extend this exception to your * version of the file(s), but you are not obligated to do so. If you * do not wish to do so, delete this exception statement from your * version. * * If this license is unacceptable to you, I may be willing to negotiate * alternative licenses (contact ike-scan@nta-monitor.com). * * You are encouraged to send comments, improvements or suggestions to * me at ike-scan@nta-monitor.com. 
* * psk-crack.c -- IKE Aggressive Mode Pre-Shared Key cracker for ike-scan * * Author: Roy Hills * Date: 8 July 2004 * * July, 2012, JimF small changes made, many more should be done. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_ike; #elif FMT_REGISTERS_H john_register_one(&fmt_ike); #else #include <string.h> #include <assert.h> #include <errno.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "ike-crack.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 16 #endif static int omp_t = 1; #endif #include "memdbg.h" #define FORMAT_LABEL "IKE" #define FORMAT_NAME "PSK" #define ALGORITHM_NAME "HMAC MD5/SHA1 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 32 #define BINARY_SIZE 20 /* SHA1 */ #define BINARY_SIZE_SMALLER 16 /* MD5 */ #define SALT_SIZE sizeof(psk_entry) #define BINARY_ALIGN sizeof(ARCH_WORD_32) #define SALT_ALIGN sizeof(size_t) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 16 static struct fmt_tests ike_tests[] = { {"$ike$*0*5c7916ddf8db4d233b3b36005bb3ccc115a73807e11a897be943fd4a2d0f942624cb00588d8b3a0a26502b73e639df217ef6c4cb90f96b0a3c3ef2f62ed025b4a705df9de65e33e380c1ba5fa23bf1f9911bbf388d0844256fa0131fc5cf8acb396936ba3295b4637b039d93f58db90a3a1cf1ef5051103bacf6e1a3334f9f89*fde8c68c5f324c7dbcbadde1d757af6962c63496c009f77cad647f2997fd4295e50821453a6dc2f6279fd7fef68768584d9cee0da6e68a534a097ce206bf77ecc798310206f3f82d92d02c885794e0a430ceb2d6b43c2aff45a6e14c6558382df0692ff65c2724eef750764ee456f31424a5ebd9e115d826bbb9722111aa4e01*b2a3c7aa4be95e85*756e3fa11c1b102c*00000001000000010000002c01010001000000240101000080010001800200018003000180040002800b0001000c000400007080*01000000ac100202*251d7ace920b17cb34f9d561bca46d037b337d19*e045819a64edbf022620bff3efdb935216584cc4*b9c594fa3fca6bb30a85c4208a8df348", "abc123"}, 
{"$ike$*0*9bdee7aa341cf1a6c19bc0191106b5056537ce6b837cd70678ea5a3ccb606b56dee4548feb67f24fd6f4d5f58967a9ff3c674d9d79e4195b7def5aac147c9fe9abdc2f8ba2eca58f4c863fedc7a8c8e1ad6e1551b1e44bf9a0e258561a5db1c2ca1e8b5dfda1b012012b6fdf24ecd07da6b10d76ab3b58d07b30b4f9da26aee4*c9b7ef0610a22b3e1c88b1a01ce4d4110edf6baa122ed1285eb2184cd75d30a11520a725c2d263de5a157f77f953880732f3b14521836d7f3585cb0ce3fcadf81c541dde2680bd81953cf88e8f8096c173470694ca7414fff9df0cdcdbb9d4f70ef1d6347293b507cfad965e2d2c1fa07326353e9a493d93284970040344fb11*3506592130312567*6c362583ce7a2a26*00000001000000010000002c01010001000000240101000080010001800200028003000180040002800b0001000c000400007080*01000000ac100202*84943233f42a0b5a9b33c327162fe0efee2545e4*76f451dce3fea6402b67f3fddae561ebdb4a6efe*f63f237b3c0f1fe57a5b852203cfd27cbf0c78d4", "abc123"}, {NULL} }; static psk_entry *cur_salt; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)]; static void init(struct fmt_main *self) { #if defined (_OPENMP) omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *ptr, *ctcopy, *keeptr; if (strncmp(ciphertext, "$ike$*", 6)) return 0; if (!(ctcopy = strdup(ciphertext))) return 0; keeptr = ctcopy; ctcopy += 6; /* skip leading '$ike$*' */ if (*ctcopy != '0' && *ctcopy != '1') goto error; /* skip '*0' */ ctcopy += 1; if (*ctcopy != '*') goto error; ctcopy += 1; if (!(ptr = strtokm(ctcopy, "*"))) goto error; if (strlen(ptr) > MAXLEN) goto error; if (!ishexlc(ptr)) goto error; if (!(ptr = strtokm(NULL, "*"))) goto error; if (strlen(ptr) > MAXLEN) goto error; if (!ishexlc(ptr)) goto error; 
if (!(ptr = strtokm(NULL, "*"))) goto error; if (strlen(ptr) > MAXLEN) goto error; if (!ishexlc(ptr)) goto error; if (!(ptr = strtokm(NULL, "*"))) goto error; if (strlen(ptr) > MAXLEN) goto error; if (!ishexlc(ptr)) goto error; if (!(ptr = strtokm(NULL, "*"))) goto error; if (strlen(ptr) > MAXLEN) goto error; if (!ishexlc(ptr)) goto error; if (!(ptr = strtokm(NULL, "*"))) goto error; if (strlen(ptr) > MAXLEN) goto error; if (!ishexlc(ptr)) goto error; if (!(ptr = strtokm(NULL, "*"))) goto error; if (strlen(ptr) > MAXLEN) goto error; if (!ishexlc(ptr)) goto error; if (!(ptr = strtokm(NULL, "*"))) goto error; if (strlen(ptr) > MAXLEN) goto error; if (!ishexlc(ptr)) goto error; if (!(ptr = strtokm(NULL, "*"))) goto error; if (strlen(ptr) != 32 && strlen(ptr) != 40) // md5 or sha1 length. goto error; if (!ishexlc(ptr)) goto error; MEM_FREE(keeptr); return 1; error: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { static psk_entry cs; cs.isnortel = atoi(&ciphertext[6]); load_psk_params(&ciphertext[8], NULL, &cs); return (void *)&cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; p = strrchr(ciphertext, '*') + 1; for (i = 0; i < BINARY_SIZE_SMALLER; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void set_salt(void *salt) { cur_salt = (psk_entry *)salt; 
} static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { compute_hash(cur_salt, saved_key[index], (unsigned char*)crypt_out[index]); } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (*((ARCH_WORD_32*)binary) == crypt_out[index][0]) return 1; return 0; } static int cmp_one(void *binary, int index) { return (*((ARCH_WORD_32*)binary) == crypt_out[index][0]); } static int cmp_exact(char *source, int index) { void *binary = get_binary(source); return !memcmp(binary, crypt_out[index], BINARY_SIZE_SMALLER); } static void ike_set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *get_key(int index) { return saved_key[index]; } /* * For ike, the hash algorithm used for hmac * is returned as the first "tunable cost": * 1: MD5 * 2: SHA1 * * However, the there is almost no difference in speed, * so if the different hash types for HMAC shouldn't be reported, * just define IKE_REPORT_TUNABLE_COSTS to be 0 instead of 1. 
*/ #define IKE_REPORT_TUNABLE_COSTS 1 #if IKE_REPORT_TUNABLE_COSTS static unsigned int tunable_cost_hmac_hash_type(void *salt) { psk_entry *my_salt; my_salt = salt; return (unsigned int) my_salt->hash_type; } #endif struct fmt_main fmt_ike = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE_SMALLER, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { #if IKE_REPORT_TUNABLE_COSTS "hash algorithm used for hmac [1:MD5 2:SHA1]", #else NULL #endif }, ike_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { #if IKE_REPORT_TUNABLE_COSTS tunable_cost_hmac_hash_type, #else NULL #endif }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, ike_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
time.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <unistd.h>
#include <omp.h>

/* Integrand: quarter circle of radius 2, so the integral over [0, 2] is pi. */
double Func(double x)
{
    /* Arguments outside the domain must not contribute to the integral. */
    if (x > 2) {
        return 0;
    }
    return sqrt(4 - x*x);
}

/* Trapezoidal (Newton-Cotes) rule for the definite integral on a uniform
 * grid: sum over nodes [left_index, right_index] with spacing h, end nodes
 * weighted by 1/2. */
double Integral(size_t left_index, size_t right_index, double h)
{
    double I = (Func(right_index * h) + Func(left_index * h)) / 2;

    for (size_t i = left_index + 1; i < right_index; i++) {
        I += Func(i * h);
    }

    return I * h;
}

int main(int argc, char **argv)
{
    size_t N = 1000000;   /* number of grid steps */
    int size = 1;         /* requested number of OpenMP threads */
    size_t numexp = 1;    /* repetitions used to average the run time */

    if (argc > 1) {
        N = atoll(argv[1]);
        if (argc > 2) {
            size = atoi(argv[2]);
            if (argc > 3) {
                numexp = atoll(argv[3]);
            }
        }
    }

    /* Integration bounds and grid spacing. */
    double a = 0, b = 2;
    double h = (b - a) / N;

    double result = 0.0;
    /* BUG FIX: this accumulator was used below (`averaged_time += ...` and in
     * the final printf) but was never declared anywhere in the program. */
    double averaged_time = 0.0;

    /* Lock protecting the shared accumulator `result`. */
    omp_lock_t lock;
    omp_init_lock(&lock);

    for (size_t i = 0; i < numexp; i++) {
        /* Start the timer for this experiment. */
        double start = omp_get_wtime();

        /* Reset so every experiment computes the same value instead of
         * accumulating across repetitions. */
        result = 0.0;

        /* Request the desired number of threads. */
        omp_set_num_threads(size);

        /* Parallel section: each thread integrates its own index range. */
#pragma omp parallel
        {
            int rank = omp_get_thread_num();

            /* Hand each thread its slice; the last thread absorbs the
             * remainder of N / size. */
            size_t left_index = rank * (N / size);
            size_t right_index = (rank != size - 1) ?
                (rank + 1) * (N / size) : N;

            double integral = Integral(left_index, right_index, h);

            /* Reduce the per-thread partial sums under the lock. */
            omp_set_lock(&lock);
            result += integral;
            omp_unset_lock(&lock);
        }

        /* Accumulate wall-clock time of this experiment. */
        averaged_time += (omp_get_wtime() - start);
    }

    omp_destroy_lock(&lock);

    /* Report thread count and the averaged run time. */
    printf(" %d %lf\n", size, averaged_time / numexp);

    return EXIT_SUCCESS;
}
/* ======================= Example_atomic.2.c ======================= */
/*
 * @@name: atomic.2c
 * @@type: C
 * @@compilable: yes
 * @@linkable: no
 * @@expect: success
 * @@version: omp_3.1
 */

/* Atomically fetch the current value of *p.  The "atomic read" construct
 * guarantees the load is one indivisible operation: no concurrent write can
 * be observed half-done (no torn read). */
int atomic_read(const int *p)
{
    int snapshot;

#pragma omp atomic read
    snapshot = *p;

    return snapshot;
}

/* Atomically store `value` into *p.  The "atomic write" construct guarantees
 * the store completes as one indivisible operation (no torn write). */
void atomic_write(int *p, int value)
{
#pragma omp atomic write
    *p = value;
}
/* ======================= yolov2_forward_network_quantized.c ======================= */
#include "additionally.h"    // some definitions from: im2col.h, blas.h, list.h, utils.h, activations.h, tree.h, layer.h, network.h
// softmax_layer.h, reorg_layer.h, route_layer.h, region_layer.h, maxpool_layer.h, convolutional_layer.h

#define GEMMCONV

//#define SSE41
//#undef AVX

#define W_MAX_VAL (256/2 - 1)      // 7-bit (1-bit sign) saturation bound for weights
#define I_MAX_VAL (256/2 - 1)      // 7-bit (1-bit sign) saturation bound for inputs
#define R_MAX_VAL (256*256/2 - 1)  // 15-bit (1-bit sign) saturation bound for results

#define R_MULT (32)                // 4 - 32

/*
// from: box.h
typedef struct {
    float x, y, w, h;
} box;
*/

// Symmetric saturation: clamp src into [-max_val, max_val].
int max_abs(int src, int max_val)
{
    if (abs(src) > abs(max_val)) src = (src > 0) ? max_val : -max_val;
    return src;
}

// Same symmetric saturation for 16-bit values.
short int max_abs_short(short int src, short int max_val)
{
    if (abs(src) > abs(max_val)) src = (src > 0) ? max_val : -max_val;
    return src;
}

// Histogram of arr_ptr over `number_of_ranges` power-of-two bins: bin j
// counts values w with start_range*2^j <= w < start_range*2^(j+1).
// Returns a calloc'ed array the CALLER must free(), or NULL on allocation
// failure.
// NOTE(review): negative values never satisfy the bin condition and are
// silently ignored — confirm whether fabs(w) was intended here.
int *get_distribution(float *arr_ptr, int arr_size, int number_of_ranges, float start_range)
{
    int *count = calloc(number_of_ranges, sizeof(int));
    if (!count) return NULL;  // FIX: allocation was previously unchecked
    int i, j;
    for (i = 0; i < arr_size; ++i) {
        float w = arr_ptr[i];
        float cur_range = start_range;
        for (j = 0; j < number_of_ranges; ++j) {
            if (fabs(cur_range) <= w && w < fabs(cur_range * 2)) count[j]++;
            cur_range *= 2;
        }
    }
    return count;
}

// Choose a quantization multiplier: find the `bits_length`-wide window of
// histogram bins holding the most values, and return the scale that maps the
// start of that window to 1.0 (a power of two).
float get_multiplier(float *arr_ptr, int arr_size, int bits_length)
{
    const int number_of_ranges = 32;
    const float start_range = 1.F / 65536;
    int i, j;
    int *count = get_distribution(arr_ptr, arr_size, number_of_ranges, start_range);
    if (!count) return 1.F;  // FIX: histogram allocation can fail; fall back to a neutral scale

    int max_count_range = 0;
    int index_max_count = 0;
    for (j = 0; j < number_of_ranges; ++j) {
        int counter = 0;
        for (i = j; i < (j + bits_length) && i < number_of_ranges; ++i) {
            counter += count[i];
        }
        if (max_count_range < counter) {
            max_count_range = counter;
            index_max_count = j;
        }
    }
    float multiplier = 1 / (start_range * powf(2., (float)index_max_count));
    free(count);
    return multiplier;
}

#ifdef OPENCV
#include <opencv2/core/fast_math.hpp>
#include "opencv2/highgui/highgui_c.h"
#include "opencv2/core/core_c.h"
#include "opencv2/core/version.hpp"

#define CV_RGB(r, g, b) cvScalar( (b), (g), (r), 0 )

// Visualize the log2 histogram of arr_ptr and mark the chosen multiplier.
// Blocks on cvWaitKey(0); debugging aid only.
void draw_distribution(float *arr_ptr, int arr_size, char *name)
{
    int img_w = 1200, img_h = 800;
    const int number_of_ranges = 32;
    const float start_range = 1.F / 65536;
    int i, j;
    int *count = get_distribution(arr_ptr, arr_size, number_of_ranges, start_range);
    if (!count) return;  // FIX: get_distribution can return NULL
    float multiplier = get_multiplier(arr_ptr, arr_size, 8);

    int max_count_range = 0;
    for (j = 0; j < number_of_ranges; ++j) {
        // FIX: log2(0) is -inf and converting it to int is undefined
        // behavior; treat empty bins as height 0 instead.
        count[j] = (count[j] > 0) ? (int)log2((double)count[j]) : 0;
        if (max_count_range < count[j]) max_count_range = count[j];
    }

    cvNamedWindow("Distribution", CV_WINDOW_NORMAL);
    cvResizeWindow("Distribution", img_w, img_h);
    IplImage *img = cvCreateImage(cvSize(img_w, img_h), IPL_DEPTH_8U, 3);

    if (max_count_range > 0) {
        for (j = 0; j < number_of_ranges; ++j) {
            CvPoint pt1, pt2;
            pt1.x = j*img_w / number_of_ranges;
            pt2.x = (j + 1)*img_w / number_of_ranges;
            pt1.y = img_h;
            pt2.y = img_h - img_h*count[j] / max_count_range;
            cvRectangle(img, pt1, pt2, CV_RGB(128, 64, 32), CV_FILLED, 8, 0);
            cvRectangle(img, pt1, pt2, CV_RGB(32, 32, 32), 1, 8, 0);
        }
    }

    // Vertical line marking the bin the multiplier maps to 1.0.
    int index_multiplier = log2(1 / (multiplier*start_range));
    int x_coord_multiplier = index_multiplier*img_w / number_of_ranges;
    cvLine(img, cvPoint(x_coord_multiplier, 0), cvPoint(x_coord_multiplier, img_h), CV_RGB(255, 32, 32), 1, 8, 0);

    char buff[256];
    sprintf(buff, "optimal multiplier = %g", multiplier);
    CvFont font;
    cvInitFont(&font, CV_FONT_HERSHEY_COMPLEX, 1, 1, 0, 2, 8);
    cvPutText(img, buff, cvPoint(100, 50), &font, CV_RGB(32, 64, 128));
    if (name) cvPutText(img, name, cvPoint(0, 20), &font, CV_RGB(32, 64, 128));

    // Bin labels: log2 of each range start.
    float cur_range = start_range;
    cvInitFont(&font, CV_FONT_HERSHEY_COMPLEX, 0.5, 0.5, 0, 1, 8);
    for (j = 0; j < number_of_ranges; ++j) {
        CvPoint pt_text = cvPoint(j*img_w / number_of_ranges, img_h - 50);
        int lg = log2(cur_range);
        sprintf(buff, "%d", lg);
        cvPutText(img, buff, pt_text, &font, CV_RGB(32, 64, 128));
        cur_range *= 2;
    }
    cvPutText(img, "X and Y are log2", cvPoint(img_w / 2 - 100, img_h - 10), &font, CV_RGB(32, 64, 128));

    cvShowImage("Distribution", img);
    cvWaitKey(0);
    free(count);
}
#endif // OPENCV

// im2col.c
// Fetch one input pixel for im2col, returning 0 for positions that fall in
// the zero padding around the image borders.
int8_t im2col_get_pixel_int8(int8_t *im, int height, int width, int channels,
    int row, int col, int channel, int pad)
{
    row -= pad;
    col -= pad;

    if (row < 0 || col < 0 ||
        row >= height || col >= width) return 0;
    return im[col + width*(row + height*channel)];
}

// im2col.c
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// Unroll the input image into a column matrix so that convolution becomes a
// single GEMM: one output column per spatial output position, one row per
// (channel, ky, kx) weight position.
void im2col_cpu_int8(int8_t* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, int8_t* data_col)
{
    int c, h, w;
    int height_col = (height + 2 * pad - ksize) / stride + 1;
    int width_col = (width + 2 * pad - ksize) / stride + 1;
    int channels_col = channels * ksize * ksize;
    for (c = 0; c < channels_col; ++c) {
        int w_offset = c % ksize;
        int h_offset = (c / ksize) % ksize;
        int c_im = c / ksize / ksize;
        for (h = 0; h < height_col; ++h) {
            for (w = 0; w < width_col; ++w) {
                int im_row = h_offset + h * stride;
                int im_col = w_offset + w * stride;
                int col_index = (c * height_col + h) * width_col + w;
                data_col[col_index] = im2col_get_pixel_int8(data_im, height, width, channels,
                    im_row, im_col, c_im, pad);
            }
        }
    }
}

// Use to enable AVX or SSE41
//#define AVX // 1.35 sec (0.8 FPS) 2.3x - GCC -mavx -mavx2 -mfma -ffp-contract=fast
//#define SSE41 // 1.55 sec (0.7 FPS) 2x
// default 3.10 sec (0.3 FPS)

#if defined(AVX) || defined(SSE41)
#ifdef _WIN64
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <ammintrin.h>
#include <immintrin.h>
#include <smmintrin.h>
#include <emmintrin.h>
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=broad&expand=561
#endif // AVX or SSE41

#if defined(AVX)

// Approximate signed 16-bit division by constant b via fixed-point multiply
// (mulhrs rounds, so this is a rounded division, not truncation).
__m256i _mm256_div_epi16(const __m256i va, const int b)
{
    __m256i vb = _mm256_set1_epi16(32768 / b);
    return _mm256_mulhrs_epi16(va, vb);
}

#define INTERMEDIATE_MULT 15    // 8 or 15
#define FINAL_MULT (R_MULT / INTERMEDIATE_MULT)

// 0.89 sec
// AVX2 int8 GEMM variant that keeps the accumulator in int16 by dividing each
// partial product by INTERMEDIATE_MULT up front (trades precision for speed).
void gemm_nn_int8_int16_conv16(int M, int N, int K, int8_t ALPHA,
    int8_t *A, int lda,
    int8_t *B, int ldb,
    int16_t *C, int ldc)
{
    __m256i res;
    __m256i a, b, d;
    __m128i tmp128;
    __m256i div256 = _mm256_set1_epi16(INTERMEDIATE_MULT);

    int16_t *c_tmp = calloc(N, sizeof(int16_t));
    int i, j, k;
    for (i = 0; i < M; ++i) {
        for (k = 0; k < K; ++k) {
            register int16_t A_PART = ALPHA*A[i*lda + k];
            a = _mm256_set1_epi16(A_PART);
            // Main vector loop: 32 int8 B values per iteration, widened to
            // int16 in two 16-lane halves.
            for (j = 0; j < N - 32; j += 32) {
                int index = k*ldb + j;
                d = _mm256_loadu_si256((__m256i*)&B[index]);

                tmp128 = _mm256_extractf128_si256(d, 0);// get low 128 bit
                b = _mm256_cvtepi8_epi16(tmp128);       // int8 -> int16
                b = _mm256_mullo_epi16(a, b);           // B = A * B
                b = _mm256_div_epi16(b, INTERMEDIATE_MULT);   // B = (A * B) / INTERMEDIATE_MULL

                res = _mm256_loadu_si256(&c_tmp[j]);    // load temp C
                res = _mm256_add_epi16(b, res);         // (A*B) + C
                _mm256_storeu_si256(&c_tmp[j], res);    // store temp C

                tmp128 = _mm256_extractf128_si256(d, 1);// get high 128 bit
                b = _mm256_cvtepi8_epi16(tmp128);       // int8 -> int16 (for low 8 bytes)
                b = _mm256_mullo_epi16(a, b);           // B = A * B
                b = _mm256_div_epi16(b, INTERMEDIATE_MULT);   // B = (A * B) / INTERMEDIATE_MULL

                res = _mm256_loadu_si256(&c_tmp[j + 16]);   // Load next temp C
                res = _mm256_add_epi16(b, res);             // (A*B) + C
                _mm256_storeu_si256(&c_tmp[j + 16], res);   // store temp C

                //c_tmp[j] += A_PART*B[k*ldb + j];
                //C[i*ldc + j] += max_abs(A_PART*B[k*ldb + j] / (INTERMEDIATE_MULL), (256 * 128 - 1));
            }
            // Scalar tail for the last (N % 32) columns.
            int prev_end = (N % 32 == 0) ? (N - 32) : (N / 32) * 32;
            for (j = prev_end; j < N; ++j) {
                c_tmp[j] += A_PART*B[k*ldb + j] / (INTERMEDIATE_MULT);
            }
        }
        // Fold the row accumulator into C with the remaining scale factor.
        for (j = 0; j < N; ++j) {
            C[i*ldc + j] += (c_tmp[j] / FINAL_MULT);
            c_tmp[j] = 0;
        }
    }
    free(c_tmp);
}

// 1.15 sec
// AVX2 int8 GEMM with full-precision int32 accumulation in c_tmp; the
// division by R_MULT is applied once per row when folding into C.
void gemm_nn_int8_int16(int M, int N, int K, int8_t ALPHA,
    int8_t *A, int lda,
    int8_t *B, int ldb,
    int16_t *C, int ldc)
{
    __m256i multyplied_i32, res;
    __m256i a, b, d;
    __m128i tmp128;

    int32_t *c_tmp = calloc(N, sizeof(int32_t));
    int i, j, k;
    for (i = 0; i < M; ++i) {
        for (k = 0; k < K; ++k) {
            register int16_t A_PART = ALPHA*A[i*lda + k];
            a = _mm256_set1_epi16(A_PART);
            // 32 int8 values per iteration: widen to int16, multiply, then
            // widen each 8-lane quarter to int32 and accumulate.
            for (j = 0; j < N - 32; j += 32) {
                int index = k*ldb + j;
                d = _mm256_loadu_si256((__m256i*)&B[index]);

                tmp128 = _mm256_extractf128_si256(d, 0);// get low 128 bit
                b = _mm256_cvtepi8_epi16(tmp128);       // int8 -> int16
                b = _mm256_mullo_epi16(a, b);           // B = A * B

                tmp128 = _mm256_extractf128_si256(b, 0);        // get low 128 bit
                multyplied_i32 = _mm256_cvtepi16_epi32(tmp128); // int16 -> int32

                res = _mm256_loadu_si256(&c_tmp[j]);            // load temp C
                res = _mm256_add_epi32(multyplied_i32, res);    // (A*B) + C
                _mm256_storeu_si256(&c_tmp[j], res);            // store temp C

                tmp128 = _mm256_extractf128_si256(b, 1);        // get high 128 bit
                multyplied_i32 = _mm256_cvtepi16_epi32(tmp128); // int16 -> int32

                res = _mm256_loadu_si256(&c_tmp[j + 8]);        // Load next temp C
                res = _mm256_add_epi32(multyplied_i32, res);    // (A*B) + C
                _mm256_storeu_si256(&c_tmp[j + 8], res);        // store temp C

                tmp128 = _mm256_extractf128_si256(d, 1);// get high 128 bit
                b = _mm256_cvtepi8_epi16(tmp128);       // int8 -> int16 (for low 8 bytes)
                b = _mm256_mullo_epi16(a, b);           // B = A * B

                tmp128 = _mm256_extractf128_si256(b, 0);        // get low 128 bit
                multyplied_i32 = _mm256_cvtepi16_epi32(tmp128); // int16 -> int32

                res = _mm256_loadu_si256(&c_tmp[j + 16]);       // Load next temp C
                res = _mm256_add_epi32(multyplied_i32, res);    // (A*B) + C
                _mm256_storeu_si256(&c_tmp[j + 16], res);       // store temp C

                tmp128 = _mm256_extractf128_si256(b, 1);        // get high 128 bit
                multyplied_i32 =
_mm256_cvtepi16_epi32(tmp128);                          // int16 -> int32

                res = _mm256_loadu_si256(&c_tmp[j + 24]);       // Load next temp C
                res = _mm256_add_epi32(multyplied_i32, res);    // (A*B) + C
                _mm256_storeu_si256(&c_tmp[j + 24], res);       // store temp C

                //c_tmp[j] += A_PART*B[k*ldb + j];
                //C[i*ldc + j] += max_abs(A_PART*B[k*ldb + j] / (32), (256 * 128 - 1));
            }
            // Scalar tail for the last (N % 32) columns.
            int prev_end = (N % 32 == 0) ? (N - 32) : (N / 32) * 32;
            for (j = prev_end; j < N; ++j) {
                c_tmp[j] += A_PART*B[k*ldb + j];
            }
        }
        // Fold the int32 row accumulator into the int16 C with rescale + saturation.
        for (j = 0; j < N; ++j) {
            C[i*ldc + j] += max_abs(c_tmp[j] / (R_MULT), (256 * 128 - 1));
            c_tmp[j] = 0;
        }
        //for (j = 0; j < N; ++j) C[i*ldc + j] += c_tmp[j] / (R_MULT);
    }
    free(c_tmp);
}

#elif defined(SSE41)

// 1.3 sec
// SSE4.1 version of the int8 GEMM: 16 int8 B values per iteration, int32
// accumulation in c_tmp, rescale + saturate once per row.
void gemm_nn_int8_int16(int M, int N, int K, int8_t ALPHA,
    int8_t *A, int lda,
    int8_t *B, int ldb,
    int16_t *C, int ldc)
{
    __m128i multyplied_i32, res;
    __m128i a, b, d;
    //c = _mm_set1_epi16(32);

    int32_t *c_tmp = calloc(N, sizeof(int32_t));
    int i, j, k;
    for (i = 0; i < M; ++i) {
        for (k = 0; k < K; ++k) {
            register int16_t A_PART = ALPHA*A[i*lda + k];
            a = _mm_set1_epi16(A_PART);
            for (j = 0; j < N - 16; j += 16) {
                int index = k*ldb + j;
                d = _mm_loadu_si128((__m128i*)&B[index]);

                b = _mm_cvtepi8_epi16(d);   // int8 -> int16
                b = _mm_mullo_epi16(a, b);  // B = A * B

                multyplied_i32 = _mm_cvtepi16_epi32(b); // int16 -> int32

                res = _mm_loadu_si128(&c_tmp[j]);           // load temp C
                res = _mm_add_epi32(multyplied_i32, res);   // (A*B) + C
                _mm_store_si128(&c_tmp[j], res);            // store temp C

                b = _mm_srli_si128(b, 8);               // Shift Right -> 8 bytes
                multyplied_i32 = _mm_cvtepi16_epi32(b); // int16 -> int32

                res = _mm_loadu_si128(&c_tmp[j + 4]);       // Load next temp C
                res = _mm_add_epi32(multyplied_i32, res);   // (A*B) + C
                _mm_store_si128(&c_tmp[j + 4], res);        // store temp C

                d = _mm_srli_si128(d, 8);   // Shift Right -> 8 bytes
                b = _mm_cvtepi8_epi16(d);   // int8 -> int16 (for low 8 bytes)
                b = _mm_mullo_epi16(a, b);  // B = A * B

                multyplied_i32 = _mm_cvtepi16_epi32(b); // int16 -> int32

                res = _mm_loadu_si128(&c_tmp[j + 8]);       // Load next temp C
                res = _mm_add_epi32(multyplied_i32, res);   // (A*B) + C
                _mm_store_si128(&c_tmp[j + 8], res);        // store temp C

                b = _mm_srli_si128(b, 8);               // Shift Right -> 8 bytes
                multyplied_i32 = _mm_cvtepi16_epi32(b); // int16 -> int32

                res = _mm_loadu_si128(&c_tmp[j + 12]);      // Load next temp C
                res = _mm_add_epi32(multyplied_i32, res);   // (A*B) + C
                _mm_store_si128(&c_tmp[j + 12], res);       // store temp C

                //c_tmp[j] += A_PART*B[k*ldb + j];
                //C[i*ldc + j] += max_abs(A_PART*B[k*ldb + j] / (32), (256 * 128 - 1));
            }
            // Scalar tail for the last (N % 16) columns.
            int prev_end = (N % 16 == 0) ? (N - 16) : (N / 16) * 16;
            for (j = prev_end; j < N; ++j) {
                c_tmp[j] += A_PART*B[k*ldb + j];
            }
        }
        for (j = 0; j < N; ++j) {
            C[i*ldc + j] += max_abs(c_tmp[j] / (R_MULT), (256 * 128 - 1));
            c_tmp[j] = 0;
        }
        //for (j = 0; j < N; ++j) C[i*ldc + j] += c_tmp[j] / (R_MULT);
    }
    free(c_tmp);
}

// Not available in the SSE4.1 build; AVX-only optimization.
void gemm_nn_int8_int16_conv16(int M, int N, int K, int8_t ALPHA,
    int8_t *A, int lda,
    int8_t *B, int ldb,
    int16_t *C, int ldc)
{
    printf(" gemm_nn_int8_int16_conv16() isn't implemented for SSE4.1 \n");
}

#else

// 2.9 sec
// Portable scalar fallback: int32 row accumulator, rescale + saturate into C.
void gemm_nn_int8_int16(int M, int N, int K, int8_t ALPHA,
    int8_t *A, int lda,
    int8_t *B, int ldb,
    int16_t *C, int ldc)
{
    int32_t *c_tmp = calloc(N, sizeof(int32_t));
    int i, j, k;
    for (i = 0; i < M; ++i) {
        for (k = 0; k < K; ++k) {
            register int16_t A_PART = ALPHA*A[i*lda + k];
            //#pragma simd parallel for
            for (j = 0; j < N; ++j) {
                c_tmp[j] += A_PART*B[k*ldb + j];
                //C[i*ldc + j] += max_abs(A_PART*B[k*ldb + j] / (R_MULT), (256 * 128 - 1));
            }
        }
        for (j = 0; j < N; ++j) {
            C[i*ldc + j] += max_abs(c_tmp[j] / (R_MULT), (256 * 128 - 1));
            c_tmp[j] = 0;
        }
    }
    free(c_tmp);
}

// Scalar fallback with an int32 result matrix (same accumulation scheme).
void gemm_nn_int8_int32(int M, int N, int K, int8_t ALPHA,
    int8_t *A, int lda,
    int8_t *B, int ldb,
    int32_t *C, int ldc)
{
    int32_t *c_tmp = calloc(N, sizeof(int32_t));
    int i, j, k;
    for (i = 0; i < M; ++i) {
        for (k = 0; k < K; ++k) {
            register int16_t A_PART = ALPHA*A[i*lda + k];
            //#pragma simd parallel for
            for (j = 0; j < N; ++j) {
                c_tmp[j] += A_PART*B[k*ldb + j];
                //C[i*ldc + j] += max_abs(A_PART*B[k*ldb + j] / (R_MULT), (256 * 128 - 1));
            }
        }
        for (j = 0; j < N; ++j) {
            C[i*ldc + j] += max_abs(c_tmp[j] / (R_MULT), (256 * 128 - 1));
            c_tmp[j] = 0;
        }
    }
    free(c_tmp);
}

// AVX-only optimization; not available in the portable build.
void gemm_nn_int8_int16_conv16(int M, int N, int K, int8_t ALPHA,
    int8_t *A, int lda,
    int8_t *B, int ldb,
    int16_t *C, int ldc)
{
    printf(" gemm_nn_int8_int16_conv16() isn't implemented \n");
}

#endif    // SSE41 or AVX

// Quantized convolutional layer forward pass: quantize the float input to
// int8, run the convolution as an int8 GEMM, then dequantize, add float
// biases and apply the activation.
void forward_convolutional_layer_q(layer l, network_state state)
{
    int out_h = (l.h + 2 * l.pad - l.size) / l.stride + 1;    // output_height=input_height for stride=1 and pad=1
    int out_w = (l.w + 2 * l.pad - l.size) / l.stride + 1;    // output_width=input_width for stride=1 and pad=1
    int i, f, j;

    int const out_size = out_h*out_w;
    size_t const weights_size = l.size*l.size*l.c*l.n;

    // fill zero (ALPHA)
    //for (i = 0; i < l.outputs; ++i) l.output[i] = 0;

    // l.n - number of filters on this layer
    // l.c - channels of input-array
    // l.h - height of input-array
    // l.w - width of input-array
    // l.size - width and height of filters (the same size for all filters)

    //draw_distribution(l.weights, weights_size, "weights");
    //draw_distribution(state.input, l.inputs, "input");

    //typedef int32_t conv_t;    // l.output
    typedef int16_t conv_t;    // l.output
    conv_t *output_q = calloc(l.outputs, sizeof(conv_t));

    // NOTE(review): allocated as (int *) with sizeof(int) although
    // state.input_int8 is used as 8-bit data elsewhere — over-allocation or
    // type mismatch, confirm against the declaration of input_int8.
    state.input_int8 = (int *)calloc(l.inputs, sizeof(int));
    int z;
    // Quantize the float input with the layer's input multiplier, saturated
    // to the 7-bit range.
    for (z = 0; z < l.inputs; ++z) {
        //int16_t src = lround(state.input[k] * net.layers[0].input_quant_multipler);
        int16_t src = state.input[z] * l.input_quant_multipler;
        state.input_int8[z] = max_abs(src, I_MAX_VAL);
    }

    ////////////////////////////////////
    // cudnnConvolutionBiasActivationForward()
    // y = act ( alpha1 * conv(x) + alpha2 * z + bias )
    // int8 = activation( float * conv(int8) + float * int8 + float )
    // int8 = activation( conv(input_int8) + bias_float )
    // X_INT8x4 or X_INT8
    // https://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnConvolutionBiasActivationForward
    ///////////////////////////////////

    // 1. Convolution !!!
    int fil;

    // cuDNN: y = conv(x)
    int m = l.n;
    int k = l.size*l.size*l.c;
    int n = out_h*out_w;
    int8_t *a = l.weights_int8;
    int8_t *b = (int8_t *)state.workspace;
    conv_t *c = output_q;    // int16_t

    // convolution as GEMM (as part of BLAS)
    //for (i = 0; i < l.batch; ++i) {
    im2col_cpu_int8(state.input_int8, l.c, l.h, l.w, l.size, l.stride, l.pad, b);    // here
    //gemm_nn_int8_int16(m, n, k, 1, a, k, b, n, c, n);    // single-thread gemm

    int t;    // multi-thread gemm: each thread handles one output filter row
#pragma omp parallel for
    for (t = 0; t < m; ++t) {
        gemm_nn_int8_int16(1, n, k, 1, a + t*k, k, b, n, c + t*n, n);
        //gemm_nn_int8_int16_conv16(1, n, k, 1, a + t*k, k, b, n, c + t*n, n);
        //gemm_nn_int8_int32(1, n, k, 1, a + t*k, k, b, n, c + t*n, n);    // conv_t should be int32_t
    }
    //}

    free(state.input_int8);

    // Dequantization scale: undoes both quant multipliers and the R_MULT
    // pre-division performed inside the gemm.
    float ALPHA1 = R_MULT / (l.input_quant_multipler * l.weights_quant_multipler);

    // cuDNN: y = alpha1 * conv(x)
    for (i = 0; i < l.outputs; ++i) {
        l.output[i] = output_q[i] * ALPHA1;    // cuDNN: alpha1
    }

    //for (fil = 0; fil < l.n; ++fil) {
    //    for (j = 0; j < out_size; ++j) {
    //        l.output[fil*out_size + j] = l.output[fil*out_size + j] * ALPHA1;
    //    }
    //}

    // cuDNN: y = alpha1 * conv(x) + bias  (one float bias per filter)
    for (fil = 0; fil < l.n; ++fil) {
        for (j = 0; j < out_size; ++j) {
            l.output[fil*out_size + j] += l.biases[fil];
        }
    }

    //draw_distribution(l.output, l.outputs, "output");

    // cuDNN: y = act ( alpha1 * conv(x) + bias )
    // bias is always FLOAT
    // Leaky ReLU with slope 0.1 expressed as integer-friendly "x / 10".
    if (l.activation == LEAKY) {
        for (i = 0; i < l.n*out_size; ++i) {
            l.output[i] = (l.output[i]>0) ? l.output[i] : l.output[i] / 10;    //leaky_activate(l.output[i]);
        }
    }

    free(output_q);
}

// 4 layers in 1: convolution, batch-normalization, BIAS and activation
// Legacy variant: stays in the integer domain end-to-end and uses
// pre-quantized biases; return_float selects float vs int8 output.
void forward_convolutional_layer_q_old(layer l, network_state state, int return_float)
{
    int out_h = (l.h + 2 * l.pad - l.size) / l.stride + 1;    // output_height=input_height for stride=1 and pad=1
    int out_w = (l.w + 2 * l.pad - l.size) / l.stride + 1;    // output_width=input_width for stride=1 and pad=1
    int i, f, j;

    int const out_size = out_h*out_w;
    size_t const weights_size = l.size*l.size*l.c*l.n;

    // fill zero (ALPHA)
    //for (i = 0; i < l.outputs; ++i) l.output[i] = 0;

    // l.n - number of filters on this layer
    // l.c - channels of input-array
    // l.h - height of input-array
    // l.w - width of input-array
    // l.size - width and height of filters (the same size for all filters)

    //draw_distribution(l.weights, weights_size, NULL);
    //draw_distribution(state.input, l.inputs, NULL);

    typedef int16_t conv_t;    // l.output
    conv_t *output_q = calloc(l.outputs, sizeof(conv_t));

    ////////////////////////////////////
    // cudnnConvolutionBiasActivationForward()
    // y = act ( alpha1 * conv(x) + alpha2 * z + bias )
    // int8 = activation( float * conv(int8) + float * int8 + float )
    // int8 = activation( conv(input_int8) + bias_float )
    // X_INT8x4 or X_INT8
    // https://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnConvolutionBiasActivationForward
    ///////////////////////////////////

    // 1. Convolution !!!
#ifndef GEMMCONV
    // Direct (non-GEMM) convolution: one thread per output filter.
    int fil;    // filter index
    // "omp parallel for" - automatic parallelization of loop by using OpenMP
#pragma omp parallel for
    for (fil = 0; fil < l.n; ++fil) {
        int chan, y, x, f_y, f_x;
        // channel index
        for (chan = 0; chan < l.c; ++chan)
            // input - y
            for (y = 0; y < l.h; ++y)
                // input - x
                for (x = 0; x < l.w; ++x)
                {
                    int const output_index = fil*l.w*l.h + y*l.w + x;
                    int const weights_pre_index = fil*l.c*l.size*l.size + chan*l.size*l.size;
                    int const input_pre_index = chan*l.w*l.h;
                    //float sum = 0;
                    //int16_t sum = 0;
                    int32_t sum = 0;
                    //conv_t sum = 0;

                    // filter - y
                    for (f_y = 0; f_y < l.size; ++f_y)
                    {
                        int input_y = y + f_y - l.pad;
                        // filter - x
                        for (f_x = 0; f_x < l.size; ++f_x)
                        {
                            int input_x = x + f_x - l.pad;
                            // zero padding: skip out-of-bounds taps
                            if (input_y < 0 || input_x < 0 || input_y >= l.h || input_x >= l.w) continue;

                            int input_index = input_pre_index + input_y*l.w + input_x;
                            int weights_index = weights_pre_index + f_y*l.size + f_x;

                            //sum += state.input[input_index] * l.weights[weights_index];
                            // int16 += int8 * int8;
                            sum += (int32_t)state.input_int8[input_index] * (int32_t)l.weights_int8[weights_index];
                        }
                    }
                    // l.output[filters][width][height] +=
                    //        state.input[channels][width][height] *
                    //        l.weights[filters][channels][filter_width][filter_height];
                    //output_q[output_index] += max_abs(sum, R_MAX_VAL);
                    output_q[output_index] += max_abs(sum / R_MULT, R_MAX_VAL);
                    //output_q[output_index] += sum / R_MULT;

                    //if (fabs(output_q[output_index]) > 65535) printf(" fabs(output_q[output_index]) > 65535 \n");
                }
    }
#else
    int fil;

    // cuDNN: y = conv(x)
    int m = l.n;
    int k = l.size*l.size*l.c;
    int n = out_h*out_w;
    int8_t *a = l.weights_int8;
    int8_t *b = (int8_t *)state.workspace;
    conv_t *c = output_q;    // int16_t

    // convolution as GEMM (as part of BLAS)
    //for (i = 0; i < l.batch; ++i) {
    im2col_cpu_int8(state.input_int8, l.c, l.h, l.w, l.size, l.stride, l.pad, b);    // here
    //gemm_nn_int8_int16(m, n, k, 1, a, k, b, n, c, n);    // single-thread gemm

    int t;    // multi-thread gemm: each thread handles one output filter row
#pragma omp parallel for
    for (t = 0; t < m; ++t) {
        gemm_nn_int8_int16(1, n, k, 1, a + t*k, k, b, n, c + t*n, n);
        //gemm_nn_int8_int16_conv16(1, n, k, 1, a + t*k, k, b, n, c + t*n, n);
        //gemm_nn_int8_int32(1, n, k, 1, a + t*k, k, b, n, c + t*n, n); conv_t should be int32_t
    }
    //}
#endif

    // cuDNN: y = alpha1 * conv(x)
    //for (i = 0; i < l.outputs; ++i) {
    //    output_q[i] = output_q[i] * l.output_multipler;    // cuDNN: alpha1
    //}
    // Rescale the raw accumulator into this layer's output quantization.
    for (fil = 0; fil < l.n; ++fil) {
        for (j = 0; j < out_size; ++j) {
            output_q[fil*out_size + j] = output_q[fil*out_size + j] * l.output_multipler;
        }
    }

    // cuDNN: y = alpha1 * conv(x) + bias  (quantized per-filter biases)
    for (fil = 0; fil < l.n; ++fil) {
        for (j = 0; j < out_size; ++j) {
            output_q[fil*out_size + j] += l.biases_quant[fil];
        }
    }

    //for (i = 0; i < l.inputs; ++i) state.input[i] = state.input_int8[i];
    //char buff[1024];
    //sprintf(buff, "inputs - filters %d", l.n);
    //draw_distribution(state.input, l.inputs, buff);

    //for (i = 0; i < l.outputs; ++i) l.output[i] = (float)output_q[i];
    //draw_distribution(l.output, l.outputs, "output");

    // cuDNN: y = act ( alpha1 * conv(x) + bias )
    // bias is always FLOAT
    // Leaky ReLU with slope 0.1 expressed as integer division by 10.
    if (l.activation == LEAKY) {
        for (i = 0; i < l.n*out_size; ++i) {
            output_q[i] = (output_q[i]>0) ? output_q[i] : output_q[i] / 10;    //leaky_activate(l.output[i]);
        }
    }

    // cuDNN: y = act ( alpha1 * conv(x) + alpha2 * z + bias ), where: alpha2=0, z=NULL
    if (return_float) {
        // y - FLOAT, x,w - X_INT8 / X_INT8x4
        for (i = 0; i < l.outputs; ++i) {
            l.output[i] = (float)output_q[i] / 16.F;    // /8    // float32    // 15.769
        }
    }
    else {
        // y - X_INT8 / X_INT8x4, x,w - X_INT8 / X_INT8x4
        for (i = 0; i < l.outputs; ++i) {
            l.output_int8[i] = max_abs(output_q[i], I_MAX_VAL);    // int8
        }
    }

    free(output_q);
}

#define MIN_INT8 -128

// MAX pooling layer
// Quantized max-pool: for every output cell take the max int8 value inside
// the pooling window (padding positions count as MIN_INT8) and remember the
// index of the winner for the backward pass.
void forward_maxpool_layer_q(const layer l, network_state state)
{
    int b, i, j, k, m, n;
    int w_offset = -l.pad;
    int h_offset = -l.pad;

    int h = l.out_h;
    int w = l.out_w;
    int c = l.c;

    // batch index
    for (b = 0; b < l.batch; ++b) {
        // channel index
        for (k = 0; k < c; ++k) {
            // y - input
            for (i = 0; i < h; ++i) {
                // x - input
                for (j = 0; j < w; ++j) {
                    int out_index = j + w*(i + h*(k + c*b));
                    int8_t max = MIN_INT8;
                    int max_i = -1;
                    // pooling x-index
                    for (n = 0; n < l.size; ++n) {
                        // pooling y-index
                        for (m = 0; m < l.size; ++m) {
                            int cur_h = h_offset + i*l.stride + n;
                            int cur_w = w_offset + j*l.stride + m;
                            int index = cur_w + l.w*(cur_h + l.h*(k + b*l.c));
                            int valid = (cur_h >= 0 && cur_h < l.h &&
                                cur_w >= 0 && cur_w < l.w);
                            int8_t val = (valid != 0) ? state.input_int8[index] : MIN_INT8;
                            max_i = (val > max) ? index : max_i;    // get max index
                            max = (val > max) ?
val : max;    // get max value
                        }
                    }
                    //l.output[out_index] = max;    // store max value
                    l.output_int8[out_index] = max;    // store max value
                    l.indexes[out_index] = max_i;    // store max index
                }
            }
        }
    }
}

// Route layer - just copy 1 or more layers into the current layer
// Concatenates the int8 outputs of the listed source layers into this
// layer's output, batch item by batch item.
void forward_route_layer_q(const layer l, network_state state)
{
    int i, j;
    int offset = 0;
    // number of merged layers
    for (i = 0; i < l.n; ++i) {
        int index = l.input_layers[i];    // source layer index
        //float *input = state.net.layers[index].output;    // source layer output ptr
        int8_t *input = state.net.layers[index].output_int8;    // source layer output ptr
        int input_size = l.input_sizes[i];    // source layer size

        // batch index
        for (j = 0; j < l.batch; ++j) {
            memcpy(l.output_int8 + offset + j*l.outputs, input + j*input_size, input_size * sizeof(int8_t));
        }
        offset += input_size;
    }
}

// Reorg layer - just change dimension sizes of the previous layer (some dimension sizes are increased by decreasing other)
// Rearranges the int8 input so that spatial resolution is traded for channel
// depth (stride x stride spatial blocks become extra channels).
void forward_reorg_layer_q(const layer l, network_state state)
{
    //float *out = l.output;
    //float *x = state.input;
    int8_t *out = l.output_int8;
    int8_t *x = state.input_int8;

    int out_w = l.out_w;
    int out_h = l.out_h;
    int out_c = l.out_c;
    int batch = l.batch;

    int stride = l.stride;
    int b, i, j, k;
    int in_c = out_c / (stride*stride);

    int out_w_X_stride = out_w*stride;
    int out_h_X_stride = out_h*stride;

    //printf("\n out_c = %d, out_w = %d, out_h = %d, stride = %d, forward = %d \n", out_c, out_w, out_h, stride, forward);
    //printf("  in_c = %d, in_w = %d, in_h = %d \n", in_c, out_w*stride, out_h*stride);

    // batch
    for (b = 0; b < batch; ++b) {
        // channel
        for (k = 0; k < out_c; ++k) {
            int c2 = k % in_c;
            int pre_out_index = out_h_X_stride*(c2 + in_c*b);
            int offset = k / in_c;
            int offset_mod_stride = offset % stride;
            int offset_div_stride = offset / stride;
            // y
            for (j = 0; j < out_h; ++j) {
                int pre_in_index = out_w*(j + out_h*(k + out_c*b));
                // x
                for (i = 0; i < out_w; ++i) {
                    int in_index = i + pre_in_index;

                    int w2 = i*stride + offset_mod_stride;
                    int h2 = j*stride + offset_div_stride;
                    int out_index = w2 + out_w_X_stride*(h2 + pre_out_index);
                    out[in_index] = x[out_index];
                }
            }
        }
    }
}

// ---- region layer ----

// Numerically stable softmax with temperature: subtracts the max before
// exponentiating, then normalizes in place into `output`.
static void softmax_q(float *input, int n, float temp, float *output)
{
    int i;
    float sum = 0;
    float largest = -FLT_MAX;
    for (i = 0; i < n; ++i) {
        if (input[i] > largest) largest = input[i];
    }
    for (i = 0; i < n; ++i) {
        float e = expf(input[i] / temp - largest / temp);
        sum += e;
        output[i] = e;
    }
    for (i = 0; i < n; ++i) {
        output[i] /= sum;
    }
}

// Hierarchical softmax over a class tree (YOLO 9000): one softmax per group.
static void softmax_tree(float *input, int batch, int inputs, float temp, tree *hierarchy, float *output)
{
    int b;
    for (b = 0; b < batch; ++b) {
        int i;
        int count = 0;
        for (i = 0; i < hierarchy->groups; ++i) {
            int group_size = hierarchy->group_size[i];
            softmax_q(input + b*inputs + count, group_size, temp, output + b*inputs + count);
            count += group_size;
        }
    }
}
// ---

// Region layer - just change places of array items, then do logistic_activate and softmax
void forward_region_layer_q(const layer l, network_state state)
{
    int i, b;
    int size = l.coords + l.classes + 1;    // 4 Coords(x,y,w,h) + Classes + 1 Probability-t0
    //printf("\n l.coords = %d \n", l.coords);
    memcpy(l.output, state.input, l.outputs*l.batch * sizeof(float));

    //flatten(l.output, l.w*l.h, size*l.n, l.batch, 1);
    // convert many channels to the one channel (depth=1)
    // (each grid cell will have a number of float-variables equal = to the initial number of channels)
    {
        float *x = l.output;
        int layer_size = l.w*l.h;    // W x H - size of layer
        int layers = size*l.n;       // number of channels (where l.n = number of anchors)
        int batch = l.batch;

        float *swap = calloc(layer_size*layers*batch, sizeof(float));
        int i, c, b;
        // batch index
        for (b = 0; b < batch; ++b) {
            // channel index
            for (c = 0; c < layers; ++c) {
                // layer grid index
                for (i = 0; i < layer_size; ++i) {
                    int i1 = b*layers*layer_size + c*layer_size + i;
                    int i2 = b*layers*layer_size + i*layers + c;
                    swap[i2] = x[i1];
                }
            }
        }
        memcpy(x, swap,
layer_size*layers*batch * sizeof(float));
        free(swap);
    }

    // logistic activation only for: t0 (where is t0 = Probability * IoU(box, object))
    for (b = 0; b < l.batch; ++b) {
        // for each item (x, y, anchor-index)
        for (i = 0; i < l.h*l.w*l.n; ++i) {
            int index = size*i + b*l.outputs;
            float x = l.output[index + 4];
            l.output[index + 4] = 1.0F / (1.0F + expf(-x));    // logistic_activate_q(l.output[index + 4]);
        }
    }

    if (l.softmax_tree) {    // Yolo 9000
        for (b = 0; b < l.batch; ++b) {
            for (i = 0; i < l.h*l.w*l.n; ++i) {
                int index = size*i + b*l.outputs;
                softmax_tree(l.output + index + 5, 1, 0, 1, l.softmax_tree, l.output + index + 5);
            }
        }
    }
    else if (l.softmax) {    // Yolo v2
        // softmax activation only for Classes probability
        for (b = 0; b < l.batch; ++b) {
            // for each item (x, y, anchor-index)
            //#pragma omp parallel for
            for (i = 0; i < l.h*l.w*l.n; ++i) {
                int index = size*i + b*l.outputs;
                softmax_q(l.output + index + 5, l.classes, 1, l.output + index + 5);
            }
        }
    }
}

// Run the network forward on CPU, using the quantized convolution for all
// conv layers except the first and any with LINEAR activation (those stay
// in float for accuracy).
void yolov2_forward_network_q(network net, network_state state)
{
    printf("im in yolov2_fowrad_network_q\n");
    state.workspace = net.workspace;
    int i, k;
    for (i = 0; i < net.n; ++i) {
        state.index = i;
        layer l = net.layers[i];

        if (l.type == CONVOLUTIONAL) {
            if (i >= 1 && l.activation != LINEAR) forward_convolutional_layer_q(l, state);
            else forward_convolutional_layer_cpu(l, state);
            printf("\n %d - CONVOLUTIONAL \t\t l.size = %d  \n", i, l.size);
        }
        else if (l.type == MAXPOOL) {
            forward_maxpool_layer_cpu(l, state);
            //printf("\n MAXPOOL \t\t l.size = %d  \n", l.size);
        }
        else if (l.type == ROUTE) {
            forward_route_layer_cpu(l, state);
            //printf("\n ROUTE \t\t\t l.n = %d  \n", l.n);
        }
        else if (l.type == REORG) {
            forward_reorg_layer_cpu(l, state);
            //printf("\n REORG \n");
        }
        else if (l.type == UPSAMPLE) {
            forward_upsample_layer_cpu(l, state);
            //printf("\n UPSAMPLE \n");
        }
        else if (l.type == SHORTCUT) {
            forward_shortcut_layer_cpu(l, state);
            //printf("\n SHORTCUT \n");
        }
        else if (l.type == YOLO) {
            forward_yolo_layer_cpu(l, state);
            //printf("\n YOLO \n");
        }
        else if (l.type == REGION) {
            forward_region_layer_cpu(l, state);
            //printf("\n REGION \n");
        }
        else {
            printf("\n layer: %d \n", l.type);
        }

        // This layer's output becomes the next layer's input.
        state.input = l.output;
        //state.input_int8 = l.output_int8;
        /*
        if (i == 0) {
            //draw_distribution(state.input, l.outputs, NULL);
            int k;
            for (k = 0; k < l.out_w*l.out_h*l.out_c; ++k) {
                int16_t src = state.input[k] * 3.88677;// *net.layers[2].input_quant_multipler;
                state.input_int8[k] = max_abs(src, I_MAX_VAL);
                //printf(" %d, ", src);
            }
        }
        */
    }
}

// Legacy forward pass: uses the fully-quantized (int8 in / int8 out) layer
// implementations and hand-quantizes the first layer's output.
void yolov2_forward_network_q_old(network net, network_state state)
{
    state.workspace = net.workspace;
    int i, k;
    for (i = 0; i < net.n; ++i) {
        state.index = i;
        layer l = net.layers[i];

        if (l.type == CONVOLUTIONAL) {
            // if next layer has LINEAR activation
            int return_float = (net.layers[i+1].activation == LINEAR);
            if (i >= 1 && l.activation != LINEAR) forward_convolutional_layer_q_old(l, state, return_float);
            else forward_convolutional_layer_cpu(l, state);
            printf("\n %d - CONVOLUTIONAL \t\t l.size = %d  \n", i, l.size);
        }
        else if (l.type == MAXPOOL) {
            forward_maxpool_layer_q(l, state);
            //printf("\n MAXPOOL \t\t l.size = %d  \n", l.size);
        }
        else if (l.type == ROUTE) {
            forward_route_layer_q(l, state);
            //printf("\n ROUTE \t\t\t l.n = %d  \n", l.n);
        }
        else if (l.type == REORG) {
            forward_reorg_layer_q(l, state);
            //printf("\n REORG \n");
        }
        /*
        else if (l.type == UPSAMPLE) {
            forward_upsample_layer_cpu(l, state);
            //printf("\n UPSAMPLE \n");
        }
        else if (l.type == SHORTCUT) {
            forward_shortcut_layer_cpu(l, state);
            //printf("\n SHORTCUT \n");
        }
        else if (l.type == YOLO) {
            forward_yolo_layer_cpu(l, state);
            //printf("\n YOLO \n");
        }
        */
        else if (l.type == REGION) {
            forward_region_layer_q(l, state);
            //printf("\n REGION \n");
        }
        else {
            printf("\n layer: %d \n", l.type);
        }

        state.input = l.output;
        state.input_int8 = l.output_int8;

        // First layer runs in float; quantize its output by hand with a
        // hard-coded multiplier before feeding the next quantized layer.
        if (i == 0) {
            //draw_distribution(state.input, l.outputs, NULL);
            int k;
            for (k = 0; k < l.out_w*l.out_h*l.out_c; ++k) {
                int16_t src = state.input[k] * 3.88677;//
*net.layers[2].input_quant_multipler; state.input_int8[k] = max_abs(src, I_MAX_VAL); //printf(" %d, ", src); } } } } // detect on CPU float *network_predict_quantized(network net, float *input) { network_state state; state.net = net; state.index = 0; state.input = input; //state.input_int8 = calloc(net.w*net.h*net.c, sizeof(int8_t)); state.truth = 0; state.train = 0; state.delta = 0; /*/ int k; for (k = 0; k < net.w*net.h*net.c; ++k) { //int16_t src = lround(state.input[k] * net.layers[0].input_quant_multipler); int16_t src = state.input[k] * net.layers[0].input_quant_multipler; state.input_int8[k] = max_abs(src, I_MAX_VAL); } */ yolov2_forward_network_q(net, state); // network on CPU //float *out = get_network_output(net); int i; for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break; //free(state.input_int8); return net.layers[i].output; } // detect on CPU float *network_predict_quantized_old(network net, float *input) { network_state state; state.net = net; state.index = 0; state.input = input; state.input_int8 = calloc(net.w*net.h*net.c, sizeof(int8_t)); state.truth = 0; state.train = 0; state.delta = 0; int k; for (k = 0; k < net.w*net.h*net.c; ++k) { //int16_t src = lround(state.input[k] * net.layers[0].input_quant_multipler); int16_t src = state.input[k] * net.layers[0].input_quant_multipler; state.input_int8[k] = max_abs(src, I_MAX_VAL); } yolov2_forward_network_q_old(net, state); // network on CPU //float *out = get_network_output(net); int i; for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break; free(state.input_int8); return net.layers[i].output; } // -------------------- // x - last conv-layer output // biases - anchors from cfg-file // n - number of anchors from cfg-file box get_region_box_q(float *x, float *biases, int n, int index, int i, int j, int w, int h) { box b; b.x = (i + logistic_activate(x[index + 0])) / w; // (col + 1./(1. 
+ exp(-x))) / width_last_layer b.y = (j + logistic_activate(x[index + 1])) / h; // (row + 1./(1. + exp(-x))) / height_last_layer b.w = expf(x[index + 2]) * biases[2 * n] / w; // exp(x) * anchor_w / width_last_layer b.h = expf(x[index + 3]) * biases[2 * n + 1] / h; // exp(x) * anchor_h / height_last_layer return b; } // get prediction boxes void get_region_boxes_q(layer l, int w, int h, float thresh, float **probs, box *boxes, int only_objectness, int *map) { int i, j, n; float *predictions = l.output; // grid index for (i = 0; i < l.w*l.h; ++i) { int row = i / l.w; int col = i % l.w; // anchor index for (n = 0; n < l.n; ++n) { int index = i*l.n + n; // index for each grid-cell & anchor int p_index = index * (l.classes + 5) + 4; float scale = predictions[p_index]; // scale = t0 = Probability * IoU(box, object) if (l.classfix == -1 && scale < .5) scale = 0; // if(t0 < 0.5) t0 = 0; int box_index = index * (l.classes + 5); boxes[index] = get_region_box_q(predictions, l.biases, n, box_index, col, row, l.w, l.h); boxes[index].x *= w; boxes[index].y *= h; boxes[index].w *= w; boxes[index].h *= h; int class_index = index * (l.classes + 5) + 5; // Yolo 9000 or Yolo v2 if (l.softmax_tree) { // Yolo 9000 hierarchy_predictions(predictions + class_index, l.classes, l.softmax_tree, 0); int found = 0; if (map) { for (j = 0; j < 200; ++j) { float prob = scale*predictions[class_index + map[j]]; probs[index][j] = (prob > thresh) ? prob : 0; } } else { for (j = l.classes - 1; j >= 0; --j) { if (!found && predictions[class_index + j] > .5) { found = 1; } else { predictions[class_index + j] = 0; } float prob = predictions[class_index + j]; probs[index][j] = (scale > thresh) ? prob : 0; } } } else { // Yolo v2 for (j = 0; j < l.classes; ++j) { float prob = scale*predictions[class_index + j]; // prob = IoU(box, object) = t0 * class-probability probs[index][j] = (prob > thresh) ? 
prob : 0; // if (IoU < threshold) IoU = 0; } } if (only_objectness) { probs[index][0] = scale; } } } } float entropy_calibration(float *src_arr, const size_t size, const float bin_width, const int max_bin) { //const float bin_width = 1.0 / 4096;// 1.0F / 64.0F; //const int max_bin = 2048*2;// 2048; const int max_global_val = max_bin * bin_width; // 1024 // 32 float *m_array = (float*)calloc(max_bin, sizeof(float)); float *H_histogram = (float*)calloc(max_bin, sizeof(float)); float *P_array = (float*)calloc(max_bin, sizeof(float)); float *Q_array = (float*)calloc(max_bin, sizeof(float)); float *quant_Q_array = (float*)calloc(128, sizeof(float)); // 128 for INT8 uint64_t *quant_Q_array_count = (uint64_t*)calloc(128, sizeof(uint64_t)); // 128 for INT8 int i, j; { //uint64_t outliers = 0; const int last_bin = max_bin - 1; for (j = 0; j <= last_bin; ++j) P_array[j] = 0; for (j = 0; j < size; ++j) { int bin_num = lround(fabs(src_arr[j]) / bin_width); int bin_num_saturated = (bin_num >= last_bin) ? last_bin : bin_num; H_histogram[bin_num_saturated]++; //if (bin_num > last_bin) outliers++; //else H_histogram[bin_num]++; } } for (i = 128; i < max_bin; ++i) { // [1/64; 1024] // [1/64; 32] //if (i > max_bin) printf(" i > max_bin = %d, ", i); //printf(" %d \r", i); // calculate bin histogram uint64_t outliers = 0; const int last_bin = i - 1; for (j = 0; j <= last_bin; ++j) P_array[j] = 0; /*for (j = 0; j < size; ++j) { int bin_num = lround(fabs(src_arr[j]) / bin_width); //int bin_num_saturated = (bin_num >= last_bin) ? 
last_bin : bin_num; if (bin_num > last_bin) outliers++; else P_array[bin_num]++; }*/ for (j = 0; j < max_bin; ++j) { if (j <= last_bin) P_array[j] = H_histogram[j]; else outliers += H_histogram[j]; } // quantinization P-i-bins to Q-128-bins const float quant_expand_width = i / 128.0F; for (j = 0; j < 128; ++j) quant_Q_array[j] = 0, quant_Q_array_count[j] = 0; for (j = 0; j < i; ++j) { int quant_bin = lround(j / quant_expand_width); if (quant_bin > 127) quant_bin = 127; // printf(" quant_bin > 127 = %d \n", quant_bin); quant_Q_array[quant_bin] += P_array[j]; if (P_array[j] != 0) quant_Q_array_count[quant_bin]++; } // expand 128-bins to i-bins for (j = 0; j < i; ++j) Q_array[j] = 0; for (j = 0; j < i; ++j) { int quant_bin = lround(j / quant_expand_width); if (quant_bin > 127) quant_bin = 127;// printf(" quant_bin > 127 = %d \n", quant_bin); //Q_array[j] = llround(quant_Q_array[quant_bin] / quant_expand_width); if (P_array[j] != 0) // preserve empty bins from original P Q_array[j] = quant_Q_array[quant_bin] / quant_Q_array_count[quant_bin]; //printf(" quant_bin = %d, Q[j] = %f = q_Q %f / q_w %f, P = %f \n", quant_bin, Q_array[j], quant_Q_array[quant_bin], quant_expand_width, P_array[j]); } P_array[last_bin] += outliers; // saturation // P /= SUM(P); Q /= SUM(Q); float sum_P = 0, sum_Q = 0, quant_sum_Q = 0; for (j = 0; j < 128; ++j) quant_sum_Q += quant_Q_array[j]; for (j = 0; j < i; ++j) { sum_P += P_array[j]; sum_Q += Q_array[j]; //printf(" P_array = %f, Q_array = %f \n", P_array[j], Q_array[j]); } for (j = 0; j < i; ++j) { P_array[j] /= sum_P; Q_array[j] /= sum_Q; } // KL_divergence(P, Q); for (j = 0; j < i; ++j) { m_array[i] += P_array[j] * (log((P_array[j] + FLT_MIN) / (Q_array[j] + FLT_MIN))); //printf(" p = %f, q = %f, p/q = %f, log(p/q) = %f, m = %f \n", P_array[j], Q_array[j], P_array[j] / Q_array[j], log((P_array[j] + FLT_MIN) / (Q_array[j] + FLT_MIN)), m_array[i]); } //printf("\n i = %d, size = %zu, sum_P = %f, sum_Q = %f, q_sum_Q = %f, q_e_width = %f, m = 
%f \n", i, size, sum_P, sum_Q, quant_sum_Q, quant_expand_width, m_array[i]); //getchar(); } float m_index = 128, min_m = FLT_MAX; for (i = 128; i < max_bin; ++i) { if (m_array[i] < min_m) { min_m = m_array[i]; m_index = i; } } float threshold = (m_index + 0.5) * bin_width; float multiplier = 127 / threshold; printf(" mult = %g, threshold = %g, min_m = %g, m_index = %g \n", multiplier, threshold, min_m, m_index); free(H_histogram); free(P_array); free(Q_array); free(quant_Q_array); free(quant_Q_array_count); free(m_array); //getchar(); return multiplier; } // Quantinization and get multiplers for convolutional weights for quantinization void quantinization_and_get_multipliers(network net) { // ----------- entropy_calibration(,, 1.0 / 16, 4096); - FULL ---------------------- //float input_mult[] = { 256, 4,32,64,32,32,32,32,32,64,64,64,64,64,128,64,128,128,64,128,64,128,128 }; // divided 4 - full works int counter = 0; //const int input_mult_size = sizeof(input_mult) / sizeof(float); int j; for (j = 0; j < net.n; ++j) { layer *l = &net.layers[j]; if (l->type == CONVOLUTIONAL) { size_t const weights_size = l->size*l->size*l->c*l->n; size_t const filter_size = l->size*l->size*l->c; int i, k, fil; // get optimal multipliers - for Weights //float *weights_multiplier = (float *)calloc(l->n, sizeof(float)); //l->output_multipler = (float *)calloc(l->n, sizeof(float)); //float weights_multiplier_single = entropy_calibration(l->weights, weights_size, 1.0 / (2048), (2048)); //float weights_multiplier_single = entropy_calibration(l->weights, weights_size, 1.0 / 4096, 4096) / 2; //if (j == 0) weights_multiplier_single = entropy_calibration(l->weights, weights_size, 1.0 / 2, 2048); float old_weight_mult = get_multiplier(l->weights, weights_size, 8) / 4; // good [2 - 8], best 4 float weights_multiplier_single = old_weight_mult; //float old_weight_mult = get_multiplier(l->weights, weights_size, 7) / 4; printf(" old_weight_mult = %f, weights_multiplier_single = %f \n\n", 
old_weight_mult, weights_multiplier_single); //weights_multiplier_single = old_weight_mult; l->weights_quant_multipler = weights_multiplier_single; for (fil = 0; fil < l->n; ++fil) { for (i = 0; i < filter_size; ++i) { float w = l->weights[fil*filter_size + i] * l->weights_quant_multipler;// [fil]; l->weights_int8[fil*filter_size + i] = max_abs(w, W_MAX_VAL); //l->weights_int8[fil*filter_size + i] = max_abs(lround(w), W_MAX_VAL); } } if (counter >= net.input_calibration_size) { printf("\n Warning: input_calibration= in the cfg-file has less values %d than convolutional layers %d \n", net.input_calibration_size, counter); } //l->input_quant_multipler = 40;//(counter < net.input_calibration_size) ? net.input_calibration[counter] : 16; // best 40 l->input_quant_multipler = (counter < net.input_calibration_size) ? net.input_calibration[counter] : 40; ++counter; //float current_input_mult = 40;//(counter < net.input_calibration_size) ? net.input_calibration[counter] : 16; float current_input_mult = (counter < net.input_calibration_size) ? 
net.input_calibration[counter] : 40; for (fil = 0; fil < l->n; ++fil) { if (counter == 1) l->output_multipler = current_input_mult / (l->weights_quant_multipler * l->input_quant_multipler / R_MULT); if (counter == 2) l->output_multipler = current_input_mult / (l->weights_quant_multipler * l->input_quant_multipler / R_MULT); else if (counter >= 2) l->output_multipler = current_input_mult / (l->weights_quant_multipler * l->input_quant_multipler / R_MULT); } // quantinization Biases for (fil = 0; fil < l->n; ++fil) { // calculate optimal multipliers - for Biases float biases_multipler = (l->output_multipler * l->weights_quant_multipler * l->input_quant_multipler / R_MULT); l->biases_quant[fil] = l->biases[fil] * biases_multipler; } printf(" Multiplers: weights %g, input %g, output %g \n", l->weights_quant_multipler, l->input_quant_multipler, l->output_multipler); } else { printf(" Skip layer: %d \n", l->type); } } #ifdef GPU // init weights and cuDNN for quantized IINT8x4 init_gpu_int8x4(net); #endif //GPU }
/* ==== csr_matop.c ==== (second source file concatenated below) */
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Matrix operation functions for hypre_CSRMatrix class. * *****************************************************************************/ #include "seq_mv.h" #include "csr_matrix.h" /*-------------------------------------------------------------------------- * hypre_CSRMatrixAddFirstPass: * * Performs the first pass needed for Matrix/Matrix addition (C = A + B). * This function: * 1) Computes the row pointer of the resulting matrix C_i * 2) Allocates memory for the matrix C and returns it to the user * * Notes: 1) It can be used safely inside OpenMP parallel regions. * 2) firstrow, lastrow and marker are private variables. * 3) The remaining arguments are shared variables. * 4) twspace (thread workspace) must be allocated outside the * parallel region. * 5) The mapping arrays map_A2C and map_B2C are used when adding * off-diagonal matrices. They can be set to NULL pointer when * adding diagonal matrices. * 6) Assumes that the elements of C_i are initialized to zero. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_CSRMatrixAddFirstPass( HYPRE_Int firstrow, HYPRE_Int lastrow, HYPRE_Int *twspace, HYPRE_Int *marker, HYPRE_Int *map_A2C, HYPRE_Int *map_B2C, hypre_CSRMatrix *A, hypre_CSRMatrix *B, HYPRE_Int nrows_C, HYPRE_Int nnzrows_C, HYPRE_Int ncols_C, HYPRE_Int *rownnz_C, HYPRE_MemoryLocation memory_location_C, HYPRE_Int *C_i, hypre_CSRMatrix **C_ptr ) { HYPRE_Int *A_i = hypre_CSRMatrixI(A); HYPRE_Int *A_j = hypre_CSRMatrixJ(A); HYPRE_Int *B_i = hypre_CSRMatrixI(B); HYPRE_Int *B_j = hypre_CSRMatrixJ(B); HYPRE_Int i, ia, ib, ic, iic, ii, i1; HYPRE_Int jcol, jj; HYPRE_Int num_threads = hypre_NumActiveThreads(); HYPRE_Int num_nonzeros; /* Initialize marker array */ for (i = 0; i < ncols_C; i++) { marker[i] = -1; } ii = hypre_GetThreadNum(); num_nonzeros = 0; for (ic = firstrow; ic < lastrow; ic++) { iic = rownnz_C ? rownnz_C[ic] : ic; if (map_A2C) { for (ia = A_i[iic]; ia < A_i[iic+1]; ia++) { jcol = map_A2C[A_j[ia]]; marker[jcol] = iic; num_nonzeros++; } } else { for (ia = A_i[iic]; ia < A_i[iic+1]; ia++) { jcol = A_j[ia]; marker[jcol] = iic; num_nonzeros++; } } if (map_B2C) { for (ib = B_i[iic]; ib < B_i[iic+1]; ib++) { jcol = map_B2C[B_j[ib]]; if (marker[jcol] != iic) { marker[jcol] = iic; num_nonzeros++; } } } else { for (ib = B_i[iic]; ib < B_i[iic+1]; ib++) { jcol = B_j[ib]; if (marker[jcol] != iic) { marker[jcol] = iic; num_nonzeros++; } } } C_i[iic+1] = num_nonzeros; } twspace[ii] = num_nonzeros; #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* Correct C_i - phase 1 */ if (ii) { jj = twspace[0]; for (i1 = 1; i1 < ii; i1++) { jj += twspace[i1]; } for (ic = firstrow; ic < lastrow; ic++) { iic = rownnz_C ? 
rownnz_C[ic] : ic; C_i[iic+1] += jj; } } else { num_nonzeros = 0; for (i1 = 0; i1 < num_threads; i1++) { num_nonzeros += twspace[i1]; } *C_ptr = hypre_CSRMatrixCreate(nrows_C, ncols_C, num_nonzeros); hypre_CSRMatrixI(*C_ptr) = C_i; hypre_CSRMatrixRownnz(*C_ptr) = rownnz_C; hypre_CSRMatrixNumRownnz(*C_ptr) = nnzrows_C; hypre_CSRMatrixInitialize_v2(*C_ptr, 0, memory_location_C); } /* Correct C_i - phase 2 */ if (rownnz_C != NULL) { #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (ic = firstrow; ic < (lastrow-1); ic++) { for (iic = rownnz_C[ic] + 1; iic < rownnz_C[ic+1]; iic++) { hypre_assert(C_i[iic+1] == 0); C_i[iic+1] = C_i[rownnz_C[ic]+1]; } } if (ii < (num_threads - 1)) { for (iic = rownnz_C[lastrow-1] + 1; iic < rownnz_C[lastrow]; iic++) { hypre_assert(C_i[iic+1] == 0); C_i[iic+1] = C_i[rownnz_C[lastrow-1]+1]; } } else { for (iic = rownnz_C[lastrow-1] + 1; iic < nrows_C; iic++) { hypre_assert(C_i[iic+1] == 0); C_i[iic+1] = C_i[rownnz_C[lastrow-1]+1]; } } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif #ifdef HYPRE_DEBUG if (!ii) { for (i = 0; i < nrows_C; i++) { hypre_assert(C_i[i] <= C_i[i+1]); hypre_assert(((A_i[i+1] - A_i[i]) + (B_i[i+1] - B_i[i])) >= (C_i[i+1] - C_i[i])); hypre_assert((C_i[i+1] - C_i[i]) >= (A_i[i+1] - A_i[i])); hypre_assert((C_i[i+1] - C_i[i]) >= (B_i[i+1] - B_i[i])); } hypre_assert((C_i[nrows_C] - C_i[0]) == num_nonzeros); } #endif return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_CSRMatrixAddSecondPass: * * Performs the second pass needed for Matrix/Matrix addition (C = A + B). * This function computes C_j and C_data. 
* * Notes: see notes for hypre_CSRMatrixAddFirstPass *--------------------------------------------------------------------------*/ HYPRE_Int hypre_CSRMatrixAddSecondPass( HYPRE_Int firstrow, HYPRE_Int lastrow, HYPRE_Int *twspace, HYPRE_Int *marker, HYPRE_Int *map_A2C, HYPRE_Int *map_B2C, HYPRE_Int *rownnz_C, HYPRE_Complex alpha, HYPRE_Complex beta, hypre_CSRMatrix *A, hypre_CSRMatrix *B, hypre_CSRMatrix *C ) { HYPRE_Int *A_i = hypre_CSRMatrixI(A); HYPRE_Int *A_j = hypre_CSRMatrixJ(A); HYPRE_Complex *A_data = hypre_CSRMatrixData(A); HYPRE_Int nnzs_A = hypre_CSRMatrixNumNonzeros(A); HYPRE_Int *B_i = hypre_CSRMatrixI(B); HYPRE_Int *B_j = hypre_CSRMatrixJ(B); HYPRE_Complex *B_data = hypre_CSRMatrixData(B); HYPRE_Int nnzs_B = hypre_CSRMatrixNumNonzeros(A); HYPRE_Int *C_i = hypre_CSRMatrixI(C); HYPRE_Int *C_j = hypre_CSRMatrixJ(C); HYPRE_Complex *C_data = hypre_CSRMatrixData(C); HYPRE_Int ncols_C = hypre_CSRMatrixNumCols(C); HYPRE_Int ia, ib, ic, iic; HYPRE_Int jcol, pos; hypre_assert(( map_A2C && map_B2C) || (!map_A2C && !map_B2C) || ( map_A2C && (nnzs_B == 0)) || ( map_B2C && (nnzs_A == 0))); /* Initialize marker vector */ for (ia = 0; ia < ncols_C; ia++) { marker[ia] = -1; } pos = C_i[rownnz_C ? rownnz_C[firstrow] : firstrow]; if ((map_A2C && map_B2C) || ( map_A2C && (nnzs_B == 0)) || ( map_B2C && (nnzs_A == 0))) { for (ic = firstrow; ic < lastrow; ic++) { iic = rownnz_C ? rownnz_C[ic] : ic; for (ia = A_i[iic]; ia < A_i[iic+1]; ia++) { jcol = map_A2C[A_j[ia]]; C_j[pos] = jcol; C_data[pos] = alpha*A_data[ia]; marker[jcol] = pos; pos++; } for (ib = B_i[iic]; ib < B_i[iic+1]; ib++) { jcol = map_B2C[B_j[ib]]; if (marker[jcol] < C_i[iic]) { C_j[pos] = jcol; C_data[pos] = beta*B_data[ib]; marker[jcol] = pos; pos++; } else { hypre_assert(C_j[marker[jcol]] == jcol); C_data[marker[jcol]] += beta*B_data[ib]; } } hypre_assert(pos == C_i[iic+1]); } /* end for loop */ } else { for (ic = firstrow; ic < lastrow; ic++) { iic = rownnz_C ? 
rownnz_C[ic] : ic; for (ia = A_i[iic]; ia < A_i[iic+1]; ia++) { jcol = A_j[ia]; C_j[pos] = jcol; C_data[pos] = alpha*A_data[ia]; marker[jcol] = pos; pos++; } for (ib = B_i[iic]; ib < B_i[iic+1]; ib++) { jcol = B_j[ib]; if (marker[jcol] < C_i[iic]) { C_j[pos] = jcol; C_data[pos] = beta*B_data[ib]; marker[jcol] = pos; pos++; } else { hypre_assert(C_j[marker[jcol]] == jcol); C_data[marker[jcol]] += beta*B_data[ib]; } } hypre_assert(pos == C_i[iic+1]); } /* end for loop */ } hypre_assert(pos == C_i[rownnz_C ? rownnz_C[lastrow-1] + 1 : lastrow]); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_CSRMatrixAdd: * * Adds two CSR Matrices A and B and returns a CSR Matrix C; * * Note: The routine does not check for 0-elements which might be generated * through cancellation of elements in A and B or already contained * in A and B. To remove those, use hypre_CSRMatrixDeleteZeros * * This function is ready to compute C = alpha*A + beta*B if needed. 
*--------------------------------------------------------------------------*/ hypre_CSRMatrix* hypre_CSRMatrixAddHost ( hypre_CSRMatrix *A, hypre_CSRMatrix *B ) { /* CSRMatrix A */ HYPRE_Int *rownnz_A = hypre_CSRMatrixRownnz(A); HYPRE_Int nrows_A = hypre_CSRMatrixNumRows(A); HYPRE_Int nnzrows_A = hypre_CSRMatrixNumRownnz(A); HYPRE_Int ncols_A = hypre_CSRMatrixNumCols(A); /* CSRMatrix B */ HYPRE_Int *rownnz_B = hypre_CSRMatrixRownnz(B); HYPRE_Int nrows_B = hypre_CSRMatrixNumRows(B); HYPRE_Int nnzrows_B = hypre_CSRMatrixNumRownnz(B); HYPRE_Int ncols_B = hypre_CSRMatrixNumCols(B); /* CSRMatrix C */ hypre_CSRMatrix *C; HYPRE_Int *C_i; HYPRE_Int *rownnz_C; HYPRE_Int nnzrows_C; HYPRE_Int *twspace; HYPRE_Complex alpha = 1.0; HYPRE_Complex beta = 1.0; HYPRE_MemoryLocation memory_location_A = hypre_CSRMatrixMemoryLocation(A); HYPRE_MemoryLocation memory_location_B = hypre_CSRMatrixMemoryLocation(B); /* RL: TODO cannot guarantee, maybe should never assert hypre_assert(memory_location_A == memory_location_B); */ /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D, * not sure if this is the right thing to do. * Also, need something like this in other places * TODO */ HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B); if (nrows_A != nrows_B || ncols_A != ncols_B) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! 
incompatible matrix dimensions!\n"); return NULL; } /* Allocate memory */ twspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads(), HYPRE_MEMORY_HOST); C_i = hypre_CTAlloc(HYPRE_Int, nrows_A + 1, memory_location_C); /* Set nonzero rows data of diag_C */ nnzrows_C = nrows_A; if ((nnzrows_A < nrows_A) && (nnzrows_B < nrows_B)) { hypre_MergeOrderedArrays(nnzrows_A, rownnz_A, nnzrows_B, rownnz_B, &nnzrows_C, &rownnz_C); } else { rownnz_C = NULL; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int ns, ne; HYPRE_Int *marker = NULL; hypre_partition1D(nnzrows_C, hypre_NumActiveThreads(), hypre_GetThreadNum(), &ns, &ne); marker = hypre_CTAlloc(HYPRE_Int, ncols_A, HYPRE_MEMORY_HOST); hypre_CSRMatrixAddFirstPass(ns, ne, twspace, marker, NULL, NULL, A, B, nrows_A, nnzrows_C, ncols_A, rownnz_C, memory_location_C, C_i, &C); hypre_CSRMatrixAddSecondPass(ns, ne, twspace, marker, NULL, NULL, rownnz_C, alpha, beta, A, B, C); hypre_TFree(marker, HYPRE_MEMORY_HOST); } /* end of parallel region */ /* Free memory */ hypre_TFree(twspace, HYPRE_MEMORY_HOST); return C; } hypre_CSRMatrix* hypre_CSRMatrixAdd( hypre_CSRMatrix *A, hypre_CSRMatrix *B) { hypre_CSRMatrix *C = NULL; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_CSRMatrixMemoryLocation(A), hypre_CSRMatrixMemoryLocation(B) ); if (exec == HYPRE_EXEC_DEVICE) { C = hypre_CSRMatrixAddDevice(A, B); } else #endif { C = hypre_CSRMatrixAddHost(A, B); } return C; } /*-------------------------------------------------------------------------- * hypre_CSRMatrixBigAdd: * * Adds two CSR Matrices A and B with column indices stored as HYPRE_BigInt * and returns a CSR Matrix C; * * Note: The routine does not check for 0-elements which might be generated * through cancellation of elements in A and B or already contained * in A and B. 
To remove those, use hypre_CSRMatrixDeleteZeros *--------------------------------------------------------------------------*/ hypre_CSRMatrix * hypre_CSRMatrixBigAdd( hypre_CSRMatrix *A, hypre_CSRMatrix *B ) { HYPRE_Complex *A_data = hypre_CSRMatrixData(A); HYPRE_Int *A_i = hypre_CSRMatrixI(A); HYPRE_BigInt *A_j = hypre_CSRMatrixBigJ(A); HYPRE_Int nrows_A = hypre_CSRMatrixNumRows(A); HYPRE_Int ncols_A = hypre_CSRMatrixNumCols(A); HYPRE_Complex *B_data = hypre_CSRMatrixData(B); HYPRE_Int *B_i = hypre_CSRMatrixI(B); HYPRE_BigInt *B_j = hypre_CSRMatrixBigJ(B); HYPRE_Int nrows_B = hypre_CSRMatrixNumRows(B); HYPRE_Int ncols_B = hypre_CSRMatrixNumCols(B); hypre_CSRMatrix *C; HYPRE_Complex *C_data; HYPRE_Int *C_i; HYPRE_BigInt *C_j; HYPRE_Int *twspace; HYPRE_MemoryLocation memory_location_A = hypre_CSRMatrixMemoryLocation(A); HYPRE_MemoryLocation memory_location_B = hypre_CSRMatrixMemoryLocation(B); /* RL: TODO cannot guarantee, maybe should never assert hypre_assert(memory_location_A == memory_location_B); */ /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D, * not sure if this is the right thing to do. * Also, need something like this in other places * TODO */ HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B); if (nrows_A != nrows_B || ncols_A != ncols_B) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! 
incompatible matrix dimensions!\n"); return NULL; } /* Allocate memory */ twspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads(), HYPRE_MEMORY_HOST); C_i = hypre_CTAlloc(HYPRE_Int, nrows_A + 1, memory_location_C); #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int ia, ib, ic, num_nonzeros; HYPRE_Int ns, ne, pos; HYPRE_BigInt jcol; HYPRE_Int ii, num_threads; HYPRE_Int jj; HYPRE_Int *marker = NULL; ii = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); hypre_partition1D(nrows_A, num_threads, ii, &ns, &ne); marker = hypre_CTAlloc(HYPRE_Int, ncols_A, HYPRE_MEMORY_HOST); for (ia = 0; ia < ncols_A; ia++) { marker[ia] = -1; } /* First pass */ num_nonzeros = 0; for (ic = ns; ic < ne; ic++) { C_i[ic] = num_nonzeros; for (ia = A_i[ic]; ia < A_i[ic+1]; ia++) { jcol = A_j[ia]; marker[jcol] = ic; num_nonzeros++; } for (ib = B_i[ic]; ib < B_i[ic+1]; ib++) { jcol = B_j[ib]; if (marker[jcol] != ic) { marker[jcol] = ic; num_nonzeros++; } } C_i[ic+1] = num_nonzeros; } twspace[ii] = num_nonzeros; #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* Correct row pointer */ if (ii) { jj = twspace[0]; for (ic = 1; ic < ii; ic++) { jj += twspace[ia]; } for (ic = ns; ic < ne; ic++) { C_i[ic] += jj; } } else { C_i[nrows_A] = 0; for (ic = 0; ic < num_threads; ic++) { C_i[nrows_A] += twspace[ic]; } C = hypre_CSRMatrixCreate(nrows_A, ncols_A, C_i[nrows_A]); hypre_CSRMatrixI(C) = C_i; hypre_CSRMatrixInitialize_v2(C, 1, memory_location_C); C_j = hypre_CSRMatrixBigJ(C); C_data = hypre_CSRMatrixData(C); } /* Second pass */ for (ia = 0; ia < ncols_A; ia++) { marker[ia] = -1; } pos = C_i[ns]; for (ic = ns; ic < ne; ic++) { for (ia = A_i[ic]; ia < A_i[ic+1]; ia++) { jcol = A_j[ia]; C_j[pos] = jcol; C_data[pos] = A_data[ia]; marker[jcol] = pos; pos++; } for (ib = B_i[ic]; ib < B_i[ic+1]; ib++) { jcol = B_j[ib]; if (marker[jcol] < C_i[ic]) { C_j[pos] = jcol; C_data[pos] = B_data[ib]; marker[jcol] = pos; pos++; } else { C_data[marker[jcol]] += B_data[ib]; } } } 
hypre_TFree(marker, HYPRE_MEMORY_HOST); } /* end of parallel region */ /* Free memory */ hypre_TFree(twspace, HYPRE_MEMORY_HOST); return C; } /*-------------------------------------------------------------------------- * hypre_CSRMatrixMultiplyHost * * Multiplies two CSR Matrices A and B and returns a CSR Matrix C; * * Note: The routine does not check for 0-elements which might be generated * through cancellation of elements in A and B or already contained * in A and B. To remove those, use hypre_CSRMatrixDeleteZeros *--------------------------------------------------------------------------*/ hypre_CSRMatrix* hypre_CSRMatrixMultiplyHost( hypre_CSRMatrix *A, hypre_CSRMatrix *B ) { HYPRE_Complex *A_data = hypre_CSRMatrixData(A); HYPRE_Int *A_i = hypre_CSRMatrixI(A); HYPRE_Int *A_j = hypre_CSRMatrixJ(A); HYPRE_Int *rownnz_A = hypre_CSRMatrixRownnz(A); HYPRE_Int nrows_A = hypre_CSRMatrixNumRows(A); HYPRE_Int ncols_A = hypre_CSRMatrixNumCols(A); HYPRE_Int nnzrows_A = hypre_CSRMatrixNumRownnz(A); HYPRE_Int num_nnz_A = hypre_CSRMatrixNumNonzeros(A); HYPRE_Complex *B_data = hypre_CSRMatrixData(B); HYPRE_Int *B_i = hypre_CSRMatrixI(B); HYPRE_Int *B_j = hypre_CSRMatrixJ(B); HYPRE_Int nrows_B = hypre_CSRMatrixNumRows(B); HYPRE_Int ncols_B = hypre_CSRMatrixNumCols(B); HYPRE_Int num_nnz_B = hypre_CSRMatrixNumNonzeros(B); HYPRE_MemoryLocation memory_location_A = hypre_CSRMatrixMemoryLocation(A); HYPRE_MemoryLocation memory_location_B = hypre_CSRMatrixMemoryLocation(B); hypre_CSRMatrix *C; HYPRE_Complex *C_data; HYPRE_Int *C_i; HYPRE_Int *C_j; HYPRE_Int ia, ib, ic, ja, jb, num_nonzeros; HYPRE_Int counter; HYPRE_Complex a_entry, b_entry; HYPRE_Int allsquare = 0; HYPRE_Int *twspace; /* RL: TODO cannot guarantee, maybe should never assert hypre_assert(memory_location_A == memory_location_B); */ /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D, * not sure if this is the right thing to do. 
* Also, need something like this in other places * TODO */ HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B); if (ncols_A != nrows_B) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! incompatible matrix dimensions!\n"); return NULL; } if (nrows_A == ncols_B) { allsquare = 1; } if ((num_nnz_A == 0) || (num_nnz_B == 0)) { C = hypre_CSRMatrixCreate(nrows_A, ncols_B, 0); hypre_CSRMatrixNumRownnz(C) = 0; hypre_CSRMatrixInitialize_v2(C, 0, memory_location_C); return C; } /* Allocate memory */ twspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads(), HYPRE_MEMORY_HOST); C_i = hypre_CTAlloc(HYPRE_Int, nrows_A+1, memory_location_C); #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(ia, ib, ic, ja, jb, num_nonzeros, counter, a_entry, b_entry) #endif { HYPRE_Int *B_marker = NULL; HYPRE_Int ns, ne, ii, jj; HYPRE_Int num_threads; HYPRE_Int i1, iic; ii = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); hypre_partition1D(nnzrows_A, num_threads, ii, &ns, &ne); B_marker = hypre_CTAlloc(HYPRE_Int, ncols_B, HYPRE_MEMORY_HOST); for (ib = 0; ib < ncols_B; ib++) { B_marker[ib] = -1; } /* First pass: compute sizes of C rows. */ num_nonzeros = 0; for (ic = ns; ic < ne; ic++) { if (rownnz_A) { iic = rownnz_A[ic]; C_i[iic] = num_nonzeros; } else { iic = ic; C_i[iic] = num_nonzeros; if (allsquare) { B_marker[iic] = iic; num_nonzeros++; } } for (ia = A_i[iic]; ia < A_i[iic+1]; ia++) { ja = A_j[ia]; for (ib = B_i[ja]; ib < B_i[ja+1]; ib++) { jb = B_j[ib]; if (B_marker[jb] != iic) { B_marker[jb] = iic; num_nonzeros++; } } } } twspace[ii] = num_nonzeros; #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* Correct C_i - phase 1 */ if (ii) { jj = twspace[0]; for (i1 = 1; i1 < ii; i1++) { jj += twspace[i1]; } for (i1 = ns; i1 < ne; i1++) { iic = rownnz_A ? 
rownnz_A[i1] : i1; C_i[iic] += jj; } } else { C_i[nrows_A] = 0; for (i1 = 0; i1 < num_threads; i1++) { C_i[nrows_A] += twspace[i1]; } C = hypre_CSRMatrixCreate(nrows_A, ncols_B, C_i[nrows_A]); hypre_CSRMatrixI(C) = C_i; hypre_CSRMatrixInitialize_v2(C, 0, memory_location_C); C_j = hypre_CSRMatrixJ(C); C_data = hypre_CSRMatrixData(C); } /* Correct C_i - phase 2 */ if (rownnz_A != NULL) { #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (ic = ns; ic < (ne-1); ic++) { for (iic = rownnz_A[ic] + 1; iic < rownnz_A[ic+1]; iic++) { C_i[iic] = C_i[rownnz_A[ic+1]]; } } if (ii < (num_threads - 1)) { for (iic = rownnz_A[ne-1] + 1; iic < rownnz_A[ne]; iic++) { C_i[iic] = C_i[rownnz_A[ne]]; } } else { for (iic = rownnz_A[ne-1] + 1; iic < nrows_A; iic++) { C_i[iic] = C_i[nrows_A]; } } } /* End of First Pass */ #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* Second pass: Fill in C_data and C_j. */ for (ib = 0; ib < ncols_B; ib++) { B_marker[ib] = -1; } counter = rownnz_A ? C_i[rownnz_A[ns]] : C_i[ns]; for (ic = ns; ic < ne; ic++) { if (rownnz_A) { iic = rownnz_A[ic]; } else { iic = ic; if (allsquare) { B_marker[ic] = counter; C_data[counter] = 0; C_j[counter] = ic; counter++; } } for (ia = A_i[iic]; ia < A_i[iic+1]; ia++) { ja = A_j[ia]; a_entry = A_data[ia]; for (ib = B_i[ja]; ib < B_i[ja+1]; ib++) { jb = B_j[ib]; b_entry = B_data[ib]; if (B_marker[jb] < C_i[iic]) { B_marker[jb] = counter; C_j[B_marker[jb]] = jb; C_data[B_marker[jb]] = a_entry*b_entry; counter++; } else { C_data[B_marker[jb]] += a_entry*b_entry; } } } } /* End of Second Pass */ hypre_TFree(B_marker, HYPRE_MEMORY_HOST); } /*end parallel region */ #ifdef HYPRE_DEBUG for (ic = 0; ic < nrows_A; ic++) { hypre_assert(C_i[ic] <= C_i[ic+1]); } #endif // Set rownnz and num_rownnz hypre_CSRMatrixSetRownnz(C); /* Free memory */ hypre_TFree(twspace, HYPRE_MEMORY_HOST); return C; } hypre_CSRMatrix* hypre_CSRMatrixMultiply( hypre_CSRMatrix *A, hypre_CSRMatrix *B) { hypre_CSRMatrix *C = NULL; #if 
defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_CSRMatrixMemoryLocation(A), hypre_CSRMatrixMemoryLocation(B) ); if (exec == HYPRE_EXEC_DEVICE) { C = hypre_CSRMatrixMultiplyDevice(A,B); } else #endif { C = hypre_CSRMatrixMultiplyHost(A,B); } return C; } /*-------------------------------------------------------------------------- * hypre_CSRMatrixDeleteZeros *--------------------------------------------------------------------------*/ hypre_CSRMatrix * hypre_CSRMatrixDeleteZeros( hypre_CSRMatrix *A, HYPRE_Real tol ) { HYPRE_Complex *A_data = hypre_CSRMatrixData(A); HYPRE_Int *A_i = hypre_CSRMatrixI(A); HYPRE_Int *A_j = hypre_CSRMatrixJ(A); HYPRE_Int nrows_A = hypre_CSRMatrixNumRows(A); HYPRE_Int ncols_A = hypre_CSRMatrixNumCols(A); HYPRE_Int num_nonzeros = hypre_CSRMatrixNumNonzeros(A); hypre_CSRMatrix *B; HYPRE_Complex *B_data; HYPRE_Int *B_i; HYPRE_Int *B_j; HYPRE_Int zeros; HYPRE_Int i, j; HYPRE_Int pos_A, pos_B; zeros = 0; for (i = 0; i < num_nonzeros; i++) { if (hypre_cabs(A_data[i]) <= tol) { zeros++; } } if (zeros) { B = hypre_CSRMatrixCreate(nrows_A,ncols_A,num_nonzeros-zeros); hypre_CSRMatrixInitialize(B); B_i = hypre_CSRMatrixI(B); B_j = hypre_CSRMatrixJ(B); B_data = hypre_CSRMatrixData(B); B_i[0] = 0; pos_A = pos_B = 0; for (i = 0; i < nrows_A; i++) { for (j = A_i[i]; j < A_i[i+1]; j++) { if (hypre_cabs(A_data[j]) <= tol) { pos_A++; } else { B_data[pos_B] = A_data[pos_A]; B_j[pos_B] = A_j[pos_A]; pos_B++; pos_A++; } } B_i[i+1] = pos_B; } return B; } else { return NULL; } } /****************************************************************************** * * Finds transpose of a hypre_CSRMatrix * *****************************************************************************/ /** * idx = idx2*dim1 + idx1 * -> ret = idx1*dim2 + idx2 * = (idx%dim1)*dim2 + idx/dim1 */ static inline HYPRE_Int transpose_idx (HYPRE_Int idx, HYPRE_Int dim1, HYPRE_Int dim2) { return idx%dim1*dim2 + idx/dim1; } 
/*--------------------------------------------------------------------------
 * hypre_CSRMatrixTransposeHost
 *
 * Host (CPU) transpose of a CSR matrix using a thread-parallel counting
 * sort: each thread counts column occurrences into its own slice of
 * `bucket`, a cross-thread prefix sum turns counts into offsets, and a
 * reverse sweep scatters entries so that columns of AT stay sorted.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixTransposeHost(hypre_CSRMatrix *A, hypre_CSRMatrix **AT, HYPRE_Int data)
{
   HYPRE_Complex        *A_data = hypre_CSRMatrixData(A);
   HYPRE_Int            *A_i = hypre_CSRMatrixI(A);
   HYPRE_Int            *A_j = hypre_CSRMatrixJ(A);
   HYPRE_Int            *rownnz_A = hypre_CSRMatrixRownnz(A);
   HYPRE_Int             nnzrows_A = hypre_CSRMatrixNumRownnz(A);
   HYPRE_Int             num_rows_A = hypre_CSRMatrixNumRows(A);
   HYPRE_Int             num_cols_A = hypre_CSRMatrixNumCols(A);
   HYPRE_Int             num_nnzs_A = hypre_CSRMatrixNumNonzeros(A);
   HYPRE_MemoryLocation  memory_location = hypre_CSRMatrixMemoryLocation(A);

   HYPRE_Complex        *AT_data;
   HYPRE_Int            *AT_j;
   HYPRE_Int             num_rows_AT;
   HYPRE_Int             num_cols_AT;
   HYPRE_Int             num_nnzs_AT;
   HYPRE_Int             max_col;
   HYPRE_Int             i, j;

   /*--------------------------------------------------------------
    * First, ascertain that num_cols and num_nonzeros have been set.
    * If not, set them.
    *--------------------------------------------------------------*/
   HYPRE_ANNOTATE_FUNC_BEGIN;

   if (!num_nnzs_A)
   {
      num_nnzs_A = A_i[num_rows_A];
   }

   if (num_rows_A && num_nnzs_A && !num_cols_A)
   {
      /* Infer the column count by scanning for the largest column index. */
      max_col = -1;
      for (i = 0; i < num_rows_A; ++i)
      {
         for (j = A_i[i]; j < A_i[i+1]; j++)
         {
            if (A_j[j] > max_col)
            {
               max_col = A_j[j];
            }
         }
      }
      num_cols_A = max_col + 1;
   }

   num_rows_AT = num_cols_A;
   num_cols_AT = num_rows_A;
   num_nnzs_AT = num_nnzs_A;

   *AT = hypre_CSRMatrixCreate(num_rows_AT, num_cols_AT, num_nnzs_AT);
   hypre_CSRMatrixMemoryLocation(*AT) = memory_location;

   if (num_cols_A == 0)
   {
      // JSP: parallel counting sorting breaks down
      // when A has no columns
      hypre_CSRMatrixInitialize(*AT);
      HYPRE_ANNOTATE_FUNC_END;
      return hypre_error_flag;
   }

   AT_j = hypre_CTAlloc(HYPRE_Int, num_nnzs_AT, memory_location);
   hypre_CSRMatrixJ(*AT) = AT_j;
   if (data)
   {
      AT_data = hypre_CTAlloc(HYPRE_Complex, num_nnzs_AT, memory_location);
      hypre_CSRMatrixData(*AT) = AT_data;
   }

   /*-----------------------------------------------------------------
    * Parallel count sort
    *-----------------------------------------------------------------*/
   HYPRE_Int *bucket = hypre_CTAlloc(HYPRE_Int, (num_cols_A + 1)*hypre_NumThreads(), HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
   {
      HYPRE_Int   ii, num_threads, ns, ne;
      HYPRE_Int   i, j, j0, j1, ir;
      HYPRE_Int   idx, offset;
      HYPRE_Int   transpose_i;
      HYPRE_Int   transpose_i_minus_1;
      HYPRE_Int   transpose_i0;
      HYPRE_Int   transpose_j0;
      HYPRE_Int   transpose_j1;

      ii = hypre_GetThreadNum();
      num_threads = hypre_NumActiveThreads();
      hypre_partition1D(nnzrows_A, num_threads, ii, &ns, &ne);

      /*-----------------------------------------------------------------
       * Count the number of entries that will go into each bucket
       * bucket is used as HYPRE_Int[num_threads][num_colsA] 2D array
       *-----------------------------------------------------------------*/
      if (rownnz_A == NULL)
      {
         for (j = A_i[ns]; j < A_i[ne]; ++j)
         {
            bucket[ii*num_cols_A + A_j[j]]++;
         }
      }
      else
      {
         /* Only rows listed in rownnz_A have entries. */
         for (i = ns; i < ne; i++)
         {
            ir = rownnz_A[i];
            for (j = A_i[ir]; j < A_i[ir+1]; ++j)
            {
               bucket[ii*num_cols_A + A_j[j]]++;
            }
         }
      }

      /*-----------------------------------------------------------------
       * Parallel prefix sum of bucket with length num_colsA * num_threads
       * accessed as if it is transposed as HYPRE_Int[num_colsA][num_threads]
       *-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      /* Per-thread partial prefix sums over this thread's slice. */
      for (i = ii*num_cols_A + 1; i < (ii + 1)*num_cols_A; ++i)
      {
         transpose_i = transpose_idx(i, num_threads, num_cols_A);
         transpose_i_minus_1 = transpose_idx(i - 1, num_threads, num_cols_A);
         bucket[transpose_i] += bucket[transpose_i_minus_1];
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#pragma omp master
#endif
      {
         /* Single thread links the slice totals together. */
         for (i = 1; i < num_threads; ++i)
         {
            j0 = num_cols_A*i - 1;
            j1 = num_cols_A*(i + 1) - 1;
            transpose_j0 = transpose_idx(j0, num_threads, num_cols_A);
            transpose_j1 = transpose_idx(j1, num_threads, num_cols_A);
            bucket[transpose_j1] += bucket[transpose_j0];
         }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      /* Each non-master thread adds the preceding slices' totals. */
      if (ii > 0)
      {
         transpose_i0 = transpose_idx(num_cols_A*ii - 1, num_threads, num_cols_A);
         offset = bucket[transpose_i0];
         for (i = ii*num_cols_A; i < (ii + 1)*num_cols_A - 1; ++i)
         {
            transpose_i = transpose_idx(i, num_threads, num_cols_A);
            bucket[transpose_i] += offset;
         }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
      /*----------------------------------------------------------------
       * Load the data and column numbers of AT
       * (reverse sweep keeps AT's columns sorted within each row).
       *----------------------------------------------------------------*/
      if (data)
      {
         for (i = ne - 1; i >= ns; --i)
         {
            ir = rownnz_A ? rownnz_A[i] : i;
            for (j = A_i[ir + 1] - 1; j >= A_i[ir]; --j)
            {
               idx = A_j[j];
               --bucket[ii*num_cols_A + idx];
               offset = bucket[ii*num_cols_A + idx];
               AT_data[offset] = A_data[j];
               AT_j[offset] = ir;
            }
         }
      }
      else
      {
         /* Pattern-only transpose: skip the numerical values. */
         for (i = ne - 1; i >= ns; --i)
         {
            ir = rownnz_A ? rownnz_A[i] : i;
            for (j = A_i[ir + 1] - 1; j >= A_i[ir]; --j)
            {
               idx = A_j[j];
               --bucket[ii*num_cols_A + idx];
               offset = bucket[ii*num_cols_A + idx];
               AT_j[offset] = ir;
            }
         }
      }
   } /* end parallel region */

   /* After the scatter, thread 0's bucket slice holds AT's row pointers. */
   hypre_CSRMatrixI(*AT) = hypre_TAlloc(HYPRE_Int, num_cols_A + 1, memory_location);
   hypre_TMemcpy(hypre_CSRMatrixI(*AT), bucket, HYPRE_Int, num_cols_A + 1, memory_location, HYPRE_MEMORY_HOST);
   hypre_CSRMatrixI(*AT)[num_cols_A] = num_nnzs_A;
   hypre_TFree(bucket, HYPRE_MEMORY_HOST);

   // Set rownnz and num_rownnz
   if (hypre_CSRMatrixNumRownnz(A) < num_rows_A)
   {
      hypre_CSRMatrixSetRownnz(*AT);
   }

   HYPRE_ANNOTATE_FUNC_END;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixTranspose
 *
 * Dispatcher: transposes on the device when A lives there (CUDA/HIP
 * builds), otherwise on the host. `data` != 0 also transposes the values.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixTranspose(hypre_CSRMatrix *A, hypre_CSRMatrix **AT, HYPRE_Int data)
{
   HYPRE_Int ierr = 0;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(A) );

   if (exec == HYPRE_EXEC_DEVICE)
   {
      ierr = hypre_CSRMatrixTransposeDevice(A, AT, data);
   }
   else
#endif
   {
      ierr = hypre_CSRMatrixTransposeHost(A, AT, data);
   }

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixSplit
 *
 * Splits Bs_ext into a "diag" part (columns in [first_col_diag_B,
 * last_col_diag_B], shifted to local indices) and an "offd" part (all
 * other columns, renumbered against the merged/sorted column map
 * col_map_offd_C, which is also returned).
 *--------------------------------------------------------------------------*/
/* RL: TODO add memory locations */
HYPRE_Int hypre_CSRMatrixSplit(hypre_CSRMatrix  *Bs_ext,
                               HYPRE_BigInt      first_col_diag_B,
                               HYPRE_BigInt      last_col_diag_B,
                               HYPRE_Int         num_cols_offd_B,
                               HYPRE_BigInt     *col_map_offd_B,
                               HYPRE_Int        *num_cols_offd_C_ptr,
                               HYPRE_BigInt    **col_map_offd_C_ptr,
                               hypre_CSRMatrix **Bext_diag_ptr,
                               hypre_CSRMatrix **Bext_offd_ptr)
{
   HYPRE_Complex   *Bs_ext_data = hypre_CSRMatrixData(Bs_ext);
   HYPRE_Int       *Bs_ext_i    = hypre_CSRMatrixI(Bs_ext);
   HYPRE_BigInt    *Bs_ext_j    = hypre_CSRMatrixBigJ(Bs_ext);
   HYPRE_Int        num_rows_Bext = hypre_CSRMatrixNumRows(Bs_ext);
   HYPRE_Int        B_ext_diag_size = 0;
   HYPRE_Int        B_ext_offd_size = 0;
   HYPRE_Int       *B_ext_diag_i = NULL;
   HYPRE_Int       *B_ext_diag_j = NULL;
   HYPRE_Complex   *B_ext_diag_data = NULL;
   HYPRE_Int       *B_ext_offd_i = NULL;
   HYPRE_Int       *B_ext_offd_j = NULL;
   HYPRE_Complex   *B_ext_offd_data = NULL;
   HYPRE_Int       *my_diag_array;
   HYPRE_Int       *my_offd_array;
   HYPRE_BigInt    *temp;
   HYPRE_Int        max_num_threads;
   HYPRE_Int        cnt = 0;
   hypre_CSRMatrix *Bext_diag = NULL;
   hypre_CSRMatrix *Bext_offd = NULL;
   HYPRE_BigInt    *col_map_offd_C = NULL;
   HYPRE_Int        num_cols_offd_C = 0;

   B_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_rows_Bext+1, HYPRE_MEMORY_HOST);
   B_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_rows_Bext+1, HYPRE_MEMORY_HOST);
   max_num_threads = hypre_NumThreads();
   my_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
   my_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
   {
      HYPRE_Int ns, ne, ii, num_threads;
      HYPRE_Int i1, i, j;
      HYPRE_Int my_offd_size, my_diag_size;
      HYPRE_Int cnt_offd, cnt_diag;

      ii = hypre_GetThreadNum();
      num_threads = hypre_NumActiveThreads();
      hypre_partition1D(num_rows_Bext, num_threads, ii, &ns, &ne);

      /* Pass 1: per-thread diag/offd counts and local row offsets. */
      my_diag_size = 0;
      my_offd_size = 0;
      for (i=ns; i < ne; i++)
      {
         B_ext_diag_i[i] = my_diag_size;
         B_ext_offd_i[i] = my_offd_size;
         for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
         {
            if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B)
            {
               my_offd_size++;
            }
            else
            {
               my_diag_size++;
            }
         }
      }
      my_diag_array[ii] = my_diag_size;
      my_offd_array[ii] = my_offd_size;

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* Turn per-thread counts into global row-pointer offsets;
       * thread 0 also allocates the output arrays. */
      if (ii)
      {
         my_diag_size = my_diag_array[0];
         my_offd_size = my_offd_array[0];
         for (i1 = 1; i1 < ii; i1++)
         {
            my_diag_size += my_diag_array[i1];
            my_offd_size += my_offd_array[i1];
         }
         for (i1 = ns; i1 < ne; i1++)
         {
            B_ext_diag_i[i1] += my_diag_size;
            B_ext_offd_i[i1] += my_offd_size;
         }
      }
      else
      {
         B_ext_diag_size = 0;
         B_ext_offd_size = 0;
         for (i1 = 0; i1 < num_threads; i1++)
         {
            B_ext_diag_size += my_diag_array[i1];
            B_ext_offd_size += my_offd_array[i1];
         }
         B_ext_diag_i[num_rows_Bext] = B_ext_diag_size;
         B_ext_offd_i[num_rows_Bext] = B_ext_offd_size;

         if (B_ext_diag_size)
         {
            B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST);
            B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST);
         }
         if (B_ext_offd_size)
         {
            B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST);
            B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size, HYPRE_MEMORY_HOST);
         }
         if (B_ext_offd_size || num_cols_offd_B)
         {
            temp = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size + num_cols_offd_B, HYPRE_MEMORY_HOST);
         }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* Pass 2: scatter entries into diag (shifted to local column
       * indices) and offd (global indices, also collected in temp). */
      cnt_offd = B_ext_offd_i[ns];
      cnt_diag = B_ext_diag_i[ns];
      for (i = ns; i < ne; i++)
      {
         for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
         {
            if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B)
            {
               temp[cnt_offd] = Bs_ext_j[j];
               B_ext_offd_j[cnt_offd] = Bs_ext_j[j];
               B_ext_offd_data[cnt_offd++] = Bs_ext_data[j];
            }
            else
            {
               B_ext_diag_j[cnt_diag] = Bs_ext_j[j] - first_col_diag_B;
               B_ext_diag_data[cnt_diag++] = Bs_ext_data[j];
            }
         }
      }

      /* This computes the mappings */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* Thread 0: merge offd columns with col_map_offd_B, sort and
       * de-duplicate to build col_map_offd_C. */
      if (ii == 0)
      {
         cnt = 0;
         if (B_ext_offd_size || num_cols_offd_B)
         {
            cnt = B_ext_offd_size;
            for (i=0; i < num_cols_offd_B; i++)
            {
               temp[cnt++] = col_map_offd_B[i];
            }
            if (cnt)
            {
               hypre_BigQsort0(temp, 0, cnt-1);
               num_cols_offd_C = 1;
               HYPRE_BigInt value = temp[0];
               for (i = 1; i < cnt; i++)
               {
                  if (temp[i] > value)
                  {
                     value = temp[i];
                     temp[num_cols_offd_C++] = value;
                  }
               }
            }

            if (num_cols_offd_C)
            {
               col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST);
            }
            for (i = 0; i < num_cols_offd_C; i++)
            {
               col_map_offd_C[i] = temp[i];
            }
            hypre_TFree(temp, HYPRE_MEMORY_HOST);
         }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* Renumber offd columns from global indices to positions in
       * col_map_offd_C. */
      for (i = ns; i < ne; i++)
      {
         for (j = B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++)
         {
            B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, B_ext_offd_j[j], num_cols_offd_C);
         }
      }
   } /* end parallel region */

   hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST);
   hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST);

   /* Wrap the raw arrays into CSR matrices owned by the caller. */
   Bext_diag = hypre_CSRMatrixCreate(num_rows_Bext, last_col_diag_B-first_col_diag_B+1, B_ext_diag_size);
   hypre_CSRMatrixMemoryLocation(Bext_diag) = HYPRE_MEMORY_HOST;
   Bext_offd = hypre_CSRMatrixCreate(num_rows_Bext, num_cols_offd_C, B_ext_offd_size);
   hypre_CSRMatrixMemoryLocation(Bext_offd) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixI(Bext_diag)    = B_ext_diag_i;
   hypre_CSRMatrixJ(Bext_diag)    = B_ext_diag_j;
   hypre_CSRMatrixData(Bext_diag) = B_ext_diag_data;
   hypre_CSRMatrixI(Bext_offd)    = B_ext_offd_i;
   hypre_CSRMatrixJ(Bext_offd)    = B_ext_offd_j;
   hypre_CSRMatrixData(Bext_offd) = B_ext_offd_data;

   *col_map_offd_C_ptr = col_map_offd_C;
   *Bext_diag_ptr = Bext_diag;
   *Bext_offd_ptr = Bext_offd;
   *num_cols_offd_C_ptr = num_cols_offd_C;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixReorder:
 * Reorders the column and data arrays of a square CSR matrix, such that the
 * first entry in each row is the diagonal one.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixReorderHost(hypre_CSRMatrix *A)
{
   HYPRE_Complex *A_data     = hypre_CSRMatrixData(A);
   HYPRE_Int     *A_i        = hypre_CSRMatrixI(A);
   HYPRE_Int     *A_j        = hypre_CSRMatrixJ(A);
   HYPRE_Int     *rownnz_A   = hypre_CSRMatrixRownnz(A);
   HYPRE_Int      nnzrows_A  = hypre_CSRMatrixNumRownnz(A);
   HYPRE_Int      num_rows_A = hypre_CSRMatrixNumRows(A);
   HYPRE_Int      num_cols_A = hypre_CSRMatrixNumCols(A);
   HYPRE_Int      i, ii, j;

   /* the matrix should be square */
   /* NOTE(review): returns -1 here rather than hypre_error_flag — confirm
    * callers treat a negative return as "not square". */
   if (num_rows_A != num_cols_A)
   {
      return -1;
   }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i, ii, j) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < nnzrows_A; i++)
   {
      ii = rownnz_A ? rownnz_A[i] : i;
      for (j = A_i[ii]; j < A_i[ii+1]; j++)
      {
         if (A_j[j] == ii)
         {
            /* Swap the diagonal entry into the leading position. */
            if (j != A_i[ii])
            {
               hypre_swap(A_j, A_i[ii], j);
               hypre_swap_c(A_data, A_i[ii], j);
            }
            break;
         }
      }
   }

   return hypre_error_flag;
}

/* Dispatcher for the reorder above: device path moves the diagonal first
 * on the GPU, host path runs hypre_CSRMatrixReorderHost. */
HYPRE_Int
hypre_CSRMatrixReorder(hypre_CSRMatrix *A)
{
   HYPRE_Int ierr = 0;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(A) );

   if (exec == HYPRE_EXEC_DEVICE)
   {
      ierr = hypre_CSRMatrixMoveDiagFirstDevice(A);
   }
   else
#endif
   {
      ierr = hypre_CSRMatrixReorderHost(A);
   }

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixAddPartial:
 * adds matrix rows in the CSR matrix B to the CSR Matrix A, where row_nums[i]
 * defines to which row of A the i-th row of B is added, and returns a CSR Matrix C;
 * Note: The routine does not check for 0-elements which might be generated
 *       through cancellation of elements in A and B or already contained
 *       in A and B. To remove those, use hypre_CSRMatrixDeleteZeros
 *--------------------------------------------------------------------------*/
hypre_CSRMatrix *
hypre_CSRMatrixAddPartial( hypre_CSRMatrix *A,
                           hypre_CSRMatrix *B,
                           HYPRE_Int *row_nums)
{
   HYPRE_Complex    *A_data   = hypre_CSRMatrixData(A);
   HYPRE_Int        *A_i      = hypre_CSRMatrixI(A);
   HYPRE_Int        *A_j      = hypre_CSRMatrixJ(A);
   HYPRE_Int         nrows_A  = hypre_CSRMatrixNumRows(A);
   HYPRE_Int         ncols_A  = hypre_CSRMatrixNumCols(A);
   HYPRE_Complex    *B_data   = hypre_CSRMatrixData(B);
   HYPRE_Int        *B_i      = hypre_CSRMatrixI(B);
   HYPRE_Int        *B_j      = hypre_CSRMatrixJ(B);
   HYPRE_Int         nrows_B  = hypre_CSRMatrixNumRows(B);
   HYPRE_Int         ncols_B  = hypre_CSRMatrixNumCols(B);
   hypre_CSRMatrix  *C;
   HYPRE_Complex    *C_data;
   HYPRE_Int        *C_i;
   HYPRE_Int        *C_j;

   HYPRE_Int         ia, ib, ic, jcol, num_nonzeros;
   HYPRE_Int         pos, i, i2, j, cnt;
   HYPRE_Int        *marker;
   HYPRE_Int        *map;
   HYPRE_Int        *temp;

   HYPRE_MemoryLocation memory_location_A = hypre_CSRMatrixMemoryLocation(A);
   HYPRE_MemoryLocation memory_location_B = hypre_CSRMatrixMemoryLocation(B);

   /* RL: TODO cannot guarantee, maybe should never assert
   hypre_assert(memory_location_A == memory_location_B);
   */

   /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D,
    * not sure if this is the right thing to do.
    * Also, need something like this in other places
    * TODO */
   HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B);

   if (ncols_A != ncols_B)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! incompatible matrix dimensions!\n");
      return NULL;
   }

   /* Sort the target-row list so B's rows can be merged in row order of A;
    * map[] remembers each sorted entry's original row of B. */
   map = hypre_CTAlloc(HYPRE_Int, nrows_B, HYPRE_MEMORY_HOST);
   temp = hypre_CTAlloc(HYPRE_Int, nrows_B, HYPRE_MEMORY_HOST);
   for (i=0; i < nrows_B; i++)
   {
      map[i] = i;
      temp[i] = row_nums[i];
   }

   hypre_qsort2i(temp,map,0,nrows_B-1);

   marker = hypre_CTAlloc(HYPRE_Int, ncols_A, HYPRE_MEMORY_HOST);
   C_i = hypre_CTAlloc(HYPRE_Int, nrows_A+1, memory_location_C);

   for (ia = 0; ia < ncols_A; ia++)
   {
      marker[ia] = -1;
   }

   /* Pass 1: count the nonzeros of each row of C. */
   num_nonzeros = 0;
   C_i[0] = 0;
   cnt = 0;
   for (ic = 0; ic < nrows_A; ic++)
   {
      for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
      {
         jcol = A_j[ia];
         marker[jcol] = ic;
         num_nonzeros++;
      }
      if (cnt < nrows_B && temp[cnt] == ic)
      {
         for (j = cnt; j < nrows_B; j++)
         {
            if (temp[j] == ic)
            {
               i2 = map[cnt++];
               for (ib = B_i[i2]; ib < B_i[i2+1]; ib++)
               {
                  jcol = B_j[ib];
                  if (marker[jcol] != ic)
                  {
                     marker[jcol] = ic;
                     num_nonzeros++;
                  }
               }
            }
            else
            {
               break;
            }
         }
      }
      C_i[ic+1] = num_nonzeros;
   }

   C = hypre_CSRMatrixCreate(nrows_A, ncols_A, num_nonzeros);
   hypre_CSRMatrixI(C) = C_i;
   hypre_CSRMatrixInitialize_v2(C, 0, memory_location_C);
   C_j = hypre_CSRMatrixJ(C);
   C_data = hypre_CSRMatrixData(C);

   for (ia = 0; ia < ncols_A; ia++)
   {
      marker[ia] = -1;
   }

   /* Pass 2: fill C; marker[] maps a column to its position in the
    * current row so duplicate columns accumulate. */
   cnt = 0;
   pos = 0;
   for (ic = 0; ic < nrows_A; ic++)
   {
      for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
      {
         jcol = A_j[ia];
         C_j[pos] = jcol;
         C_data[pos] = A_data[ia];
         marker[jcol] = pos;
         pos++;
      }
      if (cnt < nrows_B && temp[cnt] == ic)
      {
         for (j = cnt; j < nrows_B; j++)
         {
            if (temp[j] == ic)
            {
               i2 = map[cnt++];
               for (ib = B_i[i2]; ib < B_i[i2+1]; ib++)
               {
                  jcol = B_j[ib];
                  if (marker[jcol] < C_i[ic])
                  {
                     C_j[pos] = jcol;
                     C_data[pos] = B_data[ib];
                     marker[jcol] = pos;
                     pos++;
                  }
                  else
                  {
                     C_data[marker[jcol]] += B_data[ib];
                  }
               }
            }
            else
            {
               break;
            }
         }
      }
   }

   hypre_TFree(marker, HYPRE_MEMORY_HOST);
   hypre_TFree(map, HYPRE_MEMORY_HOST);
   hypre_TFree(temp, HYPRE_MEMORY_HOST);

   return C;
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixSumElts:
 * Returns the sum of all matrix elements.
 *--------------------------------------------------------------------------*/
HYPRE_Complex
hypre_CSRMatrixSumElts( hypre_CSRMatrix *A )
{
   HYPRE_Complex  sum = 0;
   HYPRE_Complex *data = hypre_CSRMatrixData(A);
   HYPRE_Int      num_nonzeros = hypre_CSRMatrixNumNonzeros(A);
   HYPRE_Int      i;

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) reduction(+:sum) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < num_nonzeros; i++)
   {
      sum += data[i];
   }

   return sum;
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixFnorm
 *
 * Frobenius norm: sqrt of the sum of squared entries.
 *--------------------------------------------------------------------------*/
HYPRE_Real
hypre_CSRMatrixFnorm( hypre_CSRMatrix *A )
{
   HYPRE_Int       nrows = hypre_CSRMatrixNumRows(A);
   HYPRE_Int       num_nonzeros = hypre_CSRMatrixNumNonzeros(A);
   HYPRE_Int      *A_i = hypre_CSRMatrixI(A);
   HYPRE_Complex  *A_data = hypre_CSRMatrixData(A);
   HYPRE_Int       i;
   HYPRE_Complex   sum = 0;

   hypre_assert(num_nonzeros == A_i[nrows]);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) reduction(+:sum) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < num_nonzeros; ++i)
   {
      HYPRE_Complex v = A_data[i];
      sum += v * v;
   }

   return sqrt(sum);
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixComputeRowSumHost
 *
 * type == 0, sum,
 *         1, abs sum
 *         2, square sum
 *
 * set_or_add starting with 's' overwrites row_sum; otherwise accumulates.
 * When CF_i/CF_j are given, only entries whose row and column carry the
 * same CF marker contribute.
 *--------------------------------------------------------------------------*/
void
hypre_CSRMatrixComputeRowSumHost( hypre_CSRMatrix *A,
                                  HYPRE_Int       *CF_i,
                                  HYPRE_Int       *CF_j,
                                  HYPRE_Complex   *row_sum,
                                  HYPRE_Int        type,
                                  HYPRE_Complex    scal,
                                  const char      *set_or_add)
{
   HYPRE_Int      nrows  = hypre_CSRMatrixNumRows(A);
   HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
   HYPRE_Int     *A_i    = hypre_CSRMatrixI(A);
   HYPRE_Int     *A_j    = hypre_CSRMatrixJ(A);
   HYPRE_Int      i, j;

   for (i = 0; i < nrows; i++)
   {
      HYPRE_Complex row_sum_i = set_or_add[0] == 's' ? 0.0 : row_sum[i];

      for (j = A_i[i]; j < A_i[i+1]; j++)
      {
         if (CF_i && CF_j && CF_i[i] != CF_j[A_j[j]])
         {
            continue;
         }

         if (type == 0)
         {
            row_sum_i += scal * A_data[j];
         }
         else if (type == 1)
         {
            row_sum_i += scal * fabs(A_data[j]);
         }
         else if (type == 2)
         {
            row_sum_i += scal * A_data[j] * A_data[j];
         }
      }

      row_sum[i] = row_sum_i;
   }
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixComputeRowSum
 *
 * Dispatcher: device or host row sums depending on where A lives.
 *--------------------------------------------------------------------------*/
void
hypre_CSRMatrixComputeRowSum( hypre_CSRMatrix *A,
                              HYPRE_Int       *CF_i,
                              HYPRE_Int       *CF_j,
                              HYPRE_Complex   *row_sum,
                              HYPRE_Int        type,
                              HYPRE_Complex    scal,
                              const char      *set_or_add)
{
   /* CF_i and CF_j must be given together or not at all. */
   hypre_assert( (CF_i && CF_j) || (!CF_i && !CF_j) );

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(A) );

   if (exec == HYPRE_EXEC_DEVICE)
   {
      hypre_CSRMatrixComputeRowSumDevice(A, CF_i, CF_j, row_sum, type, scal, set_or_add);
   }
   else
#endif
   {
      hypre_CSRMatrixComputeRowSumHost(A, CF_i, CF_j, row_sum, type, scal, set_or_add);
   }
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixExtractDiagonalHost
 *
 * d[i] receives the diagonal entry of row i (or its absolute value when
 * type == 1); rows with no stored diagonal yield 0.
 *--------------------------------------------------------------------------*/
void
hypre_CSRMatrixExtractDiagonalHost( hypre_CSRMatrix *A,
                                    HYPRE_Complex   *d,
                                    HYPRE_Int        type)
{
   HYPRE_Int      nrows  = hypre_CSRMatrixNumRows(A);
   HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
   HYPRE_Int     *A_i    = hypre_CSRMatrixI(A);
   HYPRE_Int     *A_j    = hypre_CSRMatrixJ(A);
   HYPRE_Int      i, j;
   HYPRE_Complex  d_i;

   for (i = 0; i < nrows; i++)
   {
      d_i = 0.0;
      for (j = A_i[i]; j < A_i[i+1]; j++)
      {
         if (A_j[j] == i)
         {
            if (type == 0)
            {
               d_i = A_data[j];
            }
            else if (type == 1)
            {
               d_i = fabs(A_data[j]);
            }
            break;
         }
      }
      d[i] = d_i;
   }
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixExtractDiagonal
 *
 * type 0: diag
 *      1: abs diag
 *--------------------------------------------------------------------------*/
void
hypre_CSRMatrixExtractDiagonal( hypre_CSRMatrix *A,
                                HYPRE_Complex   *d,
                                HYPRE_Int        type)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(A) );

   if (exec == HYPRE_EXEC_DEVICE)
   {
      hypre_CSRMatrixExtractDiagonalDevice(A, d, type);
   }
   else
#endif
   {
      hypre_CSRMatrixExtractDiagonalHost(A, d, type);
   }
}
model_standard_mex.c
/*==========================================================
 * modelStandard.c - STARDCE toolbox
 *
 * Implements the DCE standard model
 *
 * The calling syntax is:
 *
 *      C = model_standard_mex(time, VIF, vp, kt, kep);
 *
 * Compilation:
 *
 *      mex -R2018a model_standard_mex.c
 * or:
 *      1) uncomment the compiler directive MATLAB2015
 *      2) mex COPTIMFLAGS="\$COPTIMFLAGS -std=c99" model_standard_mex.c
 *
 * Yannick 2020
 *
 *========================================================*/
#include "mex.h"
#include <math.h>
/* Fix: the original guarded OpenMP with `__GNU__`, a macro no compiler
 * predefines (GCC defines __GNUC__), so the OpenMP path was dead code.
 * `_OPENMP` is defined by every compiler when OpenMP is enabled. */
#ifdef _OPENMP
#include <omp.h>
#endif
#ifndef MAXCORES
#define MAXCORES 1
#endif
// #define MATLAB2015

/* Evaluate the standard DCE model at one time point (single precision):
 *   C(t_k) = kt * sum_{i=1..k} Cp(t_i) * exp(-kep*(t_k - t_i)) * dt_i
 *            + vp * Cp(t_k)
 * vp          : plasma volume fraction
 * kt          : Ktrans
 * kep         : Kep
 * point_index : index k of the time point to evaluate the model at
 * T           : time point vector
 * Cp          : VIF (vascular input function)
 * NOTE(review): exp() promotes to double here; expf() would keep the
 * computation in single precision — confirm whether bit-compatibility
 * with previous results matters before changing. */
float dce_standard_value_float(
        float vp,
        float kt,
        float kep,
        int const point_index,
        float const * T,
        float const * Cp
        )
{
    /* integral/convolution via a simple rectangle (sum) rule */
    float convFunc = 0;
    for (int i = 1; i <= point_index; i++) {
        float spacing = T[i] - T[i - 1];
        float Ct = Cp[i] * exp(- kep * (T[point_index] - T[i]));
        convFunc += (Ct * spacing);
    }
    float function_value = kt * convFunc + vp * Cp[point_index];
    return function_value;
}

/* Double-precision twin of dce_standard_value_float (same model/quadrature). */
double dce_standard_value_double(
        double vp,
        double kt,
        double kep,
        int const point_index,
        double const * T,
        double const * Cp
        )
{
    /* integral/convolution via a simple rectangle (sum) rule */
    double convFunc = 0;
    for (int i = 1; i <= point_index; i++) {
        double spacing = T[i] - T[i - 1];
        double Ct = Cp[i] * exp(- kep * (T[point_index] - T[i]));
        convFunc += (Ct * spacing);
    }
    double function_value = kt * convFunc + vp * Cp[point_index];
    return function_value;
}

/* The gateway function.
 * Inputs : prhs = {time [1 x Nt], VIF [1 x Nt], vp [1 x N], kt [1 x N], kep [1 x N]}
 * Output : plhs[0] = C, an N x Nt concentration matrix, same class
 *          (single/double) as the VIF argument. */
void mexFunction( int nlhs, mxArray *plhs[],
                  int nrhs, const mxArray *prhs[])
{
    size_t N, Nt, Na;
    mxClassID precision;
    float *Cpf, *timef, *vpf, *ktf, *kepf, *Cf;
    double *Cpd, *timed, *vpd, *ktd, *kepd, *Cd;

    /* check for proper number of arguments */
    if (nrhs != 5) {
        mexErrMsgIdAndTxt("STARDCE:model_standard:nrhs","Five inputs required: time, VIF, vp, kt, kep.");
    }
    if (nlhs != 1) {
        mexErrMsgIdAndTxt("STARDCE:model_standard:nlhs","One output required.");
    }

    // read all inputs
    Nt = mxGetN(prhs[0]);
    N = mxGetN(prhs[2]);

    /* Pick the pointer-accessor API matching the data class; the
     * MATLAB2015 branch keeps compatibility with the pre-R2018a API. */
    if (mxGetClassID(prhs[1]) == mxDOUBLE_CLASS) {
        precision = mxDOUBLE_CLASS;
#ifdef MATLAB2015 // try to be backward compatible
        timed = mxGetPr(prhs[0]);
        Cpd = mxGetPr(prhs[1]);
        vpd = mxGetPr(prhs[2]);
        ktd = mxGetPr(prhs[3]);
        kepd = mxGetPr(prhs[4]);
#else
        timed = mxGetDoubles(prhs[0]);
        Cpd = mxGetDoubles(prhs[1]);
        vpd = mxGetDoubles(prhs[2]);
        ktd = mxGetDoubles(prhs[3]);
        kepd = mxGetDoubles(prhs[4]);
#endif
    }
    else {
        precision = mxSINGLE_CLASS;
#ifdef MATLAB2015 // try to be backward compatible
        timef = mxGetData(prhs[0]);
        Cpf = mxGetData(prhs[1]);
        vpf = mxGetData(prhs[2]);
        ktf = mxGetData(prhs[3]);
        kepf = mxGetData(prhs[4]);
#else
        timef = mxGetSingles(prhs[0]);
        Cpf = mxGetSingles(prhs[1]);
        vpf = mxGetSingles(prhs[2]);
        ktf = mxGetSingles(prhs[3]);
        kepf = mxGetSingles(prhs[4]);
#endif
    }

    // output concentration
    plhs[0] = mxCreateNumericMatrix(N, Nt, precision, mxREAL);

#ifdef _OPENMP
    /* Set number of threads */
    omp_set_num_threads(MAXCORES);
#endif

    if (precision == mxDOUBLE_CLASS) {
#ifdef MATLAB2015
        Cd = mxGetPr(plhs[0]);
#else
        Cd = mxGetDoubles(plhs[0]);
#endif
        /* Fix: the original `private(n,t)` clause named variables that are
         * declared inside the loop, which is a compile error under OpenMP;
         * loop-scoped variables are implicitly private, so no clause is
         * needed. */
#pragma omp parallel for
        for (int n = 0; n < N; n++) {
            for (int t = 0; t < Nt; t++) {
                Cd[t * N + n] = dce_standard_value_double(vpd[n], ktd[n], kepd[n], t, timed, Cpd);
            }
        }
    }
    else {
#ifdef MATLAB2015
        Cf = mxGetData(plhs[0]);
#else
        Cf = mxGetSingles(plhs[0]);
#endif
#pragma omp parallel for
        for (int n = 0; n < N; n++) {
            for (int t = 0; t < Nt; t++) {
                Cf[t * N + n] = dce_standard_value_float(vpf[n], ktf[n], kepf[n], t, timef, Cpf);
            }
        }
    }
}
reduction.c
#include <omp.h>
#include <stdio.h>

/*
 * OpenMP reduction example: computes the dot product of two 100-element
 * vectors in parallel. reduction(+:result) gives every thread a private
 * partial sum and combines them when the loop ends.
 *
 * Fix: the original declared `main ()` with implicit int (invalid since
 * C99) and fell off the end without returning a value.
 */
int main(void)
{
    int i, n, chunk;
    float a[100], b[100], result;

    /* Some initializations */
    n = 100;
    chunk = 10;
    result = 0.0;
    for (i = 0; i < n; i++)
    {
        a[i] = i * 1.0;
        b[i] = i * 2.0;
    }

#pragma omp parallel for      \
    default(shared) private(i) \
    schedule(static, chunk)    \
    reduction(+:result)
    for (i = 0; i < n; i++)
    {
        result = result + (a[i] * b[i]);
    }

    printf("Final result= %f\n", result);
    return 0;
}
main.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <time.h> #include "omp.h" #include "functions.h" int main (int argc, char **argv) { int Nthreads = atoi(argv[1]); omp_set_num_threads(Nthreads); //seed value for the randomizer //double seed = clock(); //this will make your program run differently everytime double seed = 0; //uncomment this and your program will behave the same everytime it's run srand(seed); //declare storage for an ElGamal cryptosytem unsigned int p, g, h, x; //begin with rank 0 getting user's input unsigned int n = 25; //printf("Enter a number of bits: "); fflush(stdout); //char status = scanf("%u",&n); //make sure the input makes sense if ((n<8)||(n>31)) {//Updated bounds. 8 is no good (need to encode chars) printf("Unsupported bit size.\n"); return 0; } printf("\n"); //setup an ElGamal cryptosystem setupElGamal(n,&p,&g,&h,&x); int bufferSize = 1024; unsigned char *message = (unsigned char *) malloc(bufferSize*sizeof(unsigned char)); //populate the string with a message strcpy(message, "Hello, this is the message as a string."); printf("Message = \"%s\"\n", message); /* Q1.1 Finish this line */ unsigned int charsPerInt = (n-1)/8; padString(message, charsPerInt); printf("Padded Message = \"%s\"\n", message); unsigned int Nchars = strlen(message); unsigned int Nints = strlen(message)/charsPerInt; //storage for message as elements of Z_p unsigned int *Zmessage = (unsigned int *) malloc(Nints*sizeof(unsigned int)); //storage for extra encryption coefficient unsigned int *a = (unsigned int *) malloc(Nints*sizeof(unsigned int)); // cast the string into an unsigned int array convertStringToZ(message, Nchars, Zmessage, Nints); //Encrypt the Zmessage with the ElGamal cyrptographic system ElGamalEncrypt(Zmessage,a,Nints,p,g,h); printf("The encrypted text is: "); for (unsigned int i=0;i<Nints;i++) { printf("(%u,%u) ", Zmessage[i], a[i]); } printf("]\n"); //Decrypt the Zmessage with the ElGamal cyrptographic system 
ElGamalDecrypt(Zmessage,a,Nints,p,x); convertZToString(Zmessage, Nints, message, Nchars); printf("Decrypted Message = \"%s\"\n", message); printf("\n"); //Suppose we don't know the secret key. Use OpenMP threads to try and find it in parallel printf("Using %d OpenMP threads to find the secret key...\n", Nthreads); /* Q2.3 Parallelize this loop with OpenMP */ double startTime = omp_get_wtime(); unsigned int stop = 0; #pragma omp parallel for shared(stop) for (unsigned int i=0;i<p-1;i++) { if (stop==0 && modExp(g,i+1,p)==h) { printf("Secret key found! x = %u \n", i+1); stop = 1; } } double endTime = omp_get_wtime(); double totalTime = endTime-startTime; double work = (double) p; double throughput = work/totalTime; printf("Searching all keys took %g seconds, throughput was %g values tested per second.\n", totalTime, throughput); return 0; }
mput_ispc.c
#include "hmap_struct_isp.h"
// #include "hmap_aux.h"
// #include "hmap_get.h"
// #include "fasthash.h"

// Select the i'th key: either from the array-of-pointers "keys", or, when
// keys == NULL, from the packed buffer "alt_keys" at a fixed stride key_len.
static inline void * set_key(
    void **keys,
    uint32 i,
    void *alt_keys,
    uint32 key_len
    )
{
  void *key_i;
  if ( keys != NULL ) {
    key_i = keys[i];
  }
  else {
    key_i = (void *)((int8 *)alt_keys + (i*key_len));
  }
  return key_i;
}
//--------------------------------
// Select the i'th value: from "vals" or, when vals == NULL, from the packed
// buffer "alt_vals" at a fixed stride val_len (mirrors set_key above).
static inline void * set_val(
    void **vals,
    uint32 i,
    void *alt_vals,
    uint32 val_len
    )
{
  void *val_i;
  if ( vals != NULL ) {
    val_i = vals[i];
  }
  else {
    val_i = (void *)((int8 *)alt_vals + (i*val_len));
  }
  return val_i;
}
//--------------------------------
// Length of the i'th key: per-key from key_lens[], or the fixed key_len
// when key_lens == NULL (the packed / uniform-length case).
static inline uint16 set_key_len(
    uint16 *key_lens,
    uint32 i,
    uint32 key_len
    )
{
  uint16 key_len_i;
  if ( key_lens != NULL ) {
    key_len_i = key_lens[i];
  }
  else {
    key_len_i = key_len;
  }
  return key_len_i;
}
//--------------------------------
// Multi-put: probe the hash map for a batch of nkeys keys, in chunks of
// M->num_at_once, using up to M->num_procs OpenMP threads.  Existing keys
// are assigned an owning thread id in tids[]; missing keys are marked
// tids[j] = -1 for a later sequential insert pass.
//
// NOTE(review): declared "export void" yet ends with "return status;" —
// presumably the return type should be int; confirm against the header.
export void hmap_mput(
    uniform hmap_t * uniform ptr_hmap,
    uniform hmap_multi_t * uniform M,
    uniform void ** uniform keys, // [nkeys]
    uint16 key_lens[], // [nkeys]
    void *alt_keys, // either keys or alt_keys but not both
    uint32 key_len,
    uint32 nkeys,
    void **vals, // [nkeys]
    void *alt_vals, // either keys or alt_keys but not both
    uint32 val_len
    )
{
  int status = 0;
  if ( nkeys == 0 ) { goto BYE; }
  //-------------------------------------------------
  // exactly one of (keys, key_lens) / (alt_keys, key_len) must be supplied
  if ( keys == NULL ) {
    if ( key_lens != NULL ) { go_BYE(-1); }
    if ( alt_keys == NULL ) { go_BYE(-1); }
    if ( key_len == 0 ) { go_BYE(-1); }
  }
  else {
    if ( key_lens == NULL ) { go_BYE(-1); }
    if ( alt_keys != NULL ) { go_BYE(-1); }
    if ( key_len != 0 ) { go_BYE(-1); }
  }
  //-------------------------------------------------
  // same mutual-exclusion rule for vals / (alt_vals, val_len)
  if ( vals == NULL ) {
    if ( alt_vals == NULL ) { go_BYE(-1); }
    if ( val_len == 0 ) { go_BYE(-1); }
  }
  else {
    if ( alt_vals != NULL ) { go_BYE(-1); }
    if ( val_len != 0 ) { go_BYE(-1); }
  }
  //-------------------------------------------------
  // scratch arrays, one slot per key in the current chunk
  uint32 *idxs = M->idxs;
  uint32 *hashes = M->hashes;
  uint32 *locs = M->locs;
  int8_t *tids = M->tids;
  bool *exists = M->exists;
  uint16 *m_key_len = M->m_key_len;
  void **m_key = M->m_key;
  int nP = M->num_procs;
#ifdef SEQUENTIAL
  nP = 1;
#else
  if ( nP <= 0 ) { nP = omp_get_num_procs(); }
#endif
  // fprintf(stderr, "Using %d cores \n", nP);
  uint64 proc_divinfo = fast_div32_init(nP);
  uint32 lb = 0, ub;
  register uint64 hashkey = ptr_hmap->hashkey;
  register uint64 divinfo = ptr_hmap->divinfo;
  // process keys in windows [lb, ub) of at most M->num_at_once
  for ( int iter = 0; ; iter++ ) {
    ub = lb + M->num_at_once;
    if ( ub > nkeys ) { ub = nkeys; }
    uint32 niters = ub - lb;
    int num_per_core = 16; // so that no false sharing on write
    bool do_sequential_loop = false;
    // NOTE(review): unsynchronized writes to "status" and
    // "do_sequential_loop" inside this parallel loop are a benign-looking
    // but real data race — verify whether a reduction/atomic is intended.
    // NOTE(review): "j += num_per_core" visits only every 16th entry of
    // the chunk; usually there is an inner loop over the 16-entry block —
    // confirm against the original sequential version.
#pragma omp parallel for num_threads(nP) schedule(static, 1)
    for ( uint32 j = 0; j < niters; j += num_per_core ) {
      uint32 i = j + lb;
      //--------------------------------
      m_key[j] = set_key(keys, i, alt_keys, key_len);
      m_key_len[j] = set_key_len(key_lens, i, key_len);
      dbg_t dbg;
      idxs[j] = UINT_MAX; // bad value
      // NOTE(review): both hash computations are commented out, so
      // hashes[j] is read below without being written here — verify.
      // hashes[j] = murmurhash3(m_key[j], m_key_len[j], hashkey);
      // hashes[j] = fasthash32(m_key[j], m_key_len[j], hashkey);
      locs[j] = fast_rem32(hashes[j], ptr_hmap->size, divinfo);
      dbg.hash = hashes[j];
      dbg.probe_loc = locs[j];
      int lstatus = hmap_get(ptr_hmap, m_key[j], m_key_len[j], NULL,
          exists+j, idxs+j, &dbg);
      // do not exist if bad status, this is an omp loop
      if ( lstatus != 0 ) { if ( status == 0 ) { status = lstatus; } }
      if ( !exists[j] ) { // new key=> insert in sequential loop
        tids[j] = -1; // assigned to nobody, done in sequential loop
        if ( do_sequential_loop == false ) { do_sequential_loop = true; }
      }
      else {
        // existing key: ownership determined by hash mod number of procs
        tids[j] = fast_rem32(hashes[j], nP, proc_divinfo);
      }
    }
    cBYE(status);
    if ( ub >= nkeys ) { break; }
    lb += M->num_at_once;
  }
BYE:
  return status;
}
cri.c
#include <omp.h> int main() { int x; int i,id; x = 0; #pragma omp parallel shared(x) private(i,id) { #pragma omp critical { id = omp_get_thread_num(); printf("before thread %d : X = %d\n",id,x); for(i=0;i<3000000;i++) x = x + 1; printf("after thread %d : X = %d\n",id,x); } } /* end of parallel section */ printf("out of the parallel region : X = %d\n",x); return 0; }
GB_unop__identity_uint32_uint64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_uint32_uint64)
// op(A') function:  GB (_unop_tran__identity_uint32_uint64)

// C type:   uint32_t
// A type:   uint64_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: pass the value through)
#define GB_OP(z, x) \
    z = x ;

// casting (uint64 source narrowed to uint32 destination)
#define GB_CAST(z, aij) \
    uint32_t z = (uint32_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    uint64_t aij = Ax [pA] ;        \
    /* Cx [pC] = op (cast (aij)) */ \
    uint32_t z = (uint32_t) aij ;   \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
// (set at compile time via GraphBLAS "GxB_NO_*" configuration macros)
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise apply over a dense or bitmap array of anz entries,
// parallelized with a static OpenMP schedule over nthreads threads.
GrB_Info GB (_unop_apply__identity_uint32_uint64)
(
    uint32_t *Cx,               // Cx and Ax may be aliased
    const uint64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint64_t aij = Ax [p] ;
            uint32_t z = (uint32_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint64_t aij = Ax [p] ;
            uint32_t z = (uint32_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual loop lives in GB_unop_transpose.c, instantiated with the
// GB_CAST_OP macro defined above.
GrB_Info GB (_unop_tran__identity_uint32_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
if-clause.c
#include <stdio.h> #include <stdlib.h> #include <omp.h> int main(int argc, char **argv) { int i, n=20, tid; int a[n],suma=0,sumalocal,x; if(argc < 2) { fprintf(stderr,"[ERROR]-Falta iteraciones y numero de hebras\n"); exit(-1); } n = atoi(argv[1]); x = atoi(argv[2]); if (n>20) n=20; for (i=0; i<n; i++) { a[i] = i; } #pragma omp parallel num_threads(x) if(n>4) default(none) \ private(sumalocal,tid) shared(a,suma,n) { sumalocal=0; tid=omp_get_thread_num(); #pragma omp for private(i) schedule(static) nowait for (i=0; i<n; i++) { sumalocal += a[i]; printf(" thread %d suma de a[%d]=%d sumalocal=%d \n", tid,i,a[i],sumalocal); } #pragma omp atomic suma += sumalocal; #pragma omp barrier #pragma omp master printf("thread master=%d imprime suma=%d\n",tid,suma); } }
GB_binop__isne_fc32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__isne_fc32)
// A.*B function (eWiseMult):       GB (_AemultB_08__isne_fc32)
// A.*B function (eWiseMult):       GB (_AemultB_02__isne_fc32)
// A.*B function (eWiseMult):       GB (_AemultB_04__isne_fc32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__isne_fc32)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__isne_fc32)
// C+=b function (dense accum):     GB (_Cdense_accumb__isne_fc32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__isne_fc32)
// C=scalar+B                       GB (_bind1st__isne_fc32)
// C=scalar+B'                      GB (_bind1st_tran__isne_fc32)
// C=A+scalar                       GB (_bind2nd__isne_fc32)
// C=A'+scalar                      GB (_bind2nd_tran__isne_fc32)

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// B,b type: GxB_FC32_t
// BinaryOp: cij = GB_FC32_isne (aij, bij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_BTYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    GxB_FC32_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    GxB_FC32_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    GxB_FC32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: ISNE on single-precision complex values
#define GB_BINOP(z,x,y,i,j) \
    z = GB_FC32_isne (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISNE || GxB_NO_FC32 || GxB_NO_ISNE_FC32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISNE is not in that list, so this variant is compiled out.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__isne_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isne_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isne_fc32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC32_t
        GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0
// colscale is not generated for the ISNE operator

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0
// rowscale is not generated for the ISNE operator

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isne_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__isne_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isne_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__isne_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isne_fc32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isne_fc32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t   x = (*((GxB_FC32_t *) x_input)) ;
    GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        GxB_FC32_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_FC32_isne (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isne_fc32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
    GxB_FC32_t   y = (*((GxB_FC32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        GxB_FC32_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_FC32_isne (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC32_t aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = GB_FC32_isne (x, aij) ;           \
}

GrB_Info GB (_bind1st_tran__isne_fc32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        GxB_FC32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        GxB_FC32_t
}

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC32_t aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = GB_FC32_isne (aij, y) ;           \
}

GrB_Info GB (_bind2nd_tran__isne_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__land_int32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__land_int32)
// A.*B function (eWiseMult):       GB (_AemultB_08__land_int32)
// A.*B function (eWiseMult):       GB (_AemultB_02__land_int32)
// A.*B function (eWiseMult):       GB (_AemultB_04__land_int32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__land_int32)
// A*D function (colscale):         GB (_AxD__land_int32)
// D*A function (rowscale):         GB (_DxB__land_int32)
// C+=B function (dense accum):     GB (_Cdense_accumB__land_int32)
// C+=b function (dense accum):     GB (_Cdense_accumb__land_int32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__land_int32)
// C=scalar+B                       GB (_bind1st__land_int32)
// C=scalar+B'                      GB (_bind1st_tran__land_int32)
// C=A+scalar                       GB (_bind2nd__land_int32)
// C=A'+scalar                      GB (_bind2nd_tran__land_int32)

// C type:   int32_t
// A type:   int32_t
// B,b type: int32_t
// BinaryOp: cij = ((aij != 0) && (bij != 0))

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int32_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int32_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: logical AND of the two values' truthiness
#define GB_BINOP(z,x,y,i,j) \
    z = ((x != 0) && (y != 0)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LAND || GxB_NO_INT32 || GxB_NO_LAND_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// LAND is not in that list, so this variant is compiled out.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__land_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__land_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__land_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__land_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__land_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__land_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__land_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__land_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__land_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__land_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__land_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t   x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = ((x != 0) && (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__land_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t   y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = ((aij != 0) && (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = ((x != 0) && (aij != 0)) ;        \
}

GrB_Info GB (_bind1st_tran__land_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = ((aij != 0) && (y != 0)) ;        \
}

GrB_Info GB (_bind2nd_tran__land_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
stream.c
/*-----------------------------------------------------------------------*/ /* Program: STREAM */ /* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */ /* Original code developed by John D. McCalpin */ /* Programmers: John D. McCalpin */ /* Joe R. Zagar */ /* */ /* This program measures memory transfer rates in MB/s for simple */ /* computational kernels coded in C. */ /*-----------------------------------------------------------------------*/ /* Copyright 1991-2013: John D. McCalpin */ /*-----------------------------------------------------------------------*/ /* License: */ /* 1. You are free to use this program and/or to redistribute */ /* this program. */ /* 2. You are free to modify this program for your own use, */ /* including commercial use, subject to the publication */ /* restrictions in item 3. */ /* 3. You are free to publish results obtained from running this */ /* program, or from works that you derive from this program, */ /* with the following limitations: */ /* 3a. In order to be referred to as "STREAM benchmark results", */ /* published results must be in conformance to the STREAM */ /* Run Rules, (briefly reviewed below) published at */ /* http://www.cs.virginia.edu/stream/ref.html */ /* and incorporated herein by reference. */ /* As the copyright holder, John McCalpin retains the */ /* right to determine conformity with the Run Rules. */ /* 3b. Results based on modified source code or on runs not in */ /* accordance with the STREAM Run Rules must be clearly */ /* labelled whenever they are published. Examples of */ /* proper labelling include: */ /* "tuned STREAM benchmark results" */ /* "based on a variant of the STREAM benchmark code" */ /* Other comparable, clear, and reasonable labelling is */ /* acceptable. */ /* 3c. Submission of results to the STREAM benchmark web site */ /* is encouraged, but not required. */ /* 4. 
Use of this program or creation of derived works based on this */ /* program constitutes acceptance of these licensing restrictions. */ /* 5. Absolutely no warranty is expressed or implied. */ /*-----------------------------------------------------------------------*/ # include <stdio.h> # include <unistd.h> # include <math.h> # include <float.h> # include <limits.h> # include <sys/time.h> #ifdef USE_CALI #include <caliper/cali.h> #endif #ifdef USE_LIKWID #include <likwid-marker.h> #endif /*----------------------------------------------------------------------- * INSTRUCTIONS: * * 1) STREAM requires different amounts of memory to run on different * systems, depending on both the system cache size(s) and the * granularity of the system timer. * You should adjust the value of 'STREAM_ARRAY_SIZE' (below) * to meet *both* of the following criteria: * (a) Each array must be at least 4 times the size of the * available cache memory. I don't worry about the difference * between 10^6 and 2^20, so in practice the minimum array size * is about 3.8 times the cache size. * Example 1: One Xeon E3 with 8 MB L3 cache * STREAM_ARRAY_SIZE should be >= 4 million, giving * an array size of 30.5 MB and a total memory requirement * of 91.5 MB. * Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP) * STREAM_ARRAY_SIZE should be >= 20 million, giving * an array size of 153 MB and a total memory requirement * of 458 MB. * (b) The size should be large enough so that the 'timing calibration' * output by the program is at least 20 clock-ticks. * Example: most versions of Windows have a 10 millisecond timer * granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds. * If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec. * This means the each array must be at least 1 GB, or 128M elements. * * Version 5.10 increases the default array size from 2 million * elements to 10 million elements in response to the increasing * size of L3 caches. 
The new default size is large enough for caches * up to 20 MB. * Version 5.10 changes the loop index variables from "register int" * to "ssize_t", which allows array indices >2^32 (4 billion) * on properly configured 64-bit systems. Additional compiler options * (such as "-mcmodel=medium") may be required for large memory runs. * * Array size can be set at compile time without modifying the source * code for the (many) compilers that support preprocessor definitions * on the compile line. E.g., * gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M * will override the default size of 10M with a new size of 100M elements * per array. */ #ifndef STREAM_ARRAY_SIZE # define STREAM_ARRAY_SIZE 60000000 #endif /* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result * for any iteration after the first, therefore the minimum value * for NTIMES is 2. * There are no rules on maximum allowable values for NTIMES, but * values larger than the default are unlikely to noticeably * increase the reported performance. * NTIMES can also be set on the compile line without changing the source * code using, for example, "-DNTIMES=7". */ #ifdef NTIMES #if NTIMES<=1 # define NTIMES 10 #endif #endif #ifndef NTIMES # define NTIMES 10 #endif /* Users are allowed to modify the "OFFSET" variable, which *may* change the * relative alignment of the arrays (though compilers may change the * effective offset by making the arrays non-contiguous on some systems). * Use of non-zero values for OFFSET can be especially helpful if the * STREAM_ARRAY_SIZE is set to a value close to a large power of 2. * OFFSET can also be set on the compile line without changing the source * code using, for example, "-DOFFSET=56". */ #ifndef OFFSET # define OFFSET 0 #endif /* * 3) Compile the code with optimization. Many compilers generate * unreasonably bad code before the optimizer tightens things up. 
* If the results are unreasonably good, on the other hand, the * optimizer might be too smart for me! * * For a simple single-core version, try compiling with: * cc -O stream.c -o stream * This is known to work on many, many systems.... * * To use multiple cores, you need to tell the compiler to obey the OpenMP * directives in the code. This varies by compiler, but a common example is * gcc -O -fopenmp stream.c -o stream_omp * The environment variable OMP_NUM_THREADS allows runtime control of the * number of threads/cores used when the resulting "stream_omp" program * is executed. * * To run with single-precision variables and arithmetic, simply add * -DSTREAM_TYPE=float * to the compile line. * Note that this changes the minimum array sizes required --- see (1) above. * * The preprocessor directive "TUNED" does not do much -- it simply causes the * code to call separate functions to execute each kernel. Trivial versions * of these functions are provided, but they are *not* tuned -- they just * provide predefined interfaces to be replaced with tuned code. * * * 4) Optional: Mail the results to mccalpin@cs.virginia.edu * Be sure to include info that will help me understand: * a) the computer hardware configuration (e.g., processor model, memory type) * b) the compiler name/version and compilation flags * c) any run-time information (such as OMP_NUM_THREADS) * d) all of the output from the test case. * * Thanks! 
 *
 *-----------------------------------------------------------------------*/

# define HLINE "-------------------------------------------------------------\n"

# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif

#ifndef STREAM_TYPE
#define STREAM_TYPE double
#endif

/* The three benchmark arrays; OFFSET may shift their relative alignment. */
static STREAM_TYPE a[STREAM_ARRAY_SIZE+OFFSET], b[STREAM_ARRAY_SIZE+OFFSET], c[STREAM_ARRAY_SIZE+OFFSET];

/* Per-kernel timing statistics, indexed 0..3 = Copy, Scale, Add, Triad. */
static double avgtime[4] = {0}, maxtime[4] = {0}, mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};

static char *label[4] = {"Copy: ", "Scale: ", "Add: ", "Triad: "};

/* Bytes moved per kernel iteration: Copy/Scale touch 2 arrays, Add/Triad 3. */
static double bytes[4] = {
    2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
    2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
    3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
    3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE
    };

extern double mysecond();
extern void checkSTREAMresults();
#ifdef TUNED
extern void tuned_STREAM_Copy();
extern void tuned_STREAM_Scale(STREAM_TYPE scalar);
extern void tuned_STREAM_Add();
extern void tuned_STREAM_Triad(STREAM_TYPE scalar);
#endif
#ifdef _OPENMP
extern int omp_get_num_threads();
#endif

/* Runs the four STREAM kernels NTIMES times, reports the best bandwidth
   (excluding the first iteration), then validates the results. */
int main()
    {
    int quantum, checktick();
    int BytesPerWord;
    int k;
    ssize_t j;
    STREAM_TYPE scalar;
    double t, times[4][NTIMES];

    /* --- SETUP --- determine precision and check timing --- */

    printf(HLINE);
    printf("STREAM version $Revision: 5.10 $\n");
    printf(HLINE);
    BytesPerWord = sizeof(STREAM_TYPE);
    printf("This system uses %d bytes per array element.\n", BytesPerWord);

    printf(HLINE);
#ifdef N
    printf("***** WARNING: ******\n");
    printf(" It appears that you set the preprocessor variable N when compiling this code.\n");
    printf(" This version of the code uses the preprocesor variable STREAM_ARRAY_SIZE to control the array size\n");
    printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE);
    printf("***** WARNING: ******\n");
#endif

    printf("Array size = %llu (elements), Offset = %d (elements)\n" , (unsigned long long) STREAM_ARRAY_SIZE, OFFSET);
    printf("Memory per array = %.1f MiB (= %.1f GiB).\n",
        BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0),
        BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0));
    printf("Total memory required = %.1f MiB (= %.1f GiB).\n",
        (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.),
        (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.));
    printf("Each kernel will be executed %d times.\n", NTIMES);
    printf(" The *best* time for each kernel (excluding the first iteration)\n");
    printf(" will be used to compute the reported bandwidth.\n");

#ifdef _OPENMP
    printf(HLINE);
#ifdef USE_CALI
    /* Caliper: tag each OpenMP thread with its id for profiling. */
    cali_id_t thread_attr = cali_create_attribute("thread_id", CALI_TYPE_INT, CALI_ATTR_ASVALUE | CALI_ATTR_SKIP_EVENTS);
#pragma omp parallel
    {
        cali_set_int(thread_attr, omp_get_thread_num());
    }
#endif
#ifdef USE_LIKWID
    LIKWID_MARKER_INIT;
#endif
#pragma omp parallel
    {
#pragma omp master
        {
            k = omp_get_num_threads();
            printf ("Number of Threads requested = %i\n",k);
        }
    }
#endif

#ifdef _OPENMP
    /* Count the threads a second way: one atomic increment per thread. */
    k = 0;
#pragma omp parallel
#pragma omp atomic
        k++;
    printf ("Number of Threads counted = %i\n",k);
#endif

    /* Get initial value for system clock. */
#pragma omp parallel for
    for (j=0; j<STREAM_ARRAY_SIZE; j++) {
        a[j] = 1.0;
        b[j] = 2.0;
        c[j] = 0.0;
    }

    printf(HLINE);

    if ( (quantum = checktick()) >= 1)
        printf("Your clock granularity/precision appears to be "
            "%d microseconds.\n", quantum);
    else {
        printf("Your clock granularity appears to be "
            "less than one microsecond.\n");
        quantum = 1;
    }

    /* Rough per-test time estimate from one timed sweep over a[]. */
    t = mysecond();
#pragma omp parallel for
    for (j = 0; j < STREAM_ARRAY_SIZE; j++)
        a[j] = 2.0E0 * a[j];
    t = 1.0E6 * (mysecond() - t);

    printf("Each test below will take on the order"
        " of %d microseconds.\n", (int) t );
    printf(" (= %d clock ticks)\n", (int) (t/quantum) );
    printf("Increase the size of the arrays if this shows that\n");
    printf("you are not getting at least 20 clock ticks per test.\n");

    printf(HLINE);

    printf("WARNING -- The above is only a rough guideline.\n");
    printf("For best results, please be sure you know the\n");
    printf("precision of your system timer.\n");
    printf(HLINE);

    /* --- MAIN LOOP --- repeat test cases NTIMES times --- */

    scalar = 3.0;
    for (k=0; k<NTIMES; k++)
        {
        /* Copy: c = a */
        times[0][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Copy();
#else
#pragma omp parallel for
        for (j=0; j<STREAM_ARRAY_SIZE; j++)
            c[j] = a[j];
#endif
        times[0][k] = mysecond() - times[0][k];

        /* Scale: b = scalar * c */
        times[1][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Scale(scalar);
#else
#pragma omp parallel for
        for (j=0; j<STREAM_ARRAY_SIZE; j++)
            b[j] = scalar*c[j];
#endif
        times[1][k] = mysecond() - times[1][k];

        /* Add: c = a + b */
        times[2][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Add();
#else
#pragma omp parallel for
        for (j=0; j<STREAM_ARRAY_SIZE; j++)
            c[j] = a[j]+b[j];
#endif
        times[2][k] = mysecond() - times[2][k];

        /* Triad: a = b + scalar * c */
        times[3][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Triad(scalar);
#else
#pragma omp parallel for
        for (j=0; j<STREAM_ARRAY_SIZE; j++)
            a[j] = b[j]+scalar*c[j];
#endif
        times[3][k] = mysecond() - times[3][k];
        }

#ifdef USE_LIKWID
    LIKWID_MARKER_CLOSE;
#endif

    /* --- SUMMARY --- */

    for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
        {
        for (j=0; j<4; j++)
            {
            avgtime[j] = avgtime[j] + times[j][k];
            mintime[j] = MIN(mintime[j], times[j][k]);
            maxtime[j] = MAX(maxtime[j], times[j][k]);
            }
        }

    printf("Function Best Rate MB/s Avg time Min time Max time\n");
    for (j=0; j<4; j++) {
        avgtime[j] = avgtime[j]/(double)(NTIMES-1);

        /* Best rate uses the minimum time, per the STREAM run rules. */
        printf("%s%12.1f %11.6f %11.6f %11.6f\n", label[j],
            1.0E-06 * bytes[j]/mintime[j],
            avgtime[j],
            mintime[j],
            maxtime[j]);
    }
    printf(HLINE);

    /* --- Check Results --- */
    checkSTREAMresults();
    printf(HLINE);

    return 0;
}

# define M 20

/* Estimates the timer granularity in microseconds by sampling M
   consecutive distinct clock values and taking the minimum gap. */
int checktick()
    {
    int i, minDelta, Delta;
    double t1, t2, timesfound[M];

/* Collect a sequence of M unique time values from the system. */

    for (i = 0; i < M; i++) {
        t1 = mysecond();
        while( ((t2=mysecond()) - t1) < 1.0E-6 )
            ;
        timesfound[i] = t1 = t2;
    }

/*
 * Determine the minimum difference between these M values.
 * This result will be our estimate (in microseconds) for the
 * clock granularity.
 */

    minDelta = 1000000;
    for (i = 1; i < M; i++) {
        Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1]));
        minDelta = MIN(minDelta, MAX(Delta,0));
    }

    return(minDelta);
    }

/* A gettimeofday routine to give access to the wall
   clock timer on most UNIX-like systems. */

#include <sys/time.h>

/* Wall-clock time in seconds (microsecond resolution via gettimeofday). */
double mysecond()
{
    struct timeval tp;
    struct timezone tzp;
    int i;          /* NOTE(review): gettimeofday result is captured but never checked */

    i = gettimeofday(&tp,&tzp);
    return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}

#ifndef abs
#define abs(a) ((a) >= 0 ? (a) : -(a))
#endif

/* Replays the kernel arithmetic on scalars and checks that every array
   element matches the expected value to within a type-dependent epsilon. */
void checkSTREAMresults ()
{
    STREAM_TYPE aj,bj,cj,scalar;
    STREAM_TYPE aSumErr,bSumErr,cSumErr;
    STREAM_TYPE aAvgErr,bAvgErr,cAvgErr;
    double epsilon;
    ssize_t j;
    int k,ierr,err;

    /* reproduce initialization */
    aj = 1.0;
    bj = 2.0;
    cj = 0.0;
    /* a[] is modified during timing check */
    aj = 2.0E0 * aj;
    /* now execute timing loop */
    scalar = 3.0;
    for (k=0; k<NTIMES; k++)
        {
            cj = aj;
            bj = scalar*cj;
            cj = aj+bj;
            aj = bj+scalar*cj;
        }

    /* accumulate deltas between observed and expected results */
    aSumErr = 0.0;
    bSumErr = 0.0;
    cSumErr = 0.0;
    for (j=0; j<STREAM_ARRAY_SIZE; j++) {
        aSumErr += abs(a[j] - aj);
        bSumErr += abs(b[j] - bj);
        cSumErr += abs(c[j] - cj);
        // if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj); // MCCALPIN
    }
    aAvgErr = aSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
    bAvgErr = bSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
    cAvgErr = cSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;

    /* epsilon is chosen from the element size: float vs double */
    if (sizeof(STREAM_TYPE) == 4) {
        epsilon = 1.e-6;
    }
    else if (sizeof(STREAM_TYPE) == 8) {
        epsilon = 1.e-13;
    }
    else {
        printf("WEIRD: sizeof(STREAM_TYPE) = %lu\n",sizeof(STREAM_TYPE));
        epsilon = 1.e-6;
    }

    err = 0;
    if (abs(aAvgErr/aj) > epsilon) {
        err++;
        printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
        printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj);
        ierr = 0;
        for (j=0; j<STREAM_ARRAY_SIZE; j++) {
            if (abs(a[j]/aj-1.0) > epsilon) {
                ierr++;
#ifdef VERBOSE
                if (ierr < 10) {
                    printf(" array a: index: %ld, expected: %e, observed: %e, relative error: %e\n",
                        j,aj,a[j],abs((aj-a[j])/aAvgErr));
                }
#endif
            }
        }
        printf(" For array a[], %d errors were found.\n",ierr);
    }
    if (abs(bAvgErr/bj) > epsilon) {
        err++;
        printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
        printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj);
        printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
        ierr = 0;
        for (j=0; j<STREAM_ARRAY_SIZE; j++) {
            if (abs(b[j]/bj-1.0) > epsilon) {
                ierr++;
#ifdef VERBOSE
                if (ierr < 10) {
                    printf(" array b: index: %ld, expected: %e, observed: %e, relative error: %e\n",
                        j,bj,b[j],abs((bj-b[j])/bAvgErr));
                }
#endif
            }
        }
        printf(" For array b[], %d errors were found.\n",ierr);
    }
    if (abs(cAvgErr/cj) > epsilon) {
        err++;
        printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
        printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj);
        printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
        ierr = 0;
        for (j=0; j<STREAM_ARRAY_SIZE; j++) {
            if (abs(c[j]/cj-1.0) > epsilon) {
                ierr++;
#ifdef VERBOSE
                if (ierr < 10) {
                    printf(" array c: index: %ld, expected: %e, observed: %e, relative error: %e\n",
                        j,cj,c[j],abs((cj-c[j])/cAvgErr));
                }
#endif
            }
        }
        printf(" For array c[], %d errors were found.\n",ierr);
    }
    if (err == 0) {
        printf ("Solution Validates: avg error less than %e on all three arrays\n",epsilon);
    }
#ifdef VERBOSE
    printf ("Results Validation Verbose Results: \n");
    printf (" Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj);
    printf (" Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]);
    printf (" Rel Errors on a, b, c: %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj));
#endif
}

/* BW_SCALE repeats each kernel body, inflating measured work per element. */
#ifndef BW_SCALE
#define BW_SCALE 1
#endif

#ifdef TUNED
/* stubs for "tuned" versions of the kernels */
void tuned_STREAM_Copy()
{
    ssize_t j, _b;
#pragma omp parallel
    {
#ifdef USE_CALI
    CALI_MARK_BEGIN("copy");
#endif
#ifdef USE_LIKWID
    LIKWID_MARKER_START("copy");
#endif
#pragma omp for
    for (j=0; j<STREAM_ARRAY_SIZE; j++)
        for (_b = 0; _b < BW_SCALE; _b++)
            c[j] = a[j];
#ifdef USE_CALI
    CALI_MARK_END("copy");
#endif
#ifdef USE_LIKWID
    LIKWID_MARKER_STOP("copy");
#endif
    } // parallel
}

void tuned_STREAM_Scale(STREAM_TYPE scalar)
{
    ssize_t j, _b;
#pragma omp parallel
    {
#ifdef USE_CALI
    CALI_MARK_BEGIN("scale");
#endif
#ifdef USE_LIKWID
    LIKWID_MARKER_START("scale");
#endif
#pragma omp for
    for (j=0; j<STREAM_ARRAY_SIZE; j++)
        for (_b = 0; _b < BW_SCALE; _b++)
            b[j] = scalar*c[j];
#ifdef USE_CALI
    CALI_MARK_END("scale");
#endif
#ifdef USE_LIKWID
    LIKWID_MARKER_STOP("scale");
#endif
    } // parallel
}

void tuned_STREAM_Add()
{
    ssize_t j, _b;
#pragma omp parallel
    {
#ifdef USE_CALI
    CALI_MARK_BEGIN("add");
#endif
#ifdef USE_LIKWID
    LIKWID_MARKER_START("add");
#endif
#pragma omp for
    for (j=0; j<STREAM_ARRAY_SIZE; j++)
        for (_b = 0; _b < BW_SCALE; _b++)
            c[j] = a[j]+b[j];
#ifdef USE_CALI
    CALI_MARK_END("add");
#endif
#ifdef USE_LIKWID
    LIKWID_MARKER_STOP("add");
#endif
    } // parallel
}

void tuned_STREAM_Triad(STREAM_TYPE scalar)
{
    ssize_t j, _b;
#pragma omp parallel
    {
#ifdef USE_CALI
    CALI_MARK_BEGIN("triad");
#endif
#ifdef USE_LIKWID
    LIKWID_MARKER_START("triad");
#endif
#pragma omp for
    for (j=0; j<STREAM_ARRAY_SIZE; j++)
        for (_b = 0; _b < BW_SCALE; _b++)
            a[j] = b[j]+scalar*c[j];
#ifdef USE_CALI
    CALI_MARK_END("triad");
#endif
#ifdef USE_LIKWID
    LIKWID_MARKER_STOP("triad");
#endif
    } // parallel
}
/* end of stubs for the "tuned" versions of the kernels */
#endif
GB_unop__identity_fp64_int64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__identity_fp64_int64)
// op(A') function: GB (_unop_tran__identity_fp64_int64)

// C type: double
// A type: int64_t
// cast: double cij = (double) aij
// unaryop: cij = aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    double z = (double) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    int64_t aij = Ax [pA] ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = (double) aij ;       \
    Cx [pC] = z ;                   \
}

// true if operator is the identity op with no typecasting
// (false here: int64_t is typecast to double)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_fp64_int64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // A is full or sparse: every entry in Ax is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int64_t aij = Ax [p] ;
            double z = (double) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int64_t aij = Ax [p] ;
            double z = (double) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_fp64_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel is specialized through the macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
divsufsort.c
/*
 * divsufsort.c for libdivsufsort
 * Copyright (c) 2003-2008 Yuta Mori All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "divsufsort_private.h"
#ifdef _OPENMP
# include <omp.h>
#endif


/*- Private Functions -*/

/* Sorts suffixes of type B*.
   Returns m, the number of type B* suffixes; on return, SA[0..m-1] holds
   their sorted order and bucket_A/bucket_B hold per-character bucket
   boundaries used by construct_SA / construct_BWT. */
static
saidx_t
sort_typeBstar(const sauchar_t *T, saidx_t *SA,
               saidx_t *bucket_A, saidx_t *bucket_B,
               saidx_t n) {
  saidx_t *PAb, *ISAb, *buf;
#ifdef _OPENMP
  saidx_t *curbuf;
  saidx_t l;
#endif
  saidx_t i, j, k, t, m, bufsize;
  saint_t c0, c1;
#ifdef _OPENMP
  saint_t d0, d1;
  int tmp;
#endif

  /* Initialize bucket arrays. */
  for(i = 0; i < BUCKET_A_SIZE; ++i) { bucket_A[i] = 0; }
  for(i = 0; i < BUCKET_B_SIZE; ++i) { bucket_B[i] = 0; }

  /* Count the number of occurrences of the first one or two characters of each
     type A, B and B* suffix. Moreover, store the beginning position of all
     type B* suffixes into the array SA. */
  for(i = n - 1, m = n, c0 = T[n - 1]; 0 <= i;) {
    /* type A suffix. */
    do { ++BUCKET_A(c1 = c0); } while((0 <= --i) && ((c0 = T[i]) >= c1));
    if(0 <= i) {
      /* type B* suffix. */
      ++BUCKET_BSTAR(c0, c1);
      SA[--m] = i;
      /* type B suffix. */
      for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) {
        ++BUCKET_B(c0, c1);
      }
    }
  }
  m = n - m;
  /* note: A type B* suffix is lexicographically smaller than a type B suffix that
     begins with the same first two characters. */

  /* Calculate the index of start/end point of each bucket. */
  for(c0 = 0, i = 0, j = 0; c0 < ALPHABET_SIZE; ++c0) {
    t = i + BUCKET_A(c0);
    BUCKET_A(c0) = i + j; /* start point */
    i = t + BUCKET_B(c0, c0);
    for(c1 = c0 + 1; c1 < ALPHABET_SIZE; ++c1) {
      j += BUCKET_BSTAR(c0, c1);
      BUCKET_BSTAR(c0, c1) = j; /* end point */
      i += BUCKET_B(c0, c1);
    }
  }

  if(0 < m) {
    /* Sort the type B* suffixes by their first two characters. */
    PAb = SA + n - m; ISAb = SA + m;
    for(i = m - 2; 0 <= i; --i) {
      t = PAb[i], c0 = T[t], c1 = T[t + 1];
      SA[--BUCKET_BSTAR(c0, c1)] = i;
    }
    t = PAb[m - 1], c0 = T[t], c1 = T[t + 1];
    SA[--BUCKET_BSTAR(c0, c1)] = m - 1;

    /* Sort the type B* substrings using sssort. */
#ifdef _OPENMP
    /* Parallel variant: threads pull (c0,c1) buckets from a shared cursor
       guarded by the critical section, each sorting into its own buffer. */
    tmp = omp_get_max_threads();
    buf = SA + m, bufsize = (n - (2 * m)) / tmp;
    c0 = ALPHABET_SIZE - 2, c1 = ALPHABET_SIZE - 1, j = m;
#pragma omp parallel default(shared) private(curbuf, k, l, d0, d1, tmp)
    {
      tmp = omp_get_thread_num();
      curbuf = buf + tmp * bufsize;
      k = 0;
      for(;;) {
#pragma omp critical(sssort_lock)
        {
          if(0 < (l = j)) {
            d0 = c0, d1 = c1;
            do {
              k = BUCKET_BSTAR(d0, d1);
              if(--d1 <= d0) {
                d1 = ALPHABET_SIZE - 1;
                if(--d0 < 0) { break; }
              }
            } while(((l - k) <= 1) && (0 < (l = k)));
            c0 = d0, c1 = d1, j = k;
          }
        }
        if(l == 0) { break; }
        sssort(T, PAb, SA + k, SA + l,
               curbuf, bufsize, 2, n, *(SA + k) == (m - 1));
      }
    }
#else
    buf = SA + m, bufsize = n - (2 * m);
    for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) {
      for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) {
        i = BUCKET_BSTAR(c0, c1);
        if(1 < (j - i)) {
          sssort(T, PAb, SA + i, SA + j,
                 buf, bufsize, 2, n, *(SA + i) == (m - 1));
        }
      }
    }
#endif

    /* Compute ranks of type B* substrings. */
    for(i = m - 1; 0 <= i; --i) {
      if(0 <= SA[i]) {
        j = i;
        do { ISAb[SA[i]] = i; } while((0 <= --i) && (0 <= SA[i]));
        SA[i + 1] = i - j;
        if(i <= 0) { break; }
      }
      j = i;
      do { ISAb[SA[i] = ~SA[i]] = j; } while(SA[--i] < 0);
      ISAb[SA[i]] = j;
    }

    /* Construct the inverse suffix array of type B* suffixes using trsort. */
    trsort(ISAb, SA, m, 1);

    /* Set the sorted order of type B* suffixes. */
    for(i = n - 1, j = m, c0 = T[n - 1]; 0 <= i;) {
      for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) >= c1); --i, c1 = c0) { }
      if(0 <= i) {
        t = i;
        for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { }
        SA[ISAb[--j]] = ((t == 0) || (1 < (t - i))) ? t : ~t;
      }
    }

    /* Calculate the index of start/end point of each bucket. */
    BUCKET_B(ALPHABET_SIZE - 1, ALPHABET_SIZE - 1) = n; /* end point */
    for(c0 = ALPHABET_SIZE - 2, k = m - 1; 0 <= c0; --c0) {
      i = BUCKET_A(c0 + 1) - 1;
      for(c1 = ALPHABET_SIZE - 1; c0 < c1; --c1) {
        t = i - BUCKET_B(c0, c1);
        BUCKET_B(c0, c1) = i; /* end point */

        /* Move all type B* suffixes to the correct position. */
        for(i = t, j = BUCKET_BSTAR(c0, c1);
            j <= k;
            --i, --k) { SA[i] = SA[k]; }
      }
      BUCKET_BSTAR(c0, c0 + 1) = i - BUCKET_B(c0, c0) + 1; /* start point */
      BUCKET_B(c0, c0) = i; /* end point */
    }
  }

  return m;
}

/* Constructs the suffix array by using the sorted order of
   type B* suffixes. */
static
void
construct_SA(const sauchar_t *T, saidx_t *SA,
             saidx_t *bucket_A, saidx_t *bucket_B,
             saidx_t n, saidx_t m) {
  saidx_t *i, *j, *k;
  saidx_t s;
  saint_t c0, c1, c2;

  if(0 < m) {
    /* Construct the sorted order of type B suffixes by using
       the sorted order of type B* suffixes. */
    for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
      /* Scan the suffix array from right to left. */
      for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
          j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
          i <= j;
          --j) {
        if(0 < (s = *j)) {
          assert(T[s] == c1);
          assert(((s + 1) < n) && (T[s] <= T[s + 1]));
          assert(T[s - 1] <= T[s]);
          *j = ~s;
          c0 = T[--s];
          if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
          if(c0 != c2) {
            if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
            k = SA + BUCKET_B(c2 = c0, c1);
          }
          assert(k < j);
          *k-- = s;
        } else {
          assert(((s == 0) && (T[s] == c1)) || (s < 0));
          *j = ~s;
        }
      }
    }
  }

  /* Construct the suffix array by using
     the sorted order of type B suffixes. */
  k = SA + BUCKET_A(c2 = T[n - 1]);
  *k++ = (T[n - 2] < c2) ? ~(n - 1) : (n - 1);
  /* Scan the suffix array from left to right. */
  for(i = SA, j = SA + n; i < j; ++i) {
    if(0 < (s = *i)) {
      assert(T[s - 1] >= T[s]);
      c0 = T[--s];
      if((s == 0) || (T[s - 1] < c0)) { s = ~s; }
      if(c0 != c2) {
        BUCKET_A(c2) = k - SA;
        k = SA + BUCKET_A(c2 = c0);
      }
      assert(i < k);
      *k++ = s;
    } else {
      assert(s < 0);
      *i = ~s;
    }
  }
}

/* Constructs the burrows-wheeler transformed string directly
   by using the sorted order of type B* suffixes.
   Returns the primary index (row of the original string in the
   sorted rotation matrix). */
static
saidx_t
construct_BWT(const sauchar_t *T, saidx_t *SA,
              saidx_t *bucket_A, saidx_t *bucket_B,
              saidx_t n, saidx_t m) {
  saidx_t *i, *j, *k, *orig;
  saidx_t s;
  saint_t c0, c1, c2;

  if(0 < m) {
    /* Construct the sorted order of type B suffixes by using
       the sorted order of type B* suffixes. */
    for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
      /* Scan the suffix array from right to left. */
      for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
          j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
          i <= j;
          --j) {
        if(0 < (s = *j)) {
          assert(T[s] == c1);
          assert(((s + 1) < n) && (T[s] <= T[s + 1]));
          assert(T[s - 1] <= T[s]);
          c0 = T[--s];
          *j = ~((saidx_t)c0);
          if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
          if(c0 != c2) {
            if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
            k = SA + BUCKET_B(c2 = c0, c1);
          }
          assert(k < j);
          *k-- = s;
        } else if(s != 0) {
          *j = ~s;
#ifndef NDEBUG
        } else {
          assert(T[s] == c1);
#endif
        }
      }
    }
  }

  /* Construct the BWTed string by using
     the sorted order of type B suffixes. */
  k = SA + BUCKET_A(c2 = T[n - 1]);
  *k++ = (T[n - 2] < c2) ? ~((saidx_t)T[n - 2]) : (n - 1);
  /* Scan the suffix array from left to right. */
  for(i = SA, j = SA + n, orig = SA; i < j; ++i) {
    if(0 < (s = *i)) {
      assert(T[s - 1] >= T[s]);
      c0 = T[--s];
      *i = c0;
      if((0 < s) && (T[s - 1] < c0)) { s = ~((saidx_t)T[s - 1]); }
      if(c0 != c2) {
        BUCKET_A(c2) = k - SA;
        k = SA + BUCKET_A(c2 = c0);
      }
      assert(i < k);
      *k++ = s;
    } else if(s != 0) {
      *i = ~s;
    } else {
      orig = i;
    }
  }

  return orig - SA;
}


/*---------------------------------------------------------------------------*/

/*- Function -*/

/* Computes the suffix array SA of T[0..n-1].
   Returns 0 on success, -1 on invalid arguments, -2 on allocation failure. */
saint_t
divsufsort(const sauchar_t *T, saidx_t *SA, saidx_t n) {
  saidx_t *bucket_A, *bucket_B;
  saidx_t m;
  saint_t err = 0;

  /* Check arguments. */
  if((T == NULL) || (SA == NULL) || (n < 0)) { return -1; }
  else if(n == 0) { return 0; }
  else if(n == 1) { SA[0] = 0; return 0; }
  else if(n == 2) { m = (T[0] < T[1]); SA[m ^ 1] = 0, SA[m] = 1; return 0; }

  bucket_A = (saidx_t *)malloc(BUCKET_A_SIZE * sizeof(saidx_t));
  bucket_B = (saidx_t *)malloc(BUCKET_B_SIZE * sizeof(saidx_t));

  /* Suffixsort. */
  if((bucket_A != NULL) && (bucket_B != NULL)) {
    m = sort_typeBstar(T, SA, bucket_A, bucket_B, n);
    construct_SA(T, SA, bucket_A, bucket_B, n, m);
  } else {
    err = -2;
  }

  free(bucket_B);
  free(bucket_A);

  return err;
}

/* Computes the Burrows-Wheeler transform of T into U (may alias T's buffer
   per the library contract); A is optional scratch of n+1 elements.
   Returns the primary index + 1, or a negative value on error. */
saidx_t
divbwt(const sauchar_t *T, sauchar_t *U, saidx_t *A, saidx_t n) {
  saidx_t *B;
  saidx_t *bucket_A, *bucket_B;
  saidx_t m, pidx, i;

  /* Check arguments. */
  if((T == NULL) || (U == NULL) || (n < 0)) { return -1; }
  else if(n <= 1) { if(n == 1) { U[0] = T[0]; } return n; }

  if((B = A) == NULL) { B = (saidx_t *)malloc((size_t)(n + 1) * sizeof(saidx_t)); }
  bucket_A = (saidx_t *)malloc(BUCKET_A_SIZE * sizeof(saidx_t));
  bucket_B = (saidx_t *)malloc(BUCKET_B_SIZE * sizeof(saidx_t));

  /* Burrows-Wheeler Transform. */
  if((B != NULL) && (bucket_A != NULL) && (bucket_B != NULL)) {
    m = sort_typeBstar(T, B, bucket_A, bucket_B, n);
    pidx = construct_BWT(T, B, bucket_A, bucket_B, n, m);

    /* Copy to output string. */
    U[0] = T[n - 1];
    for(i = 0; i < pidx; ++i) { U[i + 1] = (sauchar_t)B[i]; }
    for(i += 1; i < n; ++i) { U[i] = (sauchar_t)B[i]; }
    pidx += 1;
  } else {
    pidx = -2;
  }

  free(bucket_B);
  free(bucket_A);
  if(A == NULL) { free(B); }

  return pidx;
}

/* Returns the library version string. */
const char *
divsufsort_version(void) {
  return PROJECT_VERSION_FULL;
}
sections.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/*
 * Demonstrates OpenMP sections: exactly one thread of the team reports the
 * team size from inside the parallel region, then every thread prints a
 * greeting with its own id.
 */
int main(void)
{
    /* Outside any parallel region the "team" is just the initial thread. */
    int threads_outside = omp_get_num_threads();
    printf("Outside of the pragma, the number of threads is %d\n\n", threads_outside);

#pragma omp parallel
    {
#pragma omp sections nowait
        {
#pragma omp section
            {
                /* Executed by a single thread of the team. */
                int threads_inside = omp_get_num_threads();
                printf("Inside the pragma, the number of threads is %d\n\n", threads_inside);
            }
        }

        /* Each thread greets with its own id (declared inside the region,
           so it is private by construction). */
        int tid = omp_get_thread_num();
        printf("Hello(%d)", tid);
        printf(" world from process %d!\n\n", tid);
    }

    return 0;
}
cryptbreaker.c
#define _GNU_SOURCE /* See feature_test_macros(7) */ #include <crypt.h> #include <omp.h> #include <mpi.h> #include <crypt.h> #include <unistd.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #define byte unsigned char #define HASH_SIZE 14 #define VOCABULARY " ./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" int world_size = 1; int world_rank = 0; int vocabulary_size = 0; char* data = NULL; unsigned long data_size = 0; char** hash_list = 0; unsigned int hash_list_size = 0; const int root_id = 0; extern inline int count_vocabulary_size_is_ended() { return VOCABULARY[vocabulary_size] == '\0'; } void count_vocabulary_size() { if (!count_vocabulary_size_is_ended()) { vocabulary_size++; count_vocabulary_size(); } } unsigned long read_file_size(char* name) { FILE *file = fopen(name, "rb"); fseek(file, 0, SEEK_END); unsigned long file_size = ftell(file); fclose(file); return file_size; } char* read_all_file(char* name) { FILE *file = fopen(name, "rb"); fseek(file, 0, SEEK_END); unsigned long file_size = ftell(file); fseek(file, 0, SEEK_SET); char *large_buffer = malloc(sizeof(char)*(file_size + 1)); fread(large_buffer, 1, file_size, file); fclose(file); large_buffer[file_size] = 0; return large_buffer; } void read_data() { data = read_all_file("imput"); data_size = read_file_size("imput"); } int is_frontend() { return world_rank == root_id; } void print_rank_status() { char* status = is_frontend() ? 
"frontend" : "backend"; printf("rank %d of %d (is %s)\n", world_rank, world_size, status); } void print_vocabulary_status() { if (is_frontend()) printf("vocabulary: %d |> '%s'\n", vocabulary_size, VOCABULARY); } void alloc_data() { data = malloc(sizeof(char)*(data_size + 1)); } void broadcast_data() { MPI_Bcast(data, data_size, MPI_CHAR, root_id, MPI_COMM_WORLD); } void broadcast_data_size() { MPI_Bcast(&data_size, 1, MPI_UNSIGNED_LONG, root_id, MPI_COMM_WORLD); } void send_data() { broadcast_data_size(); broadcast_data(); } void recv_data() { broadcast_data_size(); alloc_data(); broadcast_data(); } void calcule_hash_list_size() { hash_list_size = (unsigned int)((data_size + 1) / HASH_SIZE); } void index_hash_list() { #pragma omp parallel for schedule(static) for(int index = 0; index < hash_list_size; index++) { hash_list[index] = &data[HASH_SIZE * index]; hash_list[index][HASH_SIZE - 1] = '\0'; } } void alloc_hash_list() { hash_list = (char**)malloc(sizeof(char*) * hash_list_size); } static int string_comparator(const void* a, const void* b) { return strcmp(*(const char**)a, *(const char**)b); } void sort(char** arr, int n) { qsort(arr, n, sizeof(const char*), string_comparator); } void sort_hash_list() { sort(hash_list, hash_list_size); } void organize_data() { calcule_hash_list_size(); alloc_hash_list(); index_hash_list(); sort_hash_list(); } void break_it(char password[9], int thread_id) { char salt[2] = {'\0', '\0'}; char* hash; struct crypt_data data; data.initialized = 0; for(int index; index < hash_list_size; index++) { char *indexed_hash_list = hash_list[index]; if (indexed_hash_list[0] == '\0' ) continue; if (indexed_hash_list[0] != salt[0] || indexed_hash_list[1] != salt[1]) { salt[0] = indexed_hash_list[0]; salt[1] = indexed_hash_list[1]; hash = crypt_r(password, salt, &data); } if (indexed_hash_list[12] == hash[12] && indexed_hash_list[11] == hash[11] && indexed_hash_list[10] == hash[10] && indexed_hash_list[9] == hash[9] && indexed_hash_list[8] == 
hash[8] && indexed_hash_list[7] == hash[7] && indexed_hash_list[6] == hash[6] && indexed_hash_list[5] == hash[5] && indexed_hash_list[4] == hash[4] && indexed_hash_list[3] == hash[3] && indexed_hash_list[2] == hash[2]) { indexed_hash_list[0] = '\0'; printf("decrypted (THREAD %d RANK %d) |> %s |> %s\n", thread_id, world_rank, hash, password); } } } void combine_1() { #pragma omp parallel for for (int n0 = world_rank; n0 < vocabulary_size; n0 += world_size) { int thread_id = omp_get_thread_num(); char password[9] = {'\0', '\0'}; password[0] = VOCABULARY[n0]; break_it(password, thread_id); } } void combine_2() { #pragma omp parallel for for (int n0 = world_rank; n0 < vocabulary_size; n0 += world_size) { int thread_id = omp_get_thread_num(); char password[9] = {'\0', '\0', '\0'}; password[0] = VOCABULARY[n0]; for (int n1 = 0; n1 < vocabulary_size; n1 ++) { password[1] = VOCABULARY[n1]; break_it(password, thread_id); } } } void combine_3() { #pragma omp parallel for for (int n0 = world_rank; n0 < vocabulary_size; n0 += world_size) { int thread_id = omp_get_thread_num(); char password[9] = {'\0', '\0', '\0', '\0'}; password[0] = VOCABULARY[n0]; for (int n1 = 0; n1 < vocabulary_size; n1 ++) { password[1] = VOCABULARY[n1]; for (int n2 = 0; n2 < vocabulary_size; n2 ++) { password[2] = VOCABULARY[n2]; break_it(password, thread_id); } } } } void combine_4() { #pragma omp parallel for for (int n0 = world_rank; n0 < vocabulary_size; n0 += world_size) { int thread_id = omp_get_thread_num(); char password[9] = {'\0', '\0', '\0', '\0', '\0'}; password[0] = VOCABULARY[n0]; for (int n1 = 0; n1 < vocabulary_size; n1 ++) { password[1] = VOCABULARY[n1]; for (int n2 = 0; n2 < vocabulary_size; n2 ++) { password[2] = VOCABULARY[n2]; for (int n3 = 0; n3 < vocabulary_size; n3 ++) { password[3] = VOCABULARY[n3]; break_it(password, thread_id); } } } } } void combine_5() { #pragma omp parallel for for (int n0 = world_rank; n0 < vocabulary_size; n0 += world_size) { int thread_id = 
omp_get_thread_num(); char password[9] = {'\0', '\0', '\0', '\0', '\0', '\0'}; password[0] = VOCABULARY[n0]; for (int n1 = 0; n1 < vocabulary_size; n1 ++) { password[1] = VOCABULARY[n1]; for (int n2 = 0; n2 < vocabulary_size; n2 ++) { password[2] = VOCABULARY[n2]; for (int n3 = 0; n3 < vocabulary_size; n3 ++) { password[3] = VOCABULARY[n3]; for (int n4 = 0; n4 < vocabulary_size; n4 ++) { password[4] = VOCABULARY[n4]; break_it(password, thread_id); } } } } } } void combine_6() { #pragma omp parallel for for (int n0 = world_rank; n0 < vocabulary_size; n0 += world_size) { int thread_id = omp_get_thread_num(); char password[9] = {'\0', '\0', '\0', '\0', '\0', '\0', '\0'}; password[0] = VOCABULARY[n0]; for (int n1 = 0; n1 < vocabulary_size; n1 ++) { password[1] = VOCABULARY[n1]; for (int n2 = 0; n2 < vocabulary_size; n2 ++) { password[2] = VOCABULARY[n2]; for (int n3 = 0; n3 < vocabulary_size; n3 ++) { password[3] = VOCABULARY[n3]; for (int n4 = 0; n4 < vocabulary_size; n4 ++) { password[4] = VOCABULARY[n4]; for (int n5 = 0; n5 < vocabulary_size; n5 ++) { password[5] = VOCABULARY[n5]; break_it(password, thread_id); } } } } } } } void combine_7() { #pragma omp parallel for for (int n0 = world_rank; n0 < vocabulary_size; n0 += world_size) { int thread_id = omp_get_thread_num(); char password[9] = {'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0'}; password[0] = VOCABULARY[n0]; for (int n1 = 0; n1 < vocabulary_size; n1 ++) { password[1] = VOCABULARY[n1]; for (int n2 = 0; n2 < vocabulary_size; n2 ++) { password[2] = VOCABULARY[n2]; for (int n3 = 0; n3 < vocabulary_size; n3 ++) { password[3] = VOCABULARY[n3]; for (int n4 = 0; n4 < vocabulary_size; n4 ++) { password[4] = VOCABULARY[n4]; for (int n5 = 0; n5 < vocabulary_size; n5 ++) { password[5] = VOCABULARY[n5]; for (int n6 = 0; n6 < vocabulary_size; n6 ++) { password[6] = VOCABULARY[n6]; break_it(password, thread_id); } } } } } } } } void combine_8() { #pragma omp parallel for for (int n0 = world_rank; n0 < vocabulary_size; 
n0 += world_size) { int thread_id = omp_get_thread_num(); char password[9] = {'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0'}; password[0] = VOCABULARY[n0]; for (int n1 = 0; n1 < vocabulary_size; n1 ++) { password[1] = VOCABULARY[n1]; for (int n2 = 0; n2 < vocabulary_size; n2 ++) { password[2] = VOCABULARY[n2]; for (int n3 = 0; n3 < vocabulary_size; n3 ++) { password[3] = VOCABULARY[n3]; for (int n4 = 0; n4 < vocabulary_size; n4 ++) { password[4] = VOCABULARY[n4]; for (int n5 = 0; n5 < vocabulary_size; n5 ++) { password[5] = VOCABULARY[n5]; for (int n6 = 0; n6 < vocabulary_size; n6 ++) { password[6] = VOCABULARY[n6]; for (int n7 = 0; n7 < vocabulary_size; n7 ++) { password[7] = VOCABULARY[n7]; break_it(password, thread_id); } } } } } } } } } void combine() { combine_1(); combine_2(); combine_3(); combine_4(); combine_5(); combine_6(); combine_7(); combine_8(); } void front_end() { read_data(); send_data(); organize_data(); combine(); } void back_end() { recv_data(); organize_data(); combine(); } int main(int argc, char** argv) { MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &world_size); MPI_Comm_rank(MPI_COMM_WORLD, &world_rank); count_vocabulary_size(); print_rank_status(); print_vocabulary_status(); is_frontend() ? front_end() : back_end(); MPI_Finalize(); return 0; }
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 32; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
IonisationBox.c
// Re-write of find_HII_bubbles.c for being accessible within the MCMC int INIT_ERFC_INTERPOLATION = 1; int INIT_RECOMBINATIONS = 1; double *ERFC_VALS, *ERFC_VALS_DIFF; float absolute_delta_z; float overdense_small_min, overdense_small_bin_width, overdense_small_bin_width_inv; float overdense_large_min, overdense_large_bin_width, overdense_large_bin_width_inv; float prev_overdense_small_min, prev_overdense_small_bin_width, prev_overdense_small_bin_width_inv; float prev_overdense_large_min, prev_overdense_large_bin_width, prev_overdense_large_bin_width_inv; float log10Mturn_min, log10Mturn_max, log10Mturn_bin_width, log10Mturn_bin_width_inv; float log10Mturn_min_MINI, log10Mturn_max_MINI, log10Mturn_bin_width_MINI, log10Mturn_bin_width_inv_MINI; int EvaluateSplineTable(bool MINI_HALOS, int dens_type, float curr_dens, float filtered_Mturn, float filtered_Mturn_MINI, float *Splined_Fcoll, float *Splined_Fcoll_MINI); void InterpolationRange(int dens_type, float R, float L, float *min_density, float *max_density); int ComputeIonizedBox(float redshift, float prev_redshift, struct UserParams *user_params, struct CosmoParams *cosmo_params, struct AstroParams *astro_params, struct FlagOptions *flag_options, struct PerturbedField *perturbed_field, struct PerturbedField *previous_perturbed_field, struct IonizedBox *previous_ionize_box, struct TsBox *spin_temp, struct PerturbHaloField *halos, struct InitialConditions *ini_boxes, struct IonizedBox *box) { int status; Try{ // This Try brackets the whole function, so we don't indent. 
LOG_DEBUG("input values:"); LOG_DEBUG("redshift=%f, prev_redshift=%f", redshift, prev_redshift); #if LOG_LEVEL >= DEBUG_LEVEL writeUserParams(user_params); writeCosmoParams(cosmo_params); writeAstroParams(flag_options, astro_params); writeFlagOptions(flag_options); #endif // Makes the parameter structs visible to a variety of functions/macros // Do each time to avoid Python garbage collection issues Broadcast_struct_global_PS(user_params,cosmo_params); Broadcast_struct_global_UF(user_params,cosmo_params); omp_set_num_threads(user_params->N_THREADS); // Other parameters used in the code int i,j,k,x,y,z, LAST_FILTER_STEP, first_step_R, short_completely_ionised,i_halo; int counter, N_halos_in_cell; unsigned long long ct; float growth_factor, pixel_mass, cell_length_factor, M_MIN, prev_growth_factor; float erfc_denom, erfc_denom_cell, res_xH, Splined_Fcoll, xHII_from_xrays, curr_dens, massofscaleR, ION_EFF_FACTOR, growth_factor_dz; float Splined_Fcoll_MINI, prev_dens, ION_EFF_FACTOR_MINI, prev_Splined_Fcoll, prev_Splined_Fcoll_MINI; float ave_M_coll_cell, ave_N_min_cell, pixel_volume, density_over_mean; float curr_vcb; double global_xH, ST_over_PS, f_coll, R, stored_R, f_coll_min; double ST_over_PS_MINI, f_coll_MINI, f_coll_min_MINI; double t_ast, Gamma_R_prefactor, rec, dNrec, sigmaMmax; double Gamma_R_prefactor_MINI; float fabs_dtdz, ZSTEP, z_eff; const float dz = 0.01; float dens_val, prev_dens_val; int overdense_int,status_int; int something_finite_or_infinite = 0; int log10_Mturnover_MINI_int, log10_Mturnover_int; int *overdense_int_boundexceeded_threaded = calloc(user_params->N_THREADS,sizeof(int)); if(user_params->USE_INTERPOLATION_TABLES) { overdense_large_min = global_params.CRIT_DENS_TRANSITION*0.999; overdense_large_bin_width = 1./((double)NSFR_high-1.)*(Deltac-overdense_large_min); overdense_large_bin_width_inv = 1./overdense_large_bin_width; prev_overdense_large_min = global_params.CRIT_DENS_TRANSITION*0.999; prev_overdense_large_bin_width = 
1./((double)NSFR_high-1.)*(Deltac-prev_overdense_large_min); prev_overdense_large_bin_width_inv = 1./prev_overdense_large_bin_width; } double ave_log10_Mturnover, ave_log10_Mturnover_MINI; float Mlim_Fstar, Mlim_Fesc; float Mlim_Fstar_MINI, Mlim_Fesc_MINI; float Mcrit_atom, log10_Mcrit_atom, log10_Mcrit_mol; fftwf_complex *log10_Mturnover_unfiltered=NULL, *log10_Mturnover_filtered=NULL; fftwf_complex *log10_Mturnover_MINI_unfiltered=NULL, *log10_Mturnover_MINI_filtered=NULL; float log10_Mturnover, log10_Mturnover_MINI, Mcrit_LW, Mcrit_RE, Mturnover, Mturnover_MINI; float min_density, max_density; float prev_min_density, prev_max_density; float stored_redshift, adjustment_factor; gsl_rng * r[user_params->N_THREADS]; LOG_SUPER_DEBUG("initing heat"); init_heat(); float TK; TK = T_RECFAST(redshift,0); LOG_SUPER_DEBUG("inited heat"); init_ps(); LOG_SUPER_DEBUG("defined parameters"); pixel_volume = pow(user_params->BOX_LEN/((float)(user_params->HII_DIM)), 3); if(flag_options->USE_MASS_DEPENDENT_ZETA) { ION_EFF_FACTOR = global_params.Pop2_ion * astro_params->F_STAR10 * astro_params->F_ESC10; ION_EFF_FACTOR_MINI = global_params.Pop3_ion * astro_params->F_STAR7_MINI * astro_params->F_ESC7_MINI; } else { ION_EFF_FACTOR = astro_params->HII_EFF_FACTOR; ION_EFF_FACTOR_MINI = 0.; } // For recombinations if(flag_options->INHOMO_RECO) { if(INIT_RECOMBINATIONS) { init_MHR(); INIT_RECOMBINATIONS=0; } if (prev_redshift < 1) //deal with first redshift ZSTEP = (1. 
+ redshift) * (global_params.ZPRIME_STEP_FACTOR - 1.); else ZSTEP = prev_redshift - redshift; #pragma omp parallel shared(box) private(ct) num_threads(user_params->N_THREADS) { #pragma omp for for (ct=0; ct<HII_TOT_NUM_PIXELS; ct++) { box->Gamma12_box[ct] = 0.0; box->MFP_box[ct] = 0.0; } } } else { ZSTEP = 0.2; } #pragma omp parallel shared(box) private(ct) num_threads(user_params->N_THREADS) { #pragma omp for for (ct=0; ct<HII_TOT_NUM_PIXELS; ct++) { box->z_re_box[ct] = -1.0; } } LOG_SUPER_DEBUG("z_re_box init: "); debugSummarizeBox(box->z_re_box, user_params->HII_DIM, " "); fabs_dtdz = fabs(dtdz(redshift))/1e15; //reduce to have good precision t_ast = astro_params->t_STAR * t_hubble(redshift); growth_factor_dz = dicke(redshift-dz); // Modify the current sampled redshift to a redshift which matches the expected filling factor given our astrophysical parameterisation. // This is the photon non-conservation correction if(flag_options->PHOTON_CONS) { adjust_redshifts_for_photoncons(astro_params,flag_options,&redshift,&stored_redshift,&absolute_delta_z); LOG_DEBUG("PhotonCons data:"); LOG_DEBUG("original redshift=%f, updated redshift=%f delta-z = %f", stored_redshift, redshift, absolute_delta_z); if(isfinite(redshift)==0 || isfinite(absolute_delta_z)==0) { LOG_ERROR("Updated photon non-conservation redshift is either infinite or NaN!"); LOG_ERROR("This can sometimes occur when reionisation stalls (i.e. extremely low"\ "F_ESC or F_STAR or not enough sources)"); // Throw(ParameterError); Throw(PhotonConsError); } } Splined_Fcoll = 0.; Splined_Fcoll_MINI = 0.; double ArgBinWidth, InvArgBinWidth, erfc_arg_val, erfc_arg_min, erfc_arg_max; int erfc_arg_val_index, ERFC_NUM_POINTS; erfc_arg_val = 0.; erfc_arg_val_index = 0; // Setup an interpolation table for the error function, helpful for calcluating the collapsed fraction // (only for the default model, i.e. 
mass-independent ionising efficiency) erfc_arg_min = -15.0; erfc_arg_max = 15.0; ERFC_NUM_POINTS = 10000; ArgBinWidth = (erfc_arg_max - erfc_arg_min)/((double)ERFC_NUM_POINTS - 1.); InvArgBinWidth = 1./ArgBinWidth; if(!flag_options->USE_MASS_DEPENDENT_ZETA && INIT_ERFC_INTERPOLATION) { ERFC_VALS = calloc(ERFC_NUM_POINTS,sizeof(double)); ERFC_VALS_DIFF = calloc(ERFC_NUM_POINTS,sizeof(double)); #pragma omp parallel shared(ERFC_VALS,erfc_arg_min,ArgBinWidth) private(i,erfc_arg_val) num_threads(user_params->N_THREADS) { #pragma omp for for(i=0;i<ERFC_NUM_POINTS;i++) { erfc_arg_val = erfc_arg_min + ArgBinWidth*(double)i; ERFC_VALS[i] = splined_erfc(erfc_arg_val); } } #pragma omp parallel shared(ERFC_VALS_DIFF,ERFC_VALS) private(i) num_threads(user_params->N_THREADS) { #pragma omp for for(i=0;i<(ERFC_NUM_POINTS-1);i++) { ERFC_VALS_DIFF[i] = ERFC_VALS[i+1] - ERFC_VALS[i]; } } INIT_ERFC_INTERPOLATION = 0; } LOG_SUPER_DEBUG("erfc interpolation done"); ///////////////////////////////// BEGIN INITIALIZATION ////////////////////////////////// // perform a very rudimentary check to see if we are underresolved and not using the linear approx if ((user_params->BOX_LEN > user_params->DIM) && !(global_params.EVOLVE_DENSITY_LINEARLY)){ LOG_WARNING("Resolution is likely too low for accurate evolved density fields\n It Is recommended \ that you either increase the resolution (DIM/Box_LEN) or set the EVOLVE_DENSITY_LINEARLY flag to 1\n"); } // initialize power spectrum growth_factor = dicke(redshift); prev_growth_factor = dicke(prev_redshift); fftwf_complex *deltax_unfiltered, *deltax_unfiltered_original, *deltax_filtered; fftwf_complex *xe_unfiltered, *xe_filtered, *N_rec_unfiltered, *N_rec_filtered; fftwf_complex *prev_deltax_unfiltered, *prev_deltax_filtered; fftwf_complex *M_coll_unfiltered,*M_coll_filtered; deltax_unfiltered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); deltax_unfiltered_original = (fftwf_complex *) 
fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); deltax_filtered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); if (flag_options->USE_MINI_HALOS){ prev_deltax_unfiltered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); prev_deltax_filtered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); } if(flag_options->USE_TS_FLUCT) { xe_unfiltered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); xe_filtered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); } if (flag_options->INHOMO_RECO){ N_rec_unfiltered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); // cumulative number of recombinations N_rec_filtered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); } if(flag_options->USE_MASS_DEPENDENT_ZETA) { xi_SFR = calloc(NGL_SFR+1,sizeof(float)); wi_SFR = calloc(NGL_SFR+1,sizeof(float)); if(user_params->USE_INTERPOLATION_TABLES) { log10_overdense_spline_SFR = calloc(NSFR_low,sizeof(double)); Overdense_spline_SFR = calloc(NSFR_high,sizeof(float)); log10_Nion_spline = calloc(NSFR_low,sizeof(float)); Nion_spline = calloc(NSFR_high,sizeof(float)); if (flag_options->USE_MINI_HALOS){ prev_log10_overdense_spline_SFR = calloc(NSFR_low,sizeof(double)); prev_Overdense_spline_SFR = calloc(NSFR_high,sizeof(float)); log10_Nion_spline = calloc(NSFR_low*NMTURN,sizeof(float)); Nion_spline = calloc(NSFR_high*NMTURN,sizeof(float)); log10_Nion_spline_MINI = calloc(NSFR_low*NMTURN,sizeof(float)); Nion_spline_MINI = calloc(NSFR_high*NMTURN,sizeof(float)); prev_log10_Nion_spline = calloc(NSFR_low*NMTURN,sizeof(float)); prev_Nion_spline = calloc(NSFR_high*NMTURN,sizeof(float)); prev_log10_Nion_spline_MINI = calloc(NSFR_low*NMTURN,sizeof(float)); prev_Nion_spline_MINI = calloc(NSFR_high*NMTURN,sizeof(float)); } } if (flag_options->USE_MINI_HALOS){ Mturns = calloc(NMTURN,sizeof(float)); 
Mturns_MINI = calloc(NMTURN,sizeof(float)); } } // Calculate the density field for this redshift if the initial conditions/cosmology are changing if(flag_options->PHOTON_CONS) { adjustment_factor = dicke(redshift)/dicke(stored_redshift); } else { adjustment_factor = 1.; } #pragma omp parallel shared(deltax_unfiltered,perturbed_field,adjustment_factor) private(i,j,k) num_threads(user_params->N_THREADS) { #pragma omp for for (i=0; i<user_params->HII_DIM; i++){ for (j=0; j<user_params->HII_DIM; j++){ for (k=0; k<user_params->HII_DIM; k++){ *((float *)deltax_unfiltered + HII_R_FFT_INDEX(i,j,k)) = (perturbed_field->density[HII_R_INDEX(i,j,k)])*adjustment_factor; } } } } LOG_SUPER_DEBUG("density field calculated"); // keep the unfiltered density field in an array, to save it for later memcpy(deltax_unfiltered_original, deltax_unfiltered, sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); i=0; // Newer setup to be performed in parallel int thread_num; for(thread_num = 0; thread_num < user_params->N_THREADS; thread_num++){ // Original defaults with gsl_rng_mt19937 and SEED = 0, thus start with this and iterate for all other threads by their thread number r[thread_num] = gsl_rng_alloc(gsl_rng_mt19937); gsl_rng_set(r[thread_num], thread_num); } pixel_mass = RtoM(L_FACTOR*user_params->BOX_LEN/(float)(user_params->HII_DIM)); cell_length_factor = L_FACTOR; if(flag_options->USE_HALO_FIELD && (global_params.FIND_BUBBLE_ALGORITHM == 2) && ((user_params->BOX_LEN/(float)(user_params->HII_DIM) < 1))) { cell_length_factor = 1.; } if (prev_redshift < 1){ LOG_DEBUG("first redshift, do some initialization"); previous_ionize_box->z_re_box = (float *) calloc(HII_TOT_NUM_PIXELS, sizeof(float)); #pragma omp parallel shared(previous_ionize_box) private(i,j,k) num_threads(user_params->N_THREADS) { #pragma omp for for (i=0; i<user_params->HII_DIM; i++){ for (j=0; j<user_params->HII_DIM; j++){ for (k=0; k<user_params->HII_DIM; k++){ previous_ionize_box->z_re_box[HII_R_INDEX(i, j, k)] = -1.0; } } } } 
if (flag_options->INHOMO_RECO) previous_ionize_box->dNrec_box = (float *) calloc(HII_TOT_NUM_PIXELS, sizeof(float)); } //set the minimum source mass if (flag_options->USE_MASS_DEPENDENT_ZETA) { if (flag_options->USE_MINI_HALOS){ ave_log10_Mturnover = 0.; ave_log10_Mturnover_MINI = 0.; // this is the first z, and the previous_ionize_box are empty if (prev_redshift < 1){ previous_ionize_box->Gamma12_box = (float *) calloc(HII_TOT_NUM_PIXELS, sizeof(float)); // really painful to get the length... counter = 1; R=fmax(global_params.R_BUBBLE_MIN, (cell_length_factor*user_params->BOX_LEN/(float)user_params->HII_DIM)); while ((R - fmin(astro_params->R_BUBBLE_MAX, L_FACTOR*user_params->BOX_LEN)) <= FRACT_FLOAT_ERR ){ if(R >= fmin(astro_params->R_BUBBLE_MAX, L_FACTOR*user_params->BOX_LEN)) { stored_R = R/(global_params.DELTA_R_HII_FACTOR); } R*= global_params.DELTA_R_HII_FACTOR; counter += 1; } previous_ionize_box->Fcoll = (float *) calloc(HII_TOT_NUM_PIXELS*counter, sizeof(float)); previous_ionize_box->Fcoll_MINI = (float *) calloc(HII_TOT_NUM_PIXELS*counter, sizeof(float)); previous_ionize_box->mean_f_coll = 0.0; previous_ionize_box->mean_f_coll_MINI = 0.0; #pragma omp parallel shared(prev_deltax_unfiltered) private(i,j,k) num_threads(user_params->N_THREADS) { #pragma omp for for (i=0; i<user_params->HII_DIM; i++){ for (j=0; j<user_params->HII_DIM; j++){ for (k=0; k<user_params->HII_DIM; k++){ *((float *)prev_deltax_unfiltered + HII_R_FFT_INDEX(i,j,k)) = -1.5; } } } } } else{ #pragma omp parallel shared(prev_deltax_unfiltered,previous_perturbed_field) private(i,j,k) num_threads(user_params->N_THREADS) { #pragma omp for for (i=0; i<user_params->HII_DIM; i++){ for (j=0; j<user_params->HII_DIM; j++){ for (k=0; k<user_params->HII_DIM; k++){ *((float *)prev_deltax_unfiltered + HII_R_FFT_INDEX(i,j,k)) = previous_perturbed_field->density[HII_R_INDEX(i,j,k)]; } } } } } LOG_SUPER_DEBUG("previous density field calculated"); // fields added for minihalos Mcrit_atom = 
atomic_cooling_threshold(redshift); log10_Mcrit_atom = log10(Mcrit_atom); log10_Mcrit_mol = log10(lyman_werner_threshold(redshift, 0.,0., astro_params)); log10_Mturnover_unfiltered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); log10_Mturnover_filtered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); log10_Mturnover_MINI_unfiltered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); log10_Mturnover_MINI_filtered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); if (!log10_Mturnover_unfiltered || !log10_Mturnover_filtered || !log10_Mturnover_MINI_unfiltered || !log10_Mturnover_MINI_filtered){// || !Mcrit_RE_grid || !Mcrit_LW_grid) LOG_ERROR("Error allocating memory for Mturnover or Mturnover_MINI boxes"); Throw(MemoryAllocError); } LOG_SUPER_DEBUG("Calculating and outputting Mcrit boxes for atomic and molecular halos..."); #pragma omp parallel shared(redshift,previous_ionize_box,spin_temp,Mcrit_atom,log10_Mturnover_unfiltered,log10_Mturnover_MINI_unfiltered)\ private(x,y,z,Mcrit_RE,Mcrit_LW,Mturnover,Mturnover_MINI,log10_Mturnover,log10_Mturnover_MINI,curr_vcb) num_threads(user_params->N_THREADS) { #pragma omp for reduction(+:ave_log10_Mturnover,ave_log10_Mturnover_MINI) for (x=0; x<user_params->HII_DIM; x++){ for (y=0; y<user_params->HII_DIM; y++){ for (z=0; z<user_params->HII_DIM; z++){ Mcrit_RE = reionization_feedback(redshift, previous_ionize_box->Gamma12_box[HII_R_INDEX(x, y, z)], previous_ionize_box->z_re_box[HII_R_INDEX(x, y, z)]); if (flag_options->FIX_VCB_AVG){ //with this flag we ignore reading vcb box curr_vcb = global_params.VAVG; } else{ if(user_params->USE_RELATIVE_VELOCITIES ){ curr_vcb = ini_boxes->lowres_vcb[HII_R_INDEX(x,y,z)]; } else{ //set vcb to a constant, either zero or vavg. 
curr_vcb = 0.0; } } Mcrit_LW = lyman_werner_threshold(redshift, spin_temp->J_21_LW_box[HII_R_INDEX(x, y, z)], curr_vcb, astro_params); //JBM: this only accounts for effect 3 (largest on minihaloes). Effects 1 and 2 affect both minihaloes (MCGs) and regular ACGs, but they're smaller ~10%. See Sec 2 of Muñoz+21 (2110.13919) //*((float *)Mcrit_RE_grid + HII_R_FFT_INDEX(x,y,z)) = Mcrit_RE; //*((float *)Mcrit_LW_grid + HII_R_FFT_INDEX(x,y,z)) = Mcrit_LW; Mturnover = Mcrit_RE > Mcrit_atom ? Mcrit_RE : Mcrit_atom; Mturnover_MINI = Mcrit_RE > Mcrit_LW ? Mcrit_RE : Mcrit_LW; log10_Mturnover = log10(Mturnover); log10_Mturnover_MINI = log10(Mturnover_MINI); *((float *)log10_Mturnover_unfiltered + HII_R_FFT_INDEX(x,y,z)) = log10_Mturnover; *((float *)log10_Mturnover_MINI_unfiltered + HII_R_FFT_INDEX(x,y,z)) = log10_Mturnover_MINI; ave_log10_Mturnover += log10_Mturnover; ave_log10_Mturnover_MINI += log10_Mturnover_MINI; } } } } box->log10_Mturnover_ave = ave_log10_Mturnover/(double) HII_TOT_NUM_PIXELS; box->log10_Mturnover_MINI_ave = ave_log10_Mturnover_MINI/(double) HII_TOT_NUM_PIXELS; Mturnover = pow(10., box->log10_Mturnover_ave); Mturnover_MINI = pow(10., box->log10_Mturnover_MINI_ave); M_MIN = global_params.M_MIN_INTEGRAL; Mlim_Fstar_MINI = Mass_limit_bisection(M_MIN, 1e16, astro_params->ALPHA_STAR_MINI, astro_params->F_STAR7_MINI * pow(1e3,astro_params->ALPHA_STAR_MINI)); Mlim_Fesc_MINI = Mass_limit_bisection(M_MIN, 1e16, astro_params->ALPHA_ESC, astro_params->F_ESC7_MINI * pow(1e3, astro_params->ALPHA_ESC)); LOG_SUPER_DEBUG("average turnover masses are %.2f and %.2f for ACGs and MCGs", box->log10_Mturnover_ave, box->log10_Mturnover_MINI_ave); } else{ M_MIN = astro_params->M_TURN/50.; Mturnover = astro_params->M_TURN; box->log10_Mturnover_ave = log10(Mturnover); box->log10_Mturnover_MINI_ave = log10(Mturnover); } Mlim_Fstar = Mass_limit_bisection(M_MIN, 1e16, astro_params->ALPHA_STAR, astro_params->F_STAR10); Mlim_Fesc = Mass_limit_bisection(M_MIN, 1e16, 
astro_params->ALPHA_ESC, astro_params->F_ESC10); } else { //set the minimum source mass if (astro_params->ION_Tvir_MIN < 9.99999e3) { // neutral IGM M_MIN = (float)TtoM(redshift, astro_params->ION_Tvir_MIN, 1.22); } else { // ionized IGM M_MIN = (float)TtoM(redshift, astro_params->ION_Tvir_MIN, 0.6); } } LOG_SUPER_DEBUG("minimum source mass has been set: %f", M_MIN); if(user_params->USE_INTERPOLATION_TABLES) { if(user_params->FAST_FCOLL_TABLES){ initialiseSigmaMInterpTable(fmin(MMIN_FAST,M_MIN),1e20); } else{ if(!flag_options->USE_TS_FLUCT) { initialiseSigmaMInterpTable(M_MIN,1e20); } else if(flag_options->USE_MINI_HALOS){ initialiseSigmaMInterpTable(global_params.M_MIN_INTEGRAL/50.,1e20); } } } LOG_SUPER_DEBUG("sigma table has been initialised"); // check for WDM if (global_params.P_CUTOFF && ( M_MIN < M_J_WDM())){ LOG_WARNING("The default Jeans mass of %e Msun is smaller than the scale supressed by the effective pressure of WDM.", M_MIN); M_MIN = M_J_WDM(); LOG_WARNING("Setting a new effective Jeans mass from WDM pressure supression of %e Msun", M_MIN); } // ARE WE USING A DISCRETE HALO FIELD (identified in the ICs with FindHaloes.c and evolved with PerturbHaloField.c) if(flag_options->USE_HALO_FIELD) { M_coll_unfiltered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); M_coll_filtered = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); #pragma omp parallel shared(M_coll_unfiltered) private(ct) num_threads(user_params->N_THREADS) { #pragma omp for for (ct=0; ct<HII_TOT_FFT_NUM_PIXELS; ct++){ *((float *)M_coll_unfiltered + ct) = 0; } } #pragma omp parallel shared(M_coll_unfiltered,halos) \ private(i_halo,x,y,z) num_threads(user_params->N_THREADS) { #pragma omp for for (i_halo=0; i_halo<halos->n_halos; i_halo++){ x = halos->halo_coords[0+3*i_halo]; y = halos->halo_coords[1+3*i_halo]; z = halos->halo_coords[2+3*i_halo]; #pragma omp atomic *((float *)M_coll_unfiltered + HII_R_FFT_INDEX(x, y, z)) += 
halos->halo_masses[i_halo]; } } } // end of the USE_HALO_FIELD option // lets check if we are going to bother with computing the inhmogeneous field at all... global_xH = 0.0; // Determine the normalisation for the excursion set algorithm if (flag_options->USE_MASS_DEPENDENT_ZETA) { if (flag_options->USE_MINI_HALOS){ if (previous_ionize_box->mean_f_coll * ION_EFF_FACTOR < 1e-4){ box->mean_f_coll = Nion_General(redshift,M_MIN,Mturnover,astro_params->ALPHA_STAR,astro_params->ALPHA_ESC, astro_params->F_STAR10,astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc); } else{ box->mean_f_coll = previous_ionize_box->mean_f_coll + \ Nion_General(redshift,M_MIN,Mturnover,astro_params->ALPHA_STAR,astro_params->ALPHA_ESC, astro_params->F_STAR10,astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc) - \ Nion_General(prev_redshift,M_MIN,Mturnover,astro_params->ALPHA_STAR,astro_params->ALPHA_ESC, astro_params->F_STAR10,astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc); } if (previous_ionize_box->mean_f_coll_MINI * ION_EFF_FACTOR_MINI < 1e-4){ box->mean_f_coll_MINI = Nion_General_MINI(redshift,M_MIN,Mturnover_MINI,Mcrit_atom, astro_params->ALPHA_STAR_MINI,astro_params->ALPHA_ESC,astro_params->F_STAR7_MINI, astro_params->F_ESC7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI); } else{ box->mean_f_coll_MINI = previous_ionize_box->mean_f_coll_MINI + \ Nion_General_MINI(redshift,M_MIN,Mturnover_MINI,Mcrit_atom,astro_params->ALPHA_STAR_MINI, astro_params->ALPHA_ESC,astro_params->F_STAR7_MINI,astro_params->F_ESC7_MINI ,Mlim_Fstar_MINI,Mlim_Fesc_MINI) - \ Nion_General_MINI(prev_redshift,M_MIN,Mturnover_MINI,Mcrit_atom,astro_params->ALPHA_STAR_MINI, astro_params->ALPHA_ESC,astro_params->F_STAR7_MINI,astro_params->F_ESC7_MINI, Mlim_Fstar_MINI,Mlim_Fesc_MINI); } f_coll_min = Nion_General(global_params.Z_HEAT_MAX,M_MIN,Mturnover,astro_params->ALPHA_STAR, astro_params->ALPHA_ESC,astro_params->F_STAR10,astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc); f_coll_min_MINI = 
Nion_General_MINI(global_params.Z_HEAT_MAX,M_MIN,Mturnover_MINI,Mcrit_atom, astro_params->ALPHA_STAR_MINI,astro_params->ALPHA_ESC,astro_params->F_STAR7_MINI, astro_params->F_ESC7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI); } else{ box->mean_f_coll = Nion_General(redshift,M_MIN,Mturnover,astro_params->ALPHA_STAR,astro_params->ALPHA_ESC, astro_params->F_STAR10,astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc); box->mean_f_coll_MINI = 0.; f_coll_min = Nion_General(global_params.Z_HEAT_MAX,M_MIN,Mturnover,astro_params->ALPHA_STAR,astro_params->ALPHA_ESC, astro_params->F_STAR10,astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc); } } else { box->mean_f_coll = FgtrM_General(redshift, M_MIN); } if(isfinite(box->mean_f_coll)==0) { LOG_ERROR("Mean collapse fraction is either infinite or NaN!"); // Throw(ParameterError); Throw(InfinityorNaNError); } LOG_SUPER_DEBUG("excursion set normalisation, mean_f_coll: %e", box->mean_f_coll); if (flag_options->USE_MINI_HALOS){ if(isfinite(box->mean_f_coll_MINI)==0) { LOG_ERROR("Mean collapse fraction of MINI is either infinite or NaN!"); // Throw(ParameterError); Throw(InfinityorNaNError); } LOG_SUPER_DEBUG("excursion set normalisation, mean_f_coll_MINI: %e", box->mean_f_coll_MINI); } if (box->mean_f_coll * ION_EFF_FACTOR + box->mean_f_coll_MINI * ION_EFF_FACTOR_MINI< global_params.HII_ROUND_ERR){ // way too small to ionize anything... 
// printf( "The mean collapse fraction is %e, which is much smaller than the effective critical collapse fraction of %e\n I will just declare everything to be neutral\n", mean_f_coll, f_coll_crit); // find the neutral fraction if(flag_options->USE_TS_FLUCT) { #pragma omp parallel shared(box,spin_temp) private(ct) num_threads(user_params->N_THREADS) { #pragma omp for reduction(+:global_xH) for (ct=0; ct<HII_TOT_NUM_PIXELS; ct++){ box->xH_box[ct] = 1.-spin_temp->x_e_box[ct]; // convert from x_e to xH global_xH += box->xH_box[ct]; box->temp_kinetic_all_gas[ct] = spin_temp->Tk_box[ct]; } } global_xH /= (double)HII_TOT_NUM_PIXELS; } else { global_xH = 1. - xion_RECFAST(redshift, 0); #pragma omp parallel shared(box,global_xH,TK) private(ct) num_threads(user_params->N_THREADS) { #pragma omp for for (ct=0; ct<HII_TOT_NUM_PIXELS; ct++){ box->xH_box[ct] = global_xH; box->temp_kinetic_all_gas[ct] = TK; } } } } else { // Take the ionisation fraction from the X-ray ionisations from Ts.c (only if the calculate spin temperature flag is set) if (flag_options->USE_TS_FLUCT) { #pragma omp parallel shared(xe_unfiltered, spin_temp) private(i, j, k) num_threads(user_params->N_THREADS) { #pragma omp for for (i = 0; i < user_params->HII_DIM; i++) { for (j = 0; j < user_params->HII_DIM; j++) { for (k = 0; k < user_params->HII_DIM; k++) { *((float *) xe_unfiltered + HII_R_FFT_INDEX(i, j, k)) = spin_temp->x_e_box[HII_R_INDEX(i, j, k)]; } } } } } LOG_SUPER_DEBUG("calculated ionization fraction"); if (flag_options->INHOMO_RECO) { #pragma omp parallel shared(N_rec_unfiltered, previous_ionize_box) private(i, j, k) num_threads(user_params->N_THREADS) { #pragma omp for for (i = 0; i < user_params->HII_DIM; i++) { for (j = 0; j < user_params->HII_DIM; j++) { for (k = 0; k < user_params->HII_DIM; k++) { *((float *) N_rec_unfiltered + HII_R_FFT_INDEX(i, j, k)) = previous_ionize_box->dNrec_box[HII_R_INDEX(i, j, k)]; } } } } } dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, 
user_params->N_THREADS, deltax_unfiltered); LOG_SUPER_DEBUG("FFTs performed"); if(flag_options->USE_MINI_HALOS){ dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, prev_deltax_unfiltered); dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, log10_Mturnover_MINI_unfiltered); dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, log10_Mturnover_unfiltered); LOG_SUPER_DEBUG("MINI HALO ffts performed"); } if (flag_options->USE_HALO_FIELD){ dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, M_coll_unfiltered); LOG_SUPER_DEBUG("HALO_FIELD ffts performed"); } if(flag_options->USE_TS_FLUCT) { dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, xe_unfiltered); LOG_SUPER_DEBUG("Ts ffts performed"); } if (flag_options->INHOMO_RECO) { dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, N_rec_unfiltered); } // remember to add the factor of VOLUME/TOT_NUM_PIXELS when converting from // real space to k-space // Note: we will leave off factor of VOLUME, in anticipation of the inverse FFT below #pragma omp parallel shared(deltax_unfiltered,xe_unfiltered,N_rec_unfiltered,prev_deltax_unfiltered,\ log10_Mturnover_unfiltered,log10_Mturnover_MINI_unfiltered,M_coll_unfiltered) \ private(ct) num_threads(user_params->N_THREADS) { #pragma omp for for (ct=0; ct<HII_KSPACE_NUM_PIXELS; ct++){ deltax_unfiltered[ct] /= (HII_TOT_NUM_PIXELS+0.0); if(flag_options->USE_TS_FLUCT) { xe_unfiltered[ct] /= (double)HII_TOT_NUM_PIXELS; } if (flag_options->INHOMO_RECO){ N_rec_unfiltered[ct] /= (double)HII_TOT_NUM_PIXELS; } if(flag_options->USE_HALO_FIELD) { M_coll_unfiltered[ct] /= (double)HII_TOT_NUM_PIXELS; } if(flag_options->USE_MINI_HALOS){ prev_deltax_unfiltered[ct] /= (HII_TOT_NUM_PIXELS+0.0); log10_Mturnover_unfiltered[ct] /= (HII_TOT_NUM_PIXELS+0.0); 
log10_Mturnover_MINI_unfiltered[ct] /= (HII_TOT_NUM_PIXELS+0.0); } } } LOG_SUPER_DEBUG("deltax unfiltered calculated"); // ************************************************************************************* // // ***************** LOOP THROUGH THE FILTER RADII (in Mpc) *************************** // // ************************************************************************************* // // set the max radius we will use, making sure we are always sampling the same values of radius // (this avoids aliasing differences w redshift) short_completely_ionised = 0; // loop through the filter radii (in Mpc) erfc_denom_cell = 1; //dummy value R=fmax(global_params.R_BUBBLE_MIN, (cell_length_factor*user_params->BOX_LEN/(float)user_params->HII_DIM)); while ((R - fmin(astro_params->R_BUBBLE_MAX, L_FACTOR * user_params->BOX_LEN)) <= FRACT_FLOAT_ERR) { R *= global_params.DELTA_R_HII_FACTOR; if (R >= fmin(astro_params->R_BUBBLE_MAX, L_FACTOR * user_params->BOX_LEN)) { stored_R = R / (global_params.DELTA_R_HII_FACTOR); } } LOG_DEBUG("set max radius: %f", R); R=fmin(astro_params->R_BUBBLE_MAX, L_FACTOR*user_params->BOX_LEN); LAST_FILTER_STEP = 0; first_step_R = 1; double R_temp = (double) (astro_params->R_BUBBLE_MAX); counter = 0; while (!LAST_FILTER_STEP && (M_MIN < RtoM(R)) ){ LOG_ULTRA_DEBUG("while loop for until RtoM(R)=%f reaches M_MIN=%f", RtoM(R), M_MIN); // Check if we are the last filter step if ( ((R/(global_params.DELTA_R_HII_FACTOR) - cell_length_factor*(user_params->BOX_LEN)/(float)(user_params->HII_DIM)) <= FRACT_FLOAT_ERR) || \ ((R/(global_params.DELTA_R_HII_FACTOR) - global_params.R_BUBBLE_MIN) <= FRACT_FLOAT_ERR) ) { LAST_FILTER_STEP = 1; R = fmax(cell_length_factor*user_params->BOX_LEN/(double)(user_params->HII_DIM), global_params.R_BUBBLE_MIN); } // Copy all relevant quantities from memory into new arrays to be smoothed and FFT'd. 
if (flag_options->USE_TS_FLUCT) { memcpy(xe_filtered, xe_unfiltered, sizeof(fftwf_complex) * HII_KSPACE_NUM_PIXELS); } if (flag_options->INHOMO_RECO) { memcpy(N_rec_filtered, N_rec_unfiltered, sizeof(fftwf_complex) * HII_KSPACE_NUM_PIXELS); } if (flag_options->USE_HALO_FIELD) { memcpy(M_coll_filtered, M_coll_unfiltered, sizeof(fftwf_complex) * HII_KSPACE_NUM_PIXELS); } memcpy(deltax_filtered, deltax_unfiltered, sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); if(flag_options->USE_MINI_HALOS){ memcpy(prev_deltax_filtered, prev_deltax_unfiltered, sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); memcpy(log10_Mturnover_MINI_filtered, log10_Mturnover_MINI_unfiltered, sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); memcpy(log10_Mturnover_filtered, log10_Mturnover_unfiltered, sizeof(fftwf_complex)*HII_KSPACE_NUM_PIXELS); } if (!LAST_FILTER_STEP || ((R - cell_length_factor * (user_params->BOX_LEN / (double) (user_params->HII_DIM))) > FRACT_FLOAT_ERR)) { if (flag_options->USE_TS_FLUCT) { filter_box(xe_filtered, 1, global_params.HII_FILTER, R); } if (flag_options->INHOMO_RECO) { filter_box(N_rec_filtered, 1, global_params.HII_FILTER, R); } if (flag_options->USE_HALO_FIELD) { filter_box(M_coll_filtered, 1, global_params.HII_FILTER, R); } filter_box(deltax_filtered, 1, global_params.HII_FILTER, R); if(flag_options->USE_MINI_HALOS){ filter_box(prev_deltax_filtered, 1, global_params.HII_FILTER, R); filter_box(log10_Mturnover_MINI_filtered, 1, global_params.HII_FILTER, R); filter_box(log10_Mturnover_filtered, 1, global_params.HII_FILTER, R); } } // Perform FFTs dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, deltax_filtered); if(flag_options->USE_MINI_HALOS){ dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, prev_deltax_filtered); dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, log10_Mturnover_MINI_filtered); dft_c2r_cube(user_params->USE_FFTW_WISDOM, 
user_params->HII_DIM, user_params->N_THREADS, log10_Mturnover_filtered); } if (flag_options->USE_HALO_FIELD) { dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, M_coll_filtered); } if (flag_options->USE_TS_FLUCT) { dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, xe_filtered); } if (flag_options->INHOMO_RECO) { dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->HII_DIM, user_params->N_THREADS, N_rec_filtered); } // Check if this is the last filtering scale. If so, we don't need deltax_unfiltered anymore. // We will re-read it to get the real-space field, which we will use to set the residual neutral fraction ST_over_PS = 0; ST_over_PS_MINI = 0; f_coll = 0; f_coll_MINI = 0; massofscaleR = RtoM(R); if(!user_params->USE_INTERPOLATION_TABLES) { sigmaMmax = sigma_z0(massofscaleR); } if (!flag_options->USE_HALO_FIELD) { if (flag_options->USE_MASS_DEPENDENT_ZETA) { min_density = max_density = 0.0; #pragma omp parallel shared(deltax_filtered) private(x, y, z) num_threads(user_params->N_THREADS) { #pragma omp for reduction(max:max_density) reduction(min:min_density) for (x = 0; x < user_params->HII_DIM; x++) { for (y = 0; y < user_params->HII_DIM; y++) { for (z = 0; z < user_params->HII_DIM; z++) { // delta cannot be less than -1 *((float *) deltax_filtered + HII_R_FFT_INDEX(x, y, z)) = fmaxf( *((float *) deltax_filtered + HII_R_FFT_INDEX(x, y, z)), -1. 
+ FRACT_FLOAT_ERR); if (*((float *) deltax_filtered + HII_R_FFT_INDEX(x, y, z)) < min_density) { min_density = *((float *) deltax_filtered + HII_R_FFT_INDEX(x, y, z)); } if (*((float *) deltax_filtered + HII_R_FFT_INDEX(x, y, z)) > max_density) { max_density = *((float *) deltax_filtered + HII_R_FFT_INDEX(x, y, z)); } } } } } if(user_params->USE_INTERPOLATION_TABLES) { InterpolationRange(1,R,user_params->BOX_LEN,&min_density, &max_density); } if (flag_options->USE_MINI_HALOS){ // do the same for prev prev_min_density = prev_max_density = 0.0; #pragma omp parallel shared(prev_deltax_filtered) private(x, y, z) num_threads(user_params->N_THREADS) { #pragma omp for reduction(max:prev_max_density) reduction(min:prev_min_density) for (x=0; x<user_params->HII_DIM; x++){ for (y=0; y<user_params->HII_DIM; y++){ for (z=0; z<user_params->HII_DIM; z++){ // delta cannot be less than -1 *((float *)prev_deltax_filtered + HII_R_FFT_INDEX(x,y,z)) = \ fmaxf(*((float *)prev_deltax_filtered + HII_R_FFT_INDEX(x,y,z)) , -1.+FRACT_FLOAT_ERR); if( *((float *)prev_deltax_filtered + HII_R_FFT_INDEX(x,y,z)) < prev_min_density ) { prev_min_density = *((float *)prev_deltax_filtered + HII_R_FFT_INDEX(x,y,z)); } if( *((float *)prev_deltax_filtered + HII_R_FFT_INDEX(x,y,z)) > prev_max_density ) { prev_max_density = *((float *)prev_deltax_filtered + HII_R_FFT_INDEX(x,y,z)); } } } } } if(user_params->USE_INTERPOLATION_TABLES) { InterpolationRange(2,R,user_params->BOX_LEN,&prev_min_density, &prev_max_density); } // do the same for logM log10Mturn_min = 999; log10Mturn_max = 0.0; log10Mturn_min_MINI = 999; log10Mturn_max_MINI = 0.0; #pragma omp parallel shared(log10_Mturnover_filtered,log10_Mturnover_MINI_filtered,log10_Mcrit_atom,log10_Mcrit_mol) private(x, y, z) num_threads(user_params->N_THREADS) { #pragma omp for reduction(max:log10Mturn_max,log10Mturn_max_MINI) reduction(min:log10Mturn_min,log10Mturn_min_MINI) for (x=0; x<user_params->HII_DIM; x++){ for (y=0; y<user_params->HII_DIM; y++){ for 
(z=0; z<user_params->HII_DIM; z++){ if (*((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z)) < log10_Mcrit_atom) *((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z)) = log10_Mcrit_atom; if (*((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z)) > LOG10_MTURN_MAX) *((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z)) = LOG10_MTURN_MAX; // Mturnover cannot be less than Mcrit_mol if (*((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z)) < log10_Mcrit_mol) *((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z)) = log10_Mcrit_mol; if (*((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z)) > LOG10_MTURN_MAX) *((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z)) = LOG10_MTURN_MAX; if (*((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z)) < log10Mturn_min) log10Mturn_min = *((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z)); if (*((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z)) > log10Mturn_max) log10Mturn_max = *((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z)); if (*((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z)) < log10Mturn_min_MINI) log10Mturn_min_MINI = *((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z)); if (*((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z)) > log10Mturn_max_MINI) log10Mturn_max_MINI = *((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z)); } } } } if(user_params->USE_INTERPOLATION_TABLES) { log10Mturn_min = log10Mturn_min *0.99; log10Mturn_max = log10Mturn_max *1.01; log10Mturn_min_MINI = log10Mturn_min_MINI *0.99; log10Mturn_max_MINI = log10Mturn_max_MINI *1.01; log10Mturn_bin_width = (log10Mturn_max - log10Mturn_min) / NMTURN; log10Mturn_bin_width_inv = 1./log10Mturn_bin_width; log10Mturn_bin_width_MINI = (log10Mturn_max_MINI - log10Mturn_min_MINI) / NMTURN; log10Mturn_bin_width_inv_MINI = 1./log10Mturn_bin_width_MINI; } } initialiseGL_Nion(NGL_SFR, 
M_MIN,massofscaleR); if(user_params->USE_INTERPOLATION_TABLES) { if(flag_options->USE_MINI_HALOS){ initialise_Nion_General_spline_MINI(redshift,Mcrit_atom,min_density,max_density,massofscaleR,M_MIN, log10Mturn_min,log10Mturn_max,log10Mturn_min_MINI,log10Mturn_max_MINI, astro_params->ALPHA_STAR, astro_params->ALPHA_STAR_MINI, astro_params->ALPHA_ESC,astro_params->F_STAR10, astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc,astro_params->F_STAR7_MINI, astro_params->F_ESC7_MINI,Mlim_Fstar_MINI, Mlim_Fesc_MINI, user_params->FAST_FCOLL_TABLES); if (previous_ionize_box->mean_f_coll_MINI * ION_EFF_FACTOR_MINI + previous_ionize_box->mean_f_coll * ION_EFF_FACTOR > 1e-4){ initialise_Nion_General_spline_MINI_prev(prev_redshift,Mcrit_atom,prev_min_density,prev_max_density, massofscaleR,M_MIN,log10Mturn_min,log10Mturn_max,log10Mturn_min_MINI, log10Mturn_max_MINI,astro_params->ALPHA_STAR, astro_params->ALPHA_STAR_MINI, astro_params->ALPHA_ESC, astro_params->F_STAR10,astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc, astro_params->F_STAR7_MINI,astro_params->F_ESC7_MINI, Mlim_Fstar_MINI, Mlim_Fesc_MINI, user_params->FAST_FCOLL_TABLES); } } else{ initialise_Nion_General_spline(redshift,min_density,max_density,massofscaleR,astro_params->M_TURN, astro_params->ALPHA_STAR,astro_params->ALPHA_ESC,astro_params->F_STAR10, astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc, user_params->FAST_FCOLL_TABLES); } } } else { erfc_denom = 2. * (pow(sigma_z0(M_MIN), 2) - pow(sigma_z0(massofscaleR), 2)); if (erfc_denom < 0) { // our filtering scale has become too small break; } erfc_denom = sqrt(erfc_denom); erfc_denom = 1. 
/ (growth_factor * erfc_denom); } } // Determine the global averaged f_coll for the overall normalisation // Reset value of int check to see if we are over-stepping our interpolation table for (i = 0; i < user_params->N_THREADS; i++) { overdense_int_boundexceeded_threaded[i] = 0; } // renormalize the collapse fraction so that the mean matches ST, // since we are using the evolved (non-linear) density field #pragma omp parallel shared(deltax_filtered,N_rec_filtered,xe_filtered,overdense_int_boundexceeded_threaded,log10_Nion_spline,Nion_spline,erfc_denom,erfc_arg_min,\ erfc_arg_max,InvArgBinWidth,ArgBinWidth,ERFC_VALS_DIFF,ERFC_VALS,log10_Mturnover_filtered,log10Mturn_min,log10Mturn_bin_width_inv, \ log10_Mturnover_MINI_filtered,log10Mturn_bin_width_inv_MINI,log10_Nion_spline_MINI,prev_deltax_filtered,previous_ionize_box,ION_EFF_FACTOR,\ prev_overdense_small_bin_width, overdense_small_bin_width,overdense_small_bin_width_inv,\ prev_overdense_small_min,prev_overdense_small_bin_width_inv,prev_log10_Nion_spline,prev_log10_Nion_spline_MINI,prev_overdense_large_min,\ prev_overdense_large_bin_width_inv,prev_Nion_spline,prev_Nion_spline_MINI,box,counter,M_coll_filtered,massofscaleR,pixel_volume,sigmaMmax,\ M_MIN,growth_factor,Mlim_Fstar,Mlim_Fesc,Mcrit_atom,Mlim_Fstar_MINI,Mlim_Fesc_MINI,prev_growth_factor) \ private(x,y,z,curr_dens,Splined_Fcoll,Splined_Fcoll_MINI,dens_val,overdense_int,erfc_arg_val,erfc_arg_val_index,log10_Mturnover,\ log10_Mturnover_int,log10_Mturnover_MINI,log10_Mturnover_MINI_int,prev_dens,prev_Splined_Fcoll,prev_Splined_Fcoll_MINI,\ prev_dens_val,density_over_mean,status_int) \ num_threads(user_params->N_THREADS) { #pragma omp for reduction(+:f_coll,f_coll_MINI) for (x = 0; x < user_params->HII_DIM; x++) { for (y = 0; y < user_params->HII_DIM; y++) { for (z = 0; z < user_params->HII_DIM; z++) { // delta cannot be less than -1 *((float *) deltax_filtered + HII_R_FFT_INDEX(x, y, z)) = fmaxf( *((float *) deltax_filtered + HII_R_FFT_INDEX(x, y, z)), -1. 
+ FRACT_FLOAT_ERR); // <N_rec> cannot be less than zero if (flag_options->INHOMO_RECO) { *((float *) N_rec_filtered + HII_R_FFT_INDEX(x, y, z)) = fmaxf(*((float *) N_rec_filtered + HII_R_FFT_INDEX(x, y, z)), 0.0); } // x_e has to be between zero and unity if (flag_options->USE_TS_FLUCT) { *((float *) xe_filtered + HII_R_FFT_INDEX(x, y, z)) = fmaxf(*((float *) xe_filtered + HII_R_FFT_INDEX(x, y, z)), 0.); *((float *) xe_filtered + HII_R_FFT_INDEX(x, y, z)) = fminf(*((float *) xe_filtered + HII_R_FFT_INDEX(x, y, z)), 0.999); } if(flag_options->USE_HALO_FIELD) { // collapsed mass cannot be less than zero *((float *)M_coll_filtered + HII_R_FFT_INDEX(x,y,z)) = fmaxf( *((float *)M_coll_filtered + HII_R_FFT_INDEX(x,y,z)) , 0.0); density_over_mean = 1.0 + *((float *)deltax_filtered + HII_R_FFT_INDEX(x,y,z)); Splined_Fcoll = *((float *)M_coll_filtered + HII_R_FFT_INDEX(x,y,z)) / (massofscaleR*density_over_mean); Splined_Fcoll *= (4/3.0)*PI*pow(R,3) / pixel_volume; } else { curr_dens = *((float *) deltax_filtered + HII_R_FFT_INDEX(x, y, z)); if (flag_options->USE_MASS_DEPENDENT_ZETA) { if (flag_options->USE_MINI_HALOS){ log10_Mturnover = *((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z)); log10_Mturnover_MINI = *((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z)); if(user_params->USE_INTERPOLATION_TABLES) { status_int = EvaluateSplineTable(flag_options->USE_MINI_HALOS,1,curr_dens,log10_Mturnover,log10_Mturnover_MINI, &Splined_Fcoll,&Splined_Fcoll_MINI); if(status_int > 0) { overdense_int_boundexceeded_threaded[omp_get_thread_num()] = status_int; LOG_ULTRA_DEBUG("Broken 1059 in thread=%d", omp_get_thread_num()); } } else { Splined_Fcoll = Nion_ConditionalM(growth_factor,log(M_MIN),log(massofscaleR),sigmaMmax,Deltac,curr_dens, pow(10.,log10_Mturnover),astro_params->ALPHA_STAR, astro_params->ALPHA_ESC,astro_params->F_STAR10, astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc, user_params->FAST_FCOLL_TABLES); Splined_Fcoll_MINI = 
Nion_ConditionalM_MINI(growth_factor,log(M_MIN),log(massofscaleR),sigmaMmax,Deltac,curr_dens, pow(10.,log10_Mturnover_MINI),Mcrit_atom,astro_params->ALPHA_STAR_MINI, astro_params->ALPHA_ESC,astro_params->F_STAR7_MINI,astro_params->F_ESC7_MINI, Mlim_Fstar_MINI,Mlim_Fesc_MINI, user_params->FAST_FCOLL_TABLES); } prev_dens = *((float *)prev_deltax_filtered + HII_R_FFT_INDEX(x,y,z)); if (previous_ionize_box->mean_f_coll_MINI * ION_EFF_FACTOR_MINI + previous_ionize_box->mean_f_coll * ION_EFF_FACTOR > 1e-4){ if(user_params->USE_INTERPOLATION_TABLES) { status_int = EvaluateSplineTable(flag_options->USE_MINI_HALOS,2,prev_dens,log10_Mturnover,log10_Mturnover_MINI, &prev_Splined_Fcoll,&prev_Splined_Fcoll_MINI); if(status_int > 0) { overdense_int_boundexceeded_threaded[omp_get_thread_num()] = status_int; LOG_ULTRA_DEBUG("Broken 1086 in thread=%d", omp_get_thread_num()); } } else { prev_Splined_Fcoll = Nion_ConditionalM(prev_growth_factor,log(M_MIN),log(massofscaleR),sigmaMmax,Deltac,prev_dens, pow(10.,log10_Mturnover),astro_params->ALPHA_STAR, astro_params->ALPHA_ESC,astro_params->F_STAR10, astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc, user_params->FAST_FCOLL_TABLES); prev_Splined_Fcoll_MINI = Nion_ConditionalM_MINI(prev_growth_factor,log(M_MIN),log(massofscaleR),sigmaMmax,Deltac,prev_dens, pow(10.,log10_Mturnover_MINI),Mcrit_atom,astro_params->ALPHA_STAR_MINI, astro_params->ALPHA_ESC,astro_params->F_STAR7_MINI,astro_params->F_ESC7_MINI, Mlim_Fstar_MINI,Mlim_Fesc_MINI, user_params->FAST_FCOLL_TABLES); } } else{ prev_Splined_Fcoll = 0.; prev_Splined_Fcoll_MINI = 0.; } } else{ if(user_params->USE_INTERPOLATION_TABLES) { status_int = EvaluateSplineTable(flag_options->USE_MINI_HALOS,1,curr_dens,0.,0.,&Splined_Fcoll,&Splined_Fcoll_MINI); if(status_int > 0) { overdense_int_boundexceeded_threaded[omp_get_thread_num()] = status_int; LOG_ULTRA_DEBUG("Broken 1115 in thread=%d", omp_get_thread_num()); } } else { Splined_Fcoll = 
Nion_ConditionalM(growth_factor,log(M_MIN),log(massofscaleR),sigmaMmax,Deltac,curr_dens, astro_params->M_TURN,astro_params->ALPHA_STAR, astro_params->ALPHA_ESC,astro_params->F_STAR10, astro_params->F_ESC10,Mlim_Fstar,Mlim_Fesc, user_params->FAST_FCOLL_TABLES); } } } else { erfc_arg_val = (Deltac - curr_dens) * erfc_denom; if (erfc_arg_val < erfc_arg_min || erfc_arg_val > erfc_arg_max) { Splined_Fcoll = splined_erfc(erfc_arg_val); } else { erfc_arg_val_index = (int) floor((erfc_arg_val - erfc_arg_min) * InvArgBinWidth); Splined_Fcoll = ERFC_VALS[erfc_arg_val_index] + \ (erfc_arg_val - (erfc_arg_min + ArgBinWidth * (double) erfc_arg_val_index)) * ERFC_VALS_DIFF[erfc_arg_val_index] *InvArgBinWidth; } } } // save the value of the collasped fraction into the Fcoll array if (flag_options->USE_MINI_HALOS){ if (Splined_Fcoll > 1.) Splined_Fcoll = 1.; if (Splined_Fcoll < 0.) Splined_Fcoll = 1e-40; if (prev_Splined_Fcoll > 1.) prev_Splined_Fcoll = 1.; if (prev_Splined_Fcoll < 0.) prev_Splined_Fcoll = 1e-40; box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] = \ previous_ionize_box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] + Splined_Fcoll - prev_Splined_Fcoll; if (box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] >1.) box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] = 1.; //if (box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] <0.) 
box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] = 1e-40; //if (box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] < previous_ionize_box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)]) // box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] = previous_ionize_box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)]; f_coll += box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)]; if(isfinite(f_coll)==0) { LOG_ERROR("f_coll is either infinite or NaN!(%d,%d,%d)%g,%g,%g,%g,%g,%g,%g,%g,%g",\ x,y,z,curr_dens,prev_dens,previous_ionize_box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)],\ Splined_Fcoll, prev_Splined_Fcoll, curr_dens, prev_dens, \ log10_Mturnover, *((float *)log10_Mturnover_filtered + HII_R_FFT_INDEX(x,y,z))); // Throw(ParameterError); Throw(InfinityorNaNError); } if (Splined_Fcoll_MINI > 1.) Splined_Fcoll_MINI = 1.; if (Splined_Fcoll_MINI < 0.) Splined_Fcoll_MINI = 1e-40; if (prev_Splined_Fcoll_MINI > 1.) prev_Splined_Fcoll_MINI = 1.; if (prev_Splined_Fcoll_MINI < 0.) prev_Splined_Fcoll_MINI = 1e-40; box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] = \ previous_ionize_box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] + Splined_Fcoll_MINI - prev_Splined_Fcoll_MINI; if (box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] >1.) box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] = 1.; //if (box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] <0.) 
box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] = 1e-40; //if (box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] < previous_ionize_box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)]) // box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] = previous_ionize_box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)]; f_coll_MINI += box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)]; if(isfinite(f_coll_MINI)==0) { LOG_ERROR("f_coll_MINI is either infinite or NaN!(%d,%d,%d)%g,%g,%g,%g,%g,%g,%g",\ x,y,z,curr_dens, prev_dens, previous_ionize_box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)],\ Splined_Fcoll_MINI, prev_Splined_Fcoll_MINI, log10_Mturnover_MINI,\ *((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z))); LOG_DEBUG("%g,%g",previous_ionize_box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)],\ previous_ionize_box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)]); LOG_DEBUG("%g,%g,%g,%g,%g,%g,%g,%g,",log10Mturn_min, log10Mturn_max, log10Mturn_bin_width, \ log10Mturn_bin_width_inv, log10Mturn_max_MINI, log10Mturn_min_MINI, \ log10Mturn_bin_width_MINI, log10Mturn_bin_width_inv_MINI); LOG_DEBUG("%g,%g,%g,%g,%d",curr_dens, overdense_small_min, overdense_small_bin_width_inv, dens_val, overdense_int); LOG_DEBUG("%d,%g,%g,%g",log10_Mturnover_MINI_int, log10_Mturnover_MINI, log10Mturn_min_MINI, log10Mturn_bin_width_inv_MINI); LOG_DEBUG("%g", *((float *)log10_Mturnover_MINI_filtered + HII_R_FFT_INDEX(x,y,z))); LOG_DEBUG("%d", counter); LOG_DEBUG("%g,%g,%g,%g",log10_Nion_spline_MINI[overdense_int + NSFR_low* log10_Mturnover_MINI_int ], \ log10_Nion_spline_MINI[overdense_int +1+ NSFR_low* log10_Mturnover_MINI_int ], \ log10_Nion_spline_MINI[overdense_int + NSFR_low*(log10_Mturnover_MINI_int+1)], \ log10_Nion_spline_MINI[overdense_int +1+ NSFR_low*(log10_Mturnover_MINI_int+1)]); // Throw(ParameterError); Throw(InfinityorNaNError); } } 
else{ box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)] = Splined_Fcoll; f_coll += Splined_Fcoll; } } } } } // end loop through Fcoll box for (i = 0; i < user_params->N_THREADS; i++) { if (overdense_int_boundexceeded_threaded[i] == 1) { LOG_ERROR("I have overstepped my allocated memory for one of the interpolation tables for the nion_splines"); // Throw(ParameterError); Throw(TableEvaluationError); } } if(isfinite(f_coll)==0) { LOG_ERROR("f_coll is either infinite or NaN!"); // Throw(ParameterError); Throw(InfinityorNaNError); } f_coll /= (double) HII_TOT_NUM_PIXELS; if(isfinite(f_coll_MINI)==0) { LOG_ERROR("f_coll_MINI is either infinite or NaN!"); // Throw(ParameterError); Throw(InfinityorNaNError); } f_coll_MINI /= (double) HII_TOT_NUM_PIXELS; // To avoid ST_over_PS becoming nan when f_coll = 0, I set f_coll = FRACT_FLOAT_ERR. if (flag_options->USE_MASS_DEPENDENT_ZETA) { if (f_coll <= f_coll_min) f_coll = f_coll_min; if (flag_options->USE_MINI_HALOS){ if (f_coll_MINI <= f_coll_min_MINI) f_coll_MINI = f_coll_min_MINI; } } else { if (f_coll <= FRACT_FLOAT_ERR) f_coll = FRACT_FLOAT_ERR; } ST_over_PS = box->mean_f_coll/f_coll; ST_over_PS_MINI = box->mean_f_coll_MINI/f_coll_MINI; ////////////////////////////// MAIN LOOP THROUGH THE BOX /////////////////////////////////// // now lets scroll through the filtered box Gamma_R_prefactor = (R*CMperMPC) * SIGMA_HI * global_params.ALPHA_UVB / (global_params.ALPHA_UVB+2.75) * N_b0 * ION_EFF_FACTOR / 1.0e-12; Gamma_R_prefactor_MINI = (R*CMperMPC) * SIGMA_HI * global_params.ALPHA_UVB / (global_params.ALPHA_UVB+2.75) * N_b0 * ION_EFF_FACTOR_MINI / 1.0e-12; if(flag_options->PHOTON_CONS) { // Used for recombinations, which means we want to use the original redshift not the adjusted redshift Gamma_R_prefactor *= pow(1+stored_redshift, 2); Gamma_R_prefactor_MINI *= pow(1+stored_redshift, 2); } else { Gamma_R_prefactor *= pow(1+redshift, 2); Gamma_R_prefactor_MINI *= pow(1+redshift, 2); } Gamma_R_prefactor /= t_ast; 
Gamma_R_prefactor_MINI /= t_ast; if (global_params.FIND_BUBBLE_ALGORITHM != 2 && global_params.FIND_BUBBLE_ALGORITHM != 1) { // center method LOG_ERROR("Incorrect choice of find bubble algorithm: %i", global_params.FIND_BUBBLE_ALGORITHM); Throw(ValueError); } #pragma omp parallel shared(deltax_filtered,N_rec_filtered,xe_filtered,box,ST_over_PS,pixel_mass,M_MIN,r,f_coll_min,Gamma_R_prefactor,\ ION_EFF_FACTOR,ION_EFF_FACTOR_MINI,LAST_FILTER_STEP,counter,ST_over_PS_MINI,f_coll_min_MINI,Gamma_R_prefactor_MINI,TK) \ private(x,y,z,curr_dens,Splined_Fcoll,f_coll,ave_M_coll_cell,ave_N_min_cell,N_halos_in_cell,rec,xHII_from_xrays,res_xH,\ Splined_Fcoll_MINI,f_coll_MINI) \ num_threads(user_params->N_THREADS) { #pragma omp for for (x = 0; x < user_params->HII_DIM; x++) { for (y = 0; y < user_params->HII_DIM; y++) { for (z = 0; z < user_params->HII_DIM; z++) { curr_dens = *((float *)deltax_filtered + HII_R_FFT_INDEX(x,y,z)); Splined_Fcoll = box->Fcoll[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)]; f_coll = ST_over_PS * Splined_Fcoll; if (flag_options->USE_MINI_HALOS){ Splined_Fcoll_MINI = box->Fcoll_MINI[counter * HII_TOT_NUM_PIXELS + HII_R_INDEX(x,y,z)]; f_coll_MINI = ST_over_PS_MINI * Splined_Fcoll_MINI; } else{ f_coll_MINI = 0.; } if (LAST_FILTER_STEP){ ave_M_coll_cell = (f_coll + f_coll_MINI) * pixel_mass * (1. + curr_dens); ave_N_min_cell = ave_M_coll_cell / M_MIN; // ave # of M_MIN halos in cell if(user_params->NO_RNG) { N_halos_in_cell = 1.; } else { N_halos_in_cell = (int) gsl_ran_poisson(r[omp_get_thread_num()], global_params.N_POISSON); } } if (flag_options->USE_MASS_DEPENDENT_ZETA) { if (f_coll <= f_coll_min) f_coll = f_coll_min; if (flag_options->USE_MINI_HALOS){ if (f_coll_MINI <= f_coll_min_MINI) f_coll_MINI = f_coll_min_MINI; } } if (flag_options->INHOMO_RECO) { rec = (*((float *) N_rec_filtered + HII_R_FFT_INDEX(x, y, z))); // number of recombinations per mean baryon rec /= (1. 
+ curr_dens); // number of recombinations per baryon inside <R> } else { rec = 0.; } // adjust the denominator of the collapse fraction for the residual electron fraction in the neutral medium if (flag_options->USE_TS_FLUCT){ xHII_from_xrays = *((float *)xe_filtered + HII_R_FFT_INDEX(x,y,z)); } else { xHII_from_xrays = 0.; } // check if fully ionized! if ( (f_coll * ION_EFF_FACTOR + f_coll_MINI * ION_EFF_FACTOR_MINI> (1. - xHII_from_xrays)*(1.0+rec)) ){ //IONIZED!! // if this is the first crossing of the ionization barrier for this cell (largest R), record the gamma // this assumes photon-starved growth of HII regions... breaks down post EoR if (flag_options->INHOMO_RECO && (box->xH_box[HII_R_INDEX(x,y,z)] > FRACT_FLOAT_ERR) ){ box->Gamma12_box[HII_R_INDEX(x,y,z)] = Gamma_R_prefactor * f_coll + Gamma_R_prefactor_MINI * f_coll_MINI; box->MFP_box[HII_R_INDEX(x,y,z)] = R; } // keep track of the first time this cell is ionized (earliest time) if (previous_ionize_box->z_re_box[HII_R_INDEX(x,y,z)] < 0){ box->z_re_box[HII_R_INDEX(x,y,z)] = redshift; } else{ box->z_re_box[HII_R_INDEX(x,y,z)] = previous_ionize_box->z_re_box[HII_R_INDEX(x,y,z)]; } // FLAG CELL(S) AS IONIZED if (global_params.FIND_BUBBLE_ALGORITHM == 2) // center method box->xH_box[HII_R_INDEX(x,y,z)] = 0; if (global_params.FIND_BUBBLE_ALGORITHM == 1) // sphere method update_in_sphere(box->xH_box, user_params->HII_DIM, R/(user_params->BOX_LEN), \ x/(user_params->HII_DIM+0.0), y/(user_params->HII_DIM+0.0), z/(user_params->HII_DIM+0.0)); } // end ionized // If not fully ionized, then assign partial ionizations else if (LAST_FILTER_STEP && (box->xH_box[HII_R_INDEX(x, y, z)] > TINY)) { if (f_coll>1) f_coll=1; if (f_coll_MINI>1) f_coll_MINI=1; if (!flag_options->USE_HALO_FIELD){ if(ave_N_min_cell < global_params.N_POISSON) { f_coll = N_halos_in_cell * ( ave_M_coll_cell / (float)global_params.N_POISSON ) / (pixel_mass*(1. 
+ curr_dens)); if (flag_options->USE_MINI_HALOS){ f_coll_MINI = f_coll * (f_coll_MINI * ION_EFF_FACTOR_MINI) / (f_coll * ION_EFF_FACTOR + f_coll_MINI * ION_EFF_FACTOR_MINI); f_coll = f_coll - f_coll_MINI; } else{ f_coll_MINI = 0.; } } if (ave_M_coll_cell < (M_MIN / 5.)) { f_coll = 0.; f_coll_MINI = 0.; } } if (f_coll>1) f_coll=1; if (f_coll_MINI>1) f_coll_MINI=1; res_xH = 1. - f_coll * ION_EFF_FACTOR - f_coll_MINI * ION_EFF_FACTOR_MINI; // put the partial ionization here because we need to exclude xHII_from_xrays... if (flag_options->USE_TS_FLUCT){ box->temp_kinetic_all_gas[HII_R_INDEX(x,y,z)] = ComputePartiallyIoinizedTemperature(spin_temp->Tk_box[HII_R_INDEX(x,y,z)], res_xH); } else{ box->temp_kinetic_all_gas[HII_R_INDEX(x,y,z)] = ComputePartiallyIoinizedTemperature(TK, res_xH); } res_xH -= xHII_from_xrays; // and make sure fraction doesn't blow up for underdense pixels if (res_xH < 0) res_xH = 0; else if (res_xH > 1) res_xH = 1; box->xH_box[HII_R_INDEX(x, y, z)] = res_xH; } // end partial ionizations at last filtering step } // k } // j } // i } LOG_SUPER_DEBUG("z_re_box after R=%f: ", R); debugSummarizeBox(box->z_re_box, user_params->HII_DIM, " "); if (first_step_R) { R = stored_R; first_step_R = 0; } else { R /= (global_params.DELTA_R_HII_FACTOR); } if (flag_options->USE_MINI_HALOS) counter += 1; } #pragma omp parallel shared(box,spin_temp,redshift,deltax_unfiltered_original,TK) private(x,y,z) num_threads(user_params->N_THREADS) { #pragma omp for for (x=0; x<user_params->HII_DIM; x++){ for (y=0; y<user_params->HII_DIM; y++){ for (z=0; z<user_params->HII_DIM; z++){ if ((box->z_re_box[HII_R_INDEX(x,y,z)]>0) && (box->xH_box[HII_R_INDEX(x,y,z)] < TINY)){ box->temp_kinetic_all_gas[HII_R_INDEX(x,y,z)] = ComputeFullyIoinizedTemperature(box->z_re_box[HII_R_INDEX(x,y,z)], \ redshift, *((float *)deltax_unfiltered_original + HII_R_FFT_INDEX(x,y,z))); // Below sometimes (very rare though) can happen when the density drops too fast and to below T_HI if 
(flag_options->USE_TS_FLUCT){ if (box->temp_kinetic_all_gas[HII_R_INDEX(x,y,z)] < spin_temp->Tk_box[HII_R_INDEX(x,y,z)]) box->temp_kinetic_all_gas[HII_R_INDEX(x,y,z)] = spin_temp->Tk_box[HII_R_INDEX(x,y,z)]; } else{ if (box->temp_kinetic_all_gas[HII_R_INDEX(x,y,z)] < TK) box->temp_kinetic_all_gas[HII_R_INDEX(x,y,z)] = TK; } } } } } } for (x=0; x<user_params->HII_DIM; x++){ for (y=0; y<user_params->HII_DIM; y++){ for (z=0; z<user_params->HII_DIM; z++){ if(isfinite(box->temp_kinetic_all_gas[HII_R_INDEX(x,y,z)])==0){ LOG_ERROR("Tk after fully ioinzation is either infinite or a Nan. Something has gone wrong "\ "in the temperature calculation: z_re=%.4f, redshift=%.4f, curr_dens=%.4e", box->z_re_box[HII_R_INDEX(x,y,z)], redshift, curr_dens); // Throw(ParameterError); Throw(InfinityorNaNError); } } } } // find the neutral fraction if (LOG_LEVEL >= DEBUG_LEVEL) { global_xH = 0; #pragma omp parallel shared(box) private(ct) num_threads(user_params->N_THREADS) { #pragma omp for reduction(+:global_xH) for (ct = 0; ct < HII_TOT_NUM_PIXELS; ct++) { global_xH += box->xH_box[ct]; } } global_xH /= (float) HII_TOT_NUM_PIXELS; } if (isfinite(global_xH) == 0) { LOG_ERROR( "Neutral fraction is either infinite or a Nan. 
Something has gone wrong in the ionisation calculation!"); // Throw(ParameterError); Throw(InfinityorNaNError); } // update the N_rec field if (flag_options->INHOMO_RECO) { #pragma omp parallel shared(perturbed_field, adjustment_factor, stored_redshift, redshift, box, previous_ionize_box, \ fabs_dtdz, ZSTEP, something_finite_or_infinite) \ private(x, y, z, curr_dens, z_eff, dNrec) num_threads(user_params->N_THREADS) { #pragma omp for for (x = 0; x < user_params->HII_DIM; x++) { for (y = 0; y < user_params->HII_DIM; y++) { for (z = 0; z < user_params->HII_DIM; z++) { // use the original density and redshift for the snapshot (not the adjusted redshift) // Only want to use the adjusted redshift for the ionisation field curr_dens = 1.0 + (perturbed_field->density[HII_R_INDEX(x, y, z)]) / adjustment_factor; z_eff = pow(curr_dens, 1.0 / 3.0); if (flag_options->PHOTON_CONS) { z_eff *= (1 + stored_redshift); } else { z_eff *= (1 + redshift); } dNrec = splined_recombination_rate(z_eff - 1., box->Gamma12_box[HII_R_INDEX(x, y, z)]) * fabs_dtdz * ZSTEP * (1. 
- box->xH_box[HII_R_INDEX(x, y, z)]); if (isfinite(dNrec) == 0) { something_finite_or_infinite = 1; } box->dNrec_box[HII_R_INDEX(x, y, z)] = previous_ionize_box->dNrec_box[HII_R_INDEX(x, y, z)] + dNrec; } } } } if (something_finite_or_infinite) { LOG_ERROR("Recombinations have returned either an infinite or NaN value."); // Throw(ParameterError); Throw(InfinityorNaNError); } } fftwf_cleanup_threads(); fftwf_cleanup(); fftwf_forget_wisdom(); } destruct_heat(); for (i=0; i<user_params->N_THREADS; i++) { gsl_rng_free (r[i]); } LOG_DEBUG("global_xH = %e",global_xH); fftwf_free(deltax_unfiltered); fftwf_free(deltax_unfiltered_original); fftwf_free(deltax_filtered); if(flag_options->USE_MINI_HALOS){ fftwf_free(prev_deltax_unfiltered); fftwf_free(prev_deltax_filtered); } if(flag_options->USE_TS_FLUCT) { fftwf_free(xe_unfiltered); fftwf_free(xe_filtered); } if (flag_options->INHOMO_RECO){ fftwf_free(N_rec_unfiltered); fftwf_free(N_rec_filtered); } if(flag_options->USE_HALO_FIELD) { fftwf_free(M_coll_unfiltered); fftwf_free(M_coll_filtered); } LOG_SUPER_DEBUG("freed fftw boxes"); if(flag_options->USE_MASS_DEPENDENT_ZETA) { free(xi_SFR); free(wi_SFR); if(user_params->USE_INTERPOLATION_TABLES) { free(log10_overdense_spline_SFR); free(Overdense_spline_SFR); free(log10_Nion_spline); free(Nion_spline); } if(flag_options->USE_MINI_HALOS){ free(Mturns); free(Mturns_MINI); fftwf_free(log10_Mturnover_unfiltered); fftwf_free(log10_Mturnover_filtered); fftwf_free(log10_Mturnover_MINI_unfiltered); fftwf_free(log10_Mturnover_MINI_filtered); if(user_params->USE_INTERPOLATION_TABLES) { free(prev_log10_overdense_spline_SFR); free(prev_Overdense_spline_SFR); free(prev_log10_Nion_spline); free(prev_Nion_spline); free(log10_Nion_spline_MINI); free(Nion_spline_MINI); free(prev_log10_Nion_spline_MINI); free(prev_Nion_spline_MINI); } } //fftwf_free(Mcrit_RE_grid); //fftwf_free(Mcrit_LW_grid); } if (prev_redshift < 1){ free(previous_ionize_box->z_re_box); if (flag_options->USE_MASS_DEPENDENT_ZETA 
&& flag_options->USE_MINI_HALOS){
    // (continuation of the preceding function's teardown: release the
    //  previous ionisation box's per-snapshot arrays)
    free(previous_ionize_box->Gamma12_box);
    free(previous_ionize_box->dNrec_box);
    free(previous_ionize_box->Fcoll);
    free(previous_ionize_box->Fcoll_MINI);
}
}
if(!flag_options->USE_TS_FLUCT && user_params->USE_INTERPOLATION_TABLES) {
    freeSigmaMInterpTable();
}
free(overdense_int_boundexceeded_threaded);
LOG_DEBUG("finished!\n");
} // End of Try()
Catch(status){
    return(status);
}
return(0);
}

/*
 * Evaluate the pre-computed ionising-emissivity (Nion / collapse fraction)
 * interpolation tables at a single cell overdensity.
 *
 *  MINI_HALOS          - if true, also interpolate in log10(M_turnover) and
 *                        fill *Splined_Fcoll_MINI for mini-halos.
 *  dens_type           - 1 selects the current-snapshot tables
 *                        (overdense_*, Nion_spline, ...); 2 selects the
 *                        prev_* tables.
 *                        NOTE(review): any other value leaves small_min /
 *                        small_bin_width_inv uninitialized before use below;
 *                        callers appear to pass only 1 or 2 — confirm.
 *  curr_dens           - cell overdensity delta at which to evaluate.
 *  filtered_Mturn      - log10 turnover mass for atomic-cooling haloes
 *                        (used only when MINI_HALOS).
 *  filtered_Mturn_MINI - log10 turnover mass for molecular-cooling haloes.
 *  Splined_Fcoll(_MINI)- outputs; interpolated table values.
 *
 * Returns 0 on success, 1 if the density index fell outside the table
 * bounds (the interpolated value is still written in that case).
 *
 * Two regimes, split at CRIT_DENS_TRANSITION:
 *  - low density: tables are log-spaced in log10(1+delta) and store log10
 *    values, hence the expf() calls on the results;
 *  - high density: tables are linear in delta and store linear values
 *    (no expf); at delta >= 0.99*Deltac the cell is fully collapsed and
 *    the result is clamped to 1.
 * When MINI_HALOS is set, the lookup is a bilinear interpolation in
 * (density, log10 M_turnover); otherwise it is linear in density only.
 */
int EvaluateSplineTable(bool MINI_HALOS, int dens_type, float curr_dens, float filtered_Mturn, float filtered_Mturn_MINI, float *Splined_Fcoll, float *Splined_Fcoll_MINI) {

    int overdense_int,overdense_int_status;
    float dens_val, small_bin_width, small_bin_width_inv, small_min;
    float log10_Mturnover, log10_Mturnover_MINI;
    int log10_Mturnover_int, log10_Mturnover_MINI_int;

    overdense_int_status = 0;

    // Fractional index into the turnover-mass axis of the 2D tables.
    if(MINI_HALOS) {
        log10_Mturnover = (filtered_Mturn - log10Mturn_min ) * log10Mturn_bin_width_inv;
        log10_Mturnover_int = (int)floorf( log10_Mturnover );
        log10_Mturnover_MINI = (filtered_Mturn_MINI - log10Mturn_min_MINI ) * log10Mturn_bin_width_inv_MINI;
        log10_Mturnover_MINI_int = (int)floorf( log10_Mturnover_MINI );
    }

    // Select the low-density binning of either the current (1) or the
    // previous (2) snapshot's tables.
    if(dens_type==1) {
        small_min = overdense_small_min;
        small_bin_width = overdense_small_bin_width;
        small_bin_width_inv = overdense_small_bin_width_inv;
    }
    if(dens_type==2) {
        small_min = prev_overdense_small_min;
        small_bin_width = prev_overdense_small_bin_width;
        small_bin_width_inv = prev_overdense_small_bin_width_inv;
    }

    if (curr_dens < global_params.CRIT_DENS_TRANSITION) {
        // ---- low-density regime: log-spaced table of log10 values ----
        if (curr_dens <= -1.) {
            // Empty cell: no collapsed mass.
            *Splined_Fcoll = 0;
            if(MINI_HALOS) {
                *Splined_Fcoll_MINI = 0;
            }
        }
        else {
            dens_val = (log10f(curr_dens + 1.) - small_min) * small_bin_width_inv;
            overdense_int = (int) floorf(dens_val);
            if (overdense_int < 0 || (overdense_int + 1) > (NSFR_low - 1)) {
                // Flag the overflow but still evaluate; caller checks the
                // return status.
                overdense_int_status = 1;
                LOG_INFO("overdense_int in thread %d got value %d (exceeded bounds). Current density=%g", omp_get_thread_num(), overdense_int, dens_val);
            }
            if(MINI_HALOS) {
                // Bilinear interpolation in (density, log10 Mturn).
                if(dens_type==1) {
                    *Splined_Fcoll = ( \
                        log10_Nion_spline[overdense_int + NSFR_low*log10_Mturnover_int]*( 1 + (float)overdense_int - dens_val ) + \
                        log10_Nion_spline[overdense_int + 1 + NSFR_low*log10_Mturnover_int]*( dens_val - (float)overdense_int ) \
                    ) * (1 + (float)log10_Mturnover_int - log10_Mturnover) + \
                    ( \
                        log10_Nion_spline[overdense_int + NSFR_low*(log10_Mturnover_int+1)]*( 1 + (float)overdense_int - dens_val ) + \
                        log10_Nion_spline[overdense_int + 1 + NSFR_low*(log10_Mturnover_int+1)]*( dens_val - (float)overdense_int ) \
                    ) * (log10_Mturnover - (float)log10_Mturnover_int);
                    *Splined_Fcoll_MINI = ( \
                        log10_Nion_spline_MINI[overdense_int + NSFR_low*log10_Mturnover_MINI_int]*( 1 + (float)overdense_int - dens_val ) + \
                        log10_Nion_spline_MINI[overdense_int + 1 + NSFR_low*log10_Mturnover_MINI_int]*( dens_val - (float)overdense_int ) \
                    ) * (1 + (float)log10_Mturnover_MINI_int - log10_Mturnover_MINI) + \
                    ( \
                        log10_Nion_spline_MINI[overdense_int + NSFR_low*(log10_Mturnover_MINI_int+1)]*( 1 + (float)overdense_int - dens_val ) + \
                        log10_Nion_spline_MINI[overdense_int + 1 + NSFR_low*(log10_Mturnover_MINI_int+1)]*( dens_val - (float)overdense_int ) \
                    ) * (log10_Mturnover_MINI - (float)log10_Mturnover_MINI_int);
                }
                if(dens_type==2) {
                    *Splined_Fcoll = ( \
                        prev_log10_Nion_spline[overdense_int + NSFR_low*log10_Mturnover_int]*( 1 + (float)overdense_int - dens_val ) + \
                        prev_log10_Nion_spline[overdense_int + 1 + NSFR_low*log10_Mturnover_int]*( dens_val - (float)overdense_int ) \
                    ) * (1 + (float)log10_Mturnover_int - log10_Mturnover) + \
                    ( \
                        prev_log10_Nion_spline[overdense_int + NSFR_low*(log10_Mturnover_int+1)]*( 1 + (float)overdense_int - dens_val ) + \
                        prev_log10_Nion_spline[overdense_int + 1 + NSFR_low*(log10_Mturnover_int+1)]*( dens_val - (float)overdense_int ) \
                    ) * (log10_Mturnover - (float)log10_Mturnover_int);
                    *Splined_Fcoll_MINI = ( \
                        prev_log10_Nion_spline_MINI[overdense_int + NSFR_low*log10_Mturnover_MINI_int]*( 1 + (float)overdense_int - dens_val ) + \
                        prev_log10_Nion_spline_MINI[overdense_int + 1 + NSFR_low*log10_Mturnover_MINI_int]*( dens_val - (float)overdense_int ) \
                    ) * (1 + (float)log10_Mturnover_MINI_int - log10_Mturnover_MINI) + \
                    ( \
                        prev_log10_Nion_spline_MINI[overdense_int + NSFR_low*(log10_Mturnover_MINI_int+1)]*( 1 + (float)overdense_int - dens_val ) + \
                        prev_log10_Nion_spline_MINI[overdense_int + 1 + NSFR_low*(log10_Mturnover_MINI_int+1)]*( dens_val - (float)overdense_int ) \
                    ) * (log10_Mturnover_MINI - (float)log10_Mturnover_MINI_int);
                }
                // Low-density tables store log10 values: exponentiate.
                *Splined_Fcoll_MINI = expf(*Splined_Fcoll_MINI);
            }
            else {
                // 1D linear interpolation in density only.
                *Splined_Fcoll = log10_Nion_spline[overdense_int] * (1 + (float) overdense_int - dens_val) + log10_Nion_spline[overdense_int + 1] * (dens_val - (float) overdense_int);
            }
            *Splined_Fcoll = expf(*Splined_Fcoll);
        }
    }
    else {
        // ---- high-density regime: linearly-spaced table of linear values ----
        if (curr_dens < 0.99 * Deltac) {
            if(dens_type==1) {
                dens_val = (curr_dens - overdense_large_min) * overdense_large_bin_width_inv;
                LOG_ULTRA_DEBUG("type=%d curr_dens=%e, overdense_large_min=%e, overdense_large_bin_width_inv=%e",\
                                dens_type,curr_dens, overdense_large_min,overdense_large_bin_width_inv);
            }
            if(dens_type==2) {
                dens_val = (curr_dens - prev_overdense_large_min) * prev_overdense_large_bin_width_inv;
                LOG_ULTRA_DEBUG("type=%d curr_dens=%e, prev_overdense_large_min=%e, prev_overdense_large_bin_width_inv=%e",\
                                dens_type,curr_dens, prev_overdense_large_min,prev_overdense_large_bin_width_inv);
            }
            overdense_int = (int) floorf(dens_val);
            if (overdense_int < 0 || (overdense_int + 1) > (NSFR_high - 1)) {
                overdense_int_status = 1;
                LOG_INFO("overdense_int in thread %d got value %d (exceeded bounds). Current density=%g", omp_get_thread_num(), overdense_int, dens_val);
            }
            if(MINI_HALOS) {
                // Bilinear interpolation; no expf here — these tables are
                // stored in linear space.
                if(dens_type==1) {
                    *Splined_Fcoll = ( \
                        Nion_spline[overdense_int + NSFR_high* log10_Mturnover_int]*( 1 + (float)overdense_int - dens_val ) + \
                        Nion_spline[overdense_int + 1 + NSFR_high* log10_Mturnover_int]*( dens_val - (float)overdense_int ) \
                    ) * (1 + (float)log10_Mturnover_int - log10_Mturnover) + \
                    ( \
                        Nion_spline[overdense_int + NSFR_high*(log10_Mturnover_int+1)]*( 1 + (float)overdense_int - dens_val ) + \
                        Nion_spline[overdense_int+ 1 + NSFR_high*(log10_Mturnover_int+1)]*( dens_val - (float)overdense_int ) \
                    ) * (log10_Mturnover - (float)log10_Mturnover_int);
                    *Splined_Fcoll_MINI = ( \
                        Nion_spline_MINI[overdense_int + NSFR_high* log10_Mturnover_MINI_int]*( 1 + (float)overdense_int - dens_val ) + \
                        Nion_spline_MINI[overdense_int + 1 + NSFR_high* log10_Mturnover_MINI_int]*( dens_val - (float)overdense_int ) \
                    ) * (1 + (float)log10_Mturnover_MINI_int - log10_Mturnover_MINI) + \
                    ( \
                        Nion_spline_MINI[overdense_int + NSFR_high*(log10_Mturnover_MINI_int+1)]*( 1 + (float)overdense_int - dens_val ) + \
                        Nion_spline_MINI[overdense_int + 1 + NSFR_high*(log10_Mturnover_MINI_int+1)]*( dens_val - (float)overdense_int ) \
                    ) * (log10_Mturnover_MINI - (float)log10_Mturnover_MINI_int);
                }
                if(dens_type==2) {
                    *Splined_Fcoll = ( \
                        prev_Nion_spline[overdense_int + NSFR_high* log10_Mturnover_int]*( 1 + (float)overdense_int - dens_val ) + \
                        prev_Nion_spline[overdense_int + 1 + NSFR_high* log10_Mturnover_int]*( dens_val - (float)overdense_int ) \
                    ) * (1 + (float)log10_Mturnover_int - log10_Mturnover) + \
                    ( \
                        prev_Nion_spline[overdense_int + NSFR_high*(log10_Mturnover_int+1)]*( 1 + (float)overdense_int - dens_val ) + \
                        prev_Nion_spline[overdense_int+ 1 + NSFR_high*(log10_Mturnover_int+1)]*( dens_val - (float)overdense_int ) \
                    ) * (log10_Mturnover - (float)log10_Mturnover_int);
                    *Splined_Fcoll_MINI = ( \
                        prev_Nion_spline_MINI[overdense_int + NSFR_high* log10_Mturnover_MINI_int]*( 1 + (float)overdense_int - dens_val ) + \
                        prev_Nion_spline_MINI[overdense_int + 1 + NSFR_high* log10_Mturnover_MINI_int]*( dens_val - (float)overdense_int ) \
                    ) * (1 + (float)log10_Mturnover_MINI_int - log10_Mturnover_MINI) + \
                    ( \
                        prev_Nion_spline_MINI[overdense_int + NSFR_high*(log10_Mturnover_MINI_int+1)]*( 1 + (float)overdense_int - dens_val ) + \
                        prev_Nion_spline_MINI[overdense_int + 1 + NSFR_high*(log10_Mturnover_MINI_int+1)]*( dens_val - (float)overdense_int ) \
                    ) * (log10_Mturnover_MINI - (float)log10_Mturnover_MINI_int);
                }
            }
            else {
                *Splined_Fcoll = Nion_spline[overdense_int] * (1 + (float) overdense_int - dens_val) + Nion_spline[overdense_int + 1] * (dens_val - (float) overdense_int);
            }
        }
        else {
            // At/above ~the collapse threshold: treat as fully collapsed.
            *Splined_Fcoll = 1.;
            if(MINI_HALOS) {
                *Splined_Fcoll_MINI = 1.;
            }
        }
    }
    return overdense_int_status;
}

/*
 * Compute the binning of the low-overdensity (log-spaced) interpolation
 * table for one filtering scale, and store it in file-scope globals.
 *
 *  dens_type    - 1 writes overdense_small_{min,bin_width,bin_width_inv};
 *                 2 writes the prev_* equivalents.
 *  R            - current filtering radius (used for the sharp-k check and
 *                 logging only).
 *  L            - box side length (same units as R).
 *  min_density,
 *  max_density  - in/out: extremes of the filtered density field; widened
 *                 slightly in place (x1.001 / x0.999) so table edges are
 *                 never hit exactly, and clamped to just above -1.
 *
 * NOTE(review): for global_params.HII_FILTER == 1 (sharp k-space filter),
 * when the filter scale exceeds the box's Nyquist-like limit the field is
 * presumably all zeros, so a fixed conservative range is substituted —
 * confirm against the filtering code.
 */
void InterpolationRange(int dens_type, float R, float L, float *min_density, float *max_density) {

    float small_bin_width, small_bin_width_inv, small_min;

    // Pad the measured extremes so interpolation never lands exactly on an
    // edge bin; clamp the minimum to stay above delta = -1.
    if (*min_density < 0.) {
        *min_density = *min_density * 1.001;
        if (*min_density <= -1.) {
            // Use MIN_DENSITY_LOW_LIMIT as is it smaller than FRACT_FLOAT_ERR
            *min_density = -1. + global_params.MIN_DENSITY_LOW_LIMIT;
        }
    }
    else {
        *min_density = *min_density * 0.999;
    }
    if (*max_density < 0.) {
        *max_density = *max_density * 0.999;
    }
    else {
        *max_density = *max_density * 1.001;
    }

    if (global_params.HII_FILTER == 1) {
        if ((0.413566994 * R * 2. * PI / L) > 1.) {
            // The sharp k-space filter will set every cell to zero, and the interpolation table using a flexible min/max density will fail.
            *min_density = -1. + global_params.MIN_DENSITY_LOW_LIMIT;
            *max_density = global_params.CRIT_DENS_TRANSITION * 1.001;
        }
    }

    // Log-spaced binning in log10(1+delta), capped at the low/high-density
    // transition used by EvaluateSplineTable.
    small_min = log10(1. + *min_density);
    if (*max_density > global_params.CRIT_DENS_TRANSITION * 1.001) {
        small_bin_width = 1 / ((double) NSFR_low - 1.) * (log10(1. + global_params.CRIT_DENS_TRANSITION * 1.001) - small_min);
    }
    else {
        small_bin_width = 1 / ((double) NSFR_low - 1.) * (log10(1. + *max_density) - small_min);
    }
    small_bin_width_inv = 1./small_bin_width;

    // Publish the binning to the globals read by EvaluateSplineTable.
    if(dens_type==1) {
        overdense_small_min = small_min;
        overdense_small_bin_width = small_bin_width;
        overdense_small_bin_width_inv = small_bin_width_inv;
        LOG_ULTRA_DEBUG("R=%f, min_density=%f, max_density=%f, overdense_small_min=%f, overdense_small_bin_width=%f",\
                        R, *min_density, *max_density, small_min, small_bin_width);
    }
    if(dens_type==2) {
        prev_overdense_small_min = small_min;
        prev_overdense_small_bin_width = small_bin_width;
        prev_overdense_small_bin_width_inv = small_bin_width_inv;
        LOG_ULTRA_DEBUG("R=%f, prev_min_density=%f, prev_max_density=%f, prev_overdense_small_min=%f, prev_overdense_small_bin_width=%f",\
                        R, *min_density, *max_density, small_min, small_bin_width);
    }
}
GB_unop__identity_uint8_int16.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop_apply__identity_uint8_int16
// op(A') function: GB_unop_tran__identity_uint8_int16

// C type: uint8_t
// A type: int16_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint8_t z = (uint8_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int16_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint8_t z = (uint8_t) aij ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (false here: the int16_t -> uint8_t cast is a real typecast)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the identity operator element-wise, casting each int16_t entry of
// Ax to uint8_t in Cx, parallelized over the anz entries with OpenMP.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB_unop_apply__identity_uint8_int16
(
    uint8_t *Cx,                    // Cx and Ax may be aliased
    const int16_t *Ax,              // input values
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,                    // number of entries in Ax (and Cx)
    int nthreads                    // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (int16_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int16_t aij = Ax [p] ;
            uint8_t z = (uint8_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries not present in the bitmap
            int16_t aij = Ax [p] ;
            uint8_t z = (uint8_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose/typecast loop body lives in the shared template
// GB_unop_transpose.c, specialized here via the GB_* macros above.
GrB_Info GB_unop_tran__identity_uint8_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
MultiwayMerge.h
#include "CombBLAS.h"

// NOTE(review): this header has no include guard / #pragma once in the
// visible chunk — confirm whether one exists upstream.

namespace combblas {

/***************************************************************************
 * Find indices of column splitters in a list of tuple in parallel.
 * Inputs:
 *   tuples:  an array of SpTuples; each tuple is (rowid, colid, val)
 *   nsplits: number of splits requested
 * Output:
 *   splitters: An array of size (nsplits+1) storing the starts and ends of
 *   split tuples. A different type is used for the output since we might
 *   need int or IT.
 * Splitter i is found by binary search (lower_bound) for the first tuple
 * whose column is >= i * (ncol/nsplits); requires column-sorted input.
 ***************************************************************************/
template <typename RT, typename IT, typename NT>
std::vector<RT> findColSplitters(SpTuples<IT,NT> * & spTuples, int nsplits)
{
    std::vector<RT> splitters(nsplits+1);
    splitters[0] = static_cast<RT>(0);
    ColLexiCompare<IT,NT> comp;
#ifdef THREADED
#pragma omp parallel for
#endif
    for(int i=1; i< nsplits; i++)
    {
        IT cur_col = i * (spTuples->getncol()/nsplits);
        // Row 0 / default value: only the column matters for the split point.
        std::tuple<IT,IT,NT> search_tuple(0, cur_col, NT());
        std::tuple<IT,IT,NT>* it = std::lower_bound (spTuples->tuples, spTuples->tuples + spTuples->getnnz(), search_tuple, comp);
        splitters[i] = (RT) (it - spTuples->tuples);
    }
    splitters[nsplits] = spTuples->getnnz();
    return splitters;
}

// Symbolic serial merge : only estimates nnz.
// Runs the same k-way heap merge as SerialMerge but counts distinct
// (row,col) coordinates instead of producing output tuples.
template<class IT, class NT>
IT SerialMergeNNZ( const std::vector<SpTuples<IT,NT> *> & ArrSpTups)
{
    int nlists =  ArrSpTups.size();
    ColLexiCompare<IT,int> heapcomp;
    // Heap entries are (row, col, source-list index).
    std::vector<std::tuple<IT, IT, int>> heap(nlists);
    std::vector<IT> curptr(nlists, static_cast<IT>(0));
    IT hsize = 0;
    // Seed the heap with the first tuple of every non-empty list.
    for(int i=0; i< nlists; ++i)
    {
        if(ArrSpTups[i]->getnnz()>0)
        {
            heap[hsize++] = std::make_tuple(std::get<0>(ArrSpTups[i]->tuples[0]), std::get<1>(ArrSpTups[i]->tuples[0]), i);
        }
    }
    // Min-heap on (col,row): not2 inverts the comparator.
    std::make_heap(heap.data(), heap.data()+hsize, std::not2(heapcomp));

    std::tuple<IT, IT, NT> curTuple;
    IT estnnz = 0;
    while(hsize > 0)
    {
        std::pop_heap(heap.data(), heap.data() + hsize, std::not2(heapcomp));   // result is stored in heap[hsize-1]
        int source = std::get<2>(heap[hsize-1]);
        // Count only when the coordinate differs from the last one seen
        // (duplicates across lists collapse to a single output entry).
        if( (estnnz ==0) || (std::get<0>(curTuple) != std::get<0>(heap[hsize-1])) || (std::get<1>(curTuple) != std::get<1>(heap[hsize-1])))
        {
            curTuple = ArrSpTups[source]->tuples[curptr[source]];
            estnnz++;
        }
        curptr[source]++;
        if(curptr[source] != ArrSpTups[source]->getnnz())	// That array has not been depleted
        {
            heap[hsize-1] = std::make_tuple(std::get<0>(ArrSpTups[source]->tuples[curptr[source]]), std::get<1>(ArrSpTups[source]->tuples[curptr[source]]), source);
            std::push_heap(heap.data(), heap.data()+hsize, std::not2(heapcomp));
        }
        else
        {
            --hsize;
        }
    }
    return estnnz;
}

/*
 "Internal function" called by MultiwayMerge inside threaded region.
 The merged list is stored in a preallocated buffer ntuples
 (sized via SerialMergeNNZ). Never called from outside.
 Duplicate (row,col) entries are combined with the semiring's SR::add.
 Assumption1: the input lists are already column sorted
 Assumption2: at least two lists are passed to this function
 Assumption3: the input and output lists are to be deleted by caller
 */
template<class SR, class IT, class NT>
void SerialMerge( const std::vector<SpTuples<IT,NT> *> & ArrSpTups, std::tuple<IT, IT, NT> * ntuples)
{
    int nlists =  ArrSpTups.size();
    ColLexiCompare<IT,int> heapcomp;
    std::vector<std::tuple<IT, IT, int>> heap(nlists); // if performance issue, create this outside of threaded region
    std::vector<IT> curptr(nlists, static_cast<IT>(0));
    IT estnnz = 0;
    IT hsize = 0;
    for(int i=0; i< nlists; ++i)
    {
        if(ArrSpTups[i]->getnnz()>0)
        {
            estnnz += ArrSpTups[i]->getnnz();
            heap[hsize++] = std::make_tuple(std::get<0>(ArrSpTups[i]->tuples[0]), std::get<1>(ArrSpTups[i]->tuples[0]), i);
        }
    }
    std::make_heap(heap.data(), heap.data()+hsize, std::not2(heapcomp));
    IT cnz = 0;
    while(hsize > 0)
    {
        std::pop_heap(heap.data(), heap.data() + hsize, std::not2(heapcomp));   // result is stored in heap[hsize-1]
        int source = std::get<2>(heap[hsize-1]);
        if( (cnz != 0) && ((std::get<0>(ntuples[cnz-1]) == std::get<0>(heap[hsize-1])) && (std::get<1>(ntuples[cnz-1]) == std::get<1>(heap[hsize-1]))) )
        {
            // Same coordinate as the previous output entry: reduce in place.
            std::get<2>(ntuples[cnz-1])  = SR::add(std::get<2>(ntuples[cnz-1]), ArrSpTups[source]->numvalue(curptr[source]++));
        }
        else
        {
            ntuples[cnz++] = ArrSpTups[source]->tuples[curptr[source]++];
        }
        if(curptr[source] != ArrSpTups[source]->getnnz())	// That array has not been depleted
        {
            heap[hsize-1] = std::make_tuple(std::get<0>(ArrSpTups[source]->tuples[curptr[source]]), std::get<1>(ArrSpTups[source]->tuples[curptr[source]]), source);
            std::push_heap(heap.data(), heap.data()+hsize, std::not2(heapcomp));
        }
        else
        {
            --hsize;
        }
    }
}

// Performs a balanced merge of the array of SpTuples
// Assumes the input parameters are already column sorted.
// Strategy: split the column range into ~4x-threads parts with
// findColSplitters, size each part with SerialMergeNNZ, then merge the
// parts independently (SerialMerge) into one contiguous buffer.
// Ownership: if delarrs is true the inputs are deleted (or, for a single
// input, stolen); the returned SpTuples owns the merged buffer.
template<class SR, class IT, class NT>
SpTuples<IT, NT>* MultiwayMerge( std::vector<SpTuples<IT,NT> *> & ArrSpTups, IT mdim = 0, IT ndim = 0, bool delarrs = false )
{
    int nlists =  ArrSpTups.size();
    if(nlists == 0)
    {
        return new SpTuples<IT,NT>(0, mdim, ndim); //empty mxn SpTuples
    }
    if(nlists == 1)
    {
        if(delarrs) // steal data from input, and don't delete input
        {
            return ArrSpTups[0];
        }
        else // copy input to output
        {
            std::tuple<IT, IT, NT>* mergeTups = static_cast<std::tuple<IT, IT, NT>*>
            (::operator new (sizeof(std::tuple<IT, IT, NT>[ArrSpTups[0]->getnnz()])));
#ifdef THREADED
#pragma omp parallel for
#endif
            for(int i=0; i<ArrSpTups[0]->getnnz(); i++)
                mergeTups[i] = ArrSpTups[0]->tuples[i];

            return new SpTuples<IT,NT> (ArrSpTups[0]->getnnz(), mdim, ndim, mergeTups, true);
        }
    }

    // ---- check correctness of input dimensions ------
    for(int i=0; i< nlists; ++i)
    {
        if((mdim != ArrSpTups[i]->getnrow()) || ndim != ArrSpTups[i]->getncol())
        {
            std::cerr << "Dimensions of SpTuples do not match on multiwayMerge()" << std::endl;
            return new SpTuples<IT,NT>(0,0,0);
        }
    }

    int nthreads = 1;
#ifdef THREADED
#pragma omp parallel
    {
        nthreads = omp_get_num_threads();
    }
#endif
    int nsplits = 4*nthreads; // oversplit for load balance
    nsplits = std::min(nsplits, (int)ndim); // we cannot split a column

    // Per-list column splitters: colPtrs[j][i]..colPtrs[j][i+1] is list j's
    // slice of tuples belonging to split i.
    std::vector< std::vector<IT> > colPtrs;
    for(int i=0; i< nlists; i++)
    {
        colPtrs.push_back(findColSplitters<IT>(ArrSpTups[i], nsplits)); // in parallel
    }

    std::vector<IT>  mergedNnzPerSplit(nsplits);
    std::vector<IT>  inputNnzPerSplit(nsplits);
    // ------ estimate memory requirement after merge in each split ------
#ifdef THREADED
#pragma omp parallel for schedule(dynamic)
#endif
    for(int i=0; i< nsplits; i++) // for each part
    {
        // Shallow views into the input tuple arrays (last arg 'true').
        std::vector<SpTuples<IT,NT> *> listSplitTups(nlists);
        IT t = static_cast<IT>(0);
        for(int j=0; j< nlists; ++j)
        {
            IT curnnz= colPtrs[j][i+1] - colPtrs[j][i];
            listSplitTups[j] = new SpTuples<IT, NT> (curnnz, mdim, ndim, ArrSpTups[j]->tuples + colPtrs[j][i], true);
            t += colPtrs[j][i+1] - colPtrs[j][i];
        }
        mergedNnzPerSplit[i] = SerialMergeNNZ(listSplitTups);
        inputNnzPerSplit[i] = t;
    }

    // Prefix-sum the per-split merged sizes into output displacements.
    std::vector<IT> mdisp(nsplits+1,0);
    for(int i=0; i<nsplits; ++i)
        mdisp[i+1] = mdisp[i] + mergedNnzPerSplit[i];
    IT mergedNnzAll = mdisp[nsplits];

#ifdef COMBBLAS_DEBUG
    IT inputNnzAll = std::accumulate(inputNnzPerSplit.begin(), inputNnzPerSplit.end(), static_cast<IT>(0));
    double ratio = inputNnzAll / (double) mergedNnzAll;
    std::ostringstream outs;
    outs << "Multiwaymerge: inputNnz/mergedNnz = " << ratio << std::endl;
    SpParHelper::Print(outs.str());
#endif

    // ------ allocate memory outside of the parallel region ------
    std::tuple<IT, IT, NT> * mergeBuf = static_cast<std::tuple<IT, IT, NT>*>
    (::operator new (sizeof(std::tuple<IT, IT, NT>[mergedNnzAll])));

    // ------ perform merge in parallel ------
#ifdef THREADED
#pragma omp parallel for schedule(dynamic)
#endif
    for(int i=0; i< nsplits; i++) // serially merge part by part
    {
        std::vector<SpTuples<IT,NT> *> listSplitTups(nlists);
        for(int j=0; j< nlists; ++j)
        {
            IT curnnz= colPtrs[j][i+1] - colPtrs[j][i];
            listSplitTups[j] = new SpTuples<IT, NT> (curnnz, mdim, ndim, ArrSpTups[j]->tuples + colPtrs[j][i], true);
        }
        SerialMerge<SR>(listSplitTups, mergeBuf + mdisp[i]);
    }

    for(int i=0; i< nlists; i++)
    {
        if(delarrs)
            delete ArrSpTups[i]; // May be expensive for large local matrices
    }
    return new SpTuples<IT, NT> (mergedNnzAll, mdim, ndim, mergeBuf, true, true);
}

}
CGOpenMPRuntime.h
//===----- CGOpenMPRuntime.h - Interface to OpenMP Runtimes -----*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This provides a class for OpenMP runtime code generation. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H #define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H #include "CGValue.h" #include "clang/AST/DeclOpenMP.h" #include "clang/AST/GlobalDecl.h" #include "clang/AST/Type.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringSet.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include "llvm/Frontend/OpenMP/OMPIRBuilder.h" #include "llvm/IR/Function.h" #include "llvm/IR/ValueHandle.h" #include "llvm/Support/AtomicOrdering.h" namespace llvm { class ArrayType; class Constant; class FunctionType; class GlobalVariable; class StructType; class Type; class Value; class OpenMPIRBuilder; } // namespace llvm namespace clang { class Expr; class OMPDependClause; class OMPExecutableDirective; class OMPLoopDirective; class VarDecl; class OMPDeclareReductionDecl; class IdentifierInfo; namespace CodeGen { class Address; class CodeGenFunction; class CodeGenModule; /// A basic class for pre|post-action for advanced codegen sequence for OpenMP /// region. 
/// Base class for an action run around codegen of an OpenMP region.
/// Enter/Exit are no-op virtual hooks; subclasses override them to emit
/// extra code before and/or after the region body.
class PrePostActionTy {
public:
  explicit PrePostActionTy() {}
  /// Hook invoked before the region body is emitted. No-op by default.
  virtual void Enter(CodeGenFunction &CGF) {}
  /// Hook invoked after the region body is emitted. No-op by default.
  virtual void Exit(CodeGenFunction &CGF) {}
  virtual ~PrePostActionTy() {}
};

/// Class provides a way to call simple version of codegen for OpenMP region,
/// or an advanced with possible pre|post-actions in codegen.
class RegionCodeGenTy final {
  // Type-erased address of the wrapped callable, stored as an integer.
  intptr_t CodeGen;
  // Signature of the trampoline used to invoke the type-erased callable.
  typedef void (*CodeGenTy)(intptr_t, CodeGenFunction &, PrePostActionTy &);
  CodeGenTy Callback;
  // Optional action applied around the callback; mutable so it can be
  // installed through a const reference via setAction().
  mutable PrePostActionTy *PrePostAction;
  RegionCodeGenTy() = delete;
  RegionCodeGenTy &operator=(const RegionCodeGenTy &) = delete;
  /// Trampoline: casts \p CodeGen back to \p Callable and invokes it.
  template <typename Callable>
  static void CallbackFn(intptr_t CodeGen, CodeGenFunction &CGF,
                         PrePostActionTy &Action) {
    return (*reinterpret_cast<Callable *>(CodeGen))(CGF, Action);
  }

public:
  /// Wraps any callable other than RegionCodeGenTy itself (SFINAE-excluded
  /// so this does not hijack the copy constructor).
  /// NOTE(review): the callable is captured by address, not copied, so it
  /// must outlive this object — confirm at call sites.
  template <typename Callable>
  RegionCodeGenTy(
      Callable &&CodeGen,
      std::enable_if_t<!std::is_same<std::remove_reference_t<Callable>,
                                     RegionCodeGenTy>::value> * = nullptr)
      : CodeGen(reinterpret_cast<intptr_t>(&CodeGen)),
        Callback(CallbackFn<std::remove_reference_t<Callable>>),
        PrePostAction(nullptr) {}
  /// Installs a pre/post action to run around the callback.
  void setAction(PrePostActionTy &Action) const { PrePostAction = &Action; }
  void operator()(CodeGenFunction &CGF) const;
};

/// Per-directive bundle of clause expression lists and flags used when
/// emitting OpenMP task-like constructs.
struct OMPTaskDataTy final {
  SmallVector<const Expr *, 4> PrivateVars;
  SmallVector<const Expr *, 4> PrivateCopies;
  SmallVector<const Expr *, 4> FirstprivateVars;
  SmallVector<const Expr *, 4> FirstprivateCopies;
  SmallVector<const Expr *, 4> FirstprivateInits;
  SmallVector<const Expr *, 4> LastprivateVars;
  SmallVector<const Expr *, 4> LastprivateCopies;
  SmallVector<const Expr *, 4> ReductionVars;
  SmallVector<const Expr *, 4> ReductionOrigs;
  SmallVector<const Expr *, 4> ReductionCopies;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<CanonicalDeclPtr<const VarDecl>, 4> PrivateLocals;
  /// Data for one 'depend' clause: its kind, an optional iterator
  /// expression, and the list of dependency expressions.
  struct DependData {
    OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
    const Expr *IteratorExpr = nullptr;
    SmallVector<const Expr *, 4> DepExprs;
    explicit DependData() = default;
DependData(OpenMPDependClauseKind DepKind, const Expr *IteratorExpr) : DepKind(DepKind), IteratorExpr(IteratorExpr) {} }; SmallVector<DependData, 4> Dependences; llvm::PointerIntPair<llvm::Value *, 1, bool> Final; llvm::PointerIntPair<llvm::Value *, 1, bool> Schedule; llvm::PointerIntPair<llvm::Value *, 1, bool> Priority; llvm::Value *Reductions = nullptr; unsigned NumberOfParts = 0; bool Tied = true; bool Nogroup = false; bool IsReductionWithTaskMod = false; bool IsWorksharingReduction = false; }; /// Class intended to support codegen of all kind of the reduction clauses. class ReductionCodeGen { private: /// Data required for codegen of reduction clauses. struct ReductionData { /// Reference to the item shared between tasks to reduce into. const Expr *Shared = nullptr; /// Reference to the original item. const Expr *Ref = nullptr; /// Helper expression for generation of private copy. const Expr *Private = nullptr; /// Helper expression for generation reduction operation. const Expr *ReductionOp = nullptr; ReductionData(const Expr *Shared, const Expr *Ref, const Expr *Private, const Expr *ReductionOp) : Shared(Shared), Ref(Ref), Private(Private), ReductionOp(ReductionOp) { } }; /// List of reduction-based clauses. SmallVector<ReductionData, 4> ClausesData; /// List of addresses of shared variables/expressions. SmallVector<std::pair<LValue, LValue>, 4> SharedAddresses; /// List of addresses of original variables/expressions. SmallVector<std::pair<LValue, LValue>, 4> OrigAddresses; /// Sizes of the reduction items in chars. SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4> Sizes; /// Base declarations for the reduction items. SmallVector<const VarDecl *, 4> BaseDecls; /// Emits lvalue for shared expression. LValue emitSharedLValue(CodeGenFunction &CGF, const Expr *E); /// Emits upper bound for shared expression (if array section). LValue emitSharedLValueUB(CodeGenFunction &CGF, const Expr *E); /// Performs aggregate initialization. 
/// \param N Number of reduction item in the common list. /// \param PrivateAddr Address of the corresponding private item. /// \param SharedLVal Address of the original shared variable. /// \param DRD Declare reduction construct used for reduction item. void emitAggregateInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal, const OMPDeclareReductionDecl *DRD); public: ReductionCodeGen(ArrayRef<const Expr *> Shareds, ArrayRef<const Expr *> Origs, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> ReductionOps); /// Emits lvalue for the shared and original reduction item. /// \param N Number of the reduction item. void emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N); /// Emits the code for the variable-modified type, if required. /// \param N Number of the reduction item. void emitAggregateType(CodeGenFunction &CGF, unsigned N); /// Emits the code for the variable-modified type, if required. /// \param N Number of the reduction item. /// \param Size Size of the type in chars. void emitAggregateType(CodeGenFunction &CGF, unsigned N, llvm::Value *Size); /// Performs initialization of the private copy for the reduction item. /// \param N Number of the reduction item. /// \param PrivateAddr Address of the corresponding private item. /// \param DefaultInit Default initialization sequence that should be /// performed if no reduction specific initialization is found. /// \param SharedLVal Address of the original shared variable. void emitInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal, llvm::function_ref<bool(CodeGenFunction &)> DefaultInit); /// Returns true if the private copy requires cleanups. bool needCleanups(unsigned N); /// Emits cleanup code for the reduction item. /// \param N Number of the reduction item. /// \param PrivateAddr Address of the corresponding private item. 
void emitCleanups(CodeGenFunction &CGF, unsigned N, Address PrivateAddr); /// Adjusts \p PrivatedAddr for using instead of the original variable /// address in normal operations. /// \param N Number of the reduction item. /// \param PrivateAddr Address of the corresponding private item. Address adjustPrivateAddress(CodeGenFunction &CGF, unsigned N, Address PrivateAddr); /// Returns LValue for the reduction item. LValue getSharedLValue(unsigned N) const { return SharedAddresses[N].first; } /// Returns LValue for the original reduction item. LValue getOrigLValue(unsigned N) const { return OrigAddresses[N].first; } /// Returns the size of the reduction item (in chars and total number of /// elements in the item), or nullptr, if the size is a constant. std::pair<llvm::Value *, llvm::Value *> getSizes(unsigned N) const { return Sizes[N]; } /// Returns the base declaration of the reduction item. const VarDecl *getBaseDecl(unsigned N) const { return BaseDecls[N]; } /// Returns the base declaration of the reduction item. const Expr *getRefExpr(unsigned N) const { return ClausesData[N].Ref; } /// Returns true if the initialization of the reduction item uses initializer /// from declare reduction construct. bool usesReductionInitializer(unsigned N) const; }; class CGOpenMPRuntime { public: /// Allows to disable automatic handling of functions used in target regions /// as those marked as `omp declare target`. class DisableAutoDeclareTargetRAII { CodeGenModule &CGM; bool SavedShouldMarkAsGlobal; public: DisableAutoDeclareTargetRAII(CodeGenModule &CGM); ~DisableAutoDeclareTargetRAII(); }; /// Manages list of nontemporal decls for the specified directive. class NontemporalDeclsRAII { CodeGenModule &CGM; const bool NeedToPush; public: NontemporalDeclsRAII(CodeGenModule &CGM, const OMPLoopDirective &S); ~NontemporalDeclsRAII(); }; /// Manages list of nontemporal decls for the specified directive. 
class UntiedTaskLocalDeclsRAII { CodeGenModule &CGM; const bool NeedToPush; public: UntiedTaskLocalDeclsRAII( CodeGenFunction &CGF, const llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, std::pair<Address, Address>> &LocalVars); ~UntiedTaskLocalDeclsRAII(); }; /// Maps the expression for the lastprivate variable to the global copy used /// to store new value because original variables are not mapped in inner /// parallel regions. Only private copies are captured but we need also to /// store private copy in shared address. /// Also, stores the expression for the private loop counter and it /// threaprivate name. struct LastprivateConditionalData { llvm::MapVector<CanonicalDeclPtr<const Decl>, SmallString<16>> DeclToUniqueName; LValue IVLVal; llvm::Function *Fn = nullptr; bool Disabled = false; }; /// Manages list of lastprivate conditional decls for the specified directive. class LastprivateConditionalRAII { enum class ActionToDo { DoNotPush, PushAsLastprivateConditional, DisableLastprivateConditional, }; CodeGenModule &CGM; ActionToDo Action = ActionToDo::DoNotPush; /// Check and try to disable analysis of inner regions for changes in /// lastprivate conditional. void tryToDisableInnerAnalysis(const OMPExecutableDirective &S, llvm::DenseSet<CanonicalDeclPtr<const Decl>> &NeedToAddForLPCsAsDisabled) const; LastprivateConditionalRAII(CodeGenFunction &CGF, const OMPExecutableDirective &S); public: explicit LastprivateConditionalRAII(CodeGenFunction &CGF, const OMPExecutableDirective &S, LValue IVLVal); static LastprivateConditionalRAII disable(CodeGenFunction &CGF, const OMPExecutableDirective &S); ~LastprivateConditionalRAII(); }; llvm::OpenMPIRBuilder &getOMPBuilder() { return OMPBuilder; } protected: CodeGenModule &CGM; StringRef FirstSeparator, Separator; /// An OpenMP-IR-Builder instance. llvm::OpenMPIRBuilder OMPBuilder; /// Constructor allowing to redefine the name separator for the variables. 
explicit CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator, StringRef Separator); /// Creates offloading entry for the provided entry ID \a ID, /// address \a Addr, size \a Size, and flags \a Flags. virtual void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr, uint64_t Size, int32_t Flags, llvm::GlobalValue::LinkageTypes Linkage); /// Helper to emit outlined function for 'target' directive. /// \param D Directive to emit. /// \param ParentName Name of the function that encloses the target region. /// \param OutlinedFn Outlined function value to be defined by this call. /// \param OutlinedFnID Outlined function ID value to be defined by this call. /// \param IsOffloadEntry True if the outlined function is an offload entry. /// \param CodeGen Lambda codegen specific to an accelerator device. /// An outlined function may not be an entry if, e.g. the if clause always /// evaluates to false. virtual void emitTargetOutlinedFunctionHelper(const OMPExecutableDirective &D, StringRef ParentName, llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen); /// Emits object of ident_t type with info for source location. /// \param Flags Flags for OpenMP location. /// llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc, unsigned Flags = 0); /// Returns pointer to ident_t type. llvm::Type *getIdentTyPointerTy(); /// Gets thread id value for the current thread. /// llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc); /// Get the function name of an outlined region. // The name can be customized depending on the target. // virtual StringRef getOutlinedHelperName() const { return ".omp_outlined."; } /// Emits \p Callee function call with arguments \p Args with location \p Loc. 
void emitCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee Callee, ArrayRef<llvm::Value *> Args = llvm::None) const; /// Emits address of the word in a memory where current thread id is /// stored. virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc); void setLocThreadIdInsertPt(CodeGenFunction &CGF, bool AtCurrentPoint = false); void clearLocThreadIdInsertPt(CodeGenFunction &CGF); /// Check if the default location must be constant. /// Default is false to support OMPT/OMPD. virtual bool isDefaultLocationConstant() const { return false; } /// Returns additional flags that can be stored in reserved_2 field of the /// default location. virtual unsigned getDefaultLocationReserved2Flags() const { return 0; } /// Returns default flags for the barriers depending on the directive, for /// which this barier is going to be emitted. static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind); /// Get the LLVM type for the critical name. llvm::ArrayType *getKmpCriticalNameTy() const {return KmpCriticalNameTy;} /// Returns corresponding lock object for the specified critical region /// name. If the lock object does not exist it is created, otherwise the /// reference to the existing copy is returned. /// \param CriticalName Name of the critical region. /// llvm::Value *getCriticalRegionLock(StringRef CriticalName); private: /// Map for SourceLocation and OpenMP runtime library debug locations. typedef llvm::DenseMap<SourceLocation, llvm::Value *> OpenMPDebugLocMapTy; OpenMPDebugLocMapTy OpenMPDebugLocMap; /// The type for a microtask which gets passed to __kmpc_fork_call(). /// Original representation is: /// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...); llvm::FunctionType *Kmpc_MicroTy = nullptr; /// Stores debug location and ThreadID for the function. struct DebugLocThreadIdTy { llvm::Value *DebugLoc; llvm::Value *ThreadID; /// Insert point for the service instructions. 
llvm::AssertingVH<llvm::Instruction> ServiceInsertPt = nullptr; }; /// Map of local debug location, ThreadId and functions. typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy> OpenMPLocThreadIDMapTy; OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap; /// Map of UDRs and corresponding combiner/initializer. typedef llvm::DenseMap<const OMPDeclareReductionDecl *, std::pair<llvm::Function *, llvm::Function *>> UDRMapTy; UDRMapTy UDRMap; /// Map of functions and locally defined UDRs. typedef llvm::DenseMap<llvm::Function *, SmallVector<const OMPDeclareReductionDecl *, 4>> FunctionUDRMapTy; FunctionUDRMapTy FunctionUDRMap; /// Map from the user-defined mapper declaration to its corresponding /// functions. llvm::DenseMap<const OMPDeclareMapperDecl *, llvm::Function *> UDMMap; /// Map of functions and their local user-defined mappers. using FunctionUDMMapTy = llvm::DenseMap<llvm::Function *, SmallVector<const OMPDeclareMapperDecl *, 4>>; FunctionUDMMapTy FunctionUDMMap; /// Maps local variables marked as lastprivate conditional to their internal /// types. llvm::DenseMap<llvm::Function *, llvm::DenseMap<CanonicalDeclPtr<const Decl>, std::tuple<QualType, const FieldDecl *, const FieldDecl *, LValue>>> LastprivateConditionalToTypes; /// Maps function to the position of the untied task locals stack. llvm::DenseMap<llvm::Function *, unsigned> FunctionToUntiedTaskStackMap; /// Type kmp_critical_name, originally defined as typedef kmp_int32 /// kmp_critical_name[8]; llvm::ArrayType *KmpCriticalNameTy; /// An ordered map of auto-generated variables to their unique names. /// It stores variables with the following names: 1) ".gomp_critical_user_" + /// <critical_section_name> + ".var" for "omp critical" directives; 2) /// <mangled_name_for_global_var> + ".cache." for cache for threadprivate /// variables. 
llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator> InternalVars; /// Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); llvm::Type *KmpRoutineEntryPtrTy = nullptr; QualType KmpRoutineEntryPtrQTy; /// Type typedef struct kmp_task { /// void * shareds; /**< pointer to block of pointers to /// shared vars */ /// kmp_routine_entry_t routine; /**< pointer to routine to call for /// executing task */ /// kmp_int32 part_id; /**< part id for the task */ /// kmp_routine_entry_t destructors; /* pointer to function to invoke /// deconstructors of firstprivate C++ objects */ /// } kmp_task_t; QualType KmpTaskTQTy; /// Saved kmp_task_t for task directive. QualType SavedKmpTaskTQTy; /// Saved kmp_task_t for taskloop-based directive. QualType SavedKmpTaskloopTQTy; /// Type typedef struct kmp_depend_info { /// kmp_intptr_t base_addr; /// size_t len; /// struct { /// bool in:1; /// bool out:1; /// } flags; /// } kmp_depend_info_t; QualType KmpDependInfoTy; /// Type typedef struct kmp_task_affinity_info { /// kmp_intptr_t base_addr; /// size_t len; /// struct { /// bool flag1 : 1; /// bool flag2 : 1; /// kmp_int32 reserved : 30; /// } flags; /// } kmp_task_affinity_info_t; QualType KmpTaskAffinityInfoTy; /// struct kmp_dim { // loop bounds info casted to kmp_int64 /// kmp_int64 lo; // lower /// kmp_int64 up; // upper /// kmp_int64 st; // stride /// }; QualType KmpDimTy; /// Type struct __tgt_offload_entry{ /// void *addr; // Pointer to the offload entry info. /// // (function or global) /// char *name; // Name of the function or global. /// size_t size; // Size of the entry info (0 if it a function). /// int32_t flags; /// int32_t reserved; /// }; QualType TgtOffloadEntryQTy; /// Entity that registers the offloading constants that were emitted so /// far. class OffloadEntriesInfoManagerTy { CodeGenModule &CGM; /// Number of entries registered so far. unsigned OffloadingEntriesNum = 0; public: /// Base class of the entries info. 
class OffloadEntryInfo { public: /// Kind of a given entry. enum OffloadingEntryInfoKinds : unsigned { /// Entry is a target region. OffloadingEntryInfoTargetRegion = 0, /// Entry is a declare target variable. OffloadingEntryInfoDeviceGlobalVar = 1, /// Invalid entry info. OffloadingEntryInfoInvalid = ~0u }; protected: OffloadEntryInfo() = delete; explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {} explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order, uint32_t Flags) : Flags(Flags), Order(Order), Kind(Kind) {} ~OffloadEntryInfo() = default; public: bool isValid() const { return Order != ~0u; } unsigned getOrder() const { return Order; } OffloadingEntryInfoKinds getKind() const { return Kind; } uint32_t getFlags() const { return Flags; } void setFlags(uint32_t NewFlags) { Flags = NewFlags; } llvm::Constant *getAddress() const { return cast_or_null<llvm::Constant>(Addr); } void setAddress(llvm::Constant *V) { assert(!Addr.pointsToAliveValue() && "Address has been set before!"); Addr = V; } static bool classof(const OffloadEntryInfo *Info) { return true; } private: /// Address of the entity that has to be mapped for offloading. llvm::WeakTrackingVH Addr; /// Flags associated with the device global. uint32_t Flags = 0u; /// Order this entry was emitted. unsigned Order = ~0u; OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid; }; /// Return true if a there are no entries defined. bool empty() const; /// Return number of entries defined so far. unsigned size() const { return OffloadingEntriesNum; } OffloadEntriesInfoManagerTy(CodeGenModule &CGM) : CGM(CGM) {} // // Target region entries related. // /// Kind of the target registry entry. enum OMPTargetRegionEntryKind : uint32_t { /// Mark the entry as target region. OMPTargetRegionEntryTargetRegion = 0x0, /// Mark the entry as a global constructor. OMPTargetRegionEntryCtor = 0x02, /// Mark the entry as a global destructor. 
OMPTargetRegionEntryDtor = 0x04, }; /// Target region entries info. class OffloadEntryInfoTargetRegion final : public OffloadEntryInfo { /// Address that can be used as the ID of the entry. llvm::Constant *ID = nullptr; public: OffloadEntryInfoTargetRegion() : OffloadEntryInfo(OffloadingEntryInfoTargetRegion) {} explicit OffloadEntryInfoTargetRegion(unsigned Order, llvm::Constant *Addr, llvm::Constant *ID, OMPTargetRegionEntryKind Flags) : OffloadEntryInfo(OffloadingEntryInfoTargetRegion, Order, Flags), ID(ID) { setAddress(Addr); } llvm::Constant *getID() const { return ID; } void setID(llvm::Constant *V) { assert(!ID && "ID has been set before!"); ID = V; } static bool classof(const OffloadEntryInfo *Info) { return Info->getKind() == OffloadingEntryInfoTargetRegion; } }; /// Initialize target region entry. void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum, unsigned Order); /// Register target region entry. void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum, llvm::Constant *Addr, llvm::Constant *ID, OMPTargetRegionEntryKind Flags); /// Return true if a target region entry with the provided information /// exists. bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum, bool IgnoreAddressId = false) const; /// brief Applies action \a Action on all registered entries. typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned, const OffloadEntryInfoTargetRegion &)> OffloadTargetRegionEntryInfoActTy; void actOnTargetRegionEntriesInfo( const OffloadTargetRegionEntryInfoActTy &Action); // // Device global variable entries related. // /// Kind of the global variable entry.. enum OMPTargetGlobalVarEntryKind : uint32_t { /// Mark the entry as a to declare target. OMPTargetGlobalVarEntryTo = 0x0, /// Mark the entry as a to declare target link. 
OMPTargetGlobalVarEntryLink = 0x1, }; /// Device global variable entries info. class OffloadEntryInfoDeviceGlobalVar final : public OffloadEntryInfo { /// Type of the global variable. CharUnits VarSize; llvm::GlobalValue::LinkageTypes Linkage; public: OffloadEntryInfoDeviceGlobalVar() : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar) {} explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order, OMPTargetGlobalVarEntryKind Flags) : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags) {} explicit OffloadEntryInfoDeviceGlobalVar( unsigned Order, llvm::Constant *Addr, CharUnits VarSize, OMPTargetGlobalVarEntryKind Flags, llvm::GlobalValue::LinkageTypes Linkage) : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags), VarSize(VarSize), Linkage(Linkage) { setAddress(Addr); } CharUnits getVarSize() const { return VarSize; } void setVarSize(CharUnits Size) { VarSize = Size; } llvm::GlobalValue::LinkageTypes getLinkage() const { return Linkage; } void setLinkage(llvm::GlobalValue::LinkageTypes LT) { Linkage = LT; } static bool classof(const OffloadEntryInfo *Info) { return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar; } }; /// Initialize device global variable entry. void initializeDeviceGlobalVarEntryInfo(StringRef Name, OMPTargetGlobalVarEntryKind Flags, unsigned Order); /// Register device global variable entry. void registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr, CharUnits VarSize, OMPTargetGlobalVarEntryKind Flags, llvm::GlobalValue::LinkageTypes Linkage); /// Checks if the variable with the given name has been registered already. bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const { return OffloadEntriesDeviceGlobalVar.count(VarName) > 0; } /// Applies action \a Action on all registered entries. 
typedef llvm::function_ref<void(StringRef, const OffloadEntryInfoDeviceGlobalVar &)> OffloadDeviceGlobalVarEntryInfoActTy; void actOnDeviceGlobalVarEntriesInfo( const OffloadDeviceGlobalVarEntryInfoActTy &Action); private: // Storage for target region entries kind. The storage is to be indexed by // file ID, device ID, parent function name and line number. typedef llvm::DenseMap<unsigned, OffloadEntryInfoTargetRegion> OffloadEntriesTargetRegionPerLine; typedef llvm::StringMap<OffloadEntriesTargetRegionPerLine> OffloadEntriesTargetRegionPerParentName; typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerParentName> OffloadEntriesTargetRegionPerFile; typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerFile> OffloadEntriesTargetRegionPerDevice; typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy; OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion; /// Storage for device global variable entries kind. The storage is to be /// indexed by mangled name. typedef llvm::StringMap<OffloadEntryInfoDeviceGlobalVar> OffloadEntriesDeviceGlobalVarTy; OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar; }; OffloadEntriesInfoManagerTy OffloadEntriesInfoManager; bool ShouldMarkAsGlobal = true; /// List of the emitted declarations. llvm::DenseSet<CanonicalDeclPtr<const Decl>> AlreadyEmittedTargetDecls; /// List of the global variables with their addresses that should not be /// emitted for the target. llvm::StringMap<llvm::WeakTrackingVH> EmittedNonTargetVariables; /// List of variables that can become declare target implicitly and, thus, /// must be emitted. llvm::SmallDenseSet<const VarDecl *> DeferredGlobalVariables; using NontemporalDeclsSet = llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>>; /// Stack for list of declarations in current context marked as nontemporal. /// The set is the union of all current stack elements. 
llvm::SmallVector<NontemporalDeclsSet, 4> NontemporalDeclsStack; using UntiedLocalVarsAddressesMap = llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, std::pair<Address, Address>>; llvm::SmallVector<UntiedLocalVarsAddressesMap, 4> UntiedLocalVarsStack; /// Stack for list of addresses of declarations in current context marked as /// lastprivate conditional. The set is the union of all current stack /// elements. llvm::SmallVector<LastprivateConditionalData, 4> LastprivateConditionalStack; /// Flag for keeping track of weather a requires unified_shared_memory /// directive is present. bool HasRequiresUnifiedSharedMemory = false; /// Atomic ordering from the omp requires directive. llvm::AtomicOrdering RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic; /// Flag for keeping track of weather a target region has been emitted. bool HasEmittedTargetRegion = false; /// Flag for keeping track of weather a device routine has been emitted. /// Device routines are specific to the bool HasEmittedDeclareTargetRegion = false; /// Loads all the offload entries information from the host IR /// metadata. void loadOffloadInfoMetadata(); /// Returns __tgt_offload_entry type. QualType getTgtOffloadEntryQTy(); /// Start scanning from statement \a S and and emit all target regions /// found along the way. /// \param S Starting statement. /// \param ParentName Name of the function declaration that is being scanned. void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName); /// Build type kmp_routine_entry_t (if not built yet). void emitKmpRoutineEntryT(QualType KmpInt32Ty); /// Returns pointer to kmpc_micro type. llvm::Type *getKmpc_MicroPointerTy(); /// Returns __kmpc_for_static_init_* runtime function for the specified /// size \a IVSize and sign \a IVSigned. llvm::FunctionCallee createForStaticInitFunction(unsigned IVSize, bool IVSigned); /// Returns __kmpc_dispatch_init_* runtime function for the specified /// size \a IVSize and sign \a IVSigned. 
llvm::FunctionCallee createDispatchInitFunction(unsigned IVSize, bool IVSigned); /// Returns __kmpc_dispatch_next_* runtime function for the specified /// size \a IVSize and sign \a IVSigned. llvm::FunctionCallee createDispatchNextFunction(unsigned IVSize, bool IVSigned); /// Returns __kmpc_dispatch_fini_* runtime function for the specified /// size \a IVSize and sign \a IVSigned. llvm::FunctionCallee createDispatchFiniFunction(unsigned IVSize, bool IVSigned); /// If the specified mangled name is not in the module, create and /// return threadprivate cache object. This object is a pointer's worth of /// storage that's reserved for use by the OpenMP runtime. /// \param VD Threadprivate variable. /// \return Cache variable for the specified threadprivate. llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD); /// Gets (if variable with the given name already exist) or creates /// internal global variable with the specified Name. The created variable has /// linkage CommonLinkage by default and is initialized by null value. /// \param Ty Type of the global variable. If it is exist already the type /// must be the same. /// \param Name Name of the variable. llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace = 0); /// Set of threadprivate variables with the generated initializer. llvm::StringSet<> ThreadPrivateWithDefinition; /// Set of declare target variables with the generated initializer. llvm::StringSet<> DeclareTargetWithDefinition; /// Emits initialization code for the threadprivate variables. /// \param VDAddr Address of the global variable \a VD. /// \param Ctor Pointer to a global init function for \a VD. /// \param CopyCtor Pointer to a global copy function for \a VD. /// \param Dtor Pointer to a global destructor function for \a VD. /// \param Loc Location of threadprivate declaration. 
void emitThreadPrivateVarInit(CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor, llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc); /// Emit the array initialization or deletion portion for user-defined mapper /// code generation. void emitUDMapperArrayInitOrDel(CodeGenFunction &MapperCGF, llvm::Value *Handle, llvm::Value *BasePtr, llvm::Value *Ptr, llvm::Value *Size, llvm::Value *MapType, llvm::Value *MapName, CharUnits ElementSize, llvm::BasicBlock *ExitBB, bool IsInit); struct TaskResultTy { llvm::Value *NewTask = nullptr; llvm::Function *TaskEntry = nullptr; llvm::Value *NewTaskNewTaskTTy = nullptr; LValue TDBase; const RecordDecl *KmpTaskTQTyRD = nullptr; llvm::Value *TaskDupFn = nullptr; }; /// Emit task region for the task directive. The task region is emitted in /// several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// \param D Current task directive. /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references the shared variables. /// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. /// \param Data Additional data for task generation like tiednsee, final /// state, list of privates etc. 
TaskResultTy emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const OMPTaskDataTy &Data); /// Returns default address space for the constant firstprivates, 0 by /// default. virtual unsigned getDefaultFirstprivateAddressSpace() const { return 0; } /// Emit code that pushes the trip count of loops associated with constructs /// 'target teams distribute' and 'teams distribute parallel for'. /// \param SizeEmitter Emits the int64 value for the number of iterations of /// the associated loop. void emitTargetNumIterationsCall( CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Value *DeviceID, llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter); /// Emit update for lastprivate conditional data. void emitLastprivateConditionalUpdate(CodeGenFunction &CGF, LValue IVLVal, StringRef UniqueDeclName, LValue LVal, SourceLocation Loc); /// Returns the number of the elements and the address of the depobj /// dependency array. /// \return Number of elements in depobj array and the pointer to the array of /// dependencies. std::pair<llvm::Value *, LValue> getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal, SourceLocation Loc); public: explicit CGOpenMPRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM, ".", ".") {} virtual ~CGOpenMPRuntime() {} virtual void clear(); /// Emits code for OpenMP 'if' clause using specified \a CodeGen /// function. Here is the logic: /// if (Cond) { /// ThenGen(); /// } else { /// ElseGen(); /// } void emitIfClause(CodeGenFunction &CGF, const Expr *Cond, const RegionCodeGenTy &ThenGen, const RegionCodeGenTy &ElseGen); /// Checks if the \p Body is the \a CompoundStmt and returns its child /// statement iff there is only one that is not evaluatable at the compile /// time. 
static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body); /// Get the platform-specific name separator. std::string getName(ArrayRef<StringRef> Parts) const; /// Emit code for the specified user defined reduction construct. virtual void emitUserDefinedReduction(CodeGenFunction *CGF, const OMPDeclareReductionDecl *D); /// Get combiner/initializer for the specified user-defined reduction, if any. virtual std::pair<llvm::Function *, llvm::Function *> getUserDefinedReduction(const OMPDeclareReductionDecl *D); /// Emit the function for the user defined mapper construct. void emitUserDefinedMapper(const OMPDeclareMapperDecl *D, CodeGenFunction *CGF = nullptr); /// Get the function for the specified user-defined mapper. If it does not /// exist, create one. llvm::Function * getOrCreateUserDefinedMapperFunc(const OMPDeclareMapperDecl *D); /// Emits outlined function for the specified OpenMP parallel directive /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID, /// kmp_int32 BoundID, struct context_vars*). /// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. virtual llvm::Function *emitParallelOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen); /// Emits outlined function for the specified OpenMP teams directive /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID, /// kmp_int32 BoundID, struct context_vars*). /// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). 
/// \param CodeGen Code generation sequence for the \a D directive. virtual llvm::Function *emitTeamsOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen); /// Emits outlined function for the OpenMP task directive \a D. This /// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t* /// TaskT). /// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. /// \param PartIDVar Variable for partition id in the current OpenMP untied /// task region. /// \param TaskTVar Variable for task_t argument. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. /// \param Tied true if task is generated for tied task, false otherwise. /// \param NumberOfParts Number of parts in untied task. Ignored for tied /// tasks. /// virtual llvm::Function *emitTaskOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, const VarDecl *PartIDVar, const VarDecl *TaskTVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool Tied, unsigned &NumberOfParts); /// Cleans up references to the objects in finished function. /// virtual void functionFinished(CodeGenFunction &CGF); /// Emits code for parallel or serial call of the \a OutlinedFn with /// variables captured in a record which address is stored in \a /// CapturedStruct. /// \param OutlinedFn Outlined function to be run in parallel threads. Type of /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*). /// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. /// \param IfCond Condition in the associated 'if' clause, if it was /// specified, nullptr otherwise. 
/// virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond); /// Emits a critical region. /// \param CriticalName Name of the critical region. /// \param CriticalOpGen Generator for the statement associated with the given /// critical region. /// \param Hint Value of the 'hint' clause (optional). virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName, const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc, const Expr *Hint = nullptr); /// Emits a master region. /// \param MasterOpGen Generator for the statement associated with the given /// master region. virtual void emitMasterRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MasterOpGen, SourceLocation Loc); /// Emits code for a taskyield directive. virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc); /// Emit a taskgroup region. /// \param TaskgroupOpGen Generator for the statement associated with the /// given taskgroup region. virtual void emitTaskgroupRegion(CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen, SourceLocation Loc); /// Emits a single region. /// \param SingleOpGen Generator for the statement associated with the given /// single region. virtual void emitSingleRegion(CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen, SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps); /// Emit an ordered region. /// \param OrderedOpGen Generator for the statement associated with the given /// ordered region. virtual void emitOrderedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &OrderedOpGen, SourceLocation Loc, bool IsThreads); /// Emit an implicit/explicit barrier for OpenMP threads. /// \param Kind Directive for which this implicit barrier call must be /// generated. Must be OMPD_barrier for explicit barrier generation. 
/// \param EmitChecks true if need to emit checks for cancellation barriers. /// \param ForceSimpleCall true simple barrier call must be emitted, false if /// runtime class decides which one to emit (simple or with cancellation /// checks). /// virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind Kind, bool EmitChecks = true, bool ForceSimpleCall = false); /// Check if the specified \a ScheduleKind is static non-chunked. /// This kind of worksharing directive is emitted without outer loop. /// \param ScheduleKind Schedule kind specified in the 'schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is static non-chunked. /// This kind of distribute directive is emitted without outer loop. /// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticNonchunked(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is static chunked. /// \param ScheduleKind Schedule kind specified in the 'schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticChunked(OpenMPScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is static non-chunked. /// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticChunked(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is dynamic. /// This kind of worksharing directive is emitted without outer loop. /// \param ScheduleKind Schedule Kind specified in the 'schedule' clause. 
/// virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const; /// struct with the values to be passed to the dispatch runtime function struct DispatchRTInput { /// Loop lower bound llvm::Value *LB = nullptr; /// Loop upper bound llvm::Value *UB = nullptr; /// Chunk size specified using 'schedule' clause (nullptr if chunk /// was not specified) llvm::Value *Chunk = nullptr; DispatchRTInput() = default; DispatchRTInput(llvm::Value *LB, llvm::Value *UB, llvm::Value *Chunk) : LB(LB), UB(UB), Chunk(Chunk) {} }; /// Call the appropriate runtime routine to initialize it before start /// of loop. /// This is used for non static scheduled types and when the ordered /// clause is present on the loop construct. /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds \a LB and \a UB and stride \a ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param Ordered true if loop is ordered, false otherwise. /// \param DispatchValues struct containing llvm values for lower bound, upper /// bound, and chunk expression. /// For the default (nullptr) value, the chunk 1 will be used. /// virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc, const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned, bool Ordered, const DispatchRTInput &DispatchValues); /// Struct with the values to be passed to the static runtime function struct StaticRTInput { /// Size of the iteration variable in bits. unsigned IVSize = 0; /// Sign of the iteration variable. bool IVSigned = false; /// true if loop is ordered, false otherwise. 
    bool Ordered = false;
    /// Address of the output variable in which the flag of the last iteration
    /// is returned.
    Address IL = Address::invalid();
    /// Address of the output variable in which the lower iteration number is
    /// returned.
    Address LB = Address::invalid();
    /// Address of the output variable in which the upper iteration number is
    /// returned.
    Address UB = Address::invalid();
    /// Address of the output variable in which the stride value is returned
    /// necessary to generate the static_chunked scheduled loop.
    Address ST = Address::invalid();
    /// Value of the chunk for the static_chunked scheduled loop. For the
    /// default (nullptr) value, the chunk 1 will be used.
    llvm::Value *Chunk = nullptr;
    StaticRTInput(unsigned IVSize, bool IVSigned, bool Ordered, Address IL,
                  Address LB, Address UB, Address ST,
                  llvm::Value *Chunk = nullptr)
        : IVSize(IVSize), IVSigned(IVSigned), Ordered(Ordered), IL(IL), LB(LB),
          UB(UB), ST(ST), Chunk(Chunk) {}
  };
  /// Call the appropriate runtime routine to initialize it before start
  /// of loop.
  ///
  /// This is used only in case of static schedule, when the user did not
  /// specify an ordered clause on the loop construct.
  /// Depending on the loop schedule, it is necessary to call some runtime
  /// routine before start of the OpenMP loop to get the loop upper / lower
  /// bounds LB and UB and stride ST.
  ///
  /// \param CGF Reference to current CodeGenFunction.
  /// \param Loc Clang source location.
  /// \param DKind Kind of the directive.
  /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
  /// \param Values Input arguments for the construct.
  ///
  virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
                                 OpenMPDirectiveKind DKind,
                                 const OpenMPScheduleTy &ScheduleKind,
                                 const StaticRTInput &Values);
  ///
  /// \param CGF Reference to current CodeGenFunction.
  /// \param Loc Clang source location.
  /// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
/// \param Values Input arguments for the construct. /// virtual void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values); /// Call the appropriate runtime routine to notify that we finished /// iteration of the ordered loop with the dynamic scheduling. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned); /// Call the appropriate runtime routine to notify that we finished /// all the work with current loop. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive for which the static finish is emitted. /// virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind); /// Call __kmpc_dispatch_next( /// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter, /// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper, /// kmp_int[32|64] *p_stride); /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param IL Address of the output variable in which the flag of the /// last iteration is returned. /// \param LB Address of the output variable in which the lower iteration /// number is returned. /// \param UB Address of the output variable in which the upper iteration /// number is returned. /// \param ST Address of the output variable in which the stride value is /// returned. 
virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned, Address IL, Address LB, Address UB, Address ST); /// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads' /// clause. /// \param NumThreads An integer value of threads. virtual void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc); /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 /// global_tid, int proc_bind) to generate code for 'proc_bind' clause. virtual void emitProcBindClause(CodeGenFunction &CGF, llvm::omp::ProcBindKind ProcBind, SourceLocation Loc); /// Returns address of the threadprivate variable for the current /// thread. /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of the reference to threadprivate var. /// \return Address of the threadprivate variable for the current thread. virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc); /// Returns the address of the variable marked as declare target with link /// clause OR as declare target with to clause and unified memory. virtual Address getAddrOfDeclareTargetVar(const VarDecl *VD); /// Emit a code for initialization of threadprivate variable. It emits /// a call to runtime library which adds initial value to the newly created /// threadprivate variable (if it is not constant) and registers destructor /// for the variable (if any). /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of threadprivate declaration. /// \param PerformInit true if initialization expression is not constant. 
virtual llvm::Function * emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit, CodeGenFunction *CGF = nullptr); /// Emit a code for initialization of declare target variable. /// \param VD Declare target variable. /// \param Addr Address of the global variable \a VD. /// \param PerformInit true if initialization expression is not constant. virtual bool emitDeclareTargetVarDefinition(const VarDecl *VD, llvm::GlobalVariable *Addr, bool PerformInit); /// Creates artificial threadprivate variable with name \p Name and type \p /// VarType. /// \param VarType Type of the artificial threadprivate variable. /// \param Name Name of the artificial threadprivate variable. virtual Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF, QualType VarType, StringRef Name); /// Emit flush of the variables specified in 'omp flush' directive. /// \param Vars List of variables to flush. virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars, SourceLocation Loc, llvm::AtomicOrdering AO); /// Emit task region for the task directive. The task region is /// emitted in several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, /// kmp_task_t *new_task), where new_task is a resulting structure from /// previous items. /// \param D Current task directive. 
  /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
  /// /*part_id*/, captured_struct */*__context*/);
  /// \param SharedsTy A type which contains references to the shared variables.
  /// \param Shareds Context with the list of shared variables from the \p
  /// TaskFunction.
  /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
  /// otherwise.
  /// \param Data Additional data for task generation like tiedness, final
  /// state, list of privates etc.
  virtual void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
                            const OMPExecutableDirective &D,
                            llvm::Function *TaskFunction, QualType SharedsTy,
                            Address Shareds, const Expr *IfCond,
                            const OMPTaskDataTy &Data);

  /// Emit task region for the taskloop directive. The taskloop region is
  /// emitted in several steps:
  /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
  /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
  /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
  /// function:
  /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
  ///   TaskFunction(gtid, tt->part_id, tt->shareds);
  ///   return 0;
  /// }
  /// 2. Copy a list of shared variables to field shareds of the resulting
  /// structure kmp_task_t returned by the previous call (if any).
  /// 3. Copy a pointer to destructions function to field destructions of the
  /// resulting structure kmp_task_t.
  /// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
  /// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
  /// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task
  /// is a resulting structure from
  /// previous items.
  /// \param D Current task directive.
  /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
  /// /*part_id*/, captured_struct */*__context*/);
  /// \param SharedsTy A type which contains references to the shared variables.
  /// \param Shareds Context with the list of shared variables from the \p
  /// TaskFunction.
  /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
  /// otherwise.
  /// \param Data Additional data for task generation like tiedness, final
  /// state, list of privates etc.
  virtual void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
                                const OMPLoopDirective &D,
                                llvm::Function *TaskFunction,
                                QualType SharedsTy, Address Shareds,
                                const Expr *IfCond, const OMPTaskDataTy &Data);

  /// Emit code for the directive that does not require outlining.
  ///
  /// \param InnermostKind Kind of innermost directive (for simple directives it
  /// is a directive itself, for combined - its innermost directive).
  /// \param CodeGen Code generation sequence for the \a D directive.
  /// \param HasCancel true if region has inner cancel directive, false
  /// otherwise.
  virtual void emitInlinedDirective(CodeGenFunction &CGF,
                                    OpenMPDirectiveKind InnermostKind,
                                    const RegionCodeGenTy &CodeGen,
                                    bool HasCancel = false);

  /// Emits reduction function.
  /// \param ArgsType Array type containing pointers to reduction variables.
  /// \param Privates List of private copies for original reduction arguments.
  /// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
  /// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
  /// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
  /// or 'operator binop(LHS, RHS)'.
llvm::Function *emitReductionFunction(SourceLocation Loc, llvm::Type *ArgsType, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps); /// Emits single reduction combiner void emitSingleReductionCombiner(CodeGenFunction &CGF, const Expr *ReductionOp, const Expr *PrivateRef, const DeclRefExpr *LHS, const DeclRefExpr *RHS); struct ReductionOptionsTy { bool WithNowait; bool SimpleReduction; OpenMPDirectiveKind ReductionKind; }; /// Emit a code for reduction clause. Next code should be emitted for /// reduction: /// \code /// /// static kmp_critical_name lock = { 0 }; /// /// void reduce_func(void *lhs[<n>], void *rhs[<n>]) { /// ... /// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]); /// ... /// } /// /// ... /// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]}; /// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList), /// RedList, reduce_func, &<lock>)) { /// case 1: /// ... /// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]); /// ... /// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>); /// break; /// case 2: /// ... /// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i])); /// ... /// break; /// default:; /// } /// \endcode /// /// \param Privates List of private copies for original reduction arguments. /// \param LHSExprs List of LHS in \a ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a ReductionOps reduction operations. /// \param ReductionOps List of reduction operations in form 'LHS binop RHS' /// or 'operator binop(LHS, RHS)'. /// \param Options List of options for reduction codegen: /// WithNowait true if parent directive has also nowait clause, false /// otherwise. /// SimpleReduction Emit reduction operation only. Used for omp simd /// directive on the host. /// ReductionKind The kind of reduction to perform. 
virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options); /// Emit a code for initialization of task reduction clause. Next code /// should be emitted for reduction: /// \code /// /// _taskred_item_t red_data[n]; /// ... /// red_data[i].shar = &shareds[i]; /// red_data[i].orig = &origs[i]; /// red_data[i].size = sizeof(origs[i]); /// red_data[i].f_init = (void*)RedInit<i>; /// red_data[i].f_fini = (void*)RedDest<i>; /// red_data[i].f_comb = (void*)RedOp<i>; /// red_data[i].flags = <Flag_i>; /// ... /// void* tg1 = __kmpc_taskred_init(gtid, n, red_data); /// \endcode /// For reduction clause with task modifier it emits the next call: /// \code /// /// _taskred_item_t red_data[n]; /// ... /// red_data[i].shar = &shareds[i]; /// red_data[i].orig = &origs[i]; /// red_data[i].size = sizeof(origs[i]); /// red_data[i].f_init = (void*)RedInit<i>; /// red_data[i].f_fini = (void*)RedDest<i>; /// red_data[i].f_comb = (void*)RedOp<i>; /// red_data[i].flags = <Flag_i>; /// ... /// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n, /// red_data); /// \endcode /// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations. /// \param Data Additional data for task generation like tiedness, final /// state, list of privates, reductions etc. 
  virtual llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF,
                                             SourceLocation Loc,
                                             ArrayRef<const Expr *> LHSExprs,
                                             ArrayRef<const Expr *> RHSExprs,
                                             const OMPTaskDataTy &Data);

  /// Emits the following code for reduction clause with task modifier:
  /// \code
  /// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
  /// \endcode
  virtual void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
                                     bool IsWorksharingReduction);

  /// Required to resolve existing problems in the runtime. Emits threadprivate
  /// variables to store the size of the VLAs/array sections for
  /// initializer/combiner/finalizer functions.
  /// \param RCG Allows to reuse an existing data for the reductions.
  /// \param N Reduction item for which fixups must be emitted.
  virtual void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
                                       ReductionCodeGen &RCG, unsigned N);

  /// Get the address of `void *` type of the private copy of the reduction
  /// item specified by the \p SharedLVal.
  /// \param ReductionsPtr Pointer to the reduction data returned by the
  /// emitTaskReductionInit function.
  /// \param SharedLVal Address of the original reduction item.
  virtual Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
                                       llvm::Value *ReductionsPtr,
                                       LValue SharedLVal);

  /// Emit code for 'taskwait' directive.
  virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc);

  /// Emit code for 'cancellation point' construct.
  /// \param CancelRegion Region kind for which the cancellation point must be
  /// emitted.
  ///
  virtual void emitCancellationPointCall(CodeGenFunction &CGF,
                                         SourceLocation Loc,
                                         OpenMPDirectiveKind CancelRegion);

  /// Emit code for 'cancel' construct.
  /// \param IfCond Condition in the associated 'if' clause, if it was
  /// specified, nullptr otherwise.
  /// \param CancelRegion Region kind for which the cancel must be emitted.
  ///
  virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
                              const Expr *IfCond,
                              OpenMPDirectiveKind CancelRegion);

  /// Emit outlined function for 'target' directive.
  /// \param D Directive to emit.
  /// \param ParentName Name of the function that encloses the target region.
  /// \param OutlinedFn Outlined function value to be defined by this call.
  /// \param OutlinedFnID Outlined function ID value to be defined by this call.
  /// \param IsOffloadEntry True if the outlined function is an offload entry.
  /// \param CodeGen Code generation sequence for the \a D directive.
  /// An outlined function may not be an entry if, e.g. the if clause always
  /// evaluates to false.
  virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
                                          StringRef ParentName,
                                          llvm::Function *&OutlinedFn,
                                          llvm::Constant *&OutlinedFnID,
                                          bool IsOffloadEntry,
                                          const RegionCodeGenTy &CodeGen);

  /// Emit the target offloading code associated with \a D. The emitted
  /// code attempts offloading the execution to the device; in the event of
  /// a failure it executes the host version outlined in \a OutlinedFn.
  /// \param D Directive to emit.
  /// \param OutlinedFn Host version of the code to be offloaded.
  /// \param OutlinedFnID ID of host version of the code to be offloaded.
  /// \param IfCond Expression evaluated in if clause associated with the target
  /// directive, or null if no if clause is used.
  /// \param Device Expression evaluated in device clause associated with the
  /// target directive, or null if no device clause is used and device modifier.
  /// \param SizeEmitter Callback to emit number of iterations for loop-based
  /// directives.
virtual void emitTargetCall( CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond, llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device, llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter); /// Emit the target regions enclosed in \a GD function definition or /// the function itself in case it is a valid device function. Returns true if /// \a GD was dealt with successfully. /// \param GD Function to scan. virtual bool emitTargetFunctions(GlobalDecl GD); /// Emit the global variable if it is a valid device global variable. /// Returns true if \a GD was dealt with successfully. /// \param GD Variable declaration to emit. virtual bool emitTargetGlobalVariable(GlobalDecl GD); /// Checks if the provided global decl \a GD is a declare target variable and /// registers it when emitting code for the host. virtual void registerTargetGlobalVariable(const VarDecl *VD, llvm::Constant *Addr); /// Registers provided target firstprivate variable as global on the /// target. llvm::Constant *registerTargetFirstprivateCopy(CodeGenFunction &CGF, const VarDecl *VD); /// Emit the global \a GD if it is meaningful for the target. Returns /// if it was emitted successfully. /// \param GD Global to scan. virtual bool emitTargetGlobal(GlobalDecl GD); /// Creates and returns a registration function for when at least one /// requires directives was used in the current module. llvm::Function *emitRequiresDirectiveRegFun(); /// Creates all the offload entries in the current compilation unit /// along with the associated metadata. void createOffloadEntriesAndInfoMetadata(); /// Emits code for teams call of the \a OutlinedFn with /// variables captured in a record which address is stored in \a /// CapturedStruct. /// \param OutlinedFn Outlined function to be run by team masters. 
Type of /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*). /// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. /// virtual void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars); /// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code /// for num_teams clause. /// \param NumTeams An integer expression of teams. /// \param ThreadLimit An integer expression of threads. virtual void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams, const Expr *ThreadLimit, SourceLocation Loc); /// Struct that keeps all the relevant information that should be kept /// throughout a 'target data' region. class TargetDataInfo { /// Set to true if device pointer information have to be obtained. bool RequiresDevicePointerInfo = false; /// Set to true if Clang emits separate runtime calls for the beginning and /// end of the region. These calls might have separate map type arrays. bool SeparateBeginEndCalls = false; public: /// The array of base pointer passed to the runtime library. llvm::Value *BasePointersArray = nullptr; /// The array of section pointers passed to the runtime library. llvm::Value *PointersArray = nullptr; /// The array of sizes passed to the runtime library. llvm::Value *SizesArray = nullptr; /// The array of map types passed to the runtime library for the beginning /// of the region or for the entire region if there are no separate map /// types for the region end. llvm::Value *MapTypesArray = nullptr; /// The array of map types passed to the runtime library for the end of the /// region, or nullptr if there are no separate map types for the region /// end. llvm::Value *MapTypesArrayEnd = nullptr; /// The array of user-defined mappers passed to the runtime library. 
llvm::Value *MappersArray = nullptr; /// The array of original declaration names of mapped pointers sent to the /// runtime library for debugging llvm::Value *MapNamesArray = nullptr; /// Indicate whether any user-defined mapper exists. bool HasMapper = false; /// The total number of pointers passed to the runtime library. unsigned NumberOfPtrs = 0u; /// Map between the a declaration of a capture and the corresponding base /// pointer address where the runtime returns the device pointers. llvm::DenseMap<const ValueDecl *, Address> CaptureDeviceAddrMap; explicit TargetDataInfo() {} explicit TargetDataInfo(bool RequiresDevicePointerInfo, bool SeparateBeginEndCalls) : RequiresDevicePointerInfo(RequiresDevicePointerInfo), SeparateBeginEndCalls(SeparateBeginEndCalls) {} /// Clear information about the data arrays. void clearArrayInfo() { BasePointersArray = nullptr; PointersArray = nullptr; SizesArray = nullptr; MapTypesArray = nullptr; MapTypesArrayEnd = nullptr; MapNamesArray = nullptr; MappersArray = nullptr; HasMapper = false; NumberOfPtrs = 0u; } /// Return true if the current target data information has valid arrays. bool isValid() { return BasePointersArray && PointersArray && SizesArray && MapTypesArray && (!HasMapper || MappersArray) && NumberOfPtrs; } bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; } bool separateBeginEndCalls() { return SeparateBeginEndCalls; } }; /// Emit the target data mapping code associated with \a D. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the /// target directive, or null if no device clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. /// \param Info A record used to store information that needs to be preserved /// until the region is closed. 
virtual void emitTargetDataCalls(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info); /// Emit the data mapping/movement code associated with the directive /// \a D that should be of the form 'target [{enter|exit} data | update]'. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. virtual void emitTargetDataStandAloneCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device); /// Marks function \a Fn with properly mangled versions of vector functions. /// \param FD Function marked as 'declare simd'. /// \param Fn LLVM function that must be marked with 'declare simd' /// attributes. virtual void emitDeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn); /// Emit initialization for doacross loop nesting support. /// \param D Loop-based construct used in doacross nesting construct. virtual void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D, ArrayRef<Expr *> NumIterations); /// Emit code for doacross ordered directive with 'depend' clause. /// \param C 'depend' clause with 'sink|source' dependency kind. virtual void emitDoacrossOrdered(CodeGenFunction &CGF, const OMPDependClause *C); /// Translates the native parameter of outlined function if this is required /// for target. /// \param FD Field decl from captured record for the parameter. /// \param NativeParam Parameter itself. virtual const VarDecl *translateParameter(const FieldDecl *FD, const VarDecl *NativeParam) const { return NativeParam; } /// Gets the address of the native argument basing on the address of the /// target-specific parameter. /// \param NativeParam Parameter itself. 
/// \param TargetParam Corresponding target-specific parameter. virtual Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam, const VarDecl *TargetParam) const; /// Choose default schedule type and chunk value for the /// dist_schedule clause. virtual void getDefaultDistScheduleAndChunk(CodeGenFunction &CGF, const OMPLoopDirective &S, OpenMPDistScheduleClauseKind &ScheduleKind, llvm::Value *&Chunk) const {} /// Choose default schedule type and chunk value for the /// schedule clause. virtual void getDefaultScheduleAndChunk(CodeGenFunction &CGF, const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind, const Expr *&ChunkExpr) const; /// Emits call of the outlined function with the provided arguments, /// translating these arguments to correct target-specific arguments. virtual void emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn, ArrayRef<llvm::Value *> Args = llvm::None) const; /// Emits OpenMP-specific function prolog. /// Required for device constructs. virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D); /// Gets the OpenMP-specific address of the local variable. virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD); /// Marks the declaration as already emitted for the device code and returns /// true, if it was marked already, and false, otherwise. bool markAsGlobalTarget(GlobalDecl GD); /// Emit deferred declare target variables marked for deferred emission. void emitDeferredTargetDecls() const; /// Adjust some parameters for the target-based directives, like addresses of /// the variables captured by reference in lambdas. 
virtual void adjustTargetSpecificDataForLambdas(CodeGenFunction &CGF, const OMPExecutableDirective &D) const; /// Perform check on requires decl to ensure that target architecture /// supports unified addressing virtual void processRequiresDirective(const OMPRequiresDecl *D); /// Gets default memory ordering as specified in requires directive. llvm::AtomicOrdering getDefaultMemoryOrdering() const; /// Checks if the variable has associated OMPAllocateDeclAttr attribute with /// the predefined allocator and translates it into the corresponding address /// space. virtual bool hasAllocateAttributeForGlobalVar(const VarDecl *VD, LangAS &AS); /// Return whether the unified_shared_memory has been specified. bool hasRequiresUnifiedSharedMemory() const; /// Checks if the \p VD variable is marked as nontemporal declaration in /// current context. bool isNontemporalDecl(const ValueDecl *VD) const; /// Create specialized alloca to handle lastprivate conditionals. Address emitLastprivateConditionalInit(CodeGenFunction &CGF, const VarDecl *VD); /// Checks if the provided \p LVal is lastprivate conditional and emits the /// code to update the value of the original variable. /// \code /// lastprivate(conditional: a) /// ... /// <type> a; /// lp_a = ...; /// #pragma omp critical(a) /// if (last_iv_a <= iv) { /// last_iv_a = iv; /// global_a = lp_a; /// } /// \endcode virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS); /// Checks if the lastprivate conditional was updated in inner region and /// writes the value. /// \code /// lastprivate(conditional: a) /// ... /// <type> a;bool Fired = false; /// #pragma omp ... 
shared(a) /// { /// lp_a = ...; /// Fired = true; /// } /// if (Fired) { /// #pragma omp critical(a) /// if (last_iv_a <= iv) { /// last_iv_a = iv; /// global_a = lp_a; /// } /// Fired = false; /// } /// \endcode virtual void checkAndEmitSharedLastprivateConditional( CodeGenFunction &CGF, const OMPExecutableDirective &D, const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls); /// Gets the address of the global copy used for lastprivate conditional /// update, if any. /// \param PrivLVal LValue for the private copy. /// \param VD Original lastprivate declaration. virtual void emitLastprivateConditionalFinalUpdate(CodeGenFunction &CGF, LValue PrivLVal, const VarDecl *VD, SourceLocation Loc); /// Emits list of dependecies based on the provided data (array of /// dependence/expression pairs). /// \returns Pointer to the first element of the array casted to VoidPtr type. std::pair<llvm::Value *, Address> emitDependClause(CodeGenFunction &CGF, ArrayRef<OMPTaskDataTy::DependData> Dependencies, SourceLocation Loc); /// Emits list of dependecies based on the provided data (array of /// dependence/expression pairs) for depobj construct. In this case, the /// variable is allocated in dynamically. \returns Pointer to the first /// element of the array casted to VoidPtr type. Address emitDepobjDependClause(CodeGenFunction &CGF, const OMPTaskDataTy::DependData &Dependencies, SourceLocation Loc); /// Emits the code to destroy the dependency object provided in depobj /// directive. void emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal, SourceLocation Loc); /// Updates the dependency kind in the specified depobj object. /// \param DepobjLVal LValue for the main depobj object. /// \param NewDepKind New dependency kind. void emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal, OpenMPDependClauseKind NewDepKind, SourceLocation Loc); /// Initializes user defined allocators specified in the uses_allocators /// clauses. 
  void emitUsesAllocatorsInit(CodeGenFunction &CGF, const Expr *Allocator,
                              const Expr *AllocatorTraits);

  /// Destroys user defined allocators specified in the uses_allocators clause.
  void emitUsesAllocatorsFini(CodeGenFunction &CGF, const Expr *Allocator);

  /// Returns true if the variable is a local variable in untied task.
  bool isLocalVarInUntiedTask(CodeGenFunction &CGF, const VarDecl *VD) const;
};

/// Class supports emission of SIMD-only code.
class CGOpenMPSIMDRuntime final : public CGOpenMPRuntime {
public:
  explicit CGOpenMPSIMDRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM) {}
  ~CGOpenMPSIMDRuntime() override {}

  /// Emits outlined function for the specified OpenMP parallel directive
  /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
  /// kmp_int32 BoundID, struct context_vars*).
  /// \param D OpenMP directive.
  /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
  /// \param InnermostKind Kind of innermost directive (for simple directives it
  /// is a directive itself, for combined - its innermost directive).
  /// \param CodeGen Code generation sequence for the \a D directive.
  llvm::Function *
  emitParallelOutlinedFunction(const OMPExecutableDirective &D,
                               const VarDecl *ThreadIDVar,
                               OpenMPDirectiveKind InnermostKind,
                               const RegionCodeGenTy &CodeGen) override;

  /// Emits outlined function for the specified OpenMP teams directive
  /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
  /// kmp_int32 BoundID, struct context_vars*).
  /// \param D OpenMP directive.
  /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
  /// \param InnermostKind Kind of innermost directive (for simple directives it
  /// is a directive itself, for combined - its innermost directive).
  /// \param CodeGen Code generation sequence for the \a D directive.
llvm::Function * emitTeamsOutlinedFunction(const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) override; /// Emits outlined function for the OpenMP task directive \a D. This /// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t* /// TaskT). /// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. /// \param PartIDVar Variable for partition id in the current OpenMP untied /// task region. /// \param TaskTVar Variable for task_t argument. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. /// \param Tied true if task is generated for tied task, false otherwise. /// \param NumberOfParts Number of parts in untied task. Ignored for tied /// tasks. /// llvm::Function *emitTaskOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, const VarDecl *PartIDVar, const VarDecl *TaskTVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool Tied, unsigned &NumberOfParts) override; /// Emits code for parallel or serial call of the \a OutlinedFn with /// variables captured in a record which address is stored in \a /// CapturedStruct. /// \param OutlinedFn Outlined function to be run in parallel threads. Type of /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*). /// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. /// \param IfCond Condition in the associated 'if' clause, if it was /// specified, nullptr otherwise. /// void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) override; /// Emits a critical region. 
/// \param CriticalName Name of the critical region. /// \param CriticalOpGen Generator for the statement associated with the given /// critical region. /// \param Hint Value of the 'hint' clause (optional). void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName, const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc, const Expr *Hint = nullptr) override; /// Emits a master region. /// \param MasterOpGen Generator for the statement associated with the given /// master region. void emitMasterRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MasterOpGen, SourceLocation Loc) override; /// Emits code for a taskyield directive. void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc) override; /// Emit a taskgroup region. /// \param TaskgroupOpGen Generator for the statement associated with the /// given taskgroup region. void emitTaskgroupRegion(CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen, SourceLocation Loc) override; /// Emits a single region. /// \param SingleOpGen Generator for the statement associated with the given /// single region. void emitSingleRegion(CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen, SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps) override; /// Emit an ordered region. /// \param OrderedOpGen Generator for the statement associated with the given /// ordered region. void emitOrderedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &OrderedOpGen, SourceLocation Loc, bool IsThreads) override; /// Emit an implicit/explicit barrier for OpenMP threads. /// \param Kind Directive for which this implicit barrier call must be /// generated. Must be OMPD_barrier for explicit barrier generation. /// \param EmitChecks true if need to emit checks for cancellation barriers. 
/// \param ForceSimpleCall true simple barrier call must be emitted, false if /// runtime class decides which one to emit (simple or with cancellation /// checks). /// void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind Kind, bool EmitChecks = true, bool ForceSimpleCall = false) override; /// This is used for non static scheduled types and when the ordered /// clause is present on the loop construct. /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds \a LB and \a UB and stride \a ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param Ordered true if loop is ordered, false otherwise. /// \param DispatchValues struct containing llvm values for lower bound, upper /// bound, and chunk expression. /// For the default (nullptr) value, the chunk 1 will be used. /// void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc, const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned, bool Ordered, const DispatchRTInput &DispatchValues) override; /// Call the appropriate runtime routine to initialize it before start /// of loop. /// /// This is used only in case of static schedule, when the user did not /// specify a ordered clause on the loop construct. /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds LB and UB and stride ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive. /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. 
/// \param Values Input arguments for the construct. /// void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind, const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values) override; /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause. /// \param Values Input arguments for the construct. /// void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values) override; /// Call the appropriate runtime routine to notify that we finished /// iteration of the ordered loop with the dynamic scheduling. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned) override; /// Call the appropriate runtime routine to notify that we finished /// all the work with current loop. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive for which the static finish is emitted. /// void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind) override; /// Call __kmpc_dispatch_next( /// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter, /// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper, /// kmp_int[32|64] *p_stride); /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param IL Address of the output variable in which the flag of the /// last iteration is returned. /// \param LB Address of the output variable in which the lower iteration /// number is returned. 
/// \param UB Address of the output variable in which the upper iteration /// number is returned. /// \param ST Address of the output variable in which the stride value is /// returned. llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned, Address IL, Address LB, Address UB, Address ST) override; /// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads' /// clause. /// \param NumThreads An integer value of threads. void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc) override; /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 /// global_tid, int proc_bind) to generate code for 'proc_bind' clause. void emitProcBindClause(CodeGenFunction &CGF, llvm::omp::ProcBindKind ProcBind, SourceLocation Loc) override; /// Returns address of the threadprivate variable for the current /// thread. /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of the reference to threadprivate var. /// \return Address of the threadprivate variable for the current thread. Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc) override; /// Emit a code for initialization of threadprivate variable. It emits /// a call to runtime library which adds initial value to the newly created /// threadprivate variable (if it is not constant) and registers destructor /// for the variable (if any). /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of threadprivate declaration. /// \param PerformInit true if initialization expression is not constant. 
llvm::Function * emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit, CodeGenFunction *CGF = nullptr) override; /// Creates artificial threadprivate variable with name \p Name and type \p /// VarType. /// \param VarType Type of the artificial threadprivate variable. /// \param Name Name of the artificial threadprivate variable. Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF, QualType VarType, StringRef Name) override; /// Emit flush of the variables specified in 'omp flush' directive. /// \param Vars List of variables to flush. void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars, SourceLocation Loc, llvm::AtomicOrdering AO) override; /// Emit task region for the task directive. The task region is /// emitted in several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, /// kmp_task_t *new_task), where new_task is a resulting structure from /// previous items. /// \param D Current task directive. /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references the shared variables. /// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. 
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr /// otherwise. /// \param Data Additional data for task generation like tiednsee, final /// state, list of privates etc. void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data) override; /// Emit task region for the taskloop directive. The taskloop region is /// emitted in several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t /// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int /// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task /// is a resulting structure from /// previous items. /// \param D Current task directive. /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references the shared variables. /// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr /// otherwise. /// \param Data Additional data for task generation like tiednsee, final /// state, list of privates etc. 
void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data) override; /// Emit a code for reduction clause. Next code should be emitted for /// reduction: /// \code /// /// static kmp_critical_name lock = { 0 }; /// /// void reduce_func(void *lhs[<n>], void *rhs[<n>]) { /// ... /// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]); /// ... /// } /// /// ... /// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]}; /// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList), /// RedList, reduce_func, &<lock>)) { /// case 1: /// ... /// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]); /// ... /// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>); /// break; /// case 2: /// ... /// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i])); /// ... /// break; /// default:; /// } /// \endcode /// /// \param Privates List of private copies for original reduction arguments. /// \param LHSExprs List of LHS in \a ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a ReductionOps reduction operations. /// \param ReductionOps List of reduction operations in form 'LHS binop RHS' /// or 'operator binop(LHS, RHS)'. /// \param Options List of options for reduction codegen: /// WithNowait true if parent directive has also nowait clause, false /// otherwise. /// SimpleReduction Emit reduction operation only. Used for omp simd /// directive on the host. /// ReductionKind The kind of reduction to perform. void emitReduction(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) override; /// Emit a code for initialization of task reduction clause. 
Next code /// should be emitted for reduction: /// \code /// /// _taskred_item_t red_data[n]; /// ... /// red_data[i].shar = &shareds[i]; /// red_data[i].orig = &origs[i]; /// red_data[i].size = sizeof(origs[i]); /// red_data[i].f_init = (void*)RedInit<i>; /// red_data[i].f_fini = (void*)RedDest<i>; /// red_data[i].f_comb = (void*)RedOp<i>; /// red_data[i].flags = <Flag_i>; /// ... /// void* tg1 = __kmpc_taskred_init(gtid, n, red_data); /// \endcode /// For reduction clause with task modifier it emits the next call: /// \code /// /// _taskred_item_t red_data[n]; /// ... /// red_data[i].shar = &shareds[i]; /// red_data[i].orig = &origs[i]; /// red_data[i].size = sizeof(origs[i]); /// red_data[i].f_init = (void*)RedInit<i>; /// red_data[i].f_fini = (void*)RedDest<i>; /// red_data[i].f_comb = (void*)RedOp<i>; /// red_data[i].flags = <Flag_i>; /// ... /// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n, /// red_data); /// \endcode /// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations. /// \param Data Additional data for task generation like tiedness, final /// state, list of privates, reductions etc. llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) override; /// Emits the following code for reduction clause with task modifier: /// \code /// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing); /// \endcode void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc, bool IsWorksharingReduction) override; /// Required to resolve existing problems in the runtime. 
Emits threadprivate /// variables to store the size of the VLAs/array sections for /// initializer/combiner/finalizer functions + emits threadprivate variable to /// store the pointer to the original reduction item for the custom /// initializer defined by declare reduction construct. /// \param RCG Allows to reuse an existing data for the reductions. /// \param N Reduction item for which fixups must be emitted. void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc, ReductionCodeGen &RCG, unsigned N) override; /// Get the address of `void *` type of the privatue copy of the reduction /// item specified by the \p SharedLVal. /// \param ReductionsPtr Pointer to the reduction data returned by the /// emitTaskReductionInit function. /// \param SharedLVal Address of the original reduction item. Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *ReductionsPtr, LValue SharedLVal) override; /// Emit code for 'taskwait' directive. void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc) override; /// Emit code for 'cancellation point' construct. /// \param CancelRegion Region kind for which the cancellation point must be /// emitted. /// void emitCancellationPointCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind CancelRegion) override; /// Emit code for 'cancel' construct. /// \param IfCond Condition in the associated 'if' clause, if it was /// specified, nullptr otherwise. /// \param CancelRegion Region kind for which the cancel must be emitted. /// void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc, const Expr *IfCond, OpenMPDirectiveKind CancelRegion) override; /// Emit outilined function for 'target' directive. /// \param D Directive to emit. /// \param ParentName Name of the function that encloses the target region. /// \param OutlinedFn Outlined function value to be defined by this call. /// \param OutlinedFnID Outlined function ID value to be defined by this call. 
/// \param IsOffloadEntry True if the outlined function is an offload entry. /// \param CodeGen Code generation sequence for the \a D directive. /// An outlined function may not be an entry if, e.g. the if clause always /// evaluates to false. void emitTargetOutlinedFunction(const OMPExecutableDirective &D, StringRef ParentName, llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) override; /// Emit the target offloading code associated with \a D. The emitted /// code attempts offloading the execution to the device, an the event of /// a failure it executes the host version outlined in \a OutlinedFn. /// \param D Directive to emit. /// \param OutlinedFn Host version of the code to be offloaded. /// \param OutlinedFnID ID of host version of the code to be offloaded. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used and device modifier. void emitTargetCall( CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond, llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device, llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter) override; /// Emit the target regions enclosed in \a GD function definition or /// the function itself in case it is a valid device function. Returns true if /// \a GD was dealt with successfully. /// \param GD Function to scan. bool emitTargetFunctions(GlobalDecl GD) override; /// Emit the global variable if it is a valid device global variable. /// Returns true if \a GD was dealt with successfully. /// \param GD Variable declaration to emit. 
bool emitTargetGlobalVariable(GlobalDecl GD) override; /// Emit the global \a GD if it is meaningful for the target. Returns /// if it was emitted successfully. /// \param GD Global to scan. bool emitTargetGlobal(GlobalDecl GD) override; /// Emits code for teams call of the \a OutlinedFn with /// variables captured in a record which address is stored in \a /// CapturedStruct. /// \param OutlinedFn Outlined function to be run by team masters. Type of /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*). /// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. /// void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars) override; /// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code /// for num_teams clause. /// \param NumTeams An integer expression of teams. /// \param ThreadLimit An integer expression of threads. void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams, const Expr *ThreadLimit, SourceLocation Loc) override; /// Emit the target data mapping code associated with \a D. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the /// target directive, or null if no device clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. /// \param Info A record used to store information that needs to be preserved /// until the region is closed. 
void emitTargetDataCalls(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) override; /// Emit the data mapping/movement code associated with the directive /// \a D that should be of the form 'target [{enter|exit} data | update]'. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. void emitTargetDataStandAloneCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device) override; /// Emit initialization for doacross loop nesting support. /// \param D Loop-based construct used in doacross nesting construct. void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D, ArrayRef<Expr *> NumIterations) override; /// Emit code for doacross ordered directive with 'depend' clause. /// \param C 'depend' clause with 'sink|source' dependency kind. void emitDoacrossOrdered(CodeGenFunction &CGF, const OMPDependClause *C) override; /// Translates the native parameter of outlined function if this is required /// for target. /// \param FD Field decl from captured record for the parameter. /// \param NativeParam Parameter itself. const VarDecl *translateParameter(const FieldDecl *FD, const VarDecl *NativeParam) const override; /// Gets the address of the native argument basing on the address of the /// target-specific parameter. /// \param NativeParam Parameter itself. /// \param TargetParam Corresponding target-specific parameter. Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam, const VarDecl *TargetParam) const override; /// Gets the OpenMP-specific address of the local variable. 
Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD) override { return Address::invalid(); } }; } // namespace CodeGen } // namespace clang #endif
c_pi.c
/* *********************************************************************** This program is part of the OpenMP Source Code Repository http://www.pcg.ull.es/ompscr/ e-mail: ompscr@etsii.ull.es This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License (LICENSE file) along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA FILE: c_pi.c VERSION: 1.0 DATE: May 2004 COMMENTS TO: sande@csi.ull.es DESCRIPTION: Parallel implementation of PI generator using OpenMP COMMENTS: The area under the curve y=4/(1+x*x) between 0 and 1 provides a way to compute Pi The value of this integral can be approximated using a sum. 
REFERENCES: http://en.wikipedia.org/wiki/Pi http://nereida.deioc.ull.es/~llCoMP/examples/examples/pi/pi_description.html BASIC PRAGMAS: parallel USAGE: ./c_pi.par INPUT: Default precision OUTPUT: The value of PI FILE FORMATS: - RESTRICTIONS: - REVISION HISTORY: **************************************************************************/ /* variable local should have been in private clauses, otherwise it can cause race condition the expected result is that local has been reported as data race */ #include "OmpSCR.h" #define DEFAULT_PREC 1000000 /* Default precision */ #define NUM_ARGS 1 #define NUM_TIMERS 1 int main(int argc, char *argv[]) { double PI25DT = 3.141592653589793238462643; double local, w, total_time, pi; long i, N; /* Precision */ int NUMTHREADS; char *PARAM_NAMES[NUM_ARGS] = {"Precision"}; char *DEFAULTS_VALUE[NUM_ARGS] = {"1000000"}; char *TIMERS_NAMES[NUM_TIMERS] = {"Total_time"}; /* Default: DEFAULT_PREC; */ NUMTHREADS = omp_get_max_threads(); OSCR_init (NUMTHREADS, "Pi generator", "Param: precission", NUM_ARGS, PARAM_NAMES, DEFAULTS_VALUE , NUM_TIMERS, NUM_TIMERS, TIMERS_NAMES, argc, argv); N = OSCR_getarg_int(NUM_ARGS); OSCR_timer_start(0); w = 1.0 / N; pi = 0.0; /* #pragma omp parallel for default(shared) private(i, local)reduction(+:pi) schedule(static, 1) */ #pragma omp parallel default(shared) #pragma omp for reduction(+:pi) for(i = 0; i < N; i++) { local = (i + 0.5) * w; pi += 4.0 / (1.0 + local * local); } pi *= w; OSCR_timer_stop(0); total_time = OSCR_timer_read(0); OSCR_report(); printf("\n \t# THREADS INTERVAL \tTIME (secs.) \tPI \t\t\tERROR\n"); printf("\t %d \t%10ld \t%14.6lf \t%1.20f\t%g\n", NUMTHREADS, N, total_time, pi, PI25DT-pi); return 0; } /* * vim:ts=2:sw=2: */
GB_binop__bset_int16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): auto-generated kernel instantiation for the BSET (set bit)
// operator on int16.  Any change belongs in the Generator/ template, not here.
// The function bodies are #include'd templates; the macros below configure
// the templates for this type/operator combination.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bset_int16)
// A.*B function (eWiseMult):       GB (_AemultB_08__bset_int16)
// A.*B function (eWiseMult):       GB (_AemultB_02__bset_int16)
// A.*B function (eWiseMult):       GB (_AemultB_04__bset_int16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bset_int16)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__bset_int16)
// C+=b function (dense accum):     GB (_Cdense_accumb__bset_int16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bset_int16)
// C=scalar+B                       GB (_bind1st__bset_int16)
// C=scalar+B'                      GB (_bind1st_tran__bset_int16)
// C=A+scalar                       GB (_bind2nd__bset_int16)
// C=A'+scalar                      GB (_bind2nd_tran__bset_int16)

// C type:     int16_t
// A type:     int16_t
// A pattern?  0
// B type:     int16_t
// B pattern?  0

// BinaryOp: cij = GB_BITSET (aij, bij, int16_t, 16)

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int16_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_BITSET (x, y, int16_t, 16) ;

// true if the binop must be flipped (BSET is not commutative)
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BSET || GxB_NO_INT16 || GxB_NO_BSET_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// BSET is none of these, so this kernel is compiled out.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__bset_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bset_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bset_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return, emitted by the generator
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bset_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only used by eWiseUnion (values for entries present in
    // just one of A or B); plain eWiseAdd leaves them uninitialized and unused
    int16_t alpha_scalar ;
    int16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bset_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bset_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bset_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bset_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bset_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B, or NULL if B is full
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_BITSET (x, bij, int16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bset_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A, or NULL if A is full
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_BITSET (aij, y, int16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    int16_t aij = GBX (Ax, pA, false) ;             \
    Cx [pC] = GB_BITSET (x, aij, int16_t, 16) ;     \
}

GrB_Info GB (_bind1st_tran__bset_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    int16_t aij = GBX (Ax, pA, false) ;             \
    Cx [pC] = GB_BITSET (aij, y, int16_t, 16) ;     \
}

GrB_Info GB (_bind2nd_tran__bset_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
atomic.c
#include <omp.h> int main (void) { int a; #pragma omp parallel { #pragma omp atomic release hint(test) read a+=1; } return 0; }
GB_binop__lor_uint32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// NOTE(review): auto-generated kernel instantiation for the logical-OR
// operator on uint32 (older generator layout than the Generated2/ files in
// this tree).  Any change belongs in the Generator/ template, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__lor_uint32
// A.*B function (eWiseMult):       GB_AemultB__lor_uint32
// A*D function (colscale):         GB_AxD__lor_uint32
// D*A function (rowscale):         GB_DxB__lor_uint32
// C+=B function (dense accum):     GB_Cdense_accumB__lor_uint32
// C+=b function (dense accum):     GB_Cdense_accumb__lor_uint32
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__lor_uint32
// C=scalar+B                       GB_bind1st__lor_uint32
// C=scalar+B'                      GB_bind1st_tran__lor_uint32
// C=A+scalar                       GB_bind2nd__lor_uint32
// C=A'+scalar                      GB_bind2nd_tran__lor_uint32

// C type:    uint32_t
// A type:    uint32_t
// B,b type:  uint32_t

// BinaryOp: cij = ((aij != 0) || (bij != 0))

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint32_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = ((x != 0) || (y != 0)) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LOR || GxB_NO_UINT32 || GxB_NO_LOR_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// LOR is none of these, so this kernel is compiled out.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__lor_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__lor_uint32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__lor_uint32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return, emitted by the generator
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__lor_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    bool A_is_pattern,
    const GrB_Matrix D,
    bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__lor_uint32
(
    GrB_Matrix C,
    const GrB_Matrix D,
    bool D_is_pattern,
    const GrB_Matrix B,
    bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// free the ek_slice workspaces for M, A, and B (safe on NULL pointers)
#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                         \
{                                                                           \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;      \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;      \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;      \
}

GrB_Info GB_AaddB__lor_uint32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__lor_uint32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__lor_uint32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,   // bitmap of B, or NULL if B is full
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = Bx [p] ;
        Cx [p] = ((x != 0) || (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__lor_uint32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,   // bitmap of A, or NULL if A is full
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = Ax [p] ;
        Cx [p] = ((aij != 0) || (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint32_t aij = Ax [pA] ;                    \
    Cx [pC] = ((x != 0) || (aij != 0)) ;        \
}

GrB_Info GB_bind1st_tran__lor_uint32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint32_t aij = Ax [pA] ;                    \
    Cx [pC] = ((aij != 0) || (y != 0)) ;        \
}

GrB_Info GB_bind2nd_tran__lor_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
target-32.c
#include <stdlib.h> #include <unistd.h> int main () { int a = 0, b = 0, c = 0, d[7]; #pragma omp parallel #pragma omp single { #pragma omp task depend(out: d[0]) a = 2; #pragma omp target enter data nowait map(to: a,b,c) depend(in: d[0]) depend(out: d[1]) #pragma omp target nowait map(alloc: a) depend(in: d[1]) depend(out: d[2]) a++; #pragma omp target nowait map(alloc: b) depend(in: d[2]) depend(out: d[3]) { usleep (1000); #pragma omp atomic update b |= 4; } #pragma omp target nowait map(alloc: b) depend(in: d[2]) depend(out: d[4]) { usleep (5000); #pragma omp atomic update b |= 1; } #pragma omp target nowait map(alloc: c) depend(in: d[3], d[4]) depend(out: d[5]) { usleep (5000); #pragma omp atomic update c |= 8; } #pragma omp target nowait map(alloc: c) depend(in: d[3], d[4]) depend(out: d[6]) { usleep (1000); #pragma omp atomic update c |= 2; } #pragma omp target exit data map(always,from: a,b,c) depend(in: d[5], d[6]) } if (a != 3 || b != 5 || c != 10) abort (); return 0; }
pi_mc_par.c
#include <stdio.h> #include <omp.h> #include "random.h" static long num_trials = 1000000; int main() { long i; long Ncirc = 0; double pi, x, y, test, time; double r = 1.0; // radius of circle. Side of squrare is 2*r time = omp_get_wtime(); #pragma omp parallel { #pragma omp single printf(" %d threads ", omp_get_num_threads()); seed(-r, r); #pragma omp for reduction(+ \ : Ncirc) private(x, y, test) for (i = 0; i < num_trials; i++) { x = drandom(); y = drandom(); test = x * x + y * y; if (test <= r * r) Ncirc++; } } pi = 4.0 * ((double)Ncirc / (double)num_trials); printf("\n %ld trials, pi is %lf ", num_trials, pi); printf(" in %lf seconds\n", omp_get_wtime() - time); return 0; }
040_distributed_array.c
#include <stdio.h> #include <stdlib.h> #include <mpi.h> #include <omp.h> #define NR_DIMS 2 int main(int argc, char *argv[]) { const int periodic[NR_DIMS] = {0, 0}; const int reorder = 0; const int local_rows = 300, local_cols = 400; int proc_rows, proc_cols; int row, col; int rank, size; int dims[NR_DIMS] = {0, 0}; int cart_coords[NR_DIMS]; double sum = 0.0, avg; double *data; int data_size = local_rows*local_cols*sizeof(double); MPI_Comm cart_comm; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Dims_create(size, NR_DIMS, dims); MPI_Cart_create(MPI_COMM_WORLD, NR_DIMS, dims, periodic, reorder, &cart_comm); if (!(data = (double *) malloc(data_size))) { fprintf(stderr, "### error: rank %d can't allocate memory\n", rank); MPI_Abort(MPI_COMM_WORLD, 1); } MPI_Cart_coords(cart_comm, rank, NR_DIMS, cart_coords); fprintf(stderr, "rank %d: (%d, %d)\n", rank, cart_coords[0], cart_coords[1]); srand(rank); for (row = 0; row < local_rows; row++) for (col = 0; col < local_cols; col++) data[row*local_cols + col] = ((double) rand())/RAND_MAX; #pragma omp parallel { int thread_num = omp_get_thread_num(); printf("rank %d:%d has %d threads\n", rank, thread_num, omp_get_num_threads()); #pragma omp for reduction(+:sum) for (row = 0; row < local_rows; row++) { int col; for (col = 0; col < local_cols; col++) sum += data[row*local_cols + col]; } } MPI_Reduce(&sum, &avg, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); if (rank == 0) printf("average = %le\n", avg); free(data); MPI_Finalize(); return 0; }
rf_matrix.h
#ifndef RF_MATRIX_H #define RF_MATRIX_H // headers #include <cstdio> #include <cstdlib> #include <cstring> #include <algorithm> #include <vector> #include <cmath> #include <cstddef> #include <assert.h> #include <omp.h> #include <iostream> #include <fstream> #include <sstream> #if __cplusplus >= 201103L || (defined(_MSC_VER) && (_MSC_VER >= 1500)) // Visual Studio 2008 #define CPP11 #endif #ifdef _MSC_VER #if _MSC_VER >= 1600 #include <cstdint> #else typedef __int8 int8_t; typedef __int16 int16_t; typedef __int32 int32_t; typedef __int64 int64_t; typedef unsigned __int8 uint8_t; typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; typedef unsigned __int64 uint64_t; #endif #else #if !defined(_MSC_VER) && defined(CPP11) #include <cstdint> #else typedef short int int16_t; typedef int int32_t; typedef long int64_t; typedef unsigned char uint8_t; typedef unsigned short int uint16_t; typedef unsigned int uint32_t; typedef unsigned long uint64_t; #endif #endif /* random number genrator: simulate the interface of python random module*/ #include <limits> #if defined(CPP11) #include <random> template<typename engine_t=std::mt19937> struct random_number_generator : public engine_t { typedef typename engine_t::result_type result_type; random_number_generator(unsigned seed=0): engine_t(seed){ } result_type randrange(result_type end=engine_t::max()) { return engine_t::operator()() % end; } template<class T=double, class T2=double> T uniform(T start=0.0, T2 end=1.0) { return std::uniform_real_distribution<T>(start, (T)end)(*this); } template<class T=double> T normal(T mean=0.0, T stddev=1.0) { return std::normal_distribution<T>(mean, stddev)(*this); } template<class T=int, class T2=T> T randint(T start=0, T2 end=std::numeric_limits<T>::max()) { return std::uniform_int_distribution<T>(start, end)(*this); } template<class RandIter> void shuffle(RandIter first, RandIter last) { std::shuffle(first, last, *this); } }; #else #include <tr1/random> template<typename 
engine_t=std::tr1::mt19937> struct random_number_generator : public engine_t { typedef typename engine_t::result_type result_type; random_number_generator(unsigned seed=0): engine_t(seed) { } result_type operator()() { return engine_t::operator()(); } result_type operator()(result_type n) { return randint(result_type(0), result_type(n-1)); } result_type randrange(result_type end=engine_t::max()) { return engine_t::operator()() % end; } template<class T, class T2> T uniform(T start=0.0, T2 end=1.0) { typedef std::tr1::uniform_real<T> dist_t; return std::tr1::variate_generator<engine_t*, dist_t>(this, dist_t(start,(T)end))(); } template<class T, class T2> T normal(T mean=0.0, T2 stddev=1.0) { typedef std::tr1::normal_distribution<T> dist_t; return std::tr1::variate_generator<engine_t*, dist_t>(this, dist_t(mean, (T)stddev))(); } template<class T, class T2> T randint(T start=0, T2 end=std::numeric_limits<T>::max()) { typedef std::tr1::uniform_int<T> dist_t; return std::tr1::variate_generator<engine_t*, dist_t>(this, dist_t(start,end))(); } template<class RandIter> void shuffle(RandIter first, RandIter last) { std::random_shuffle(first, last, *this); } }; #endif typedef random_number_generator<> rng_t; template<typename T> void gen_permutation_pair(size_t size, std::vector<T> &perm, std::vector<T> &inv_perm, int seed=0) { perm.resize(size); for(size_t i = 0; i < size; i++) perm[i] = i; rng_t rng(seed); rng.shuffle(perm.begin(), perm.end()); //std::srand(seed); //std::random_shuffle(perm.begin(), perm.end()); inv_perm.resize(size); for(size_t i = 0; i < size; i++) inv_perm[perm[i]] = i; } //#include "zlib_util.h" #define MALLOC(type, size) (type*)malloc(sizeof(type)*(size)) #define CALLOC(type, size) (type*)calloc((size), sizeof(type)) #define REALLOC(ptr, type, size) (type*)realloc((ptr), sizeof(type)*(size)) typedef unsigned major_t; const major_t ROWMAJOR = 1U; const major_t COLMAJOR = 2U; const major_t default_major = COLMAJOR; // Zip Iterator // Commom usage: 
std::sort(zip_iter(A.begin(),B.begin()), zip_iter(A.end(),B.end())); template<class T1, class T2> struct zip_body; template<class T1, class T2> struct zip_ref; template<class IterT1, class IterT2> struct zip_it; template<class IterT1, class IterT2> zip_it<IterT1, IterT2> zip_iter(IterT1 x, IterT2 y); #define dvec_t dense_vector template<typename val_type> class dvec_t; #define svec_t sparse_vector template<typename val_type> class svec_t; #define sdvec_t sparse_dense_vector template<typename val_type> class sdvec_t; // a dense vector with sparse indices #define gvec_t general_vector template<typename val_type> class gvec_t { public: size_t len; gvec_t(size_t len=0): len(len){} size_t size() const { return len; } virtual bool is_sparse() const {return false;} virtual bool is_dense() const {return false;} svec_t<val_type>& get_sparse() {assert(is_sparse()); return static_cast<svec_t<val_type>&>(*this);} const svec_t<val_type>& get_sparse() const {assert(is_sparse()); return static_cast<const svec_t<val_type>&>(*this);} dvec_t<val_type>& get_dense() {assert(is_dense()); return static_cast<dvec_t<val_type>&>(*this);} const dvec_t<val_type>& get_dense() const {assert(is_dense()); return static_cast<const dvec_t<val_type>&>(*this);} }; #define dmat_t dense_matrix template<typename val_type> class dmat_t; #define smat_t sparse_matrix template<typename val_type> class smat_t; #define eye_t identity_matrix template<typename val_type> class eye_t; #define gmat_t general_matrix template<typename val_type> class gmat_t { public: size_t rows, cols; gmat_t(size_t rows=0, size_t cols=0): rows(rows), cols(cols) { } size_t num_rows() const { return rows; } size_t num_cols() const { return cols; } virtual bool is_sparse() const { return false; } virtual bool is_dense() const { return false; } virtual bool is_identity() const { return false; } bool is_zero() const { return !is_sparse() && !is_dense() && !is_identity(); } smat_t<val_type>& get_sparse() { assert(is_sparse()); return 
static_cast<smat_t<val_type>&>(*this); } const smat_t<val_type>& get_sparse() const { assert(is_sparse()); return static_cast<const smat_t<val_type>&>(*this); } dmat_t<val_type>& get_dense() { assert(is_dense()); return static_cast<dmat_t<val_type>&>(*this); } const dmat_t<val_type>& get_dense() const { assert(is_dense()); return static_cast<const dmat_t<val_type>&>(*this); } virtual dvec_t<val_type>& Xv(const dvec_t<val_type>& v, dvec_t<val_type>& Xv, bool addson=0) const { if(addson == 0) memset(Xv.buf, 0, sizeof(val_type) * Xv.len); return Xv; } virtual dvec_t<val_type>& Xv(const svec_t<val_type>& v, dvec_t<val_type>& Xv, bool addson=0) const { if(addson == 0) memset(Xv.buf, 0, sizeof(val_type) * Xv.len); return Xv; } dvec_t<val_type>& Xv(const gvec_t<val_type>& v, dvec_t<val_type>& Xv, bool addson=0) const { if(v.is_sparse()) return this->Xv(v.get_sparse(), Xv, addson); else if(v.is_dense()) return this->Xv(v.get_dense(), Xv, addson); else // Should not be here return Xv; } virtual dvec_t<val_type>& XTu(const dvec_t<val_type>& u, dvec_t<val_type>& XTu, bool addson=0) const { if(addson == 0) memset(XTu.buf, 0, sizeof(val_type) * XTu.len); return XTu; } virtual dvec_t<val_type>& XTu(const svec_t<val_type>& u, dvec_t<val_type>& XTu, bool addson=0) const { if(addson == 0) memset(XTu.buf, 0, sizeof(val_type) * XTu.len); return XTu; } dvec_t<val_type>& XTu(const gvec_t<val_type>& u, dvec_t<val_type>& XTu, bool addson=0) const { if(u.is_sparse()) return this->XTu(u.get_sparse(), XTu, addson); else if(u.is_dense()) return this->XTu(u.get_dense(), XTu, addson); else // Should not be here return XTu; } }; #define coo_t coo_matrix template<typename val_type> class coo_t; template<typename val_type> class entry_t; template<typename val_type> class entry_iterator_t; // iterator base class template<typename val_type> class file_iterator_t; // iterator for files with (i,j,v) tuples template<typename val_type> class svmlight_file_iterator_t; // iterator for svmlight files 
template<typename val_type> class coo_iterator_t; //iterator for three vectors (I, J, V) tuples template<typename val_type> class smat_iterator_t; // iterator for nonzero entries in smat_t template<typename val_type> class smat_subset_iterator_t; // iterator for nonzero entries in a subset template<typename val_type> class dmat_iterator_t; // iterator for nonzero entries in dmat_t /*------------------- Essential Linear Algebra Operations -------------------*/ // H = X*W, (X: m*n, W: n*k, H: m*k) template<typename val_type> dmat_t<val_type>& dmat_x_dmat(const dmat_t<val_type> &X, const dmat_t<val_type> &W, dmat_t<val_type> &H); template<typename val_type> dmat_t<val_type> operator*(const dmat_t<val_type> &X, const dmat_t<val_type> &W); template<typename val_type> dmat_t<val_type>& smat_x_dmat(const smat_t<val_type>& X, const dmat_t<val_type> &W, dmat_t<val_type> &H); template<typename val_type> dmat_t<val_type>& gmat_x_dmat(const gmat_t<val_type>& X, const dmat_t<val_type> &W, dmat_t<val_type> &H); template<typename val_type> dmat_t<val_type> operator*(const smat_t<val_type> &X, const dmat_t<val_type> &W); // H = a*X*W + H0, (X: m*n, W: n*k, H: m*k) template<typename val_type, typename T2> dmat_t<val_type>& dmat_x_dmat(T2 a, const dmat_t<val_type> &X, const dmat_t<val_type> &W, const dmat_t<val_type> &H0, dmat_t<val_type> &H); template<typename val_type, typename T2> dmat_t<val_type>& smat_x_dmat(T2 a, const smat_t<val_type> &X, const dmat_t<val_type> &W, const dmat_t<val_type> &H0, dmat_t<val_type> &H); template<typename val_type, typename T2> dmat_t<val_type>& gmat_x_dmat(T2 a, const gmat_t<val_type> &X, const dmat_t<val_type> &W, const dmat_t<val_type> &H0, dmat_t<val_type> &H); // H = a*X*W + b*H0, (X: m*n, W: n*k, H: m*k) template<typename val_type, typename T2, typename T3> dmat_t<val_type>& dmat_x_dmat(T2 a, const dmat_t<val_type> &X, const dmat_t<val_type> &W, T3 b, const dmat_t<val_type> &H0, dmat_t<val_type> &H); template<typename val_type, typename T2, 
typename T3> dmat_t<val_type>& smat_x_dmat(T2 a, const smat_t<val_type> &X, const dmat_t<val_type> &W, T3 b, const dmat_t<val_type> &H0, dmat_t<val_type> &H); template<typename val_type, typename T2, typename T3> dmat_t<val_type>& gmat_x_dmat(T2 a, const gmat_t<val_type> &X, const dmat_t<val_type> &W, T3 b, const dmat_t<val_type> &H0, dmat_t<val_type> &H); // trace(W'*X*H) template<typename val_type> val_type trace_dmat_T_smat_dmat(const dmat_t<val_type>& W, const smat_t<val_type>& X, const dmat_t<val_type>& H); // trace(W'*diag(D)*H) template<typename val_type> val_type trace_dmat_T_diag_dmat(const dmat_t<val_type>& W, const dvec_t<val_type>& D, const dmat_t<val_type>& H); /*-------------- Essential Linear Algebra Solvers -------------------*/ // Solve AX = B using Cholesky Factorization (A: Positive Definite) template<typename val_type> dmat_t<val_type> ls_solve_chol(const dmat_t<val_type>& A, const dmat_t<val_type>& B, bool A_as_workspace); // Solve Ax = b using Cholesky Factorization (A: Positive Definite) template<typename val_type> dvec_t<val_type> ls_solve_chol(const dmat_t<val_type>& A, const dvec_t<val_type>& b, bool A_as_workspace); // SVD: A = USV' template<typename val_type> void svd(const dmat_t<val_type>& A, dmat_t<val_type>& U, dvec_t<val_type>& S, dmat_t<val_type>& V, bool reduced=true, bool A_as_workspace=false); /*-------------- Vectors & Matrices -------------------*/ // Dense Vector template<typename val_type> class dvec_t : public gvec_t<val_type> { friend class dmat_t<val_type>; private: bool mem_alloc_by_me; void zero_init() { len = 0; buf = NULL; mem_alloc_by_me = false; } public: // size_t len; inherited from gvec_t using gvec_t<val_type>::len; val_type *buf; // Default Constructor dvec_t() { zero_init(); } // Copy Constructor dvec_t(const dvec_t& v) { zero_init(); *this = v; } // Copy Assignment dvec_t& operator=(const dvec_t& other) { if(this == &other) { return *this; } if(other.is_view()) { // view to view copy if(mem_alloc_by_me) 
clear_space(); memcpy(static_cast<void*>(this), &other, sizeof(dvec_t)); } else { // deep to deep copy resize(other.size()); memcpy(buf, other.buf, sizeof(val_type)*len); } return *this; } // View Constructor: allocate space (w/ all 0) if buf == NULL explicit dvec_t(size_t len, val_type *buf=NULL): gvec_t<val_type>(len), mem_alloc_by_me(false), buf(buf) { if(buf == NULL && len != 0) { this->buf = MALLOC(val_type, len); memset(this->buf, 0, sizeof(val_type)*len); mem_alloc_by_me = true; } } // Fill Constructor explicit dvec_t(size_t len, const val_type &x) { zero_init(); resize(len, x); } // Constructor - dense_matrix => dense_vector: // Having the same status (view or deep) as m (the dense matrix). // (expand the matrix using row major) dvec_t(const dmat_t<val_type>& m) { zero_init(); if(m.is_view()) { len = m.rows * m.cols; buf = m.buf; } else { resize(m.rows * m.cols); memcpy(buf, m.buf, sizeof(val_type) * len); } } // Constructor - sparse_vector => dense_vector: // Always deep. dvec_t(const svec_t<val_type>& v) { zero_init(); resize(v.len); memset(buf, 0, sizeof(val_type) * len); for(size_t i = 0; i < v.nnz; i++) buf[v.idx[i]] = v.val[i]; } #if defined(CPP11) // Move Constructor dvec_t(dvec_t&& m) { zero_init(); *this = std::move(m); } // Move Assignment dvec_t& operator=(dvec_t&& other) { if(this == &other) { return *this; } clear_space(); memcpy(static_cast<void*>(this), &other, sizeof(dvec_t)); other.zero_init(); return *this; } #endif ~dvec_t() { clear_space(); } bool is_view() const { return mem_alloc_by_me == false; } bool is_dense() const { return true; } void clear_space() { if(mem_alloc_by_me) { free(buf); } zero_init(); } dvec_t get_view() const { return dvec_t(len, buf); // using view constructor } dvec_t& grow_body() { if(is_view()) { dvec_t tmp_view = *this; // Copy Assignment: View to view this->resize(len); memcpy(buf, tmp_view.buf, sizeof(val_type)*len); } return *this; } // Similar to operator=, but operator= uses view to view, deep to deep. 
// "assign" will directly change the underlying data, no matter view or deep. dvec_t& assign(const dvec_t& other) { assert(len == other.len); return assign((val_type)1.0, other); } // "assign" will directly change the underlying data, no matter view or deep. dvec_t& assign(val_type a, const dvec_t& other) { assert(len == other.len); if(a == val_type(0)) memset(buf, 0, sizeof(val_type)*len); else if(a == val_type(1)) { if(this == &other) return *this; #pragma omp parallel for schedule(static) for(size_t idx = 0; idx < len; idx++) at(idx) = other.at(idx); } else { #pragma omp parallel for schedule(static) for(size_t idx = 0; idx < len; idx++) at(idx) = a*other.at(idx); } return *this; } // resize will always grow body => is_view() becomes false void resize(size_t len_, const val_type &x) { resize(len_); if(x == 0) memset(buf, 0, sizeof(val_type) * len); else { std::fill_n(buf, len_, x); /* for(size_t i = 0; i < len; i++) { buf[i] = x; } */ } } // resize will always grow body => is_view() becomes false // (values in buf are not initialized) void resize(size_t len_) { if(mem_alloc_by_me) buf = REALLOC(buf, val_type, len_); else buf = MALLOC(val_type, len_); mem_alloc_by_me = true; len = len_; } val_type& at(size_t idx) { return buf[idx]; } const val_type& at(size_t idx) const { return buf[idx]; } val_type& operator[](size_t idx) { return buf[idx]; } const val_type& operator[](size_t idx) const { return buf[idx]; } val_type* data() { return buf; } const val_type* data() const { return buf; } val_type& back() { return buf[len - 1]; } const val_type& back() const { return buf[len - 1]; } void print(const char *str="") const { printf("%s dvec_t: len %lu, is_view %d, buf %p\n", str, len, is_view(), buf); for(size_t i = 0; i < len; i ++) printf("%.3f ", buf[i]); puts(""); } }; // Sparse Vector template<typename val_type> class svec_t : public gvec_t<val_type> { friend class smat_t<val_type>; private: bool mem_alloc_by_me; void zero_init() { len = nnz = 0; idx = NULL; val = 
NULL; mem_alloc_by_me = false; } public: // size_t len; inherited from gvec_t using gvec_t<val_type>::len; size_t nnz; unsigned *idx; val_type *val; // Default Constructor svec_t() { zero_init(); } // Copy Constructor svec_t(const svec_t& v) { zero_init(); *this = v; } // Copy Assignment svec_t& operator=(const svec_t& other) { if(this == &other) return *this; if(other.is_view()) { // view to view copy if(mem_alloc_by_me) clear_space(); memcpy(this, &other, sizeof(svec_t)); } else { // deep to deep copy resize(other.len, other.nnz); memcpy(idx, other.idx, sizeof(unsigned) * nnz); memcpy(val, other.val, sizeof(val_type) * nnz); } return *this; } // View Constructor: // If idx != NULL and val != NULL, we create a view copy. (view) // Otherwise, we will allocate nnz space for both idx and val. (deep) explicit svec_t(size_t len, size_t nnz, unsigned *idx, val_type *val) : gvec_t<val_type>(len), mem_alloc_by_me(false), nnz(nnz) { if(nnz == 0){ this->idx = NULL; this->val = NULL; } else { if(idx != NULL && val != NULL) { this->idx = idx; this->val = val; } else { zero_init(); resize(len, nnz); } } } /* (Don't delete yet, so can understand codes not yet adapted elsewhere) // Fill Constructor: // Always deep. // If idx == NULL, we fill this->idx with 0. // If idx != NULL, we still allocate this->idx and copy from idx. explicit svec_t(size_t len, size_t nnz, const unsigned *idx=NULL, const val_type &x=0) { zero_init(); resize(len, nnz, x, idx); } */ // Constructor - sparse_matrix => sparse_vector: // Always deep. (expand using row major) svec_t(const smat_t<val_type>& m) { zero_init(); resize(m.rows * m.cols, m.nnz); for(int i = 0; i < m.rows; i++) { for(int j = m.row_ptr[i]; j < m.row_ptr[i+1]; j++) { idx[j] = m.cols * i + m.col_idx[j]; val[j] = m.val_t[j]; } } } // Constructor - dense_vector => sparse_vector: // Always deep. 
svec_t(const dvec_t<val_type>& v, double threshold=1e-12) { zero_init(); len = v.size(); for(size_t i = 0; i < v.size(); i++) if(fabs((double)v.at(i)) >= threshold) nnz ++; resize(len, nnz); int k = 0; for(size_t i = 0; i < v.size(); i++) if(fabs((double)v.at(i)) >= threshold) { idx[k] = i; val[k] = v.at(i); k++; } } #if defined(CPP11) // Move Constructor svec_t(svec_t&& m) { zero_init(); *this = std::move(m); } // Move Assignment svec_t& operator=(svec_t&& other) { if(this == &other) return *this; clear_space(); memcpy(static_cast<void*>(this), &other, sizeof(svec_t)); other.zero_init(); return *this; } #endif ~svec_t() { clear_space(); } size_t get_nnz() const { return nnz; } bool is_view() const { return mem_alloc_by_me == false; } bool is_sparse() const { return true; } void clear_space() { if(mem_alloc_by_me){ free(idx); free(val); } zero_init(); } svec_t get_view() const { return svec_t(len, nnz, idx, val); // using view constructor } svec_t& grow_body() { if(is_view()) { svec_t tmp_view = *this; // Copy Assignment: View to view this->resize(len, nnz); memcpy(idx, tmp_view.idx, sizeof(unsigned)*nnz); memcpy(val, tmp_view.val, sizeof(val_type)*nnz); } return *this; } // Similar to operator=, but operator= uses view to view, deep to deep. // "assign" will directly change the underlying data, no matter view or deep. // (so we assert that the sparse vector is not a view on sparse matrix) svec_t& assign(const svec_t& other) { assert(len == other.len && nnz == other.nnz); return assign((val_type)1.0, other); } // "assign" will directly change the underlying data, no matter view or deep. 
// (so we assert that the sparse vector is not a view on sparse matrix) svec_t& assign(val_type a, const svec_t& other) { assert(len == other.len && nnz == other.nnz); if(a == val_type(0)) memset(val, 0, sizeof(val_type)*nnz); else if(a == val_type(1) && this == &other) { return *this; } else { #pragma omp parallel for schedule(static) for(int k = 0; k < nnz; k++){ idx[k] = other.idx[k]; val[k] = a*other.val[k]; } } } /* (Don't delete yet, so can understand codes not yet adapted elsewhere) // "resize" will always grow body => is_view() becomes false // (we will copy the whole idx to this->idx) void resize(size_t len_, size_t nnz_, const val_type &x, const unsigned *idx=NULL) { resize(len_, nnz_); if(idx == NULL) memset(this->idx, 0, sizeof(unsigned)*nnz); else memcpy(this->idx, idx, sizeof(unsigned)*nnz); for(size_t k = 0; k < nnz; k++) this->val[k] = x; } */ // "resize" will always grow body => is_view() becomes false // (values in idx, val are not initialized) void resize(size_t len_, size_t nnz_) { if(mem_alloc_by_me){ idx = REALLOC(idx, unsigned, nnz_); val = REALLOC(val, val_type, nnz_); } else{ idx = MALLOC(unsigned, nnz_); val = MALLOC(val_type, nnz_); } mem_alloc_by_me = true; len = len_; nnz = nnz_; } void print(const char *str="") const { printf("%s svec_t: len %lu, nnz %lu, is_view %d\n", str, len, nnz, is_view()); size_t j = 0; for(size_t i = 0; i < len; i++){ if(j < nnz && idx[j] == i){ printf("%.3f ", val[j]); j++; } else printf("0.000 "); } puts(""); } }; // Sparse Dense Vector template<typename val_type> class sdvec_t : public dvec_t<val_type> { friend class smat_t<val_type>; public: using gvec_t<val_type>::len; using dvec_t<val_type>::buf; std::vector<unsigned> nz_idx; std::vector<unsigned char> is_nonzero; size_t nnz; sdvec_t(size_t len=0) : dvec_t<val_type>(len), nz_idx(len), is_nonzero(len), nnz(0){ } size_t get_nnz() const { return nnz; } void resize(size_t len_) { if(len != len_) { dvec_t<val_type>::resize(len_, 0.0); nz_idx.clear(); 
nz_idx.resize(len_); is_nonzero.clear(); is_nonzero.resize(len_); nnz = 0; } } template<typename V> void init_with_svec(const svec_t<V>& svec) { clear(); nnz = svec.nnz; for(size_t t = 0; t < svec.nnz; t++) { size_t idx = svec.idx[t]; V val = svec.val[t]; is_nonzero[idx] = 1; nz_idx[t] = idx; buf[idx] = val; } } template<typename I, typename V> val_type& add_nonzero_at(I idx, V val) { buf[idx] += static_cast<val_type>(val); if(!is_nonzero[idx]) { is_nonzero[idx] = 1; nz_idx[nnz++] = static_cast<unsigned>(idx); } return buf[idx]; } sdvec_t& update_nz_idx() { for(size_t t = 0 ; t < nnz; t++) { if(buf[nz_idx[t]] == static_cast<val_type>(0)) { std::swap(nz_idx[t], nz_idx[nnz - 1]); is_nonzero[nz_idx[t]] = 0; t -= 1; nnz -= 1; } } std::sort(nz_idx.data(), nz_idx.data() + nnz); nnz = std::unique(nz_idx.data(), nz_idx.data() + nnz) - nz_idx.data(); return *this; } void clear() { if(nnz < (len >> 2)) { for(size_t t = 0; t < nnz; t++) { buf[nz_idx[t]] = 0; is_nonzero[nz_idx[t]] = 0; } } else { memset(buf, 0, sizeof(val_type) * len); memset(is_nonzero.data(), 0, sizeof(unsigned char) * len); } nnz = 0; } }; // Dense Matrix template<typename val_type> class dmat_t : public gmat_t<val_type> { friend class dvec_t<val_type>; public: // size_t rows, cols; inherited from gmat_t using gmat_t<val_type>::rows; using gmat_t<val_type>::cols; val_type *buf; static dmat_t rand(rng_t &rng, size_t m, size_t n, double lower=0.0, double upper=1.0, major_t major_type_=default_major) { dmat_t ret(m, n, major_type_); if(lower >= upper) lower = upper; for(size_t idx = 0; idx < m*n; idx++) ret.buf[idx] = (val_type)rng.uniform(lower, upper); return ret; } static dmat_t randn(rng_t &rng, size_t m, size_t n, double mean=0.0, double std=1.0, major_t major_type_=default_major) { dmat_t ret(m, n, major_type_); for(size_t idx = 0; idx < m*n; idx++) ret.buf[idx] = (val_type)rng.normal(mean, std); return ret; } private: bool mem_alloc_by_me; major_t major_type; typedef dvec_t<val_type> vec_t; void 
zero_init() { rows = 0; cols = 0; buf = NULL; major_type = default_major; mem_alloc_by_me = false; } public: // Default Constructor dmat_t() { zero_init(); } // Copy Constructor: // Having the same status (view or deep) as other. // Using the same major_type as other. dmat_t(const dmat_t& other) { zero_init(); *this = other; } // Copy Assignment: // Having the same status (view or deep) as other. // Using the same major_type as other. dmat_t& operator=(const dmat_t& other) { if(this == &other) return *this; if(other.is_view()) { // for view if(mem_alloc_by_me) clear_space(); rows = other.rows; cols = other.cols; buf = other.buf; major_type = other.major_type; mem_alloc_by_me = false; } else { // deep copy if(is_view() || rows!=other.rows || cols!=other.cols || major_type!=other.major_type) { major_type = other.major_type; resize(other.rows, other.cols); } memcpy(buf, other.buf, sizeof(val_type)*rows*cols); } return *this; } // View Constructor: // If buf != NULL, it creates a view on buf. // If buf == NULL, it creates a deep matrix w/ all 0. explicit dmat_t(size_t rows_, size_t cols_, major_t major_type_=default_major, val_type *buf=NULL): gmat_t<val_type>(rows_,cols_), buf(buf), mem_alloc_by_me(false), major_type(major_type_) { if(buf == NULL && rows * cols != 0){ this->buf = MALLOC(val_type, rows * cols); memset(this->buf, 0, sizeof(val_type) * rows * cols); mem_alloc_by_me = true; } } // Fill Constructor: fill in dense_vector based on the major_type. // Always Deep. explicit dmat_t(size_t nr_copy, const dvec_t<val_type>& v, major_t major_type_=default_major) { zero_init(); major_type = major_type_; resize(nr_copy, v); } // Constructor: dense_vector => dense_matrix: // Having the same status (view or deep) as v (the dense vector). dmat_t(const dvec_t<val_type>& v, major_t major_type_=default_major) { zero_init(); major_type = major_type_; if(!v.is_view()) resize(1, v); else { rows = is_rowmajor()? 1: v.size(); cols = is_colmajor()? 
1: v.size(); buf = v.buf; } } // Constructor: sparse_matrix => dense_matrix: // Always deep. template<typename T> dmat_t(const smat_t<T>& sm, major_t major_type_=default_major) { zero_init(); major_type = major_type_; resize(sm.rows, sm.cols); memset(buf, 0, sizeof(val_type)*rows*cols); for(size_t i = 0; i < sm.rows; i++) for(size_t idx = sm.row_ptr[i]; idx != sm.row_ptr[i+1]; idx++) at(i, sm.col_idx[idx]) = sm.val_t[idx]; } // Constructor: identity_matrix => dense_matrix: // Always deep. template<typename T> dmat_t(const eye_t<T>& eye, major_t major_type_=default_major) { zero_init(); major_type = major_type_; resize(eye.rows, eye.cols); memset(buf, 0, sizeof(val_type)*rows*cols); for(size_t i = 0; i < rows; i++) at(i,i) = 1; } #if defined(CPP11) // Move Constructor dmat_t(dmat_t&& m){ zero_init(); *this = std::move(m); } // Move Assignment dmat_t& operator=(dmat_t&& other) { if(this == &other) return *this; clear_space(); rows = other.rows; cols = other.cols; buf = other.buf; mem_alloc_by_me = other.mem_alloc_by_me; major_type = other.major_type; other.zero_init(); return *this; } #endif ~dmat_t() { clear_space(); } bool is_view() const { return mem_alloc_by_me==false; } bool is_dense() const { return true; } bool is_rowmajor() const { return major_type==ROWMAJOR; } bool is_colmajor() const { return major_type==COLMAJOR; } major_t get_major() const { return major_type; } void clear_space() { if(mem_alloc_by_me) { free(buf); } zero_init(); } // The view of the current dense matrix is returned. // (Using View Constructor) dmat_t get_view() const { return dmat_t(rows,cols,major_type,buf); } /* (Not yet deleted, to understand the behavior for unsync code elsewhere) // For ROWMAJOR, the view of a single row is returned. // For COLMAJOR, the view of a single column is returned. 
dvec_t<val_type> get_single_view(const size_t &idx) const { if(is_rowmajor()) return dvec_t<val_type>(cols, &buf[idx * cols]); else return dvec_t<val_type>(rows, &buf[idx * rows]); } */ // Return a view on the idx-th row of the dense matrix. // (Can only called when the matrix is ROWMAJOR) dvec_t<val_type> get_row(const size_t &idx) const { assert(is_rowmajor()); if(is_rowmajor()) return dvec_t<val_type>(cols, &buf[idx * cols]); else return dvec_t<val_type>(); } // Return a view on the idx-th col of the dense matrix. // (Can only called when the matrix is COLMAJOR) dvec_t<val_type> get_col(const size_t &idx) const { assert(is_colmajor()); if(is_colmajor()) return dvec_t<val_type>(rows, &buf[idx * rows]); else return dvec_t<val_type>(); } // For grow_body(): // Deep, View => Deep. // (this is the sole purpose of this function) dmat_t& grow_body() { if(is_view()) { dmat_t tmp_view = *this; this->resize(rows,cols); memcpy(buf, tmp_view.buf, sizeof(val_type) * rows * cols); } return *this; } // For transpose(): // It will return a view of the transpose of *this. // (the major for ret will be the opposite of *this) dmat_t transpose() const { dmat_t ret = get_view(); ret.to_transpose(); return ret; } // ==================================================== // ================ In-place functions ================ // ==================================================== // For assign(): // Deep => Deep. // View => View. // Note: It differents from copy assignment! // After copy assignment, *this have the same status(View or Deep) as other. // But assign() directly overwrites the values in buf. // (it can modify the values it is viewing) dmat_t& assign(const dmat_t& other) { return assign((val_type)1.0, other); } // Similar to the above assign(), but now *this = a * other. 
template<typename T> dmat_t& assign(T a, const dmat_t& other) { if(a == T(0)) memset(buf, 0, sizeof(val_type)*rows*cols); else if(a == T(1)) { if(this == &other) return *this; if(is_rowmajor()) { #pragma omp parallel for schedule(static) for(size_t r = 0; r < rows; r++) for(size_t c = 0; c < cols; c++) at(r,c) = other.at(r,c); } else { #pragma omp parallel for schedule(static) for(size_t c = 0; c < cols; c++) for(size_t r = 0; r < rows; r++) at(r,c) = other.at(r,c); } } else { if(is_rowmajor()) { #pragma omp parallel for schedule(static) for(size_t r = 0; r < rows; r++) for(size_t c = 0; c < cols; c++) at(r,c) = a * other.at(r,c); } else { #pragma omp parallel for schedule(static) for(size_t c = 0; c < cols; c++) for(size_t r = 0; r < rows; r++) at(r,c) = a * other.at(r,c); } } return *this; } // After to_transpose(): // Deep => Deep. // View => View. // major_type will change. dmat_t& to_transpose() { std::swap(rows,cols); major_type = is_rowmajor()? COLMAJOR: ROWMAJOR; return *this; } // After inv_major(): // View, Deep => Deep. dmat_t& inv_major() { if(rows == cols && !is_view()) { // inplace for deep square matrix for(size_t r = 0; r < rows; r++) for(size_t c = 0; c < r; c++) std::swap(at(r,c),at(c,r)); major_type = is_rowmajor()? COLMAJOR: ROWMAJOR; } else { dmat_t tmp(*this); major_type = is_rowmajor()? COLMAJOR: ROWMAJOR; resize(rows,cols); for(size_t r = 0; r < rows; r++) for(size_t c = 0; c < cols; c++) at(r,c) = tmp.at(r,c); } return *this; } // After to_rowmajor(): // Deep => Deep. // View => View (if originally rowmajor), Deep (if originally colmajor). dmat_t& to_rowmajor() { if(is_colmajor()) inv_major(); return *this; } // After to_colmajor(): // Deep => Deep. // View => View (if originally colmajor), Deep (if originally rowmajor). dmat_t& to_colmajor() { if(is_rowmajor()) inv_major(); return *this; } // After apply_permutation(): // Deep => Deep. // View => View. // apply_permutation() directly overwrites the values in buf. 
// (thus it can modify the values dmat is viewing) dmat_t& apply_permutation(const std::vector<unsigned> &row_perm, const std::vector<unsigned> &col_perm) { return apply_permutation(row_perm.size()==rows? &row_perm[0]: NULL, col_perm.size()==cols? &col_perm[0] : NULL); } dmat_t& apply_permutation(const unsigned *row_perm=NULL, const unsigned *col_perm=NULL) { dmat_t tmp(*this); tmp.grow_body(); for(size_t r = 0; r < rows; r++) for(size_t c = 0; c < cols; c++) at(r,c) = tmp.at(row_perm? row_perm[r]: r, col_perm? col_perm[c]: c); return *this; } template<typename V1, typename V2> dmat_t& apply_scale(const V1 *row_scale, const V2 *col_scale) { if(row_scale != NULL && col_scale != NULL) { for(size_t r = 0; r < rows; r++) { for(size_t c = 0; c < cols; c++) { at(r, c) *= row_scale[r] * col_scale[c]; } } } else if(row_scale != NULL && col_scale == NULL) { for(size_t r = 0; r < rows; r++) { for(size_t c = 0; c < cols; c++) { at(r, c) *= row_scale[r]; } } } else if(row_scale == NULL && col_scale != NULL) { for(size_t r = 0; r < rows; r++) { for(size_t c = 0; c < cols; c++) { at(r, c) *= col_scale[c]; } } } return *this; } template<typename V> dmat_t& apply_scale(const dense_vector<V>& row_scale, const dense_vector<V>& col_scale) { return apply_scale(row_scale.data(), col_scale.data()); } template<typename V> dmat_t& apply_row_scale(const dense_vector<V>& row_scale) { return apply_scale<V, V>(row_scale.data(), NULL); } template<typename V> dmat_t& apply_col_scale(const dense_vector<V>& col_scale) { return apply_scale<V, V>(NULL, col_scale.data()); } // After resize(): // View, Deep => Deep. 
// Resize to (nr_copy x v.size()) for row-major storage — or
// (v.size() x nr_copy) for col-major — and fill every row (resp. column)
// with a copy of v, so each major-order slice equals v.
void resize(size_t nr_copy, const vec_t &v) {
    if(is_rowmajor()) {
        size_t rows_ = nr_copy, cols_ = v.size();
        resize(rows_, cols_);
        size_t unit = sizeof(val_type)*v.size();
        for(size_t r = 0; r < rows; r++)
            memcpy(buf + r * cols, v.data(), unit);
    } else {
        size_t rows_ = v.size(), cols_ = nr_copy;
        resize(rows_, cols_);
        size_t unit = sizeof(val_type)*v.size();
        for(size_t c = 0; c < cols; c++)
            memcpy(buf + c * rows, v.data(), unit);
    }
}
// After resize():
//   View, Deep => Deep.
// Reallocates only when the element count actually changes; surviving
// contents are unspecified (this is a size change, not a reshape).
dmat_t& resize(size_t rows_, size_t cols_) {
    if(mem_alloc_by_me) {
        if(rows_ == rows && cols_ == cols) return *this;
        if(rows_*cols_ != rows*cols) buf = REALLOC(buf, val_type, rows_*cols_);
    } else {
        // Was a view: allocate our own buffer and take ownership.
        // NOTE(review): the viewed data is NOT copied into the new buffer.
        buf = MALLOC(val_type, rows_*cols_);
    }
    mem_alloc_by_me = true;
    rows = rows_; cols = cols_;
    return *this;
}
// After lazy_resize():
//   Deep => Deep.
//   View => (If possible) ? View : Deep.
// major_type_ == 0 is a sentinel meaning "keep the current major order".
dmat_t& lazy_resize(size_t rows_, size_t cols_, major_t major_type_=0) {
    if(is_view() && rows_*cols_==rows*cols && (major_type_ == 0 || major_type==major_type_))
        reshape(rows_,cols_); // same element count and layout: stay a view
    else {
        if(major_type_ != 0) major_type = major_type_;
        resize(rows_, cols_);
    }
    return *this;
}
// After reshape:
//   Deep => Deep.
//   View => View.
dmat_t& reshape(size_t rows_, size_t cols_) { assert(rows_*cols_ == rows*cols); if(rows_ != rows || cols != cols) { rows = rows_; cols = cols_; } return *this; } // ==================================================== // ============ Dmat-Vector Multiplication ============ // ==================================================== dvec_t<val_type>& Xv(const dvec_t<val_type>& v, dvec_t<val_type>& Xv, bool addson=0) const { assert(v.size() == this->cols); if(Xv.size() != this->rows) Xv.resize(this->rows, 0.0); for(size_t i = 0; i < rows; i++) { if(addson == 0) Xv[i] = 0; for(size_t j = 0; j < cols; j++) Xv[i] += at(i, j) * v[j]; } return Xv; } dvec_t<val_type>& Xv(const svec_t<val_type>& v, dvec_t<val_type>& Xv, bool addson=0) const { assert(v.size() == this->cols); if(Xv.size() != this->rows) Xv.resize(this->rows, 0.0); for(size_t i = 0; i < rows; i++) { if(addson == 0) Xv[i] = 0; for(size_t p = 0; p < v.get_nnz(); p++) Xv[i] += at(i, v.idx[p]) * v.val[p]; } return Xv; } dvec_t<val_type>& XTu(const dvec_t<val_type>& u, dvec_t<val_type>& XTu, bool addson=0) const { assert(u.size() == this->rows); if(XTu.size() != this->cols) XTu.resize(this->rows, 0.0); for(size_t i = 0; i < cols; i++) { if(addson == 0) XTu[i] = 0; for(size_t j = 0; j < rows; j++) XTu[i] += at(j, i) * u[j]; } return XTu; } dvec_t<val_type>& XTu(const svec_t<val_type>& u, dvec_t<val_type>& XTu, bool addson=0) const { assert(u.size() == this->rows); if(XTu.size() != this->cols) XTu.resize(this->rows, 0.0); for(size_t i = 0; i < cols; i++) { if(addson == 0) XTu[i] = 0; for(size_t p = 0; p < u.get_nnz(); p++) XTu[i] += at(u.idx[p], i) * u.val[p]; } return XTu; } // ==================================================== // ==================== IO Methods ==================== // ==================================================== void load_from_binary(const char *filename, major_t major_type_=default_major) { FILE *fp = fopen(filename, "rb"); if(fp == NULL) { fprintf(stderr, "Error: can't read the file 
(%s)!!\n", filename); return; } load_from_binary(fp, major_type_, filename); fclose(fp); } void load_from_binary(FILE *fp, major_t major_type_=default_major, const char *filename=NULL) { clear_space(); zero_init(); size_t rows_, cols_; if(fread(&rows_, sizeof(size_t), 1, fp) != 1) fprintf(stderr, "Error: wrong input stream in %s.\n", filename); if(fread(&cols_, sizeof(size_t), 1, fp) != 1) fprintf(stderr, "Error: wrong input stream in %s.\n", filename); std::vector<double> tmp(rows_*cols_); if(fread(&tmp[0], sizeof(double), rows_*cols_, fp) != rows_*cols_) fprintf(stderr, "Error: wrong input stream in %s.\n", filename); dmat_t<double> tmp_view(rows_, cols_, ROWMAJOR, &tmp[0]); major_type = major_type_; resize(rows_, cols_); for(size_t r = 0; r < rows; r++) for(size_t c = 0; c < cols; c++) at(r,c) = tmp_view.at(r,c); } void save_binary_to_file(const char *filename) { FILE *fp = fopen(filename, "wb"); if(fp == NULL) { fprintf(stderr,"Error: can't open file %s\n", filename); exit(1); } save_binary_to_file(fp); fclose(fp); } void save_binary_to_file(FILE *fp) { fwrite(&rows, sizeof(size_t), 1, fp); fwrite(&cols, sizeof(size_t), 1, fp); std::vector<double> tmp(rows*cols); size_t idx = 0; for(size_t r = 0; r < rows; r++) for(size_t c = 0; c < cols; c++) tmp[idx++] = (double)at(r,c); fwrite(&tmp[0], sizeof(double), tmp.size(), fp); } val_type& at(size_t r, size_t c) { return is_rowmajor()? buf[r*cols+c] : buf[c*rows+r]; } const val_type& at(size_t r, size_t c) const { return is_rowmajor()? 
buf[r*cols+c] : buf[c*rows+r]; } val_type* data() { return buf; } const val_type* data() const { return buf; } void print_mat(const char *str="", FILE *fp=stdout) const { fprintf(fp, "===>%s<===\n", str); fprintf(fp, "rows %ld cols %ld mem_alloc_by_me %d row_major %d\nbuf %p\n", rows, cols, mem_alloc_by_me, is_rowmajor(), buf); for(size_t r = 0; r < rows; r++) { for(size_t c = 0; c < cols; c++) fprintf(fp, "%.3f ", at(r,c)); fprintf(fp, "\n"); } } }; // Identity Matrix template<typename val_type> class eye_t : public gmat_t<val_type> { public: // size_t rows, cols; inherited from gmat_t using gmat_t<val_type>::rows; using gmat_t<val_type>::cols; eye_t (size_t rows_ = 0) : gmat_t<val_type>(rows_, rows_){} bool is_identity() const { return true; } dvec_t<val_type>& Xv(const dvec_t<val_type>& v, dvec_t<val_type>& Xv, bool addson=0) const { assert(v.size() == this->cols); if(Xv.size() != this->rows) Xv.resize(this->rows, 0.0); return addson? do_axpy(1, v, Xv): Xv.assign(v); } dvec_t<val_type>& Xv(const svec_t<val_type>& v, dvec_t<val_type>& Xv, bool addson=0) const { assert(v.size() == this->cols); if(Xv.size() != this->rows) Xv.resize(this->rows, 0.0); dvec_t<val_type> dv(v); return addson? do_axpy(1, dv, Xv): Xv.assign(dv); } dvec_t<val_type>& XTu(const dvec_t<val_type>& u, dvec_t<val_type>& XTu, bool addson=0) const { assert(u.size() == this->rows); if(XTu.size() != this->cols) XTu.resize(this->rows, 0.0); return addson? do_axpy(1, u, XTu): XTu.assign(u); } dvec_t<val_type>& XTu(const svec_t<val_type>& u, dvec_t<val_type>& XTu, bool addson=0) const { assert(u.size() == this->rows); if(XTu.size() != this->cols) XTu.resize(this->rows, 0.0); dvec_t<val_type> du(u); return addson? 
do_axpy(1, du, XTu): XTu.assign(du); } }; // Sparse Matrix (format CSC & CSR) template<typename val_type> class smat_t : public gmat_t<val_type> { private: bool mem_alloc_by_me; void zero_init() { mem_alloc_by_me = false; val=val_t=NULL; col_ptr=row_ptr=NULL; row_idx=col_idx=NULL; rows=cols=nnz=max_col_nnz=max_row_nnz=0; } void allocate_space(size_t rows_, size_t cols_, size_t nnz_) { if(mem_alloc_by_me) clear_space(); rows = rows_; cols = cols_; nnz = nnz_; val = MALLOC(val_type, nnz); val_t = MALLOC(val_type, nnz); row_idx = MALLOC(unsigned, nnz); col_idx = MALLOC(unsigned, nnz); row_ptr = MALLOC(size_t, rows+1); col_ptr = MALLOC(size_t, cols+1); memset(row_ptr, 0, sizeof(size_t)*(rows+1)); memset(col_ptr, 0, sizeof(size_t)*(cols+1)); mem_alloc_by_me = true; } void csc_to_csr_old() { memset(row_ptr, 0, sizeof(size_t)*(rows+1)); for(size_t idx = 0; idx < nnz; idx++) row_ptr[row_idx[idx]+1]++; for(size_t r = 1; r <= rows; r++) row_ptr[r] += row_ptr[r-1]; for(size_t c = 0; c < cols; c++) { for(size_t idx = col_ptr[c]; idx != col_ptr[c+1]; idx++) { size_t r = (size_t) row_idx[idx]; col_idx[row_ptr[r]] = c; val_t[row_ptr[r]++] = val[idx]; } } for(size_t r = rows; r > 0; r--) row_ptr[r] = row_ptr[r-1]; row_ptr[0] = 0; } void csc_to_csr() { smat_t tmp = this->transpose(); tmp.csr_to_csc(); } void csr_to_csc() { memset(col_ptr, 0, sizeof(size_t) * (cols + 1)); for(size_t idx = 0; idx < nnz; idx++) { col_ptr[col_idx[idx] + 1]++; } for(size_t c = 1; c <= cols; c++) { col_ptr[c] += col_ptr[c - 1]; } for(size_t r = 0; r < rows; r++) { for(size_t idx = row_ptr[r]; idx != row_ptr[r + 1]; idx++) { size_t c = (size_t) col_idx[idx]; row_idx[col_ptr[c]] = r; val[col_ptr[c]++] = val_t[idx]; } } for(size_t c = cols; c > 0; c--) { col_ptr[c] = col_ptr[c - 1]; } col_ptr[0] = 0; } void update_max_nnz() { max_row_nnz = max_col_nnz = 0; for(size_t c = 0; c < cols; c++) max_col_nnz = std::max(max_col_nnz, nnz_of_col(c)); for(size_t r = 0; r < rows; r++) max_row_nnz = std::max(max_row_nnz, 
nnz_of_row(r)); } // Comparator for sorting rates into row/column comopression storage class SparseLess { public: const unsigned *row_idx; const unsigned *col_idx; SparseLess(const unsigned *row_idx_, const unsigned *col_idx_, bool isCSR=true) { row_idx = (isCSR)? row_idx_: col_idx_; col_idx = (isCSR)? col_idx_: row_idx_; } bool operator()(size_t x, size_t y) const { return (row_idx[x] < row_idx[y]) || ((row_idx[x] == row_idx[y]) && (col_idx[x] < col_idx[y])); } }; class SparseEq { public: const unsigned *row_idx; const unsigned *col_idx; SparseEq(const unsigned *row_idx_, const unsigned *col_idx_) { row_idx = row_idx_; col_idx = col_idx_; } bool operator()(size_t x, size_t y) const { return (row_idx[x] == row_idx[y]) && (col_idx[x] == col_idx[y]); } }; public: // static methods static smat_t rand(rng_t &rng, size_t m, size_t n, double sparsity=0.01, double lower=0.0, double upper=1.0) { if(lower > upper) lower = upper; smat_t ret; size_t nnz_ = (size_t)(m*n*sparsity); ret.allocate_space(m, n, nnz_); for(size_t idx = 0; idx < nnz_; idx++) { ret.val_t[idx] = rng.uniform(lower, upper); ret.col_idx[idx] = rng.randint(0, n-1); ret.row_ptr[rng.randint(1, m)] += 1; } for(size_t i = 1; i <= m; i++) ret.row_ptr[i] += ret.row_ptr[i-1]; ret.csr_to_csc(); ret.update_max_nnz(); return ret; } static smat_t randn(rng_t &rng, size_t m, size_t n, double sparsity=0.01, double mean=0.0, double std=1.0) { smat_t ret; size_t nnz_ = (size_t)(m*n*sparsity); ret.allocate_space(m, n, nnz_); for(size_t idx = 0; idx < nnz_; idx++) { ret.val_t[idx] = (val_type)rng.normal(mean, std); ret.col_idx[idx] = rng.randint(0, n-1); ret.row_ptr[rng.randint(1,m)] += 1; } for(size_t i = 1; i <= m; i++) ret.row_ptr[i] += ret.row_ptr[i-1]; ret.csr_to_csc(); ret.update_max_nnz(); return ret; } // rows, cols are inherited from gmat_t using gmat_t<val_type>::rows; using gmat_t<val_type>::cols; size_t nnz, max_row_nnz, max_col_nnz; val_type *val, *val_t; size_t *col_ptr, *row_ptr; unsigned *row_idx, *col_idx; 
// filetypes for loading smat_t
enum format_t { TXT=0, PETSc=1, SVMLIGHT=2, BINARY=3, COMPRESSION=4 };
// Default Constructor
smat_t() { zero_init(); }
// Copy Constructor
smat_t(const smat_t& m) { zero_init(); *this = m; }
// Copy Assignment
// view => view, deep => deep.
smat_t& operator=(const smat_t& other) {
    if(this == &other) { return *this; }
    if(mem_alloc_by_me) { clear_space(); }
    if(other.is_view()) {
        // for view: bitwise field copy — this object aliases other's arrays
        memcpy(static_cast<void*>(this), &other, sizeof(smat_t));
    } else {
        // deep copy: first become a view of other, then take ownership of
        // private copies of all arrays via grow_body()
        *this = other.get_view();
        grow_body();
    }
    return *this;
}
// View Constructor: wraps caller-owned arrays without copying; the caller
// keeps ownership and must keep them alive for this object's lifetime.
explicit smat_t(size_t rows, size_t cols, size_t nnz, val_type *val, val_type *val_t, size_t *col_ptr, size_t *row_ptr, unsigned *row_idx, unsigned *col_idx) :
    gmat_t<val_type>(rows, cols), nnz(nnz), val(val), val_t(val_t), col_ptr(col_ptr), row_ptr(row_ptr), row_idx(row_idx), col_idx(col_idx) {
    mem_alloc_by_me = false;
    update_max_nnz();
}
// Constructor: dense matrix => sparse matrix
smat_t(const dmat_t<val_type>& m) {
    zero_init();
    dmat_iterator_t<val_type> entry_it(m);
    load_from_iterator(m.rows, m.cols, entry_it.get_nnz(), &entry_it);
}
// Constructor: identity matrix => sparse matrix
smat_t(const eye_t<val_type>& eye) {
    zero_init();
    allocate_space(eye.rows, eye.rows, eye.rows);
    // Build CSR and CSC directly: one entry of value 1 per diagonal position.
    for(size_t i = 0; i < eye.rows; i++) {
        row_ptr[i+1] = i+1;
        col_idx[i] = i;
        val_t[i] = (val_type)1;
    }
    for(size_t j = 0; j < eye.cols; j++) {
        col_ptr[j+1] = j+1;
        row_idx[j] = j;
        val[j] = (val_type)1;
    }
}
smat_t(size_t rows_, size_t cols_, size_t nnz_=0){
    zero_init();
    allocate_space(rows_, cols_, nnz_);
}
#if defined(CPP11)
// Move Constructor
smat_t(smat_t&& m){ zero_init(); *this = std::move(m); }
// Move Assignment
smat_t& operator=(smat_t&& other) {
    if(this == &other) { return *this; }
    clear_space();
    // Steal other's fields bitwise, then reset other to the empty state
    // so its destructor frees nothing.
    memcpy(static_cast<void*>(this), &other, sizeof(smat_t));
    other.zero_init();
    return *this;
}
#endif
// Destructor
~smat_t(){ clear_space(); }
size_t get_nnz() const { return nnz; }
bool is_view() const { return mem_alloc_by_me==false; }
bool is_sparse() const { return true; }
// Free all owned arrays (no-op for a view) and reset to the empty state.
void clear_space() {
    if(mem_alloc_by_me) {
        if(val) { free(val); }
        if(val_t) { free(val_t); }
        if(row_ptr) { free(row_ptr); }
        if(row_idx) { free(row_idx); }
        if(col_ptr) { free(col_ptr); }
        if(col_idx) { free(col_idx); }
    }
    zero_init();
}
// Return a non-owning alias of this matrix (shares all arrays).
smat_t get_view() const {
    if(is_view()) {
        return *this;
    } else {
        smat_t tmp;
        memcpy(static_cast<void*>(&tmp), this, sizeof(smat_t));
        tmp.mem_alloc_by_me = false;
        return tmp;
    }
}
/* (Don't delete yet, so can understand codes not yet adapted elsewhere)
svec_t<val_type> get_single_view(const size_t &idx, const major_t &major=default_major) const {
    if(major == ROWMAJOR)
        return svec_t<val_type>(cols, nnz_of_row(idx), &col_idx[row_ptr[idx]], &val_t[row_ptr[idx]], 0);
    else
        return svec_t<val_type>(rows, nnz_of_col(idx), &row_idx[col_ptr[idx]], &val[col_ptr[idx]], 0);
}
*/
// For get_row and get_col, a sparse vector view is returned.
// Caveat: If you directly modify the returned sparse vector view,
// it will change the sparse matrix's underlying data.
// And because we store both column and row major format,
// the modification on the returned svec_t will only effect one of the format.
// Resulting in an inconsistency within the sparse matrix.
// Summary: Do not directly modify the returned sparse vector view.
// (if the view becomes a deep vector afterwards, then things will be fine.)
svec_t<val_type> get_row(const size_t &idx) const { return svec_t<val_type>(cols, nnz_of_row(idx), &col_idx[row_ptr[idx]], &val_t[row_ptr[idx]]); } svec_t<val_type> get_col(const size_t &idx) const { return svec_t<val_type>(rows, nnz_of_col(idx), &row_idx[col_ptr[idx]], &val[col_ptr[idx]]); } smat_t& grow_body() { if(is_view()) { smat_t tmp = *this; // a copy of the view col_ptr = MALLOC(size_t, cols + 1); memcpy(col_ptr, tmp.col_ptr, sizeof(size_t) * (cols + 1)); row_idx = MALLOC(unsigned, nnz); memcpy(row_idx, tmp.row_idx, sizeof(unsigned) * nnz); val = MALLOC(val_type, nnz); memcpy(val, tmp.val, sizeof(val_type) * nnz); row_ptr = MALLOC(size_t, rows + 1); memcpy(row_ptr, tmp.row_ptr, sizeof(size_t) * (rows + 1)); col_idx = MALLOC(unsigned, nnz); memcpy(col_idx, tmp.col_idx, sizeof(unsigned) * nnz); val_t = MALLOC(val_type, nnz); memcpy(val_t, tmp.val_t, sizeof(val_type) * nnz); mem_alloc_by_me = true; } return *this; } smat_t transpose() const{ smat_t<val_type> mt = get_view().to_transpose(); return mt; } // ==================================================== // ================ In-place functions ================ // ==================================================== smat_t& to_transpose() { std::swap(rows,cols); std::swap(val,val_t); std::swap(row_ptr,col_ptr); std::swap(row_idx,col_idx); std::swap(max_col_nnz, max_row_nnz); return *this; } smat_t& apply_permutation(const std::vector<unsigned> &row_perm, const std::vector<unsigned> &col_perm) { return apply_permutation(row_perm.size()==rows? &row_perm[0]: NULL, col_perm.size()==cols? 
&col_perm[0]: NULL); } smat_t& apply_permutation(const unsigned *row_perm=NULL, const unsigned *col_perm=NULL) { if(row_perm != NULL) { for(size_t idx = 0; idx < nnz; idx++) { row_idx[idx] = row_perm[row_idx[idx]]; } csc_to_csr(); csr_to_csc(); } if(col_perm != NULL) { for(size_t idx = 0; idx < nnz; idx++) { col_idx[idx] = col_perm[col_idx[idx]]; } csr_to_csc(); csc_to_csr(); } return *this; } template<typename V1, typename V2> smat_t& apply_scale(const V1 *row_scale, const V2 *col_scale) { if(row_scale != NULL && col_scale != NULL) { for(size_t r = 0; r < rows; r++) { val_type alpha = row_scale[r]; for(size_t idx = row_ptr[r]; idx != row_ptr[r + 1]; idx++) { val_t[idx] *= alpha * col_scale[col_idx[idx]]; } } for(size_t c = 0; c < cols; c++) { val_type alpha = col_scale[c]; for(size_t idx = col_ptr[c]; idx != col_ptr[c + 1]; idx++) { val[idx] *= alpha * row_scale[row_idx[idx]]; } } } else if(row_scale != NULL && col_scale == NULL) { for(size_t r = 0; r < rows; r++) { if(nnz_of_row(r)) { for(size_t idx = row_ptr[r]; idx < row_ptr[r + 1]; idx++) { val_t[idx] *= row_scale[r]; } } } for(size_t idx = 0; idx < nnz; idx++) { val[idx] *= row_scale[row_idx[idx]]; } } else if(row_scale == NULL && col_scale != NULL) { for(size_t c = 0; c < cols; c++) { if(nnz_of_col(c)) { for(size_t idx = col_ptr[c]; idx < col_ptr[c + 1]; idx++) { val[idx] *= col_scale[c]; } } } for(size_t idx = 0; idx < nnz; idx++) { val_t[idx] *= col_scale[col_idx[idx]]; } } return *this; } template<typename V1, typename V2> smat_t& apply_scale(const dvec_t<V1> &row_scale, const dvec_t<V2> &col_scale) { return apply_scale(row_scale.data(), col_scale.data()); } template<typename V> smat_t& apply_row_scale(const dvec_t<V> &row_scale) { return apply_scale<V, V>(row_scale.data(), NULL); } template<typename V> smat_t& apply_col_scale(const dvec_t<V> &col_scale) { return apply_scale<V, V>(NULL, col_scale.data()); } smat_t row_subset(const std::vector<unsigned> &subset) const { return row_subset(&subset[0], 
(int)subset.size()); } smat_t row_subset(const unsigned *subset, int subset_size) const { smat_subset_iterator_t<val_type> it(*this, subset, subset_size, ROWMAJOR); smat_t<val_type> sub_smat; sub_smat.load_from_iterator(subset_size, cols, it.get_nnz(), &it); return sub_smat; } smat_t col_subset(const std::vector<unsigned> &subset) const { return col_subset(&subset[0], (int)subset.size()); } smat_t col_subset(const unsigned *subset, int subset_size) const { smat_subset_iterator_t<val_type> it(*this, subset, subset_size, COLMAJOR); smat_t<val_type> sub_smat; sub_smat.load_from_iterator(rows, subset_size, it.get_nnz(), &it); return sub_smat; } size_t nnz_of_row(unsigned i) const { return (row_ptr[i+1] - row_ptr[i]); } size_t nnz_of_col(unsigned i) const { return (col_ptr[i+1] - col_ptr[i]); } // ==================================================== // ============ Smat-Vector Multiplication ============ // ==================================================== val_type* Xv(const val_type* v, val_type* Xv, bool addson=0) const { for(size_t i = 0; i < rows; i++) { if(addson == 0) Xv[i] = 0; for(size_t idx = row_ptr[i]; idx < row_ptr[i+1]; idx++) Xv[i] += val_t[idx] * v[col_idx[idx]]; } return Xv; } dvec_t<val_type>& Xv(const dvec_t<val_type>& v, dvec_t<val_type>& Xv, bool addson=0) const { assert(v.size() == this->cols); if(Xv.size() != this->rows) Xv.resize(this->rows, 0.0); this->Xv(v.data(), Xv.data(), addson); return Xv; } dvec_t<val_type>& Xv(const svec_t<val_type>& v, dvec_t<val_type>& Xv, bool addson=0) const { assert(v.size() == this->cols); if(Xv.size() != this->rows) Xv.resize(this->rows, 0.0); if(addson == 0) { for(size_t i = 0; i < Xv.size(); i++) { Xv[i] = 0; } } for(size_t k = 0; k < v.nnz; k++) { size_t col_idx = static_cast<size_t>(v.idx[k]); const val_type& alpha = v.val[k]; do_axpy(alpha, get_col(col_idx), Xv); } /* slower implementatoin dvec_t<val_type> dv(v); this->Xv(dv.data(), Xv.data(), addson); */ return Xv; } val_type* XTu(const val_type* u, 
val_type* XTu, bool addson=0) const { for(size_t i = 0; i < cols; i++) { if(addson == 0) XTu[i] = 0; for(size_t idx = col_ptr[i]; idx < col_ptr[i+1]; idx++) XTu[i] += val[idx] * u[row_idx[idx]]; } return XTu; } dvec_t<val_type>& XTu(const dvec_t<val_type>& u, dvec_t<val_type>& XTu, bool addson=0) const { assert(u.size() == this->rows); if(XTu.size() != this->cols) XTu.resize(this->rows, 0.0); this->XTu(u.data(), XTu.data(), addson); return XTu; } dvec_t<val_type>& XTu(const svec_t<val_type>& u, dvec_t<val_type>& XTu, bool addson=0) const { assert(u.size() == this->rows); if(XTu.size() != this->cols) XTu.resize(this->rows, 0.0); if(addson == 0) { for(size_t i = 0; i < XTu.size(); i++) { XTu[i] = 0; } } for(size_t k = 0; k < u.nnz; k++) { size_t row_idx = static_cast<size_t>(u.idx[k]); const val_type& alpha = u.val[k]; do_axpy(alpha, get_row(row_idx), XTu); } /* slower implementatoin dvec_t<val_type> du(u); this->XTu(du.data(), XTu.data(), addson); */ return XTu; } // ==================================================== // ==================== IO Methods ==================== // ==================================================== // The entry_iterator can be in arbitrary order (sort+unique is applied). 
void load_from_iterator(size_t _rows, size_t _cols, size_t _nnz, entry_iterator_t<val_type>* entry_it) { clear_space(); // clear any pre-allocated space in case of memory leak rows =_rows, cols=_cols, nnz=_nnz; allocate_space(rows,cols,nnz); // a trick to utilize the space that have been allocated std::vector<size_t> perm(nnz); unsigned *tmp_row_idx = col_idx; unsigned *tmp_col_idx = row_idx; val_type *tmp_val = val; for(size_t idx = 0; idx < nnz; idx++){ entry_t<val_type> rate = entry_it->next(); tmp_row_idx[idx] = rate.i; tmp_col_idx[idx] = rate.j; tmp_val[idx] = rate.v; perm[idx] = idx; } // TODO can change to O(n) method // sort entries into row-majored ordering std::sort(perm.begin(), perm.end(), SparseLess(tmp_row_idx, tmp_col_idx)); // add up the values in the same position (i, j) size_t cur_nnz = 0; for(size_t idx = 0; idx < nnz; idx++) { if(cur_nnz > 0 && tmp_row_idx[perm[idx]] == tmp_row_idx[perm[cur_nnz-1]] && tmp_col_idx[perm[idx]] == tmp_col_idx[perm[cur_nnz-1]]) tmp_val[perm[cur_nnz-1]] += tmp_val[perm[idx]]; else { tmp_row_idx[perm[cur_nnz]] = tmp_row_idx[perm[idx]]; tmp_col_idx[perm[cur_nnz]] = tmp_col_idx[perm[idx]]; tmp_val[perm[cur_nnz]] = tmp_val[perm[idx]]; cur_nnz ++; } } nnz = cur_nnz; for(size_t idx = 0; idx < nnz; idx++){ row_ptr[tmp_row_idx[perm[idx]] + 1] ++; col_ptr[tmp_col_idx[perm[idx]] + 1] ++; } // Generate CSR format for(size_t idx = 0; idx < nnz; idx++) { val_t[idx] = tmp_val[perm[idx]]; col_idx[idx] = tmp_col_idx[perm[idx]]; } // Calculate nnz for each row and col max_row_nnz = max_col_nnz = 0; for(size_t r = 1; r <= rows; r++) { max_row_nnz = std::max(max_row_nnz, row_ptr[r]); row_ptr[r] += row_ptr[r-1]; } for(size_t c = 1; c <= cols; c++) { max_col_nnz = std::max(max_col_nnz, col_ptr[c]); col_ptr[c] += col_ptr[c-1]; } // Transpose CSR into CSC matrix for(size_t r = 0; r < rows; r++){ for(size_t idx = row_ptr[r]; idx < row_ptr[r+1]; idx++){ size_t c = (size_t) col_idx[idx]; row_idx[col_ptr[c]] = r; val[col_ptr[c]++] = val_t[idx]; 
} } for(size_t c = cols; c > 0; c--) col_ptr[c] = col_ptr[c-1]; col_ptr[0] = 0; } void load(size_t _rows, size_t _cols, size_t _nnz, const char *filename, format_t fmt) { if(fmt == smat_t<val_type>::TXT) { file_iterator_t<val_type> entry_it(_nnz, filename); load_from_iterator(_rows, _cols, _nnz, &entry_it); } else if(fmt == smat_t<val_type>::PETSc) { load_from_PETSc(filename); } else if(fmt == smat_t<val_type>::SVMLIGHT) { load_from_svmlight(filename); } else { fprintf(stderr, "Error: filetype %d not supported\n", fmt); return; } } void load_from_svmlight(const char *filename, size_t nr_skips=1, bool zero_based=false, double append_bias=-1.0) { svmlight_file_iterator_t<val_type> entry_it(filename, nr_skips, zero_based, append_bias); load_from_iterator(entry_it.get_rows(), entry_it.get_cols(), entry_it.get_nnz(), &entry_it); } void load_from_PETSc(const char *filename) { FILE *fp = fopen(filename, "rb"); if(fp == NULL) { fprintf(stderr, "Error: can't read the file (%s)!!\n", filename); return; } load_from_PETSc(fp, filename); fclose(fp); } void load_from_PETSc(FILE *fp, const char *filename=NULL) { clear_space(); // clear any pre-allocated space in case of memory leak const int UNSIGNED_FILE = 1211216, LONG_FILE = 1015; int32_t int_buf[3]; size_t headersize = 0; headersize += sizeof(int)*fread(int_buf, sizeof(int), 3, fp); int filetype = int_buf[0]; rows = (size_t) int_buf[1]; cols = (size_t) int_buf[2]; if(filetype == UNSIGNED_FILE) { headersize += sizeof(int)*fread(int_buf, sizeof(int32_t), 1, fp); nnz = (size_t) int_buf[0]; } else if (filetype == LONG_FILE){ headersize += sizeof(size_t)*fread(&nnz, sizeof(int64_t), 1, fp); } else { fprintf(stderr, "Error: wrong PETSc format in %s.\n", filename); } allocate_space(rows,cols,nnz); // load CSR from the binary PETSc format { // read row_ptr std::vector<int32_t> nnz_row(rows); headersize += sizeof(int32_t)*fread(&nnz_row[0], sizeof(int32_t), rows, fp); row_ptr[0] = 0; for(size_t r = 1; r <= rows; r++) row_ptr[r] = 
row_ptr[r-1] + nnz_row[r-1]; // read col_idx headersize += sizeof(int)*fread(&col_idx[0], sizeof(unsigned), nnz, fp); // read val_t const size_t chunksize = 1024; double buf[chunksize]; size_t idx = 0; while(idx + chunksize < nnz) { headersize += sizeof(double)*fread(&buf[0], sizeof(double), chunksize, fp); for(size_t i = 0; i < chunksize; i++) val_t[idx+i] = (val_type) buf[i]; idx += chunksize; } size_t remaining = nnz - idx; headersize += sizeof(double)*fread(&buf[0], sizeof(double), remaining, fp); for(size_t i = 0; i < remaining; i++) val_t[idx+i] = (val_type) buf[i]; } csr_to_csc(); update_max_nnz(); } void save_PETSc_to_file(const char *filename) const { FILE *fp = fopen(filename, "wb"); if(fp == NULL) { fprintf(stderr,"Error: can't open file %s\n", filename); exit(1); } save_PETSc_to_file(fp); } void save_PETSc_to_file(FILE *fp) const { const int UNSIGNED_FILE = 1211216, LONG_FILE = 1015; int32_t int_buf[3] = {(int32_t)LONG_FILE, (int32_t)rows, (int32_t)cols}; std::vector<int32_t> nnz_row(rows); for(size_t r = 0; r < rows; r++) nnz_row[r] = (int)nnz_of_row(r); fwrite(&int_buf[0], sizeof(int32_t), 3, fp); fwrite(&nnz, sizeof(size_t), 1, fp); fwrite(&nnz_row[0], sizeof(int32_t), rows, fp); fwrite(&col_idx[0], sizeof(unsigned), nnz, fp); // the following part == fwrite(val_t, sizeof(double), nnz, fp); const size_t chunksize = 1024; double buf[chunksize]; size_t idx = 0; while(idx + chunksize < nnz) { for(size_t i = 0; i < chunksize; i++) buf[i] = (double) val_t[idx+i]; fwrite(&buf[0], sizeof(double), chunksize, fp); idx += chunksize; } size_t remaining = nnz - idx; for(size_t i = 0; i < remaining; i++) buf[i] = (double) val_t[idx+i]; fwrite(&buf[0], sizeof(double), remaining, fp); } val_type get_global_mean() const { val_type sum=0; for(size_t idx = 0; idx < nnz; idx++) sum += val[idx]; return sum / (val_type)nnz; } void remove_bias(val_type bias=0) { if(bias) { for(size_t idx = 0; idx < nnz; idx++) { val[idx] -= bias; val_t[idx] -= bias; } } } void 
print_mat(const char *str="", FILE *fp=stdout) const { fprintf(fp, "===>%s<===\n", str); fprintf(fp, "rows %lu, cols %lu, nnz %lu\n", rows, cols, nnz); fprintf(fp, "col_ptr, row_idx, val = %p, %p, %p\n", col_ptr, row_idx, val); fprintf(fp, "row_ptr, col_idx, val_t = %p, %p, %p\n", row_ptr, col_idx, val_t); fprintf(fp, "mem_alloc_by_me = %d\n", mem_alloc_by_me); fprintf(fp, "Matrix:\n"); for(size_t i = 0; i < rows; i++) { size_t it = row_ptr[i]; for(size_t j = 0; j < cols; j++) { if(it < row_ptr[i+1] && col_idx[it] == j) { fprintf(fp, "%.3f ", val_t[it]); it ++; } else fprintf(fp, "0.000 "); } fprintf(fp, "\n"); } fprintf(fp, "Matrix^T:\n"); for(size_t i = 0; i < cols; i++) { size_t it = col_ptr[i]; for(size_t j = 0; j < rows; j++) { if(it < col_ptr[i+1] && row_idx[it] == j) { fprintf(fp, "%.3f ", val[it]); it ++; } else fprintf(fp, "0.000 "); } fprintf(fp, "\n"); } } // =========================================== // ========= Friend Functions/Classes ======== // =========================================== template<typename VX, typename VY, typename VZ> friend smat_t<VZ>& smat_x_smat(const smat_t<VX> &X, const smat_t<VY> &Y, smat_t<VZ> &Z, int threads); template<typename VX, typename VY, typename VZ> friend smat_t<VZ>& smat_x_smat_single_thread(const smat_t<VX> &X, const smat_t<VY> &Y, smat_t<VZ> &Z); }; #ifdef __cplusplus extern "C" { #endif // rows, cols, nnz, &row_ptr, &col_ptr, &val_ptr typedef void(*py_coo_allocator_t)(uint64_t, uint64_t, uint64_t, void*, void*, void*); #ifdef __cplusplus } // extern #endif template<typename val_type> struct coo_t { size_t rows; size_t cols; std::vector<size_t> row_idx; std::vector<size_t> col_idx; std::vector<val_type> val; coo_t(size_t rows=0, size_t cols=0): rows(rows), cols(cols) {} size_t nnz() const { return val.size(); } void reshape(size_t rows_, size_t cols_) { rows = rows_; cols = cols_; clear(); } void clear() { row_idx.clear(); col_idx.clear(); val.clear(); } void reserve(size_t capacity) { 
row_idx.reserve(capacity); col_idx.reserve(capacity); val.reserve(capacity); }
		// O(1) content exchange with another COO matrix.
		void swap(coo_t& other) {
			std::swap(rows, other.rows);
			std::swap(cols, other.cols);
			row_idx.swap(other.row_idx);
			col_idx.swap(other.col_idx);
			val.swap(other.val);
		}
		// Append all entries of `other` to this matrix.
		// NOTE(review): rows/cols are NOT updated here — caller is responsible
		// for keeping the dimensions consistent; confirm intended.
		void extends(coo_t& other) {
			std::copy(other.row_idx.begin(), other.row_idx.end(), std::back_inserter(row_idx));
			std::copy(other.col_idx.begin(), other.col_idx.end(), std::back_inserter(col_idx));
			std::copy(other.val.begin(), other.val.end(), std::back_inserter(val));
		}
		// Append a single entry (i, j, x); values with |x| < threshold are dropped.
		template<typename I, typename V>
		void push_back(I i, I j, V x, double threshold=0) {
			if(fabs(x) >= threshold) {
				row_idx.push_back(i);
				col_idx.push_back(j);
				val.push_back(x);
			}
		}
		// Convert this COO matrix into the compressed sparse matrix X.
		void create_smat(smat_t<val_type>& X) {
			coo_iterator_t<val_type> it(nnz(), row_idx.data(), col_idx.data(), val.data());
			X.load_from_iterator(rows, cols, nnz(), &it);
		}
		// Export entries into buffers obtained from a Python-side allocator callback.
		void create_pycoo(const py_coo_allocator_t& alloc) const {
			uint64_t* row_ptr=NULL;
			uint64_t* col_ptr=NULL;
			val_type* val_ptr=NULL;
			alloc(rows, cols, nnz(), &row_ptr, &col_ptr, &val_ptr);
			for(size_t i = 0; i < nnz(); i++) {
				row_ptr[i] = row_idx[i];
				col_ptr[i] = col_idx[i];
				val_ptr[i] = val[i];
			}
		}
};

/*-------------- Iterators -------------------*/

// A single matrix entry: (row i, column j, value v) with an optional weight.
template<typename val_type>
class entry_t{
	public:
		unsigned i, j;
		val_type v, weight;
		entry_t(int _i=0, int _j=0, val_type _v=0, val_type _w=1.0): i(_i), j(_j), v(_v), weight(_w){}
};

// Abstract single-pass iterator over matrix entries.
// NOTE(review): this base is used polymorphically (e.g. via entry_iterator_t*
// in load_from_iterator) but has no virtual destructor — confirm derived
// iterators are never deleted through a base pointer.
template<typename val_type>
class entry_iterator_t {
	public:
		// Number of elements left to iterate
		size_t nnz;
		// When no next entry, return entry_t(0, 0, 0, -1);
		virtual entry_t<val_type> next() = 0;
		size_t get_nnz() const { return nnz; }
};

#define MAXLINE 10240
// Iterator for files with (i,j,v) tuples
template<typename val_type>
class file_iterator_t: public entry_iterator_t<val_type> {
	public:
		using entry_iterator_t<val_type>::nnz;
		// Stream nnz_ whitespace-separated one-based (i, j, v) tuples from
		// `filename`, starting at byte offset start_pos.
		file_iterator_t(size_t nnz_, const char* filename, size_t start_pos=0) {
			nnz = nnz_;
			fp = fopen(filename,"rb");
			if(fp == NULL) {
				fprintf(stderr, "Error: cannot read the file (%s)!!\n", filename);
				return;
			}
			fseek(fp, start_pos, SEEK_SET);
		}
		~file_iterator_t(){ if (fp) fclose(fp); }
		// Parse the next line into a zero-based entry (indices are stored
		// one-based on disk). Returns the sentinel entry when exhausted.
		entry_t<val_type> next() {
			const int base10 = 10;
			if(nnz > 0) {
				--nnz;
				if(fgets(&line[0], MAXLINE, fp)==NULL)
					fprintf(stderr, "Error: reading error !!\n");
				char *head_ptr = &line[0];
				size_t i = strtol(head_ptr, &head_ptr, base10);
				size_t j = strtol(head_ptr, &head_ptr, base10);
				double v = strtod(head_ptr, &head_ptr);
				return entry_t<val_type>(i - 1, j - 1, (val_type)v);
			} else {
				// No more to iterate
				return entry_t<val_type>(0, 0, 0, -1);
			}
		}
	private:
		FILE *fp;
		char line[MAXLINE];
};

// Iterator over an SVMLight/LIBSVM-format file ("label idx:val idx:val ...").
// The whole file is parsed eagerly into (I, J, V) in the constructor.
template<class val_type>
class svmlight_file_iterator_t : public entry_iterator_t<val_type> {
	public:
		using entry_iterator_t<val_type>::nnz;
		// nr_skips: number of leading tokens (labels) to skip per line.
		// zero_based: whether feature keys on disk are already zero-based.
		// append_bias: if > 0, append one extra column with this constant value.
		svmlight_file_iterator_t( const char* filename, size_t nr_skips=1, bool zero_based=false, double append_bias=-1.0) {
			std::ifstream fs;
			std::string line, kv;
			const int base10 = 10;
			fs.open(filename, std::ios::in);
			if(!fs.is_open()) {
				std::cout << "Unable to open" << filename << std::endl;
				exit(-1);
			}
			I.clear(); J.clear(); V.clear();
			nr_rows = nr_cols = 0;
			while(std::getline(fs, line)) {
				if(fs.eof()) { break; }
				std::stringstream line_ss;
				line_ss.str(line);
				if(nr_skips != 0) {
					// skip label part;
					for(size_t i = 0; i < nr_skips; i++) { line_ss >> kv; }
				}
				size_t row_idx = nr_rows;
				while(line_ss >> kv) {
					char *head_ptr = const_cast<char*>(kv.c_str());
					size_t key = strtol(head_ptr, &head_ptr, base10);
					head_ptr++; // advance past the ":" separator
					val_type val = static_cast<val_type>(strtod(head_ptr, &head_ptr));
					size_t col_idx = (zero_based)? key : (key - 1);
					nr_cols = std::max(nr_cols, col_idx + 1);
					I.push_back(row_idx);
					J.push_back(col_idx);
					V.push_back(val);
				}
				nr_rows += 1;
			}
			// Optionally append a dense bias column of constant value.
			if(append_bias > 0) {
				size_t col_idx = nr_cols;
				nr_cols += 1;
				val_type val = static_cast<val_type>(append_bias);
				for(size_t row_idx = 0; row_idx < nr_rows; row_idx++) {
					I.push_back(row_idx);
					J.push_back(col_idx);
					V.push_back(val);
				}
			}
			idx = 0;
			nnz = I.size();
		}
		entry_t<val_type> next() {
			if(nnz > 0) {
				++idx;
				--nnz;
				return entry_t<val_type>(I[idx - 1], J[idx - 1], V[idx - 1]);
			} else {
				return entry_t<val_type>(0, 0, 0, -1);
			}
		}
		size_t get_rows() const { return nr_rows; }
		size_t get_cols() const { return nr_cols; }
	private:
		size_t nr_rows, nr_cols;
		size_t idx;
		std::vector<size_t> I, J;
		std::vector<val_type> V;
};

// Iterator for three vectors (I, J, V)
template<typename val_type>
class coo_iterator_t: public entry_iterator_t<val_type> {
	public:
		using entry_iterator_t<val_type>::nnz;
		// NOTE(review): the vectors are taken BY VALUE here, so I/J/V end up
		// pointing into temporary copies that are destroyed when the
		// constructor returns (dangling pointers). Callers should use the
		// raw-pointer overload below; confirm and consider taking const
		// references instead.
		coo_iterator_t(const std::vector<size_t> _I, const std::vector<size_t> _J, const std::vector<val_type> _V){
			nnz = std::min(std::min(_I.size(), _J.size()), _V.size());
			idx = 0;
			I = &_I[0]; J = &_J[0]; V = &_V[0];
		}
		// Non-owning view over caller-managed parallel arrays.
		coo_iterator_t(const size_t _nnz, const size_t* _I, const size_t* _J, const val_type* _V){
			nnz = _nnz;
			idx = 0;
			I = _I; J = _J; V = _V;
		}
		~coo_iterator_t(){ }
		entry_t<val_type> next() {
			if(nnz > 0) {
				++idx;
				--nnz;
				return entry_t<val_type>(I[idx - 1], J[idx - 1], V[idx - 1]);
			} else {
				return entry_t<val_type>(0, 0, 0, -1);
			}
		}
	private:
		int idx;
		const size_t *I, *J;
		const val_type *V;
};

// Iterator for sparse matrix
// Walks all stored entries of M in row-major (CSR: row_ptr/col_idx/val_t)
// or column-major (CSC: col_ptr/row_idx/val) order.
template<typename val_type>
class smat_iterator_t: public entry_iterator_t<val_type> {
	public:
		using entry_iterator_t<val_type>::nnz;
		smat_iterator_t(const smat_t<val_type>& M, major_t major = ROWMAJOR) {
			nnz = M.nnz;
			col_idx = (major == ROWMAJOR)? M.col_idx: M.row_idx;
			row_ptr = (major == ROWMAJOR)? M.row_ptr: M.col_ptr;
			val_t = (major == ROWMAJOR)? M.val_t: M.val;
			rows = (major==ROWMAJOR)? M.rows: M.cols;
			cols = (major==ROWMAJOR)? M.cols: M.rows;
			cur_idx = cur_row = 0;
		}
		~smat_iterator_t() {}
		entry_t<val_type> next() {
			if (nnz > 0)
				nnz--;
			else
				return entry_t<val_type>(0, 0, 0, -1);
			// Skip empty rows until cur_idx falls inside the current row.
			while (cur_idx >= row_ptr[cur_row+1])
				cur_row++;
			entry_t<val_type> ret(cur_row, col_idx[cur_idx], val_t[cur_idx]);
			cur_idx++;
			return ret;
		}
	private:
		unsigned *col_idx;
		size_t *row_ptr;
		val_type *val_t;
		size_t rows, cols, cur_idx;
		size_t cur_row;
};

// Iterator for a subset of sparse matrix
template<typename val_type>
class smat_subset_iterator_t: public entry_iterator_t<val_type> {
	public:
		using entry_iterator_t<val_type>::nnz;
		// When ROWMAJOR (COLMAJOR) is used, we sample several rows (columns) according to the order in subset_.
		// If remapping = true, then we are using the corresponding index (i, j) in the submatrix.
		// If remapping = false, then we are using the index (i, j) in the original matrix.
		// NOTE(review): both constructors evaluate rc_ptr[subset[0]] at the end,
		// which indexes an empty subset when size == 0 — confirm callers never
		// pass an empty subset.
		smat_subset_iterator_t(const smat_t<val_type>& M, const unsigned *subset_, size_t size, major_t major_ = ROWMAJOR, bool remapping_=true) {
			major = major_;
			remapping = remapping_;
			cr_idx = (major == ROWMAJOR)? M.col_idx: M.row_idx;
			rc_ptr = (major == ROWMAJOR)? M.row_ptr: M.col_ptr;
			val_t = (major == ROWMAJOR)? M.val_t: M.val;
			rows = (major==ROWMAJOR)? (remapping? size: M.rows): M.rows;
			cols = (major==ROWMAJOR)? M.cols: (remapping? size: M.cols);
			subset.resize(size);
			nnz = 0;
			for(size_t i = 0; i < size; i++) {
				unsigned idx = subset_[i];
				subset[i] = idx;
				nnz += (major == ROWMAJOR)? M.nnz_of_row(idx): M.nnz_of_col(idx);
			}
			cur_rc = 0;
			cur_idx = rc_ptr[subset[cur_rc]];
		}
		smat_subset_iterator_t(const smat_t<val_type>& M, const std::vector<unsigned> &subset_, major_t major_ = ROWMAJOR, bool remapping_=true) {
			major = major_;
			remapping = remapping_;
			cr_idx = (major == ROWMAJOR)? M.col_idx: M.row_idx;
			rc_ptr = (major == ROWMAJOR)? M.row_ptr: M.col_ptr;
			val_t = (major == ROWMAJOR)? M.val_t: M.val;
			rows = (major==ROWMAJOR)? (remapping? subset_.size(): M.rows): M.rows;
			cols = (major==ROWMAJOR)? M.cols: (remapping? subset_.size(): M.cols);
			subset.resize(subset_.size());
			nnz = 0;
			for(size_t i = 0; i < subset_.size(); i++) {
				unsigned idx = subset_[i];
				subset[i] = idx;
				nnz += (major == ROWMAJOR)? M.nnz_of_row(idx): M.nnz_of_col(idx);
			}
			cur_rc = 0;
			cur_idx = rc_ptr[subset[cur_rc]];
		}
		~smat_subset_iterator_t() {}
		size_t get_rows() { return rows; }
		size_t get_cols() { return cols; }
		entry_t<val_type> next() {
			if (nnz > 0)
				nnz--;
			else
				return entry_t<val_type>(0,0,0, -1);
			// Advance to the next selected row/column that still has entries.
			while (cur_idx >= rc_ptr[subset[cur_rc]+1]) {
				cur_rc++;
				cur_idx = rc_ptr[subset[cur_rc]];
			}
			entry_t<val_type> ret_rowwise(remapping? cur_rc: subset[cur_rc], cr_idx[cur_idx], val_t[cur_idx]);
			entry_t<val_type> ret_colwise(cr_idx[cur_idx], remapping? cur_rc: subset[cur_rc], val_t[cur_idx]);
			cur_idx++;
			return major==ROWMAJOR? ret_rowwise: ret_colwise;
		}
	private:
		size_t rows, cols;
		std::vector<unsigned>subset;
		unsigned *cr_idx;
		size_t *rc_ptr;
		val_type *val_t;
		size_t cur_rc, cur_idx;
		major_t major;
		bool remapping;
};

// Iterator for a dense matrix
// Yields only entries whose magnitude is >= threshold, in row-major order.
// The constructor pre-counts nnz and locates the first nonzero.
template<typename val_type>
class dmat_iterator_t: public entry_iterator_t<val_type> {
	public:
		using entry_iterator_t<val_type>::nnz;
		dmat_iterator_t(const dmat_t<val_type>& M, double threshold=1e-12) :
			M(M), rows(M.rows), cols(M.cols), threshold(fabs(threshold)) {
			cur_row = 0;
			cur_col = 0;
			nnz = 0;
			bool find_firstnz = true;
			for(size_t i = 0; i < rows; i++)
				for(size_t j = 0; j < cols; j++)
					if(fabs((double)M.at(i,j)) >= threshold) {
						if(find_firstnz) {
							cur_row = i;
							cur_col = j;
							find_firstnz = false;
						}
						nnz++;
					}
		}
		~dmat_iterator_t() {}
		entry_t<val_type> next() {
			if (nnz > 0)
				nnz--;
			else
				return entry_t<val_type>(0,0,0, -1);
			entry_t<val_type> entry(cur_row, cur_col, M.at(cur_row, cur_col));
			// Advance to the next above-threshold element.
			// NOTE(review): after the LAST nonzero has been grabbed, this scan
			// has no stopping bound and M.at() is evaluated past the end of
			// the matrix — confirm dmat_t::at tolerates this or add a bound.
			do {
				cur_col ++;
				if(cur_col == cols) {
					cur_row ++;
					cur_col = 0;
				}
			} while(fabs((double)M.at(cur_row, cur_col)) < threshold);
			return entry;
		}
	private:
		const dmat_t<val_type>& M;
		size_t rows, cols, cur_row, cur_col;
double threshold;
};

/*-------------- Implementation of Linear Algebra Operations --------------*/

// Lapack and Blas support
// On Windows the Fortran symbols carry no trailing underscore; alias them.
#ifdef _WIN32
#define ddot_ ddot
#define sdot_ sdot
#define daxpy_ daxpy
#define saxpy_ saxpy
#define dcopy_ dcopy
#define scopy_ scopy
#define dgemm_ dgemm
#define sgemm_ sgemm
#define dposv_ dposv
#define sposv_ sposv
#define dgesdd_ dgesdd
#define sgesdd_ sgesdd
#endif

// Fortran BLAS/LAPACK entry points (all arguments passed by pointer,
// Fortran calling convention). ptrdiff_t is used for Fortran INTEGER.
extern "C" {

	double ddot_(ptrdiff_t *, double *, ptrdiff_t *, double *, ptrdiff_t *);
	float sdot_(ptrdiff_t *, float *, ptrdiff_t *, float *, ptrdiff_t *);

	ptrdiff_t dscal_(ptrdiff_t *, double *, double *, ptrdiff_t *);
	ptrdiff_t sscal_(ptrdiff_t *, float *, float *, ptrdiff_t *);

	ptrdiff_t daxpy_(ptrdiff_t *, double *, double *, ptrdiff_t *, double *, ptrdiff_t *);
	ptrdiff_t saxpy_(ptrdiff_t *, float *, float *, ptrdiff_t *, float *, ptrdiff_t *);

	double dcopy_(ptrdiff_t *, double *, ptrdiff_t *, double *, ptrdiff_t *);
	float scopy_(ptrdiff_t *, float *, ptrdiff_t *, float *, ptrdiff_t *);

	void dgemm_(char *transa, char *transb, ptrdiff_t *m, ptrdiff_t *n, ptrdiff_t *k, double *alpha, double *a, ptrdiff_t *lda, double *b, ptrdiff_t *ldb, double *beta, double *c, ptrdiff_t *ldc);
	void sgemm_(char *transa, char *transb, ptrdiff_t *m, ptrdiff_t *n, ptrdiff_t *k, float *alpha, float *a, ptrdiff_t *lda, float *b, ptrdiff_t *ldb, float *beta, float *c, ptrdiff_t *ldc);

	int dposv_(char *uplo, ptrdiff_t *n, ptrdiff_t *nrhs, double *a, ptrdiff_t *lda, double *b, ptrdiff_t *ldb, ptrdiff_t *info);
	int sposv_(char *uplo, ptrdiff_t *n, ptrdiff_t *nrhs, float *a, ptrdiff_t *lda, float *b, ptrdiff_t *ldb, ptrdiff_t *info);

	void dgesdd_(char* jobz, ptrdiff_t* m, ptrdiff_t* n, double* a, ptrdiff_t* lda, double* s, double* u, ptrdiff_t* ldu, double* vt, ptrdiff_t* ldvt, double* work, ptrdiff_t* lwork, ptrdiff_t* iwork, ptrdiff_t* info);
	void sgesdd_(char* jobz, ptrdiff_t* m, ptrdiff_t* n, float* a, ptrdiff_t* lda, float* s, float* u, ptrdiff_t* ldu, float* vt, ptrdiff_t* ldvt, float* work, ptrdiff_t* lwork, ptrdiff_t* iwork, ptrdiff_t* info);
}

// Type-generic shims over the d*/s* BLAS pairs: the primary templates are
// declared only; explicit specializations dispatch to the right symbol.

// dot: x^T y
template<typename val_type> val_type dot(ptrdiff_t *, val_type *, ptrdiff_t *, val_type *, ptrdiff_t *);
template<> inline double dot(ptrdiff_t *len, double *x, ptrdiff_t *xinc, double *y, ptrdiff_t *yinc) { return ddot_(len,x,xinc,y,yinc);}
template<> inline float dot(ptrdiff_t *len, float *x, ptrdiff_t *xinc, float *y, ptrdiff_t *yinc) { return sdot_(len,x,xinc,y,yinc);}

// scal: x *= a
template<typename val_type> val_type scal(ptrdiff_t *, val_type *, val_type *, ptrdiff_t *);
template<> inline double scal(ptrdiff_t *len, double *a, double *x, ptrdiff_t *xinc) { return dscal_(len,a,x,xinc);}
template<> inline float scal(ptrdiff_t *len, float *a, float *x, ptrdiff_t *xinc) { return sscal_(len,a,x,xinc);}

// axpy: y += alpha * x
template<typename val_type> ptrdiff_t axpy(ptrdiff_t *, val_type *, val_type *, ptrdiff_t *, val_type *, ptrdiff_t *);
template<> inline ptrdiff_t axpy(ptrdiff_t *len, double *alpha, double *x, ptrdiff_t *xinc, double *y, ptrdiff_t *yinc) { return daxpy_(len,alpha,x,xinc,y,yinc);};
template<> inline ptrdiff_t axpy(ptrdiff_t *len, float *alpha, float *x, ptrdiff_t *xinc, float *y, ptrdiff_t *yinc) { return saxpy_(len,alpha,x,xinc,y,yinc);};

// copy: y = x
template<typename val_type> val_type copy(ptrdiff_t *, val_type *, ptrdiff_t *, val_type *, ptrdiff_t *);
template<> inline double copy(ptrdiff_t *len, double *x, ptrdiff_t *xinc, double *y, ptrdiff_t *yinc) { return dcopy_(len,x,xinc,y,yinc);}
template<> inline float copy(ptrdiff_t *len, float *x, ptrdiff_t *xinc, float *y, ptrdiff_t *yinc) { return scopy_(len,x,xinc,y,yinc);}

// gemm: C = alpha*op(A)*op(B) + beta*C
template<typename val_type> void gemm(char *transa, char *transb, ptrdiff_t *m, ptrdiff_t *n, ptrdiff_t *k, val_type *alpha, val_type *a, ptrdiff_t *lda, val_type *b, ptrdiff_t *ldb, val_type *beta, val_type *c, ptrdiff_t *ldc);
template<> inline void gemm(char *transa, char *transb, ptrdiff_t *m, ptrdiff_t *n, ptrdiff_t *k, double *alpha, double *a, ptrdiff_t *lda, double *b, ptrdiff_t *ldb, double *beta, double
const_cast<val_type*>(y); ptrdiff_t inc = 1; ptrdiff_t len = (ptrdiff_t) size; return dot(&len, xx, &inc, yy, &inc); } template<typename val_type> val_type do_dot_product(const dvec_t<val_type> &x, const dvec_t<val_type> &y) { assert(x.size() == y.size()); return do_dot_product(x.data(), y.data(), x.size()); } template<typename val_type> val_type do_dot_product(const svec_t<val_type> &x, const svec_t<val_type> &y) { if(x.nnz > y.nnz) { return do_dot_product(y, x); } val_type ret = 0; size_t s = 0, t = 0; unsigned *xend = x.idx + x.nnz; unsigned *yend = y.idx + y.nnz; while(s < x.nnz && t < y.nnz) { if(x.idx[s] == y.idx[t]) { ret += x.val[s] * y.val[t]; s++; t++; } else if(x.idx[s] < y.idx[t]) { s = std::lower_bound(x.idx + s, xend, y.idx[t]) - x.idx; } else { t = std::lower_bound(y.idx + t, yend, x.idx[s]) - y.idx; } } return ret; } template<typename val_type> val_type do_dot_product_old(const svec_t<val_type> &x, const svec_t<val_type> &y) { assert(x.size() == y.size()); val_type ret = 0; for(size_t i = 0, j = 0; i < x.get_nnz() && j < y.get_nnz();) { if(x.idx[i] < y.idx[j]) { i ++; } else if(x.idx[i] > y.idx[j]) { j ++; } else { ret += x.val[i] * y.val[j]; i ++; j ++; } } return ret; } template<typename val_type> val_type do_dot_product(const sdvec_t<val_type> &x, const sdvec_t<val_type> &y) { assert(x.size() == y.size()); val_type ret = 0; for(size_t i = 0, j = 0; i < x.get_nnz() && j < y.get_nnz();) { if(x.nz_idx[i] < y.nz_idx[j]) { i++; } else if(x.nz_idx[i] < y.nz_idx[i]) { j++; } else { ret += x[x.nz_idx[i]] * y[y.nz_idx[j]]; i++; j++; } } return ret; } template<typename val_type> val_type do_dot_product(const dvec_t<val_type> &x, const svec_t<val_type> &y) { assert(x.size() == y.size()); val_type ret = 0; for(size_t i = 0; i < y.get_nnz(); i++) ret += x[y.idx[i]] * y.val[i]; return ret; } template<typename val_type> val_type do_dot_product(const svec_t<val_type> &x, const dvec_t<val_type> &y) { assert(x.size() == y.size()); return do_dot_product(y, x); } 
template<typename val_type> val_type do_dot_product(const dvec_t<val_type> &x, const sdvec_t<val_type> &y) { val_type ret = 0; for(size_t i = 0; i < y.get_nnz(); i++) { ret += x[y.nz_idx[i]] * y[y.nz_idx[i]]; } return ret; } template<typename val_type> val_type do_dot_product(const sdvec_t<val_type> &x, const dvec_t<val_type> &y) { return do_dot_product(y, x); } template<typename val_type> val_type do_dot_product_old(const svec_t<val_type> &x, const sdvec_t<val_type> &y) { val_type ret = 0; for(size_t i = 0, j = 0; i < x.get_nnz() && j < y.get_nnz();) { if(x.idx[i] < y.nz_idx[j]) { i++; } else if(x.idx[i] > y.nz_idx[j]) { j++; } else { ret += x.val[i] * y[y.nz_idx[j]]; i++; j++; } } return ret; } template<typename val_type> val_type do_dot_product(const sdvec_t<val_type> &x, const svec_t<val_type> &y) { return do_dot_product(y, x); } template<typename val_type> val_type do_dot_product(const gvec_t<val_type> &x, const gvec_t<val_type> &y) { assert(x.size() == y.size()); if(x.is_sparse() && y.is_sparse()) return do_dot_product(x.get_sparse(), y.get_sparse()); else if(x.is_sparse() && y.is_dense()) return do_dot_product(x.get_sparse(), y.get_dense()); else if(x.is_dense() && y.is_sparse()) return do_dot_product(x.get_dense(), y.get_sparse()); else if(x.is_dense() && y.is_dense()) return do_dot_product(x.get_dense(), y.get_dense()); else return 0; } template<typename val_type> val_type do_dot_product(const dmat_t<val_type> &x, const dmat_t<val_type> &y) { assert(x.rows == y.rows && x.cols == y.cols); if((x.is_rowmajor() && y.is_rowmajor()) || (x.is_colmajor() && y.is_colmajor())) return do_dot_product(x.data(), y.data(), x.rows*x.cols); else { val_type ret = 0.0; const dmat_t<val_type> &xx = (x.rows > x.cols) ? x : x.transpose(); const dmat_t<val_type> &yy = (y.rows > y.cols) ? 
y : y.transpose(); #pragma omp parallel for schedule(static) reduction(+:ret) for(size_t i = 0; i < xx.rows; i++) { double ret_local = 0.0; for(size_t j = 0; j < xx.cols; j++) ret_local += xx.at(i,j)*yy.at(i,j); ret += ret_local; } return (val_type)ret; } } template<typename val_type> val_type do_dot_product(const smat_t<val_type> &x, const smat_t<val_type> &y) { assert(x.rows == y.rows && x.cols == y.cols); val_type ret = 0.0; const smat_t<val_type> &xx = (x.rows > x.cols) ? x : x.transpose(); const smat_t<val_type> &yy = (y.rows > y.cols) ? y : y.transpose(); #pragma omp parallel for schedule(static) reduction(+:ret) for(size_t i = 0; i < xx.rows; i++) { svec_t<val_type> sv1 = xx.get_row(i); svec_t<val_type> sv2 = yy.get_row(i); val_type ret_local = do_dot_product(sv1, sv2); ret += ret_local; } return (val_type)ret; } template<typename val_type> val_type do_dot_product(const smat_t<val_type> &x, const dmat_t<val_type>&y) { assert(x.rows == y.rows && x.cols == y.cols); double ret = 0; const smat_t<val_type> &xx = (x.rows > x.cols) ? 
x : x.transpose(); #pragma omp parallel for schedule(static) reduction(+:ret) for(size_t i = 0; i < xx.rows; i++) { double tmp = 0; for(size_t idx = xx.row_ptr[i]; idx < xx.row_ptr[i + 1]; idx++) { tmp += xx.val[idx] * y.at(i, xx.col_idx[idx]); } ret += tmp; } return static_cast<val_type>(ret); } template<typename val_type> val_type do_dot_product(const dmat_t<val_type>&x, const smat_t<val_type> &y) { return do_dot_product(y, x); } template<typename val_type> val_type do_dot_product(const gmat_t<val_type>&x, const gmat_t<val_type> &y) { assert(x.rows == y.rows && x.cols == y.cols); if(x.is_sparse() && y.is_sparse()) return do_dot_product(x.get_sparse(), y.get_sparse()); else if(x.is_sparse() && y.is_dense()) return do_dot_product(x.get_sparse(), y.get_dense()); else if(x.is_dense() && y.is_sparse()) return do_dot_product(x.get_dense(), y.get_sparse()); else if(x.is_dense() && y.is_dense()) return do_dot_product(x.get_dense(), y.get_dense()); else return 0; } // y = alpha * x + y template<typename val_type, typename T> val_type* do_axpy(T alpha, const val_type *x, val_type *y, size_t size) { if(alpha == 0) return y; val_type alpha_ = (val_type)alpha; ptrdiff_t inc = 1; ptrdiff_t len = (ptrdiff_t) size; val_type *xx = const_cast<val_type*>(x); axpy(&len, &alpha_, xx, &inc, y, &inc); return y; } template<typename val_type, typename T> dvec_t<val_type>& do_axpy(T alpha, const dvec_t<val_type> &x, dvec_t<val_type> &y) { do_axpy(alpha, x.data(), y.data(), x.size()); return y; } template<typename val_type, typename T> dvec_t<val_type>& do_axpy(T alpha, const svec_t<val_type> &x, dvec_t<val_type> &y) { if(alpha == 0) return y; for(size_t i = 0; i < x.get_nnz(); i++) { y[x.idx[i]] += alpha * x.val[i]; } return y; } template<typename XV, typename YV, typename T> sdvec_t<YV>& do_axpy(T alpha, const svec_t<XV>& x, sdvec_t<YV> &y) { if(alpha == 0) return y; for(size_t i = 0; i < x.get_nnz(); i++) { y.add_nonzero_at(x.idx[i], alpha * x.val[i]); } return y; } template<typename 
XV, typename YV, typename T> sdvec_t<YV>& do_axpy(T alpha, const dvec_t<XV>& x, sdvec_t<YV> &y) { if(alpha == 0) return y; for(size_t i = 0; i < x.size(); i++) { y.add_nonzero_at(i, alpha * x[i]); } return y; } template<typename val_type, typename T> dmat_t<val_type>& do_axpy(T alpha, const dmat_t<val_type> &x, dmat_t<val_type> &y) { assert(x.rows == y.rows && x.cols == y.cols); if((x.is_rowmajor() && y.is_rowmajor()) || (x.is_colmajor() && y.is_colmajor())) do_axpy(alpha, x.data(), y.data(), x.rows*x.cols); else { if(x.rows > x.cols) { #pragma omp parallel for schedule(static) for(size_t i = 0; i < x.rows; i++) for(size_t j = 0; j < x.cols; j++) y.at(i,j) += alpha*x.at(i,j); } else { #pragma omp parallel for schedule(static) for(size_t j = 0; j < x.cols; j++) for(size_t i = 0; i < x.rows; i++) y.at(i,j) += alpha*x.at(i,j); } } return y; } // x *= alpha template<typename val_type, typename T> void do_scale(T alpha, val_type *x, size_t size) { if(alpha == 0.0) { memset(x, 0, sizeof(val_type) * size); } else if (alpha == 1.0) { return; } else { val_type alpha_minus_one = (val_type)(alpha - 1); do_axpy(alpha_minus_one, x, x, size); } } template<typename val_type, typename T> void do_scale(T alpha, dvec_t<val_type> &x) { do_scale(alpha, x.data(), x.size()); } template<typename val_type, typename T> void do_scale(T alpha, svec_t<val_type> &x) { do_scale(alpha, x.val, x.get_nnz()); } template<typename val_type, typename T> void do_scale(T alpha, gvec_t<val_type> &x) { if(x.is_sparse()) do_scale(alpha, x.get_sparse()); else if(x.is_dense()) do_scale(alpha, x.get_dense()); } template<typename val_type, typename T> void do_scale(T alpha, dmat_t<val_type> &x) { do_scale(alpha, x.data(), x.rows*x.cols); } template<typename val_type, typename T> void do_scale(T alpha, smat_t<val_type> &x) { do_scale(alpha, x.val, x.get_nnz()); do_scale(alpha, x.val_t, x.get_nnz()); } // H = a*X*W + b H0 (H0 can put H. However H don't need to be pre-allocated, but H0 do.) 
template<typename val_type, typename T2, typename T3>
dmat_t<val_type>& dmat_x_dmat(T2 a, const dmat_t<val_type> &X, const dmat_t<val_type> &W, T3 b, const dmat_t<val_type> &H0, dmat_t<val_type> &H) {
	// Seed H with b*H0, then accumulate a*X*W via the 5-argument overload
	// (declared earlier in this file).
	if(b == 0)
		assert(X.cols == W.rows);
	else
		assert(W.cols == H0.cols && X.cols == W.rows && X.rows == H0.rows);
	H.lazy_resize(X.rows, W.cols).assign(b, H0);
	return dmat_x_dmat(a, X, W, 1, H);
}

// H = a*X*W + b*H0 for sparse X and dense W. The traversal strategy is
// chosen from the storage layouts of W and H to keep memory access friendly.
template<typename val_type, typename T2, typename T3>
dmat_t<val_type>& smat_x_dmat(T2 a, const smat_t<val_type> &X, const dmat_t<val_type> &W, T3 b, const dmat_t<val_type> &H0, dmat_t<val_type> &H) {
	if(b == 0)
		assert(X.cols == W.rows);
	else
		assert(W.cols == H0.cols && X.cols == W.rows && X.rows == H0.rows);
	H.lazy_resize(X.rows, W.cols).assign(b, H0);
	// H += aXW
	if(W.is_rowmajor()) {
		if(H.is_rowmajor()) {
			// Both row-major: use the flat-buffer kernel (declared elsewhere).
			smat_x_dmat(a, X, W.data(), W.cols, 1.0, H.data(), H.data());
		} else { // H is col_major
			// Expand each stored X(i,j) across row j of W into row i of H.
			#pragma omp parallel for schedule(dynamic, 64) shared(X, W, H)
			for(size_t i = 0; i < X.rows; i++) {
				for(size_t idx = X.row_ptr[i]; idx != X.row_ptr[i+1]; idx++){
					size_t j = X.col_idx[idx];
					const val_type &Xij = X.val_t[idx];
					for(size_t t = 0; t < W.cols; t++)
						H.at(i,t) += a*Xij*W.at(j,t);
				}
			}
		}
	} else { // W.is_colmajor
		if(H.is_colmajor()) {
			// Column-at-a-time: H(:,j) += a * X * W(:,j) via X.Xv.
			#pragma omp parallel for schedule(static)
			for(size_t j = 0; j < W.cols; j++) {
				dvec_t<val_type> Wj = W.get_col(j);
				dvec_t<val_type> Hj = H.get_col(j);
				X.Xv(Wj, Hj, true);
			}
		} else { // H.is row_major
			#pragma omp parallel for schedule(dynamic, 64) shared(X, W, H)
			for(size_t i = 0; i < X.rows; i++) {
				for(size_t idx = X.row_ptr[i]; idx != X.row_ptr[i+1]; idx++){
					size_t j = X.col_idx[idx];
					const val_type &Xij = X.val_t[idx];
					for(size_t t = 0; t < W.cols; t++)
						H.at(i,t) += a*Xij*W.at(j,t);
				}
			}
		}
	}
	return H;
}

// H = a*X*W + b*H0 for a generic X: dispatch on its runtime representation.
template<typename val_type, typename T2, typename T3>
dmat_t<val_type>& gmat_x_dmat(T2 a, const gmat_t<val_type> &X, const dmat_t<val_type> &W, T3 b, const dmat_t<val_type> &H0, dmat_t<val_type> &H) {
	if(b == 0)
		assert(X.cols == W.rows);
	else
		assert(W.cols == H0.cols && X.cols == W.rows && X.rows == H0.rows);
	if(X.is_sparse())
		smat_x_dmat(a, X.get_sparse(), W, b, H0, H);
	else if(X.is_dense())
		dmat_x_dmat(a, X.get_dense(), W, b, H0, H);
	else if(X.is_identity()) {
		// X == I: H = a*W + b*H0.
		H.lazy_resize(X.rows, W.cols).assign(b, H0);
		do_axpy(a, W, H);
	}
	return H;
}

// H = a*X*W + H0 (H0 can put H. However H don't need to be pre-allocated, but H0 do)
template<typename val_type, typename T2>
dmat_t<val_type>& dmat_x_dmat(T2 a, const dmat_t<val_type> &X, const dmat_t<val_type> &W, const dmat_t<val_type> &H0, dmat_t<val_type> &H) {
	return dmat_x_dmat(a, X, W, 1.0, H0, H);
}

template<typename val_type, typename T2>
dmat_t<val_type>& smat_x_dmat(T2 a, const smat_t<val_type> &X, const dmat_t<val_type> &W, const dmat_t<val_type> &H0, dmat_t<val_type> &H) {
	return smat_x_dmat(a, X, W, 1.0, H0, H);
}

template<typename val_type, typename T2>
dmat_t<val_type>& gmat_x_dmat(T2 a, const gmat_t<val_type> &X, const dmat_t<val_type> &W, const dmat_t<val_type> &H0, dmat_t<val_type> &H) {
	return gmat_x_dmat(a, X, W, 1.0, H0, H);
}

// H = X*W (H don't need to be pre-allocated)
template<typename val_type>
dmat_t<val_type>& dmat_x_dmat(const dmat_t<val_type> &X, const dmat_t<val_type> &W, dmat_t<val_type> &H) {
	return dmat_x_dmat(1.0, X, W, 0.0, H, H);
}

template<typename val_type>
dmat_t<val_type> operator*(const dmat_t<val_type> &X, const dmat_t<val_type> &W) {
	dmat_t<val_type> H(X.rows, W.cols);
	dmat_x_dmat(X, W, H);
	return H;
}

// H = X*W for sparse operands, sequential. Builds H in CSC form column by
// column using a sparse-dense accumulator, then derives the CSR arrays.
template<typename VX, typename VW, typename VH>
smat_t<VH>& smat_x_smat_single_thread(const smat_t<VX> &X, const smat_t<VW> &W, smat_t<VH> &H) {
	std::vector<unsigned> row_idx;
	std::vector<size_t> col_ptr;
	std::vector<VH> val;
	size_t rows = X.rows, cols = W.cols;
	sdvec_t<VH> temp(rows); // accumulator for one output column
	col_ptr.push_back(0);
	size_t total_nnz = 0;
	for(size_t c = 0; c < cols; c++) {
		const svec_t<VW>& Wc = W.get_col(c);
		temp.clear();
		for(size_t s = 0; s < Wc.nnz; s++) {
			// temp += Wc[i] * Xi
			do_axpy(Wc.val[s], X.get_col(Wc.idx[s]), temp);
		}
		temp.update_nz_idx();
		total_nnz += temp.nnz;
		col_ptr.push_back(total_nnz);
		for(size_t s = 0; s < temp.nnz; s++) {
			row_idx.push_back(temp.nz_idx[s]);
			val.push_back(temp[temp.nz_idx[s]]);
		}
	}
	// Materialize the CSC arrays in H, then fill in its CSR mirror.
	H.allocate_space(rows, cols, total_nnz);
	memcpy(H.val, val.data(), sizeof(VH) * total_nnz);
	memcpy(H.row_idx, row_idx.data(), sizeof(unsigned) * total_nnz);
	memcpy(H.col_ptr, col_ptr.data(), sizeof(size_t) * (cols + 1));
	H.csc_to_csr();
	return H;
}

// H = X*W for sparse operands, parallelized over column ranges. Each worker
// builds its slice of columns privately; slices are then stitched together.
template<typename VX, typename VW, typename VH>
smat_t<VH>& smat_x_smat(const smat_t<VX> &X, const smat_t<VW> &W, smat_t<VH> &H, int threads=-1) {
	// Per-thread scratch: a column accumulator plus the (row, value) pairs
	// produced for this thread's contiguous column range.
	struct worker_t {
		worker_t() {}
		sdvec_t<VH> temp;
		std::vector<unsigned> row_idx;
		std::vector<VH> val;
		size_t nnz() const { return row_idx.size(); }
		void set_rows(size_t rows) { temp.resize(rows); }
		void reserve(size_t capacity) {
			row_idx.reserve(capacity);
			val.reserve(capacity);
		}
		void push_back(unsigned idx, VH value) {
			row_idx.push_back(static_cast<unsigned>(idx));
			val.push_back(static_cast<VH>(value));
		}
	};
	size_t rows = X.rows, cols = W.cols;
	if(threads == 1) {
		return smat_x_smat_single_thread(X, W, H);
	}
	if(rows > cols) { // maximize the parallelism
		// Compute H^T = W^T * X^T (more columns to split), then transpose back.
		smat_t<VX> Xt = X.transpose();
		smat_t<VW> Wt = W.transpose();
		smat_x_smat(Wt, Xt, H, threads);
		H.to_transpose();
		return H;
	}
	if(threads < 1) {
		threads = omp_get_num_procs();
	}
	threads = std::min(threads, omp_get_num_procs());
	std::vector<worker_t> worker_set(threads);
	std::vector<size_t> col_ptr(cols + 1);
	// Ceiling division: columns per thread.
	size_t workload = (cols / threads) + (cols % threads != 0);
	// Phase 1: each thread computes its column range and records per-column
	// nnz counts into disjoint slots of col_ptr.
#pragma omp parallel for schedule(static,1)
	for(int tid = 0; tid < threads; tid++) {
		worker_t& worker = worker_set[tid];
		worker.set_rows(rows);
		worker.reserve(X.nnz + W.nnz);
		size_t c_start = tid * workload;
		size_t c_end = std::min((tid + 1) * workload, cols);
		sdvec_t<VH>& temp = worker.temp;
		for(size_t c = c_start; c < c_end; c++) {
			const svec_t<VW>& Wc = W.get_col(c);
			temp.clear();
			for(size_t s = 0; s < Wc.nnz; s++) {
				// temp += Wc[i] * Xi
				do_axpy(Wc.val[s], X.get_col(Wc.idx[s]), temp);
			}
			temp.update_nz_idx();
			col_ptr[c + 1] = temp.nnz;
			for(size_t s = 0; s < temp.nnz; s++) {
				size_t r = temp.nz_idx[s];
				worker.push_back(r, temp[r]);
			}
		}
	}
	// Phase 2: prefix-sum the per-column counts into real CSC offsets.
	for(size_t c = 1; c <= cols; c++) {
		col_ptr[c] += col_ptr[c - 1];
	}
	size_t total_nnz = col_ptr[cols];
	H.allocate_space(rows, cols, total_nnz);
	memcpy(H.col_ptr, col_ptr.data(), sizeof(size_t) * (cols + 1));
	// Phase 3: each thread copies its slice into the final arrays.
#pragma omp parallel for schedule(static,1)
	for(int tid = 0; tid < threads; tid++) {
		size_t c_start = tid * workload;
		worker_t& worker = worker_set[tid];
		memcpy(&H.val[col_ptr[c_start]], worker.val.data(), sizeof(VH) * worker.nnz());
		memcpy(&H.row_idx[col_ptr[c_start]], worker.row_idx.data(), sizeof(unsigned) * worker.nnz());
	}
	H.csc_to_csr();
	return H;
}

template<typename VX, typename VW>
smat_t<VX> operator*(const smat_t<VX> &X, const smat_t<VW>& W) {
	smat_t<VX> H;
	smat_x_smat(X, W, H);
	return H;
}

template<typename val_type>
dmat_t<val_type>& smat_x_dmat(const smat_t<val_type> &X, const dmat_t<val_type> &W, dmat_t<val_type> &H) {
	return smat_x_dmat(1.0, X, W, 0.0, H, H);
}

template<typename val_type>
dmat_t<val_type> operator*(const smat_t<val_type> &X, const dmat_t<val_type> &W) {
	dmat_t<val_type> H(X.rows, W.cols);
	smat_x_dmat(X, W, H);
	return H;
}

// X*W with dense X and sparse W, computed as (W^T * X^T)^T.
// NOTE(review): H.transpose() is passed to a non-const reference parameter —
// confirm dmat_t::transpose() returns a view sharing H's storage, otherwise
// the result would be written to a temporary.
template<typename val_type>
dmat_t<val_type> operator*(const dmat_t<val_type> &X, const smat_t<val_type> &W) {
	dmat_t<val_type> H(X.rows, W.cols);
	smat_x_dmat(X.transpose(), W.transpose(), H.transpose());
	return H;
}

template<typename val_type>
dmat_t<val_type>& gmat_x_dmat(const gmat_t<val_type> &X, const dmat_t<val_type> &W, dmat_t<val_type> &H) {
	return gmat_x_dmat(1.0, X, W, 0.0, H, H);
}

template<typename val_type>
dmat_t<val_type> operator*(const gmat_t<val_type> &X, const dmat_t<val_type> &W) {
	dmat_t<val_type> H(X.rows, W.cols);
	gmat_x_dmat(X, W, H);
	return H;
}

// For `len` requested positions, compute val[idx] = (X*M)(X_row_idx[idx], M_col_idx[idx])
// without forming the full product; each representation pair gets its own kernel.
template<typename val_type, typename I, typename V>
void compute_sparse_entries_from_gmat_x_gmat(
		const gmat_t<val_type> &gX, const gmat_t<val_type> &gM,
		size_t len, const I *X_row_idx, const I *M_col_idx, V *val) {
	if(gX.is_sparse() && gM.is_sparse()) {
		// sparse row · sparse column
		const smat_t<val_type>& X = gX.get_sparse();
		const smat_t<val_type>& M = gM.get_sparse();
		#pragma omp parallel for schedule(dynamic,64)
		for(size_t idx = 0; idx < len; idx++) {
			const svec_t<val_type>& xi = X.get_row(X_row_idx[idx]);
			const svec_t<val_type>& mj = M.get_col(M_col_idx[idx]);
			val[idx] = static_cast<V>(do_dot_product(xi, mj));
		}
	} else if(gX.is_sparse() && gM.is_dense()) {
		// sparse row gathered against a dense column
		const smat_t<val_type>& X = gX.get_sparse();
		const dmat_t<val_type>& M = gM.get_dense();
		#pragma omp parallel for schedule(dynamic,64)
		for(size_t idx = 0; idx < len; idx++) {
			const svec_t<val_type>& xi = X.get_row(X_row_idx[idx]);
			I j = M_col_idx[idx];
			double tmp = 0;
			for(size_t t = 0; t < xi.nnz; t++) {
				tmp += xi.val[t] * M.at(xi.idx[t], j);
			}
			val[idx] = tmp;
		}
	} else if(gX.is_dense() && gM.is_sparse()) {
		// dense row gathered against a sparse column
		const dmat_t<val_type>& X = gX.get_dense();
		const smat_t<val_type>& M = gM.get_sparse();
		#pragma omp parallel for schedule(dynamic,64)
		for(size_t idx = 0; idx < len; idx++) {
			const svec_t<val_type>& mj = M.get_col(M_col_idx[idx]);
			I i = X_row_idx[idx];
			double tmp = 0;
			for(size_t t = 0; t < mj.nnz; t++) {
				tmp += X.at(i, mj.idx[t]) * mj.val[t];
			}
			val[idx] = tmp;
		}
	} else if(gX.is_dense() && gM.is_dense()) {
		// dense row · dense column (full inner-dimension loop)
		const dmat_t<val_type>& X = gX.get_dense();
		const dmat_t<val_type>& M = gM.get_dense();
		#pragma omp parallel for schedule(static,64)
		for(size_t idx = 0; idx < len; idx++) {
			I i = X_row_idx[idx];
			I j = M_col_idx[idx];
			double tmp = 0;
			for(size_t t = 0; t < X.cols; t++) {
				tmp += X.at(i, t) * M.at(t, j);
			}
			val[idx] = tmp;
		}
	}
}

// tr(W^T X H) (W, H: dense matrix; X: sparse matrix)
template<typename val_type>
val_type trace_dmat_T_smat_dmat(const dmat_t<val_type> &W, const smat_t<val_type> &X, const dmat_t<val_type> &H) {
	assert(W.cols == H.cols && W.rows == X.rows && H.rows == X.cols);
	if(W.is_colmajor() && H.is_colmajor()) {
		double ret = 0;
#pragma omp parallel for schedule(static) reduction(+:ret)
for(size_t t = 0; t < W.cols; t++) { const dvec_t<val_type> u = W.get_col(t); const dvec_t<val_type> v = H.get_col(t); double local_sum = 0; for(size_t i = 0; i < X.rows; i++) { for(size_t idx = X.row_ptr[i]; idx != X.row_ptr[i+1]; idx++) local_sum += X.val_t[idx]*u[i]*v[X.col_idx[idx]]; } ret += local_sum; } return ret; } else { double ret= 0; #pragma omp parallel for schedule(dynamic,64) reduction(+:ret) for(size_t i = 0; i < X.rows; i++) { double local_sum = 0; for(size_t idx = X.row_ptr[i]; idx != X.row_ptr[i+1]; idx++) { size_t j = X.col_idx[idx]; double sum = 0; for(size_t t = 0; t < W.cols; t++) sum += W.at(i,t)*H.at(j,t); local_sum += sum * X.val_t[idx]; } ret += local_sum; } return ret; } } // tr(W^T diag(D) H) (W, H: dense matrix; D: dense vector) template<typename val_type> val_type trace_dmat_T_diag_dmat(const dmat_t<val_type> &W, const dvec_t<val_type> &D, const dmat_t<val_type> &H) { assert(W.rows == H.rows && W.rows == D.len && W.cols == H.cols); assert(W.is_rowmajor() && H.is_rowmajor()); return trace_dmat_T_diag_dmat(W.data(),D.data(),H.data(),W.rows,W.cols); } // -------------- Implementation of Linear Algebra Solvers -------------- // Solve Ax = b, A is symmetric positive definite, b is overwritten with the result x // A will be modifed by internal Lapack. Make copy when necessary template<typename val_type> bool ls_solve_chol(val_type *A, val_type *b, size_t n) { ptrdiff_t nn=n, lda=n, ldb=n, nrhs=1, info=0; char uplo = 'U'; posv(&uplo, &nn, &nrhs, A, &lda, b, &ldb, &info); return (info == 0); } // Solve AX = B, A is symmetric positive definite, B is overwritten with the result X // A is a m-by-m matrix, while B is a m-by-n matrix stored in col_major // A will be modified by internal Lapack. 
Make copy when necessary template<typename val_type> bool ls_solve_chol_matrix_colmajor(val_type *A, val_type *B, size_t m, size_t n = size_t(0)) { ptrdiff_t mm=m, lda=m, ldb=m, nrhs=n, info=0; char uplo = 'U'; posv(&uplo, &mm, &nrhs, A, &lda, B, &ldb, &info); return (info == 0); } // Solve AX = B, A is symmetric positive definite, return X template<typename val_type> dmat_t<val_type> ls_solve_chol(const dmat_t<val_type>& A, const dmat_t<val_type>& B, bool A_as_workspace) { dmat_t<val_type> X(B); X.grow_body().to_colmajor(); dmat_t<val_type> AA(A); if(A_as_workspace == false) AA.grow_body(); if(ls_solve_chol_matrix_colmajor(AA.data(), X.data(), AA.rows, X.cols) == false) fprintf(stderr, "error when applying ls_solve_cho_matrix_colmajor"); return X; } // Solve Ax = b, A is symmetric positive definite, return x template<typename val_type> dvec_t<val_type> ls_solve_chol(const dmat_t<val_type>& A, const dvec_t<val_type>& b, bool A_as_workspace) { dvec_t<val_type> x(b); x.grow_body(); dmat_t<val_type> AA(A); if(A_as_workspace == false) AA.grow_body(); if(ls_solve_chol(AA.data(), x.data(), AA.rows) == false) fprintf(stderr, "error when applying ls_solve_chol"); return x; } // SVD: A = USV' // U, S, V don't necessarily need to be pre-allocated template<typename val_type> class svd_solver_t { private: char jobz; ptrdiff_t mm, nn, min_mn, max_mn, lda, ldu, ldvt, lwork1, lwork2, lwork, info; std::vector<val_type> u_buf, v_buf, s_buf, work; std::vector<ptrdiff_t> iwork; size_t k; void prepare_parameter(const dmat_t<val_type>& A, dmat_t<val_type>& U, dvec_t<val_type>& S, dmat_t<val_type>& V, bool reduced) { k = std::min(A.rows, A.cols); mm = (ptrdiff_t)A.rows; nn = (ptrdiff_t)A.cols; min_mn = std::min(mm,nn); max_mn = std::max(mm,nn); lda = mm; ldu = mm; ldvt = reduced? 
min_mn : nn; lwork1 = 3*min_mn*min_mn + std::max(max_mn, 4*min_mn*min_mn + 4*min_mn); lwork2 = 3*min_mn + std::max(max_mn, 4*min_mn*min_mn + 3*min_mn + max_mn); lwork = 2 * std::max(lwork1, lwork2); // due to differences between lapack 3.1 and 3.4 info = 0; work.resize(lwork); iwork.resize((size_t)(8*min_mn)); if(!S.is_view() || S.size() != k) S.resize(k); if(reduced) { jobz = 'S'; U.lazy_resize(A.rows, k, COLMAJOR); V.lazy_resize(A.cols, k, ROWMAJOR); } else { jobz = 'A'; U.lazy_resize(A.rows, A.rows, COLMAJOR); V.lazy_resize(A.cols, A.cols, ROWMAJOR); } } public: svd_solver_t() {} bool solve(const dmat_t<val_type>& A, dmat_t<val_type>& U, dvec_t<val_type>& S, dmat_t<val_type>& V, bool reduced=true, bool A_as_workspace=false) { if(A.is_rowmajor()) return solve(A.transpose(), V, S, U, reduced, A_as_workspace); else { dmat_t<val_type> AA(A.get_view()); if(A_as_workspace == false) AA.grow_body(); prepare_parameter(AA, U, S, V, reduced); #if defined(CPP11) gesdd(&jobz, &mm, &nn, AA.data(), &lda, S.data(), U.data(), &ldu, V.data(), &ldvt, work.data(), &lwork, iwork.data(), &info); #else gesdd(&jobz, &mm, &nn, AA.data(), &lda, S.data(), U.data(), &ldu, V.data(), &ldvt, &work[0], &lwork, &iwork[0], &info); #endif return (info == 0); } } }; template<typename val_type> void svd(const dmat_t<val_type>& A, dmat_t<val_type>& U, dvec_t<val_type>& S, dmat_t<val_type>& V, bool reduced, bool A_as_workspace) { svd_solver_t<val_type> solver; solver.solve(A, U, S, V, reduced, A_as_workspace); } // -------------- Implementation of Miscellaneous Functions -------------- // y = x for pointer to array template<typename val_type> void do_copy(const val_type *x, val_type *y, size_t size) { if(x == y) return; ptrdiff_t inc = 1; ptrdiff_t len = (ptrdiff_t) size; val_type *xx = const_cast<val_type*>(x); copy(&len, xx, &inc, y, &inc); } // H = a*X*W + b H0 // X is an m*n // W is an n*k, row-majored array // H is an m*k, row-majored array template<typename val_type, typename T2, typename T3> 
void smat_x_dmat(T2 a, const smat_t<val_type> &X, const val_type *W, const size_t k, T3 b, const val_type *H0, val_type *H) { size_t m = X.rows; val_type aa = (val_type) a; val_type bb = (val_type) b; if(a == T2(0)) { if(bb == (val_type)0.0){ memset(H, 0, sizeof(val_type)*m*k); return ; } else { if(H!=H0) { do_copy(H0, H, m*k); //memcpy(H, H0, sizeof(val_type)*m*k); } do_scale(bb, H, m*k); } return; } #pragma omp parallel for schedule(dynamic,64) shared(X, W, H, H0, aa,bb) for(size_t i = 0; i < m; i++) { val_type *Hi = &H[k*i]; if(bb == (val_type)0.0) memset(Hi, 0, sizeof(val_type)*k); else { if(Hi!=&H0[k*i]) do_copy(&H0[k*i], Hi, k); do_scale(bb, Hi, k); } for(size_t idx = X.row_ptr[i]; idx < X.row_ptr[i+1]; idx++) { const val_type Xij = X.val_t[idx]; const val_type *Wj = &W[X.col_idx[idx]*k]; for(size_t t = 0; t < k; t++) Hi[t] += aa*Xij*Wj[t]; } } } template<typename val_type, typename T2> void smat_x_dmat(T2 a, const smat_t<val_type> &X, const val_type* W, const size_t k, const val_type *H0, val_type *H) { smat_x_dmat(a, X, W, k, 1.0, H0, H); } // C = alpha*A*B + beta*C // C : m * n, k is the dimension of the middle // (1) A, B, C are stored in column major! template<typename val_type, typename T1, typename T2> void dmat_x_dmat_colmajor(T1 alpha, const val_type *A, bool trans_A, const val_type *B, bool trans_B, T2 beta, val_type *C, size_t m, size_t n, size_t k) { ptrdiff_t mm = (ptrdiff_t)m, nn = (ptrdiff_t)n, kk = (ptrdiff_t)k; ptrdiff_t lda = trans_A? kk:mm, ldb = trans_B? nn:kk, ldc = mm; char transpose = 'T', notranspose = 'N'; char *transa = trans_A? &transpose: &notranspose; char *transb = trans_B? &transpose: &notranspose; val_type alpha_ = (val_type) alpha; val_type beta_ = (val_type) beta; val_type *AA = const_cast<val_type*>(A); val_type *BB = const_cast<val_type*>(B); gemm(transa, transb, &mm, &nn, &kk, &alpha_, AA, &lda, BB, &ldb, &beta_, C, &ldc); } // (2) A, B, C are stored in row major! 
template<typename val_type, typename T1, typename T2> void dmat_x_dmat(T1 alpha, const val_type *A, bool trans_A, const val_type *B, bool trans_B, T2 beta, val_type *C, size_t m, size_t n, size_t k) { dmat_x_dmat_colmajor(alpha, B, trans_B, A, trans_A, beta, C, n, m, k); } // C = alpha*A*B + beta*C template<typename val_type, typename T1, typename T2> dmat_t<val_type>& dmat_x_dmat(T1 alpha, const dmat_t<val_type>& A, const dmat_t<val_type>& B, T2 beta, dmat_t<val_type>& C) { assert(A.cols == B.rows); C.lazy_resize(A.rows, B.cols); if (C.is_rowmajor()) { bool trans_A = A.is_rowmajor()? false : true; bool trans_B = B.is_rowmajor()? false : true; dmat_x_dmat(alpha, A.data(), trans_A, B.data(), trans_B, beta, C.data(), C.rows, C.cols, A.cols); } else { bool trans_A = A.is_colmajor()? false : true; bool trans_B = B.is_colmajor()? false : true; dmat_x_dmat_colmajor(alpha, A.data(), trans_A, B.data(), trans_B, beta, C.data(), C.rows, C.cols, A.cols); } return C; } // C = A'*B // C : m*n, k is the dimension of the middle // A, B, C are stored in row major! template<typename val_type> void dmat_trans_x_dmat(const val_type *A, const val_type *B, val_type *C, size_t m, size_t n, size_t k) { bool transpose = true; dmat_x_dmat(val_type(1.0), A, transpose, B, !transpose, val_type(0.0), C, m, n, k); } // C=A*B // A, B, C are stored in row major! 
template<typename val_type> void dmat_x_dmat(const val_type *A, const val_type *B, val_type *C, size_t m, size_t n, size_t k) { bool trans = true; dmat_x_dmat(val_type(1.0), A, !trans, B, !trans, val_type(0.0), C, m, n, k); } // Input: an n*k row-major matrix H // Output: an k*k matrix H^TH template<typename val_type> void doHTH(const val_type *H, val_type *HTH, size_t n, size_t k) { bool transpose = true; dmat_x_dmat_colmajor(val_type(1.0), H, !transpose, H, transpose, val_type(0.0), HTH, k, k, n); } /* trace(W^T X H) X is an m*n, sparse matrix W is an m*k, row-majored array H is an n*k, row-major */ template<typename val_type> val_type trace_dmat_T_smat_dmat(const val_type *W, const smat_t<val_type> &X, const val_type *H, const size_t k) { size_t m = X.rows; double ret = 0; #pragma omp parallel for schedule(dynamic,50) shared(X,H,W) reduction(+:ret) for(size_t i = 0; i < m; i++) { const val_type *Wi = &W[k*i]; for(long idx = X.row_ptr[i]; idx < X.row_ptr[i+1]; idx++) { const val_type *Hj = &H[X.col_idx[idx]*k]; double tmp=0; for(size_t t = 0; t < k; t++) tmp += Wi[t]*Hj[t]; ret += X.val_t[idx]*tmp; } } return (val_type)ret; } /* trace(W^T diag(D) H) D is an m*1 vector W is an m*k, row-majored array H is an m*k, row-major array */ template<typename val_type> val_type trace_dmat_T_diag_dmat(const val_type *W, const val_type *D, const val_type *H, const size_t m, const size_t k) { val_type *w = const_cast<val_type*>(W); val_type *h = const_cast<val_type*>(H); val_type *d = const_cast<val_type*>(D); double ret = 0.0; #pragma omp parallel for schedule(static) shared(w,h,d) reduction(+:ret) for(size_t i = 0; i < m; i++) { val_type *wi = &w[i*k], *hi = &h[i*k]; ret += do_dot_product(wi, wi, k) * d[i]; } return (val_type)ret; } template<typename val_type> val_type trace_dmat_T_diag_dmat(const dmat_t<val_type> &W, const dmat_t<val_type> &D, const dmat_t<val_type> &H) { return trace_dmat_T_diag_dmat(W, dvec_t<val_type>(D.get_view()), H); } //------------------ 
Implementation of zip_it ----------------------- // helpler functions and classes for zip_it template<class T1, class T2> struct zip_body { T1 x; T2 y; zip_body(const zip_ref<T1,T2>& other): x(*other.x), y(*other.y){} bool operator<(const zip_body &other) const {return x < other.x;} bool operator>(zip_body &other) const {return x > other.x;} bool operator==(zip_body &other) const {return x == other.x;} bool operator!=(zip_body &other) const {return x != other.x;} }; template<class T1, class T2> struct zip_ref { T1 *x; T2 *y; zip_ref(T1 &x, T2 &y): x(&x), y(&y){} zip_ref(zip_body<T1,T2>& other): x(&other.x), y(&other.y){} bool operator<(zip_ref other) const {return *x < *other.x;} bool operator>(zip_ref other) const {return *x > *other.x;} bool operator==(zip_ref other) const {return *x == *other.x;} bool operator!=(zip_ref other) const {return *x != *other.x;} zip_ref& operator=(zip_ref& other) { *x = *other.x; *y = *other.y; return *(this); } zip_ref& operator=(zip_body<T1,T2> other) { *x = other.x; *y = other.y; return *(this); } }; template<class T1, class T2> void swap(zip_ref<T1,T2> a, zip_ref<T1,T2> b) { std::swap(*(a.x),*(b.x)); std::swap(*(a.y),*(b.y)); } template<class IterT1, class IterT2> struct zip_it { typedef std::random_access_iterator_tag iterator_category; typedef typename std::iterator_traits<IterT1>::value_type T1; typedef typename std::iterator_traits<IterT2>::value_type T2; typedef zip_body<T1,T2> value_type; typedef zip_ref<T1,T2> reference; typedef zip_body<T1,T2>* pointer; typedef ptrdiff_t difference_type; IterT1 x; IterT2 y; zip_it(IterT1 x, IterT2 y): x(x), y(y){} reference operator*() {return reference(*x, *y);} reference operator[](const difference_type n) const {return reference(x[n],y[n]);} zip_it& operator++() {++x; ++y; return *this;} // prefix ++ zip_it& operator--() {--x; --y; return *this;} // prefix -- zip_it operator++(int) {return zip_it(x++,y++);} // sufix ++ zip_it operator--(int) {return zip_it(x--,y--);} // sufix -- zip_it 
operator+(const difference_type n) {return zip_it(x+n,y+n);} zip_it operator-(const difference_type n) {return zip_it(x-n,y-n);} zip_it& operator+=(const difference_type n) {x+=n; y+=n; return *this;} zip_it& operator-=(const difference_type n) {x-=n; y-=n; return *this;} bool operator<(const zip_it& other) {return x<other.x;} bool operator>(const zip_it& other) {return x>other.x;} bool operator==(const zip_it& other) {return x==other.x;} bool operator!=(const zip_it& other) {return x!=other.x;} difference_type operator-(const zip_it& other) {return x-other.x;} }; template<class IterT1, class IterT2> zip_it<IterT1, IterT2> zip_iter(IterT1 x, IterT2 y) { return zip_it<IterT1,IterT2>(x,y); } // ---------------- Implementation of string split utility -------------- // split utility template<typename Out> void split(const std::string &s, char delim, Out result) { std::stringstream ss; ss.str(s); std::string item; while (std::getline(ss, item, delim)) { *(result++) = item; } } /* std::vector<std::string> split(const std::string &s, char delim) { std::vector<std::string> elems; split(s, delim, std::back_inserter(elems)); return elems; } std::vector<std::string>& split(const std::string &s, char delim, std::vector<std::string>& elems) { elems.clear(); split(s, delim, std::back_inserter(elems)); return elems; } */ #undef coo_t #undef gmat_t #undef eye_t #undef smat_t #undef dmat_t #undef gvec_t #undef sdvec_t #undef svec_t #undef dvec_t // C Interface extern "C" { enum { DENSE_ROWMAJOR = 1, DENSE_COLMAJOR = 2, SPARSE = 3, EYE = 4 }; typedef struct { uint64_t rows, cols, nnz; size_t* row_ptr; size_t* col_ptr; uint32_t* row_idx; uint32_t* col_idx; void* val; void* val_t; int32_t type; } PyMatrix; } // end of extern "C" template<typename val_type> class general_matrix_wrapper { public: typedef sparse_vector<val_type> svec_t; typedef dense_vector<val_type> dvec_t; typedef sparse_dense_vector<val_type> sdvec_t; typedef general_vector<val_type> gvec_t; typedef 
sparse_matrix<val_type> smat_t; typedef dense_matrix<val_type> dmat_t; typedef identity_matrix<val_type> eye_t; typedef general_matrix<val_type> gmat_t; typedef coo_matrix<val_type> coo_t; general_matrix_wrapper() {} general_matrix_wrapper(const PyMatrix* py_mat_ptr) { if(py_mat_ptr->type == DENSE_ROWMAJOR) { dense = dmat_t(py_mat_ptr->rows, py_mat_ptr->cols, ROWMAJOR, static_cast<val_type*>(py_mat_ptr->val)); gmat_ptr = &dense; } else if(py_mat_ptr->type == DENSE_COLMAJOR) { dense = dmat_t(py_mat_ptr->rows, py_mat_ptr->cols, COLMAJOR, static_cast<val_type*>(py_mat_ptr->val)); gmat_ptr = &dense; } else if(py_mat_ptr->type == SPARSE) { sparse = smat_t( py_mat_ptr->rows, py_mat_ptr->cols, py_mat_ptr->nnz, static_cast<val_type*>(py_mat_ptr->val), static_cast<val_type*>(py_mat_ptr->val_t), py_mat_ptr->col_ptr, py_mat_ptr->row_ptr, py_mat_ptr->row_idx, py_mat_ptr->col_idx); gmat_ptr = &sparse; } } size_t rows() const { return gmat_ptr->rows; } size_t cols() const { return gmat_ptr->cols; } gmat_t& get_gmat() { return *gmat_ptr; } const gmat_t& get_gmat() const { return *gmat_ptr; } bool is_sparse() const { return gmat_ptr->is_sparse(); } bool is_dense() const { return gmat_ptr->is_dense(); } bool is_identity() const { return gmat_ptr->is_identity(); } smat_t& get_sparse() { return gmat_ptr->get_sparse(); } const smat_t& get_sparse() const { return gmat_ptr->get_sparse(); } dmat_t& get_dense() { return gmat_ptr->get_dense(); } const dmat_t& get_dense() const { return gmat_ptr->get_dense(); } general_matrix_wrapper<val_type> transpose() const { general_matrix_wrapper gmw; gmw.dense = this->dense.transpose(); if(is_sparse()) { gmw.sparse = this->sparse.transpose(); gmw.gmat_ptr = &gmw.sparse; } else if(is_dense()) { gmw.dense = this->dense.transpose(); gmw.gmat_ptr = &gmw.dense; } else if(is_identity()) { gmw.eye = this->eye; gmw.gmat_ptr = &gmw.eye; } return gmw; } private: smat_t sparse; dmat_t dense; eye_t eye; gmat_t* gmat_ptr; }; #endif // RF_MATRIX_H
/* ==================== Small_grib.c ==================== */
#include <stdio.h> #include <stdlib.h> #include <math.h> #include "grb2.h" #include "wgrib2.h" #include "fnlist.h" // #include "omp.h" // #define DEBUG /* * small_grib * * 5/2008 Public Domain by Wesley Ebisuzaki * v1.1 8/2011 WNE added mercator, rotated lat-lon, redundant test for we:sn order * v1.2 1/2012 WNE added Gaussian grid */ extern int decode, latlon; extern int flush_mode, file_append; extern enum output_order_type output_order; extern int use_scale, dec_scale, bin_scale, wanted_bits, max_bits; extern enum output_grib_type grib_type; extern double *lat, *lon; extern int npts, nx, ny, scan; extern unsigned int nx_, ny_; static unsigned int idx(int ix, int iy, int nx, int ny, int cyclic_grid); /* * HEADER:100:ijsmall_grib:output:3:make small domain grib file X=ix0:ix1 Y=iy0:iy1 Z=file */ int f_ijsmall_grib(ARG3) { struct local_struct { struct seq_file out; int ix0, iy0, ix1, iy1; }; struct local_struct *save; if (mode == -1) { decode = latlon = 1; *local = save = (struct local_struct *)malloc( sizeof(struct local_struct)); if (save == NULL) fatal_error("ijsmall_grib memory allocation ",""); if (sscanf(arg1,"%d:%d", &(save->ix0), &(save->ix1)) != 2) fatal_error("ijsmall_grib: ix0:ix1 = %s?", arg1); if (sscanf(arg2,"%d:%d", &(save->iy0), &(save->iy1)) != 2) fatal_error("ijsmall_grib: iy0:iy1 = %s?", arg2); if (fopen_file(&(save->out), arg3, file_append ? 
"ab" : "wb") != 0) { free(save); fatal_error("Could not open %s", arg2); } if (save->iy0 <= 0) fatal_error_i("ijsmall_grib: iy0=%d <= 0", save->iy0); if (save->iy0 > save->iy1) fatal_error("ijsmall_grib: iy0 > iy1",""); if (save->ix0 > save->ix1) fatal_error("ijsmall_grib: ix0 > ix1",""); } else if (mode == -2) { save = (struct local_struct *) *local; fclose_file(&(save->out)); free(save); } else if (mode >= 0) { save = (struct local_struct *) *local; if (output_order != wesn) fatal_error("ijsmall_grib: data must be in we:sn order",""); if (GDS_Scan_staggered(scan)) fatal_error("ijsmall_grib: does not work for staggered grids",""); if (nx_ == 0 || ny_ == 0) fatal_error("small_grib: does not work for thinned grids",""); small_grib(sec,mode,data,lon,lat, ndata,save->ix0,save->ix1,save->iy0,save->iy1,&(save->out)); } return 0; } /* index into a we:sn array(1:nx,1:ny) set cyclic_grid = 1 for a an array that is cyclic in longitude */ static unsigned int idx(int ix, int iy, int nx, int ny, int cyclic_grid) { int i; if (iy <= 0) fatal_error("index: iy <= 0",""); if (iy > ny) fatal_error_i("index: iy = %d",iy); i = ix-1; if (cyclic_grid) { if (i < 0) i = nx - ( (-i) % nx ); i = i % nx; } else { if (ix <= 0) fatal_error_i("index: ix=%d <= 0",ix); if (i >= nx) fatal_error_i("index: ix = %d",ix); } return (unsigned int) (i + (iy-1)*nx); } /* * small_grib * makes a subset of certain grids * * NOTE: the data must be in we:sn order * v1.1 added mercator and rotated lat-lon grid */ int small_grib(unsigned char **sec, int mode, float *data, double *lon, double *lat, unsigned int ndata, int ix0, int ix1, int iy0, int iy1, struct seq_file *out) { int can_subset, grid_template; int nx, ny, res, scan, new_nx, new_ny, i, j; unsigned int sec3_len, new_ndata, k, npnts; unsigned char *sec3, *new_sec[9]; double units; int basic_ang, sub_ang, cyclic_grid; float *new_data; get_nxny(sec, &nx, &ny, &npnts, &res, &scan); /* get nx, ny, and scan mode of grid */ grid_template = 
code_table_3_1(sec); // make a copy of the gds (sec3) sec3_len = GB2_Sec3_size(sec); sec3 = (unsigned char *) malloc(sec3_len); for (k = 0; k < sec3_len; k++) sec3[k] = sec[3][k]; // make a copy of the sec[] with new sec3 new_sec[0] = sec[0]; new_sec[1] = sec[1]; new_sec[2] = sec[2]; new_sec[3] = sec3; new_sec[4] = sec[4]; new_sec[5] = sec[5]; new_sec[6] = sec[6]; new_sec[7] = sec[7]; // new_sec[8] = sec[8]; not needed by writing routines can_subset = 1; if (lat == NULL || lon == NULL) can_subset = 0; new_nx = ix1-ix0+1; new_ny = iy1-iy0+1; if (new_nx <= 0) fatal_error("small_grib, new_nx is <= 0",""); if (new_ny <= 0) fatal_error("small_grib, new_ny is <= 0",""); new_ndata = new_nx * new_ny; cyclic_grid = 0; if (can_subset) { cyclic_grid = cyclic(sec); // lat-lon grid - no thinning if ((grid_template == 0 && sec3_len == 72) || (grid_template == 1 && sec3_len == 04)) { uint_char(new_nx,sec3+30); // nx uint_char(new_ny,sec3+34); // ny basic_ang = GDS_LatLon_basic_ang(sec3); sub_ang = GDS_LatLon_sub_ang(sec3); if (basic_ang != 0) { units = (double) basic_ang / (double) sub_ang; } else { units = 0.000001; } i = lat[ idx(ix0,iy0,nx,ny,cyclic_grid) ] / units; // lat1 int_char(i,sec3+46); i = lon[ idx(ix0,iy0,nx,ny,cyclic_grid) ] / units; // lon1 int_char(i,sec3+50); i = lat[ idx(ix1,iy1,nx,ny,cyclic_grid) ] / units; // lat2 int_char(i,sec3+55); i = lon[ idx(ix1,iy1,nx,ny,cyclic_grid) ] / units; // lon2 int_char(i,sec3+59); } else if ((grid_template == 40 && sec3_len == 72)) { // full Gaussian grid uint_char(new_nx,sec3+30); // nx uint_char(new_ny,sec3+34); // ny basic_ang = GDS_Gaussian_basic_ang(sec3); sub_ang = GDS_Gaussian_sub_ang(sec3); if (basic_ang != 0) { units = (double) basic_ang / (double) sub_ang; } else { units = 0.000001; } i = lat[ idx(ix0,iy0,nx,ny,cyclic_grid) ] / units; // lat1 int_char(i,sec3+46); i = lon[ idx(ix0,iy0,nx,ny,cyclic_grid) ] / units; // lon1 int_char(i,sec3+50); i = lat[ idx(ix1,iy1,nx,ny,cyclic_grid) ] / units; // lat2 
int_char(i,sec3+55); i = lon[ idx(ix1,iy1,nx,ny,cyclic_grid) ] / units; // lon2 int_char(i,sec3+59); } // polar-stereo graphic, lambert conformal , no thinning else if ((grid_template == 20 && sec3_len == 65) || // polar stereographic (grid_template == 30 && sec3_len == 81)) { // lambert conformal uint_char(new_nx,sec3+30); // nx uint_char(new_ny,sec3+34); // ny i = (int) (lat[ idx(ix0,iy0,nx,ny,cyclic_grid) ] * 1000000.0); // lat1 int_char(i,sec3+38); i = (int) (lon[ idx(ix0,iy0,nx,ny,cyclic_grid) ] * 1000000.0); // lon1 int_char(i,sec3+42); } // mercator, no thinning else if (grid_template == 10 && sec3_len == 72) { // mercator uint_char(new_nx,sec3+30); // nx uint_char(new_ny,sec3+34); // ny units = 0.000001; i = lat[ idx(ix0,iy0,nx,ny,cyclic_grid) ] / units; // lat1 int_char(i,sec3+38); i = lon[ idx(ix0,iy0,nx,ny,cyclic_grid) ] / units; // lon1 int_char(i,sec3+42); i = lat[ idx(ix1,iy1,nx,ny,cyclic_grid) ] / units; // lat2 int_char(i,sec3+51); i = lon[ idx(ix1,iy1,nx,ny,cyclic_grid) ] / units; // lon2 int_char(i,sec3+55); } else { can_subset = 0; } } // copy data to a new array if (can_subset) { uint_char(new_ndata, sec3+6); new_data = (float *) malloc(sizeof(float) * (size_t) new_ndata); #pragma omp parallel for private(i,j,k) for(j = iy0; j <= iy1; j++) { k = (j-iy0) * (size_t) (ix1-ix0+1); for(i = ix0; i <= ix1; i++) { new_data[(i-ix0) + k ] = data[ idx(i,j,nx,ny,cyclic_grid) ]; } } } else { new_ndata = ndata; new_data = (float *) malloc(sizeof(float) * (size_t) new_ndata); for (k = 0; k < ndata; k++) new_data[k] = data[k]; new_nx = nx; new_ny = ny; } set_order(new_sec, output_order); grib_wrt(new_sec, new_data, new_ndata, new_nx, new_ny, use_scale, dec_scale, bin_scale, wanted_bits, max_bits, grib_type, out); if (flush_mode) fflush_file(out); free(new_data); free(sec3); return 0; } /* * HEADER:100:small_grib:output:3:make small domain grib file X=lonW:lonE Y=latS:latN Z=file */ extern int GDS_change_no; int f_small_grib(ARG3) { struct local_struct { struct 
seq_file out; double lonE, lonW, latS, latN; int GDS_change_no; int ix0, ix1, iy0, iy1; }; struct local_struct *save; if (mode == -1) { decode = latlon = 1; *local = save = (struct local_struct *)malloc( sizeof(struct local_struct)); if (save == NULL) fatal_error("small_grib memory allocation ",""); save->GDS_change_no = 0; save->ix0 = save->ix1 = save->iy0 = save->iy1 = 0; if (sscanf(arg1,"%lf:%lf", &(save->lonW), &(save->lonE)) != 2) fatal_error("small_grib: lonW:lonE = %s?", arg1); if (sscanf(arg2,"%lf:%lf", &(save->latS), &(save->latN)) != 2) fatal_error("small_grib: latS:latN = %s?", arg2); if (fopen_file(&(save->out), arg3, file_append ? "ab" : "wb") != 0) { free(save); fatal_error("Could not open %s", arg2); } if (save->latS > save->latN) fatal_error("small_grib: latS > latN",""); if (save->lonW > save->lonE) fatal_error("small_grib: lonW > lonE",""); } else if (mode == -2) { save = (struct local_struct *) *local; fclose_file(&(save->out)); free(save); return 0; } else if (mode >= 0) { save = (struct local_struct *) *local; if (GDS_Scan_staggered(scan)) fatal_error("small_grib: does not work for staggered grids",""); if (GDS_change_no != save->GDS_change_no) { small_domain(sec, save->lonW,save->lonE,save->latS,save->latN, &(save->ix0), &(save->ix1), &(save->iy0), &(save->iy1)); save->GDS_change_no = GDS_change_no; } if (output_order != wesn) fatal_error("small_grib: data must be in we:sn order",""); if (nx_ == 0 || ny_ == 0) fatal_error("small_grib: does not work for thinned grids",""); small_grib(sec,mode,data,lon,lat, ndata,save->ix0,save->ix1,save->iy0,save->iy1,&(save->out)); } return 0; } /* * finds smallest rectangular domain for a set of lat-lon grid points * * 5/2017: for lat-lon and mercator grids: this code finds the bigest grid(i0:i1,j0:j1) that * will fit within the lat/lon specifications * * for any other grid: it will find a grid(i0:i1,j0:j1) that is smaller * the code is has a mistake in the the selection * one could fix the code but that 
would cause problems for the users as * the output grid would change. * * assumes that thinned grids are not passed to small_domain(..) */ int small_domain(unsigned char **sec, double lonW, double lonE, double latS, double latN, int *ix0, int *ix1, int *iy0, int *iy1) { int i, j, k, flag, x0, x1, y0, y1; int X0, X1, Y0, Y1, flag0; int gdt, flag1; double e,w,n,s; double lat_pt, lon_pt; // double time0, time1; #ifdef DEBUG printf("\n>> small_domain: lon lat %f:%f %f:%f\n", lonW, lonE, latS, latN); #endif if (GDS_Scan_staggered(scan)) fatal_error("small_domain: does not work for staggered grids",""); if (lat == NULL || lon == NULL) { // no lat-lon information return full grid *ix0 = 1; *ix1 = nx; *iy0 = 1; *iy1 = ny; return 1; } if (lonE < lonW) lonE += 360.0; if (lonE-lonW > 360.0) fatal_error("small_domain: longitude range is greater than 360 degrees",""); if (lonW < 0.0) { lonW += 360.0; lonE += 360.0; } #ifdef DEBUG printf("\n>> small_domain: new lon lat %f:%f %f:%f\n", lonW, lonE, latS, latN); printf(">> small_domain: nx %d ny %d\n", nx, ny); #endif /* for latlon, mercator grid, only need to scan axis for X0,X1,Y0,Y1 */ gdt = code_table_3_1(sec); // fprintf(stderr,"gdt= %d\n", gdt); if ( (gdt == 0 || gdt == 10) && nx > 1 && ny > 1) { // already checked for thinned grids flag0 = flag1 = 0; X0 = 1; X1 = nx; Y0 = 1; Y1 = ny; w = e = s = n = -1; // time0 = omp_get_wtime(); for (i = 1; i <= nx; i++) { lon_pt = lon[i-1]; if (lon_pt < lonW) lon_pt += 360.0; if (lon_pt < lonW) lon_pt += 360.0; // lon_pt > lonW if (lon_pt <= lonE) { if (flag0 == 0) { X0 = X1 = i; w = e = lon_pt; flag0 = 1; } if (lon_pt > e) { e = lon_pt; X1 = i; } if (lon_pt < w) { w = lon_pt; X0 = i; } } } for (j = 1; j <= ny; j++) { lat_pt = lat[(j-1)*nx]; if ((lat_pt >= latS) && (lat_pt <= latN)) { if (flag1 == 0) { Y0 = Y1 = j; n = s = lat_pt; flag1 = 1; } if (lat_pt < s) { s = lat_pt; Y0 = j; } if (lat_pt > n) { n = lat_pt; Y1 = j; } } } if (X1 < X0 && cyclic(sec)) X1 += nx; // time1 = 
omp_get_wtime(); // fprintf(stderr,"small_domain fast time=%lf %d %d %d %d flags=%d %d\n", time1-time0, X0, X1, Y0, Y1, flag0, flag1); if (flag0 == 1 && flag1 == 1) { *ix0 = X0; *ix1 = X1; *iy0 = Y0; *iy1 = Y1; return 0; } else { *ix0 = 1; *ix1 = nx; *iy0 = 1; *iy1 = ny; return 1; } } flag0 = 0; // initial point on grid X0 = 1; X1 = nx; Y0 = 1; Y1 = ny; // time0 = omp_get_wtime(); #pragma omp parallel for private (i,j,k,flag,x0,x1,y0,y1,w,e,n,s,lat_pt,lon_pt) for (j = 1; j <= ny; j++) { x0 = x1 = y0 = y1 = w = e = s = n = -1; flag = 0; // initial point on latitude for (i = 1; i <= nx; i++) { k = (i-1) + (j-1)*nx; lon_pt = lon[k]; lat_pt = lat[k]; if (lon_pt < lonW) lon_pt += 360.0; if (lon_pt < lonW) lon_pt += 360.0; // lon_pt > lonW if ( (lon_pt <= lonE) && (lat_pt >= latS) && (lat_pt <= latN)) { if (flag == 0) { x0 = x1 = i; y0 = y1 = j; w = e = lon_pt; n = s = lat_pt; flag = 1; } if (lat_pt < s) { s = lat_pt; y0 = j; } else if (lat_pt > n) { n = lat_pt; y1 = j; } if (lon_pt > e) { e = lon_pt; x1 = i; } if (lon_pt < w) { w = lon_pt; x0 = i; } } } if (flag) { // found points if (x1 < x0 && cyclic(sec)) x1 += nx; #pragma omp critical { if (flag0 == 0) { X0 = x0; X1 = x1; Y0 = y0; Y1 = y1; flag0 = 1; } else { X0 = (x0 < X0) ? x0 : X0; X1 = (x1 > X1) ? x1 : X1; Y0 = (y0 < Y0) ? y0 : Y0; Y1 = (y1 > Y1) ? y1 : Y1; } } } } // time1 = omp_get_wtime(); // fprintf(stderr,"small_domain slow time=%lf %d %d %d %d flag0 %d\n", time1-time0, X0, X1, Y0, Y1, flag0); #ifdef DEBUG printf(">> small domain: flag0 %d flag %d\n", flag0, flag); #endif if (flag0 && X1 < X0) flag0 = 0; if (flag0 == 0) { *ix0 = 1; *ix1 = nx; *iy0 = 1; *iy1 = ny; return 1; } #ifdef DEBUG printf(">> small domain: ix %d:%d iy %d:%d\n", X0, X1, Y0, Y1); #endif *ix0 = X0; *ix1 = X1; *iy0 = Y0; *iy1 = Y1; return 0; }
/* ==================== api_test.c ==================== */
#include "ctest/ctest.h"

#include "splatt_test.h"
#include "../src/sptensor.h"

/* API includes */
#include "../include/splatt.h"

#ifdef _OPENMP
#include <omp.h>
#endif

/* Fixture shared by the `api` test suite: every dataset listed in
 * `datasets` (declared in splatt_test.h) is loaded as a sparse tensor. */
CTEST_DATA(api)
{
  splatt_idx_t ntensors;
  sptensor_t * tensors[MAX_DSETS];
};

/* Load all test tensors before each test in the suite. */
CTEST_SETUP(api)
{
  data->ntensors = sizeof(datasets) / sizeof(datasets[0]);
  for(idx_t i=0; i < data->ntensors; ++i) {
    data->tensors[i] = tt_read(datasets[i]);
  }
}

/* Release the tensors loaded by CTEST_SETUP. */
CTEST_TEARDOWN(api)
{
  for(idx_t i=0; i < data->ntensors; ++i) {
    tt_free(data->tensors[i]);
  }
}

/* Both option allocators must default NTHREADS to the OpenMP maximum
 * (or 1 in a serial build). */
CTEST(api, opts_alloc)
{
  double * opts = splatt_default_opts();
  ASSERT_NOT_NULL(opts);

  /* test defaults */
#ifdef _OPENMP
  ASSERT_EQUAL(omp_get_max_threads(), (int) opts[SPLATT_OPTION_NTHREADS]);
#else
  ASSERT_EQUAL(1, (int) opts[SPLATT_OPTION_NTHREADS]);
#endif
  splatt_free_opts(opts);

  splatt_global_opts * gopts = splatt_alloc_global_opts();
#ifdef _OPENMP
  ASSERT_EQUAL(omp_get_max_threads(), gopts->num_threads);
#else
  ASSERT_EQUAL(1, gopts->num_threads);
#endif
  splatt_free_global_opts(gopts);
}

/* When called from inside an active parallel region, the allocators must
 * default to a single thread (omp_get_max_threads() is 1 there). */
CTEST(api, par_opts_alloc)
{
  #pragma omp parallel num_threads(5)
  {
    double * opts = splatt_default_opts();
    ASSERT_EQUAL(1, (int) opts[SPLATT_OPTION_NTHREADS]);
    splatt_free_opts(opts);

    splatt_global_opts * gopts = splatt_alloc_global_opts();
    ASSERT_EQUAL(1, gopts->num_threads);
    splatt_free_global_opts(gopts);
  }
}

/* The runtime version query functions must agree with the compile-time
 * version macros. */
CTEST(api, version_major)
{
  ASSERT_EQUAL(SPLATT_VER_MAJOR, splatt_version_major());
}
CTEST(api, version_minor)
{
  ASSERT_EQUAL(SPLATT_VER_MINOR, splatt_version_minor());
}
CTEST(api, version_subminor)
{
  ASSERT_EQUAL(SPLATT_VER_SUBMINOR, splatt_version_subminor());
}

/*
 * Test dummy MPI functions.
 */
#ifndef SPLATT_USE_MPI
/* Serial builds must still provide comm_info with rank 0 of 1. */
CTEST(mpi_comm_info, alloc)
{
  splatt_comm_info * mpi = splatt_alloc_comm_info();
  ASSERT_NOT_NULL(mpi);

  ASSERT_EQUAL(0, mpi->world_rank);
  ASSERT_EQUAL(1, mpi->world_npes);

  /* don't crash */
  splatt_free_comm_info(mpi);
}
#endif
GB_binop__ne_fc64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): any change here belongs in Generator/GB_binop.* followed by
// regeneration; hand edits will be overwritten.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__ne_fc64)
// A.*B function (eWiseMult):       GB (_AemultB_01__ne_fc64)
// A.*B function (eWiseMult):       GB (_AemultB_02__ne_fc64)
// A.*B function (eWiseMult):       GB (_AemultB_03__ne_fc64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__ne_fc64)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__ne_fc64)
// C+=b function (dense accum):     GB (_Cdense_accumb__ne_fc64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__ne_fc64)
// C=scalar+B                       GB (_bind1st__ne_fc64)
// C=scalar+B'                      GB (_bind1st_tran__ne_fc64)
// C=A+scalar                       GB (_bind2nd__ne_fc64)
// C=A'+scalar                      GB (_bind2nd_tran__ne_fc64)

// C type:   bool
// A type:   GxB_FC64_t
// B,b type: GxB_FC64_t
// BinaryOp: cij = GB_FC64_ne (aij, bij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_BTYPE \
    GxB_FC64_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    GxB_FC64_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    GxB_FC64_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = (creal (GBX (Ax, pA, A_iso)) != 0) || (cimag (GBX (Ax, pA, A_iso)) != 0)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = (creal (GBX (Bx, pB, B_iso)) != 0) || (cimag (GBX (Bx, pB, B_iso)) != 0)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_FC64_ne (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_NE || GxB_NO_FC64 || GxB_NO_NE_FC64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__ne_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__ne_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body disabled by the generator: NE has no dense-accum kernel
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__ne_fc64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body disabled by the generator: NE has no dense-accum kernel
    #if 0
    {
        // get the scalar b for C += b, of type GxB_FC64_t
        GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__ne_fc64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__ne_fc64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__ne_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__ne_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__ne_fc64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__ne_fc64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    GxB_FC64_t   x = (*((GxB_FC64_t *) x_input)) ;
    GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        GxB_FC64_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_FC64_ne (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__ne_fc64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
    GxB_FC64_t   y = (*((GxB_FC64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        GxB_FC64_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_FC64_ne (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    GxB_FC64_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_FC64_ne (x, aij) ;                 \
}

GrB_Info GB (_bind1st_tran__ne_fc64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        GxB_FC64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        GxB_FC64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    GxB_FC64_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_FC64_ne (aij, y) ;                 \
}

GrB_Info GB (_bind2nd_tran__ne_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
reduction.c
/*
 * Demo: sum two integer arrays with an OpenMP parallel-for reduction,
 * timing the parallel region with gettimeofday().
 */
#include <stdio.h>
#include <stdlib.h>     /* malloc/free -- was missing: malloc was implicitly declared */
#include <omp.h>
#include <sys/time.h>
#include <unistd.h>

/* NOTE(review): 1024768 looks like a typo for 1048576 (1<<20) -- kept as-is
 * to preserve behavior; confirm intent. */
#define ARRAY_SIZE 1024768

int main(int argc, char *argv[])
{
    /* no malloc cast; sizeof *ptr tracks the element type.
     * The unused, leaked third array `c` from the original was removed. */
    int *a = malloc(sizeof *a * ARRAY_SIZE);
    int *b = malloc(sizeof *b * ARRAY_SIZE);
    if (a == NULL || b == NULL) {
        fprintf(stderr, "out of memory\n");
        free(a);        /* free(NULL) is a no-op */
        free(b);
        return 1;
    }

    /* Initialize the inputs: the original summed uninitialized memory,
     * which is undefined behavior and a nondeterministic result. */
    for (int i = 0; i < ARRAY_SIZE; ++i) {
        a[i] = i;
        b[i] = ARRAY_SIZE - i;
    }

    long long sum = 0;
    struct timeval tstart, tend;
    gettimeofday(&tstart, NULL);

    /* Each thread accumulates a private partial sum; OpenMP combines
     * the partials with + at the end of the loop. */
    #pragma omp parallel for reduction(+: sum)
    for (int i = 0; i < ARRAY_SIZE; ++i) {
        sum += a[i] + b[i];
    }

    gettimeofday(&tend, NULL);
    /* The original captured tstart/tend but never reported the elapsed time. */
    double elapsed = (double)(tend.tv_sec - tstart.tv_sec)
                   + (double)(tend.tv_usec - tstart.tv_usec) / 1e6;

    /* %lld matches long long; the original "%ld" was undefined behavior. */
    printf("Sum is :%lld\n", sum);
    printf("Elapsed: %f s\n", elapsed);

    free(a);
    free(b);
    return 0;
}
FeatureImportanceTree.h
/* * This software is distributed under BSD 3-clause license (see LICENSE file). * * Authors: Yuhui Liu */ #ifndef _FEATUREIMPORTANCETREE_H__ #define _FEATUREIMPORTANCETREE_H__ #include <shogun/lib/SGVector.h> #include <shogun/mathematics/linalg/LinalgNamespace.h> #include <shogun/multiclass/tree/TreeMachine.h> #include <shogun/multiclass/tree/TreeMachineNode.h> namespace shogun { /** @brief class FeatureImportanceTree, a mixin class for the tree which * needs computing feature importances. This class is derived from * TreeMachine<NodeType> and stores the feature importances * */ template <typename NodeType> class FeatureImportanceTree : public TreeMachine<NodeType> { protected: void compute_feature_importance( int32_t num_features, const std::shared_ptr<TreeMachineNode<NodeType>>& node) { m_feature_importances = SGVector<float64_t>(num_features); m_feature_importances.zero(); compute_feature_importance_impl(node); float64_t total_num_sample = node->data.total_weight; linalg::scale( m_feature_importances, m_feature_importances, 1.0 / total_num_sample); auto normalizer = linalg::sum(m_feature_importances); if (normalizer > 0) linalg::scale( m_feature_importances, m_feature_importances, 1.0 / normalizer); } SGVector<float64_t> m_feature_importances; public: virtual ~FeatureImportanceTree() = default; private: void compute_feature_importance_impl( const std::shared_ptr<TreeMachineNode<NodeType>>& node) { const auto& children = node->get_children(); m_feature_importances[node->data.attribute_id] += node->data.impurity * node->data.total_weight; #ifndef _MSC_VER #pragma omp parallel for shared(m_feature_importances) #endif for (auto i = 0; i < children.size(); i++) { const auto& child = children[i]; m_feature_importances[node->data.attribute_id] -= child->data.impurity * child->data.total_weight; if (child->data.attribute_id >= 0) { compute_feature_importance_impl(child); } } } }; } // namespace shogun #endif
GB_binop__bxor_int32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): any change here belongs in Generator/GB_binop.* followed by
// regeneration; hand edits will be overwritten.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bxor_int32)
// A.*B function (eWiseMult):       GB (_AemultB_08__bxor_int32)
// A.*B function (eWiseMult):       GB (_AemultB_02__bxor_int32)
// A.*B function (eWiseMult):       GB (_AemultB_04__bxor_int32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bxor_int32)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__bxor_int32)
// C+=b function (dense accum):     GB (_Cdense_accumb__bxor_int32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bxor_int32)
// C=scalar+B                       GB (_bind1st__bxor_int32)
// C=scalar+B'                      GB (_bind1st_tran__bxor_int32)
// C=A+scalar                       GB (_bind2nd__bxor_int32)
// C=A'+scalar                      GB (_bind2nd_tran__bxor_int32)

// C type:     int32_t
// A type:     int32_t
// A pattern?  0
// B type:     int32_t
// B pattern?  0

// BinaryOp: cij = (aij) ^ (bij)

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x) ^ (y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BXOR || GxB_NO_INT32 || GxB_NO_BXOR_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__bxor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bxor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bxor_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bxor_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are used only for eWiseUnion (values for entries present
    // in just one of A or B)
    int32_t alpha_scalar ;
    int32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bxor_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bxor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bxor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bxor_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bxor_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t   x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x) ^ (bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bxor_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t   y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij) ^ (y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    int32_t aij = GBX (Ax, pA, false) ;             \
    Cx [pC] = (x) ^ (aij) ;                         \
}

GrB_Info GB (_bind1st_tran__bxor_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    int32_t aij = GBX (Ax, pA, false) ;             \
    Cx [pC] = (aij) ^ (y) ;                         \
}

GrB_Info GB (_bind2nd_tran__bxor_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
convolution_3x3_pack4.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Transform a 3x3 convolution kernel into the Winograd F(6x6,3x3) domain
// (8x8 tiles) and interleave it for the pack4 (elempack=4) layout.
// The interleave loops iterate in steps of 4, so inch and outch are assumed
// to be multiples of 4 -- TODO confirm against the callers.
static void conv3x3s1_winograd63_transform_kernel_pack4_sse(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt)
{
    // winograd63 transform kernel
    Mat kernel_tm;
    kernel_tm.create(8 * 8, inch, outch);

    // G matrix of the Winograd kernel transform: U = G * g * G^T
    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel, transposed
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h : tmp = G * g  (8x3)
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v : kernel_tm = tmp * G^T  (8x8)
            for (int j = 0; j < 8; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 8; i++)
                {
                    kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 64-inch-outch
    // dst = pb-pa-inch/pa-64-outch/pb
    kernel_tm_pack4.create(inch / 4, 64, outch / 4, (size_t)4u * 4 * 4, 4 * 4);

    for (int q = 0; q + 3 < outch; q += 4)
    {
        Mat g0 = kernel_tm_pack4.channel(q / 4);

        for (int k = 0; k < 64; k++)
        {
            float* g00 = g0.row<float>(k);

            for (int p = 0; p + 3 < inch; p += 4)
            {
                // pack 4 input channels x 4 output channels contiguously
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 4; j++)
                    {
                        const float* k00 = kernel_tm.channel(q + j).row(p + i);

                        g00[0] = k00[k];

                        g00++;
                    }
                }
            }
        }
    }
}

// 3x3 stride-1 convolution via Winograd F(6x6,3x3), pack4 layout:
// pad input -> transform input tiles -> batched dot with transformed kernel
// -> transform output tiles -> crop back to the requested output size.
static void conv3x3s1_winograd63_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 6n+2 : each 8x8 input tile yields a 6x6 output tile
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 5) / 6 * 6;
    outh = (outh + 5) / 6 * 6;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tiles = outw / 6;
        int h_tiles = outh / 6;
        int tiles = w_tiles * h_tiles;

        bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
        conv3x3s1_winograd63_transform_input_pack4_sse(bottom_blob_bordered, bottom_blob_tm, opt);
    }
    // release the padded input before the dot stage to reduce peak memory
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    convolution_winograd_dot_pack4_sse(bottom_blob_tm, outch, kernel_tm, top_blob_tm, opt);
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        // no rounding padding was needed; write straight into top_blob
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
    }
    {
        conv3x3s1_winograd63_transform_output_pack4_sse(top_blob_tm, top_blob_bordered, bias, opt);
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}

// Transform a 3x3 kernel into the Winograd F(4x4,3x3) domain (6x6 tiles)
// and interleave it for the pack4 layout.  Same structure as the 63 variant.
static void conv3x3s1_winograd43_transform_kernel_pack4_sse(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt)
{
    // winograd43 transform kernel
    Mat kernel_tm(6 * 6, inch, outch);

    // G matrix of the F(4x4,3x3) kernel transform
    const float ktm[6][3] = {
        {1.0f / 4, 0.0f, 0.0f},
        {-1.0f / 6, -1.0f / 6, -1.0f / 6},
        {-1.0f / 6, 1.0f / 6, -1.0f / 6},
        {1.0f / 24, 1.0f / 12, 1.0f / 6},
        {1.0f / 24, -1.0f / 12, 1.0f / 6},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h : tmp = G * g  (6x3)
            float tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U : kernel_tm = tmp * G^T  (6x6)
            for (int j = 0; j < 6; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 36-inch-outch
    // dst = pb-pa-inch/pa-36-outch/pb
    kernel_tm_pack4.create(inch / 4, 36, outch / 4, (size_t)4u * 4 * 4, 4 * 4);

    for (int q = 0; q + 3 < outch; q += 4)
    {
        Mat g0 = kernel_tm_pack4.channel(q / 4);

        for (int k = 0; k < 36; k++)
        {
            float* g00 = g0.row<float>(k);

            for (int p = 0; p + 3 < inch; p += 4)
            {
                // pack 4 input channels x 4 output channels contiguously
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 4; j++)
                    {
                        const float* k00 = kernel_tm.channel(q + j).row(p + i);

                        g00[0] = k00[k];

                        g00++;
                    }
                }
            }
        }
    }
}

// 3x3 stride-1 convolution via Winograd F(4x4,3x3), pack4 layout.
// Identical pipeline to conv3x3s1_winograd63_pack4_sse but with 6x6 input
// tiles producing 4x4 output tiles (36 frequency planes).
static void conv3x3s1_winograd43_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 4n+2 : each 6x6 input tile yields a 4x4 output tile
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tiles = outw / 4;
        int h_tiles = outh / 4;
        int tiles = w_tiles * h_tiles;

        bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
        conv3x3s1_winograd43_transform_input_pack4_sse(bottom_blob_bordered, bottom_blob_tm, opt);
    }
    // release the padded input before the dot stage to reduce peak memory
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    convolution_winograd_dot_pack4_sse(bottom_blob_tm, outch, kernel_tm, top_blob_tm, opt);
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
    }
    {
        conv3x3s1_winograd43_transform_output_pack4_sse(top_blob_tm, top_blob_bordered, bias, opt);
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}

// Transform a 3x3 kernel into the Winograd F(2x2,3x3) domain (4x4 tiles)
// and interleave it for the pack4 layout.  Same structure as the 63 variant.
static void conv3x3s1_winograd23_transform_kernel_pack4_sse(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt)
{
    // winograd23 transform kernel
    Mat kernel_tm(4 * 4, inch, outch);

    // G matrix of the F(2x2,3x3) kernel transform
    const float ktm[4][3] = {
        {1.0f, 0.0f, 0.0f},
        {1.0f / 2, 1.0f / 2, 1.0f / 2},
        {1.0f / 2, -1.0f / 2, 1.0f / 2},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h : tmp = G * g  (4x3)
            float tmp[4][3];
            for (int i = 0; i < 4; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U : kernel_tm = tmp * G^T  (4x4)
            for (int j = 0; j < 4; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 4; i++)
                {
                    kernel_tm0[j * 4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 16-inch-outch
    // dst = pb-pa-inch/pa-16-outch/pb
    kernel_tm_pack4.create(inch / 4, 16, outch / 4, (size_t)4u * 4 * 4, 4 * 4);

    for (int q = 0; q + 3 < outch; q += 4)
    {
        Mat g0 = kernel_tm_pack4.channel(q / 4);

        for (int k = 0; k < 16; k++)
        {
            float* g00 = g0.row<float>(k);

            for (int p = 0; p + 3 < inch; p += 4)
            {
                // pack 4 input channels x 4 output channels contiguously
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 4; j++)
                    {
                        const float* k00 = kernel_tm.channel(q + j).row(p + i);

                        g00[0] = k00[k];

                        g00++;
                    }
                }
            }
        }
    }
}

// 3x3 stride-1 convolution via Winograd F(2x2,3x3), pack4 layout.
// Identical pipeline to the 63/43 drivers but with 4x4 input tiles producing
// 2x2 output tiles (16 frequency planes).
static void conv3x3s1_winograd23_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 2n+2 : each 4x4 input tile yields a 2x2 output tile
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 1) / 2 * 2;
    outh = (outh + 1) / 2 * 2;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tiles = outw / 2;
        int h_tiles = outh / 2;
        int tiles = w_tiles * h_tiles;

        bottom_blob_tm.create(tiles, 16, inch, elemsize, elempack, opt.workspace_allocator);
        conv3x3s1_winograd23_transform_input_pack4_sse(bottom_blob_bordered, bottom_blob_tm, opt);
    }
    // release the padded input before the dot stage to reduce peak memory
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    convolution_winograd_dot_pack4_sse(bottom_blob_tm, outch, kernel_tm, top_blob_tm, opt);
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
    }
    {
        conv3x3s1_winograd23_transform_output_pack4_sse(top_blob_tm, top_blob_bordered, bias, opt);
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
GB_unop__identity_uint64_fc32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function:  GB (_unop_apply__identity_uint64_fc32)
// op(A') function: GB (_unop_tran__identity_uint64_fc32)

// C type:   uint64_t
// A type:   GxB_FC32_t
// cast:     uint64_t cij = GB_cast_to_uint64_t ((double) crealf (aij))
// unaryop:  cij = aij

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: z = x)
#define GB_OP(z, x) \
    z = x ;

// casting: drop the imaginary part of the single-complex value, then
// saturate-cast the real part to uint64_t
#define GB_CAST(z, aij) \
    uint64_t z = GB_cast_to_uint64_t ((double) crealf (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint64_t z = GB_cast_to_uint64_t ((double) crealf (aij)) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx [p] = (uint64_t) crealf (Ax [p]) for all anz entries (or only those
// present in the bitmap Ab when A is bitmap), using nthreads OpenMP threads.
GrB_Info GB (_unop_apply__identity_uint64_fc32)
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is sparse, hypersparse, or full: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            uint64_t z = GB_cast_to_uint64_t ((double) crealf (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            uint64_t z = GB_cast_to_uint64_t ((double) crealf (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop itself lives in the included template
// GB_unop_transpose.c, which expands GB_CAST_OP defined above.
GrB_Info GB (_unop_tran__identity_uint64_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
DRB046-doall2-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* Two-dimensional array computation: Only one loop is associated with the omp for construct. The inner loop's loop iteration variable needs an explicit private() clause, otherwise it will be shared by default. */ #include "omprace.h" #include <omp.h> int a[100][100]; int main() { omprace_init(); int i,j; #pragma omp parallel for private(j) for (i=0;i<100;i++) for (j=0;j<100;j++) a[i][j]=a[i][j]+1; omprace_fini(); return 0; }
ccl_fftlog.c
#include <stdlib.h>
#include <math.h>
#include <complex.h>

#include <fftw3.h>

#include <gsl/gsl_sf_result.h>
#include <gsl/gsl_sf_gamma.h>

#include "ccl.h"

/****************************************************************

This is the famous FFTLog.

First implemented by the living legend Andrew Hamilton:

http://casa.colorado.edu/~ajsh/FFTLog/

This version is a C version that was adapted from the C++ version found
in Copter JWG Carlson, another big loss for the cosmology community.

https://github.com/jwgcarlson/Copter

I've transformed this from C++ to C99 as the lowest common denominator
and provided bindings for C++ and python.

These are the C++ bindings

*****************************************************************/

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

#ifndef M_LN2
#define M_LN2 0.69314718056
#endif

/* This code is FFTLog, which is described in arXiv:astro-ph/9905191 */

/* ln Gamma(z) for complex z, via GSL; returns ln|Gamma(z)| + i*arg(Gamma(z)). */
static double complex lngamma_fftlog(double complex z)
{
  gsl_sf_result lnr, phi;
  gsl_sf_lngamma_complex_e(creal(z), cimag(z), &lnr, &phi);
  return lnr.val + I*phi.val;
}

/* Gamma(z) = exp(ln Gamma(z)). */
static double complex gamma_fftlog(double complex z)
{
  return cexp(lngamma_fftlog(z));
}

/* Build a complex number from modulus r and phase phi
   (C99 <complex.h> has no equivalent of C++'s std::polar). */
static double complex polar(double r, double phi)
{
  return (r*cos(phi) + I*(r*sin(phi)));
}

/* Split ln Gamma(x + i y) into real part (*lnr) and argument (*arg);
   either output pointer may be NULL. */
static void lngamma_4(double x, double y, double* lnr, double* arg)
{
  double complex w = lngamma_fftlog(x+y*I);
  if(lnr) *lnr = creal(w);
  if(arg) *arg = cimag(w);
}

/* Nudge the product kr towards a nearby "low-ringing" value, following the
   standard FFTLog prescription.  lnr is written by both lngamma_4 calls but
   only the arguments argp/argm are used. */
static double goodkr(int N, double mu, double q, double L, double kr)
{
  double xp = (mu+1+q)/2;
  double xm = (mu+1-q)/2;
  double y = M_PI*N/(2*L);
  double lnr, argm, argp;
  lngamma_4(xp, y, &lnr, &argp);
  lngamma_4(xm, y, &lnr, &argm);
  double arg = log(2/kr) * N/L + (argp + argm)/M_PI;
  double iarg = round(arg);
  if(arg != iarg)
    kr *= exp((arg - iarg)*L/N);
  return kr;
}

/* Pre-compute the coefficients that appear in the FFTLog implementation of
 * the discrete Hankel transform.  The parameters N, mu, and q here are the
 * same as for the function fht().  The parameter L is defined (for whatever
 * reason) to be N times the logarithmic spacing of the input array, i.e.
 *   L = N * log(r[N-1]/r[0])/(N-1) */
static void compute_u_coefficients(int N, double mu, double q, double L, double kcrc, double complex *u)
{
  double y = M_PI/L;
  double k0r0 = kcrc * exp(-L);
  double t = -2*y*log(k0r0/2);

  if(q == 0) {
    /* unbiased transform: coefficients lie on the unit circle */
    double x = (mu+1)/2;
    double lnr, phi;
    for(int m = 0; m <= N/2; m++) {
      lngamma_4(x, m*y, &lnr, &phi);
      u[m] = polar(1.0,m*t + 2*phi);
    }
  }
  else {
    /* biased transform (q != 0) */
    double xp = (mu+1+q)/2;
    double xm = (mu+1-q)/2;
    double lnrp, phip, lnrm, phim;
    for(int m = 0; m <= N/2; m++) {
      lngamma_4(xp, m*y, &lnrp, &phip);
      lngamma_4(xm,-m*y, &lnrm, &phim);
      u[m] = polar(exp(q*M_LN2 + lnrp - lnrm), m*t + phip - phim);
    }
  }

  /* remaining coefficients follow from conjugate symmetry (real input) */
  for(int m = N/2+1; m < N; m++)
    u[m] = conj(u[N-m]);
  if((N % 2) == 0)
    u[N/2] = (creal(u[N/2]) + I*0.0);  /* force the Nyquist mode real */
}

/* Compute the discrete Hankel transform of the function a(r).  See the FFTLog
 * documentation (or the Fortran routine of the same name in the FFTLog
 * sources) for a description of exactly what this function computes.
 * If u is NULL, the transform coefficients will be computed anew and discarded
 * afterwards.  If you plan on performing many consecutive transforms, it is
 * more efficient to pre-compute the u coefficients.
*/
/* Discrete Hankel transform of npk tabulated functions pk[j][0..N-1] given on
 * the logarithmic grid k[0..N-1].  On success the output abscissae are
 * written to r[] and the transforms to xi[j][].  On failure *status is set
 * to a CCL error code.  If u is NULL the transform coefficients are computed
 * internally (and freed before returning). */
static void fht(int npk, int N, double *k, double **pk, double *r, double **xi,
                double dim, double mu, double q, double kcrc, int noring,
                double complex* u, int *status)
{
  /* NULL-initialize every resource released by the cleanup code below.
   * Previously a_tmp/b_tmp (and the plans) were left indeterminate when an
   * early allocation failed, and freeing them was undefined behavior. */
  fftw_plan forward_plan = NULL;
  fftw_plan reverse_plan = NULL;
  fftw_complex* a_tmp = NULL;
  fftw_complex* b_tmp = NULL;
  double complex* ulocal = NULL;

  double L = log(k[N-1]/k[0]) * N/(N-1.);

  if(u == NULL) {
    if(noring)
      kcrc = goodkr(N, mu, q, L, kcrc);
    ulocal = malloc(sizeof(complex double)*N);
    if(ulocal==NULL)
      *status=CCL_ERROR_MEMORY;
    if(*status == 0) {
      compute_u_coefficients(N, mu, q, L, kcrc, ulocal);
      u = ulocal;
    }
  }

  /* scratch arrays used only to create the FFTW plans */
  if(*status == 0) {
    a_tmp = fftw_alloc_complex(N);
    if(a_tmp==NULL) *status=CCL_ERROR_MEMORY;
  }
  if(*status == 0) {
    b_tmp = fftw_alloc_complex(N);
    if(b_tmp==NULL) *status=CCL_ERROR_MEMORY;
  }

  if(*status == 0) {
    /* Plans are created once and reused by every thread below via
     * fftw_execute_dft on thread-private arrays of the same size. */
    forward_plan = fftw_plan_dft_1d(N, a_tmp, b_tmp, -1, FFTW_ESTIMATE);
    reverse_plan = fftw_plan_dft_1d(N, b_tmp, b_tmp, +1, FFTW_ESTIMATE);
  }

  if(*status == 0) {
#pragma omp parallel default(none) \
  shared(npk, N, k, pk, r, xi, \
         dim, mu, q, kcrc, u, status, \
         forward_plan, reverse_plan, \
         L, ulocal)
    {
      int local_status = 0;

      double *prefac_pk=NULL;
      if(local_status == 0) {
        prefac_pk = malloc(N*sizeof(double));
        if(prefac_pk==NULL) local_status=CCL_ERROR_MEMORY;
      }
      double *prefac_xi=NULL;
      if(local_status == 0) {
        prefac_xi = malloc(N*sizeof(double));
        if(prefac_xi==NULL) local_status=CCL_ERROR_MEMORY;
      }
      fftw_complex* a=NULL;
      fftw_complex* b=NULL;
      if(local_status == 0) {
        a = fftw_alloc_complex(N);
        if(a==NULL) local_status=CCL_ERROR_MEMORY;
      }
      if(local_status == 0) {
        b = fftw_alloc_complex(N);
        if(b==NULL) local_status=CCL_ERROR_MEMORY;
      }

      if(local_status == 0) {
        for(int i = 0; i < N; i++)
          prefac_pk[i] = pow(k[i], dim/2-q);

        /* Compute k's corresponding to input r's.
         * NOTE(review): every thread writes identical values into the
         * shared array r.  The values agree, but this is formally a data
         * race -- confirm intent before restructuring (an omp single here
         * could deadlock threads whose allocations failed). */
        double k0r0 = kcrc * exp(-L);
        r[0] = k0r0/k[0];
        for(int n = 1; n < N; n++)
          r[n] = r[0] * exp(n*L/N);

        double one_over_2pi_dhalf = pow(2*M_PI,-dim/2);
        for(int i = 0; i < N; i++)
          prefac_xi[i] = one_over_2pi_dhalf * pow(r[i], -dim/2-q);
      }

      /* The worksharing construct must be encountered by EVERY thread in
       * the team (its implicit barrier would otherwise deadlock), so it is
       * deliberately NOT nested inside the local_status check; threads
       * whose allocations failed just skip each iteration's body. */
#pragma omp for
      for(int j = 0; j < npk; j++) {
        if(local_status != 0)
          continue;

        for(int i = 0; i < N; i++)
          a[i] = prefac_pk[i] * pk[j][i];

        /* Compute the convolution b = a*u using FFTs */
        fftw_execute_dft(forward_plan,a,b);
        for(int m = 0; m < N; m++)
          b[m] *= u[m] / (double)(N); // divide by N since FFTW doesn't normalize the inverse FFT
        fftw_execute_dft(reverse_plan,b,b);

        /* Reverse b array */
        double complex tmp;
        for(int n = 0; n < N/2; n++) {
          tmp = b[n];
          b[n] = b[N-n-1];
          b[N-n-1] = tmp;
        }

        for(int i = 0; i < N; i++)
          xi[j][i] = prefac_xi[i] * creal(b[i]);
      }

      /* free(NULL) is a no-op; guard the FFTW frees explicitly */
      free(prefac_pk);
      free(prefac_xi);
      if(a != NULL) fftw_free(a);
      if(b != NULL) fftw_free(b);

      if (local_status) {
#pragma omp atomic write
        *status = local_status;
      }
    } //end omp parallel
  }

  /* Release whatever was actually created.  The plans are now destroyed
   * even when a thread reported an error, fixing a resource leak. */
  if(forward_plan != NULL) fftw_destroy_plan(forward_plan);
  if(reverse_plan != NULL) fftw_destroy_plan(reverse_plan);
  free(ulocal);
  if(a_tmp != NULL) fftw_free(a_tmp);
  if(b_tmp != NULL) fftw_free(b_tmp);
}

/* 2D correlation function xi(th) of npk angular power spectra cl, tabulated
 * on l[0..N-1]; thin wrapper around fht with dim = 2 and order mu. */
void ccl_fftlog_ComputeXi2D(double mu, double epsilon,
                            int npk, int N, double *l, double **cl,
                            double *th, double **xi, int *status)
{
  fht(npk, N, l, cl, th, xi, 2., mu, epsilon, 1, 1, NULL, status);
}

/* 3D correlation function xi(r) of npk power spectra pk, tabulated on
 * k[0..N-1]; thin wrapper around fht with dim = 3 and order l + 1/2. */
void ccl_fftlog_ComputeXi3D(double l, double epsilon,
                            int npk, int N, double *k, double **pk,
                            double *r, double **xi, int *status)
{
  fht(npk, N, k, pk, r, xi, 3., l+0.5, epsilon, 1, 1, NULL, status);
}
GB_ewise_slice.c
//------------------------------------------------------------------------------ // GB_ewise_slice: slice the entries and vectors for an ewise operation //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // Constructs a set of tasks to compute C, for an element-wise operation // (GB_add, GB_emult, and GB_mask) that operates on two input matrices, // C=op(A,B). The mask is ignored for computing where to slice the work, but // it is sliced once the location has been found. #define GB_FREE_ALL \ { \ GB_FREE_MEMORY (TaskList, max_ntasks+1, sizeof (GB_task_struct)) ; \ GB_FREE_MEMORY (Cwork, Cnvec+1, sizeof (int64_t)) ; \ } #include "GB.h" //------------------------------------------------------------------------------ // GB_ewise_slice //------------------------------------------------------------------------------ GrB_Info GB_ewise_slice ( // output: GB_task_struct **p_TaskList, // array of structs, of size max_ntasks int *p_max_ntasks, // size of TaskList int *p_ntasks, // # of tasks constructed int *p_nthreads, // # of threads for eWise operation // input: const int64_t Cnvec, // # of vectors of C const int64_t *restrict Ch, // vectors of C, if hypersparse const int64_t *restrict C_to_M, // mapping of C to M const int64_t *restrict C_to_A, // mapping of C to A const int64_t *restrict C_to_B, // mapping of C to B bool Ch_is_Mh, // if true, then Ch == Mh; GB_add only const GrB_Matrix M, // mask matrix to slice (optional) const GrB_Matrix A, // matrix to slice const GrB_Matrix B, // matrix to slice GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (p_TaskList != 
NULL) ; ASSERT (p_max_ntasks != NULL) ; ASSERT (p_ntasks != NULL) ; ASSERT (p_nthreads != NULL) ; ASSERT_OK (GB_check (A, "A for ewise_slice", GB0)) ; ASSERT_OK (GB_check (B, "B for ewise_slice", GB0)) ; (*p_TaskList ) = NULL ; (*p_max_ntasks) = 0 ; (*p_ntasks ) = 0 ; (*p_nthreads ) = 1 ; int64_t *restrict Cwork = NULL ; //-------------------------------------------------------------------------- // determine # of threads to use //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; //-------------------------------------------------------------------------- // allocate the initial TaskList //-------------------------------------------------------------------------- // Allocate the TaskList to hold at least 2*ntask0 tasks. It will grow // later, if needed. Usually, 64*nthreads_max is enough, but in a few cases // fine tasks can cause this number to be exceeded. If that occurs, // TaskList is reallocated. // When the mask is present, it is often fastest to break the work up // into tasks, even when nthreads_max is 1. GB_task_struct *restrict TaskList = NULL ; int max_ntasks = 0 ; int ntasks0 = (M == NULL && nthreads_max == 1) ? 1 : (32 * nthreads_max) ; GB_REALLOC_TASK_LIST (TaskList, ntasks0, max_ntasks) ; //-------------------------------------------------------------------------- // check for quick return for a single task //-------------------------------------------------------------------------- if (Cnvec == 0 || ntasks0 == 1) { // construct a single coarse task that computes all of C TaskList [0].kfirst = 0 ; TaskList [0].klast = Cnvec-1 ; (*p_TaskList ) = TaskList ; (*p_max_ntasks) = max_ntasks ; (*p_ntasks ) = (Cnvec == 0) ? 
0 : 1 ; (*p_nthreads ) = 1 ; return (GrB_SUCCESS) ; } //-------------------------------------------------------------------------- // get A, B, and M //-------------------------------------------------------------------------- const int64_t vlen = A->vlen ; const int64_t *restrict Ap = A->p ; const int64_t *restrict Ai = A->i ; const int64_t *restrict Bp = B->p ; const int64_t *restrict Bi = B->i ; bool Ch_is_Ah = (Ch != NULL && A->h != NULL && Ch == A->h) ; bool Ch_is_Bh = (Ch != NULL && B->h != NULL && Ch == B->h) ; const int64_t *restrict Mp = NULL ; const int64_t *restrict Mi = NULL ; if (M != NULL) { Mp = M->p ; Mi = M->i ; // Ch_is_Mh is true if either true on input (for GB_add, which denotes // that Ch is a deep copy of M->h), or if Ch is a shallow copy of M->h. Ch_is_Mh = Ch_is_Mh || (Ch != NULL && M->h != NULL && Ch == M->h) ; } //-------------------------------------------------------------------------- // allocate workspace //-------------------------------------------------------------------------- GB_MALLOC_MEMORY (Cwork, Cnvec+1, sizeof (int64_t)) ; if (Cwork == NULL) { // out of memory GB_FREE_ALL ; return (GB_OUT_OF_MEMORY) ; } //-------------------------------------------------------------------------- // compute an estimate of the work for each vector of C //-------------------------------------------------------------------------- int nthreads_for_Cwork = GB_nthreads (Cnvec, chunk, nthreads_max) ; #pragma omp parallel for num_threads(nthreads_for_Cwork) schedule(static) for (int64_t k = 0 ; k < Cnvec ; k++) { //---------------------------------------------------------------------- // get the C(:,j) vector //---------------------------------------------------------------------- int64_t j = (Ch == NULL) ? 
k : Ch [k] ; //---------------------------------------------------------------------- // get the corresponding vector of A //---------------------------------------------------------------------- int64_t kA ; if (C_to_A != NULL) { // A is hypersparse and the C_to_A mapping has been created ASSERT (A->is_hyper || A->is_slice) ; kA = C_to_A [k] ; ASSERT (kA >= -1 && kA < A->nvec) ; if (kA >= 0) { ASSERT (j == ((A->is_hyper) ? A->h [kA] : (A->hfirst + kA))) ; } } else if (Ch_is_Ah) { // A is hypersparse, but Ch is a shallow copy of A->h kA = k ; ASSERT (j == A->h [kA]) ; } else { // A is standard ASSERT (!A->is_hyper) ; ASSERT (!A->is_slice) ; ASSERT (A->h == NULL) ; kA = j ; } //---------------------------------------------------------------------- // get the corresponding vector of B //---------------------------------------------------------------------- int64_t kB ; if (C_to_B != NULL) { // B is hypersparse and the C_to_B mapping has been created ASSERT (B->is_hyper || B->is_slice) ; kB = C_to_B [k] ; ASSERT (kB >= -1 && kB < B->nvec) ; if (kB >= 0) { ASSERT (j == ((B->is_hyper) ? B->h [kB] : (B->hfirst + kB))) ; } } else if (Ch_is_Bh) { // B is hypersparse, but Ch is a shallow copy of B->h kB = k ; ASSERT (j == B->h [kB]) ; } else { // B is standard ASSERT (!B->is_hyper) ; ASSERT (!B->is_slice) ; ASSERT (B->h == NULL) ; kB = j ; } //---------------------------------------------------------------------- // estimate the work for C(:,j) //---------------------------------------------------------------------- ASSERT (kA >= -1 && kA < A->nvec) ; ASSERT (kB >= -1 && kB < B->nvec) ; int64_t aknz = (kA < 0) ? 0 : (Ap [kA+1] - Ap [kA]) ; int64_t bknz = (kB < 0) ? 
0 : (Bp [kB+1] - Bp [kB]) ; Cwork [k] = aknz + bknz + 1 ; } //-------------------------------------------------------------------------- // replace Cwork with its cumulative sum //-------------------------------------------------------------------------- GB_cumsum (Cwork, Cnvec, NULL, nthreads_for_Cwork) ; double cwork = (double) Cwork [Cnvec] ; //-------------------------------------------------------------------------- // determine # of threads and tasks for the eWise operation //-------------------------------------------------------------------------- int nthreads = GB_nthreads (cwork, chunk, nthreads_max) ; ntasks0 = (M == NULL && nthreads == 1) ? 1 : (32 * nthreads) ; double target_task_size = cwork / (double) (ntasks0) ; target_task_size = GB_IMAX (target_task_size, chunk) ; int ntasks1 = cwork / target_task_size ; ntasks1 = GB_IMAX (ntasks1, 1) ; //-------------------------------------------------------------------------- // slice the work into coarse tasks //-------------------------------------------------------------------------- int64_t Coarse [ntasks1+1] ; GB_pslice (Coarse, Cwork, Cnvec, ntasks1) ; //-------------------------------------------------------------------------- // construct all tasks, both coarse and fine //-------------------------------------------------------------------------- int ntasks = 0 ; for (int t = 0 ; t < ntasks1 ; t++) { //---------------------------------------------------------------------- // coarse task computes C (:,k:klast) //---------------------------------------------------------------------- int64_t k = Coarse [t] ; int64_t klast = Coarse [t+1] - 1 ; if (k >= Cnvec) { //------------------------------------------------------------------ // all tasks have been constructed //------------------------------------------------------------------ break ; } else if (k < klast) { //------------------------------------------------------------------ // coarse task has 2 or more vectors 
//------------------------------------------------------------------ // This is a non-empty coarse-grain task that does two or more // entire vectors of C, vectors k:klast, inclusive. GB_REALLOC_TASK_LIST (TaskList, ntasks + 1, max_ntasks) ; TaskList [ntasks].kfirst = k ; TaskList [ntasks].klast = klast ; ntasks++ ; } else { //------------------------------------------------------------------ // coarse task has 0 or 1 vectors //------------------------------------------------------------------ // As a coarse-grain task, this task is empty or does a single // vector, k. Vector k must be removed from the work done by this // and any other coarse-grain task, and split into one or more // fine-grain tasks. for (int tt = t ; tt < ntasks1 ; tt++) { // remove k from the initial slice tt if (Coarse [tt] == k) { // remove k from task tt Coarse [tt] = k+1 ; } else { // break, k not in task tt break ; } } //------------------------------------------------------------------ // get the vector of C //------------------------------------------------------------------ int64_t j = (Ch == NULL) ? k : Ch [k] ; //------------------------------------------------------------------ // get the corresponding vector of A //------------------------------------------------------------------ int64_t kA ; if (C_to_A != NULL) { // A is hypersparse and the C_to_A mapping has been created kA = C_to_A [k] ; } else if (Ch_is_Ah) { // A is hypersparse, but Ch is a shallow copy of A->h kA = k ; } else { // A is standard kA = j ; } int64_t pA_start = (kA < 0) ? -1 : Ap [kA] ; int64_t pA_end = (kA < 0) ? 
-1 : Ap [kA+1] ; bool a_empty = (pA_end == pA_start) ; //------------------------------------------------------------------ // get the corresponding vector of B //------------------------------------------------------------------ int64_t kB ; if (C_to_B != NULL) { // B is hypersparse and the C_to_B mapping has been created kB = C_to_B [k] ; } else if (Ch_is_Bh) { // B is hypersparse, but Ch is a shallow copy of B->h kB = k ; } else { // B is standard kB = j ; } int64_t pB_start = (kB < 0) ? -1 : Bp [kB] ; int64_t pB_end = (kB < 0) ? -1 : Bp [kB+1] ; bool b_empty = (pB_end == pB_start) ; //------------------------------------------------------------------ // get the corresponding vector of M, if present //------------------------------------------------------------------ int64_t pM_start = -1 ; int64_t pM_end = -1 ; if (M != NULL) { int64_t kM ; if (C_to_M != NULL) { // M is hypersparse and the C_to_M mapping has been created kM = C_to_M [k] ; } else if (Ch_is_Mh) { // Ch is a deep or shallow copy of Mh kM = k ; } else { // M is standard kM = j ; } pM_start = (kM < 0) ? -1 : Mp [kM] ; pM_end = (kM < 0) ? 
-1 : Mp [kM+1] ; } bool m_empty = (pM_end == pM_start) ; //------------------------------------------------------------------ // determine the # of fine-grain tasks to create for vector k //------------------------------------------------------------------ double ckwork = Cwork [k+1] - Cwork [k] ; int nfine = ckwork / target_task_size ; nfine = GB_IMAX (nfine, 1) ; // make the TaskList bigger, if needed GB_REALLOC_TASK_LIST (TaskList, ntasks + nfine, max_ntasks) ; //------------------------------------------------------------------ // create the fine-grain tasks //------------------------------------------------------------------ if (nfine == 1) { //-------------------------------------------------------------- // this is a single coarse task for all of vector k //-------------------------------------------------------------- TaskList [ntasks].kfirst = k ; TaskList [ntasks].klast = k ; ntasks++ ; } else { //-------------------------------------------------------------- // slice vector k into nfine fine tasks //-------------------------------------------------------------- // first fine task starts at the top of vector k ASSERT (ntasks < max_ntasks) ; TaskList [ntasks].kfirst = k ; TaskList [ntasks].klast = -1 ; // this is a fine task TaskList [ntasks].pM = (m_empty) ? -1 : pM_start ; TaskList [ntasks].pA = (a_empty) ? -1 : pA_start ; TaskList [ntasks].pB = (b_empty) ? 
-1 : pB_start ; TaskList [ntasks].len = 0 ; // to be determined below ntasks++ ; int64_t ilast = 0, i = 0 ; for (int tfine = 1 ; tfine < nfine ; tfine++) { double target_work = ((nfine-tfine) * ckwork) / nfine ; int64_t pM, pA, pB ; GB_slice_vector (&i, &pM, &pA, &pB, pM_start, pM_end, Mi, // Mi NULL if M not present pA_start, pA_end, Ai, 0, // Ai always explicit list pB_start, pB_end, Bi, // Bi always explicit list vlen, target_work) ; // prior task ends at pM-1, pA-1, and pB-1 TaskList [ntasks-1].pM_end = pM ; TaskList [ntasks-1].pA_end = pA ; TaskList [ntasks-1].pB_end = pB ; // prior task handles indices ilast:i-1 TaskList [ntasks-1].len = i - ilast ; // this task starts at pM, pA, and pB ASSERT (ntasks < max_ntasks) ; TaskList [ntasks].kfirst = k ; TaskList [ntasks].klast = -1 ; // this is a fine task TaskList [ntasks].pM = pM ; TaskList [ntasks].pA = pA ; TaskList [ntasks].pB = pB ; // advance to the next task ntasks++ ; ilast = i ; } // Terminate the last fine task. ASSERT (ntasks <= max_ntasks) ; TaskList [ntasks-1].pM_end = (m_empty) ? -1 : pM_end ; TaskList [ntasks-1].pA_end = (a_empty) ? -1 : pA_end ; TaskList [ntasks-1].pB_end = (b_empty) ? -1 : pB_end ; TaskList [ntasks-1].len = vlen - i ; } } } ASSERT (ntasks <= max_ntasks) ; //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- GB_FREE_MEMORY (Cwork, Cnvec+1, sizeof (int64_t)) ; (*p_TaskList ) = TaskList ; (*p_max_ntasks) = max_ntasks ; (*p_ntasks ) = ntasks ; (*p_nthreads ) = nthreads ; return (GrB_SUCCESS) ; }
GB_reduce_panel.c
//------------------------------------------------------------------------------
// GB_reduce_panel: s=reduce(A), reduce a matrix to a scalar
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// Reduce a matrix to a scalar using a panel-based method for built-in
// operators. No typecasting is performed.

// This is an #include'd template body, not a standalone function: GB_ATYPE,
// GB_CTYPE, GB_PANEL, GB_ADD_ARRAY_TO_ARRAY, GB_ADD_ARRAY_TO_SCALAR,
// GB_HAS_TERMINAL, and GB_TERMINAL_VALUE are defined by the including file,
// and s, A, W_space, nthreads, and ntasks are in scope at the include site.
// The "panel" keeps GB_PANEL independent partial results so the inner
// reduction loops have no loop-carried dependence on a single accumulator.

{

    //--------------------------------------------------------------------------
    // get A
    //--------------------------------------------------------------------------

    const GB_ATYPE *restrict Ax = A->x ;
    int64_t anz = GB_NNZ (A) ;
    ASSERT (anz > 0) ;

    //--------------------------------------------------------------------------
    // typecast workspace
    //--------------------------------------------------------------------------

    // ctype W [ntasks] ; one partial result per parallel task
    GB_CTYPE *restrict W = (GB_CTYPE *) W_space ;

    //--------------------------------------------------------------------------
    // reduce A to a scalar
    //--------------------------------------------------------------------------

    if (nthreads == 1)
    {

        //----------------------------------------------------------------------
        // load the Panel with the first entries
        //----------------------------------------------------------------------

        GB_ATYPE Panel [GB_PANEL] ;
        int64_t first_panel_size = GB_IMIN (GB_PANEL, anz) ;
        for (int64_t k = 0 ; k < first_panel_size ; k++)
        {
            Panel [k] = Ax [k] ;
        }

        #if GB_HAS_TERMINAL
        int panel_count = 0 ;
        #endif

        //----------------------------------------------------------------------
        // reduce all entries to the Panel
        //----------------------------------------------------------------------

        for (int64_t p = GB_PANEL ; p < anz ; p += GB_PANEL)
        {
            if (p + GB_PANEL > anz)
            {
                // last partial panel: fold only the anz-p remaining entries
                for (int64_t k = 0 ; k < anz-p ; k++)
                {
                    // Panel [k] = op (Panel [k], Ax [p+k]) ;
                    GB_ADD_ARRAY_TO_ARRAY (Panel, k, Ax, p+k) ;
                }
            }
            else
            {
                // full panel
                for (int64_t k = 0 ; k < GB_PANEL ; k++)
                {
                    // Panel [k] = op (Panel [k], Ax [p+k]) ;
                    GB_ADD_ARRAY_TO_ARRAY (Panel, k, Ax, p+k) ;
                }
                #if GB_HAS_TERMINAL
                panel_count-- ;
                if (panel_count <= 0)
                {
                    // check for early exit only every 256 panels, so the
                    // terminal test does not slow down the common case
                    panel_count = 256 ;
                    int count = 0 ;
                    for (int64_t k = 0 ; k < GB_PANEL ; k++)
                    {
                        count += (Panel [k] == GB_TERMINAL_VALUE) ;
                    }
                    if (count > 0)
                    {
                        break ;
                    }
                }
                #endif
            }
        }

        //----------------------------------------------------------------------
        // s = reduce (Panel)
        //----------------------------------------------------------------------

        s = Panel [0] ;
        for (int64_t k = 1 ; k < first_panel_size ; k++)
        {
            // s = op (s, Panel [k]) ;
            GB_ADD_ARRAY_TO_SCALAR (s, Panel, k) ;
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // all tasks share a single early_exit flag
        //----------------------------------------------------------------------

        // If this flag gets set, all tasks can terminate early

        #if GB_HAS_TERMINAL
        bool early_exit = false ;
        #endif

        //----------------------------------------------------------------------
        // each thread reduces its own slice in parallel
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int tid = 0 ; tid < ntasks ; tid++)
        {

            //------------------------------------------------------------------
            // determine the work for this task
            //------------------------------------------------------------------

            // Task tid reduces Ax [pstart:pend-1] to the scalar W [tid]
            int64_t pstart, pend ;
            GB_PARTITION (pstart, pend, anz, tid, ntasks) ;
            // t is initialized here so W [tid] is defined even on early exit
            GB_ATYPE t = Ax [pstart] ;

            //------------------------------------------------------------------
            // skip this task if the terminal value has already been reached
            //------------------------------------------------------------------

            #if GB_HAS_TERMINAL
            // check if another task has called for an early exit
            bool my_exit ;

            #pragma omp atomic read
            my_exit = early_exit ;

            if (!my_exit)
            #endif

            //------------------------------------------------------------------
            // do the reductions for this task
            //------------------------------------------------------------------

            {

                //--------------------------------------------------------------
                // load the Panel with the first entries
                //--------------------------------------------------------------

                GB_ATYPE Panel [GB_PANEL] ;
                int64_t my_anz = pend - pstart ;
                int64_t first_panel_size = GB_IMIN (GB_PANEL, my_anz) ;
                for (int64_t k = 0 ; k < first_panel_size ; k++)
                {
                    Panel [k] = Ax [pstart + k] ;
                }

                #if GB_HAS_TERMINAL
                int panel_count = 0 ;
                #endif

                //--------------------------------------------------------------
                // reduce all entries to the Panel
                //--------------------------------------------------------------

                for (int64_t p = pstart + GB_PANEL ; p < pend ; p += GB_PANEL)
                {
                    if (p + GB_PANEL > pend)
                    {
                        // last partial panel
                        for (int64_t k = 0 ; k < pend-p ; k++)
                        {
                            // Panel [k] = op (Panel [k], Ax [p+k]) ;
                            GB_ADD_ARRAY_TO_ARRAY (Panel, k, Ax, p+k) ;
                        }
                    }
                    else
                    {
                        // full panel
                        for (int64_t k = 0 ; k < GB_PANEL ; k++)
                        {
                            // Panel [k] = op (Panel [k], Ax [p+k]) ;
                            GB_ADD_ARRAY_TO_ARRAY (Panel, k, Ax, p+k) ;
                        }
                        #if GB_HAS_TERMINAL
                        panel_count-- ;
                        if (panel_count <= 0)
                        {
                            // check for early exit only every 256 panels
                            panel_count = 256 ;
                            int count = 0 ;
                            for (int64_t k = 0 ; k < GB_PANEL ; k++)
                            {
                                count += (Panel [k] == GB_TERMINAL_VALUE) ;
                            }
                            if (count > 0)
                            {
                                break ;
                            }
                        }
                        #endif
                    }
                }

                //--------------------------------------------------------------
                // t = reduce (Panel)
                //--------------------------------------------------------------

                t = Panel [0] ;
                for (int64_t k = 1 ; k < first_panel_size ; k++)
                {
                    // t = op (t, Panel [k]) ;
                    GB_ADD_ARRAY_TO_SCALAR (t, Panel, k) ;
                }

                #if GB_HAS_TERMINAL
                if (t == GB_TERMINAL_VALUE)
                {
                    // tell all other tasks to exit early
                    #pragma omp atomic write
                    early_exit = true ;
                }
                #endif
            }

            //------------------------------------------------------------------
            // save the results of this task
            //------------------------------------------------------------------

            W [tid] = t ;
        }

        //----------------------------------------------------------------------
        // sum up the results of each slice using a single thread
        //----------------------------------------------------------------------

        s = W [0] ;
        for (int tid = 1 ; tid < ntasks ; tid++)
        {
            // s = op (s, W [tid]), no typecast
            GB_ADD_ARRAY_TO_SCALAR (s, W, tid) ;
        }
    }
}
mkl_util.h
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef TENSORFLOW_CORE_UTIL_MKL_UTIL_H_ #define TENSORFLOW_CORE_UTIL_MKL_UTIL_H_ #ifdef INTEL_MKL #include <string> #include <vector> #include <unordered_map> #include <utility> #include "mkl_dnn.h" #include "mkl_dnn_types.h" #include "mkl_service.h" #include "mkl_trans.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/graph/mkl_graph_util.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/gtl/array_slice.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/util/padding.h" #include "tensorflow/core/util/tensor_format.h" #ifndef INTEL_MKL_ML #include "mkldnn.hpp" using mkldnn::engine; using mkldnn::memory; using mkldnn::padding_kind; using mkldnn::primitive; using mkldnn::reorder; #endif #ifdef _WIN32 typedef unsigned int uint; #endif // The file contains a number of utility classes and functions used by MKL // enabled kernels namespace tensorflow { // This class encapsulates all the meta data that is associated with an MKL // tensor. A tensor is an MKL tensor if it was created as the result of an // MKL operation, and did not go through a conversion to a standard // Tensorflow tensor. 
// Dimension indices used by the (deprecated) MKL-ML layout: 'W' is index 0
// (innermost) and 'N' is index 3 (outermost) — opposite of TF's NHWC/NCHW.
typedef enum { W = 0, H = 1, C = 2, N = 3 } MklDims;

// Dimension indices used by MKL-DNN. Dim_O/Dim_I alias Dim_N/Dim_C and are
// presumably intended for filter (output/input-channel) tensors — note they
// share numeric values with Dim_N/Dim_C.
typedef enum {
  Dim_N = 0,
  Dim_C = 1,
  Dim_H = 2,
  Dim_W = 3,
  Dim_O = 0,
  Dim_I = 1
} MklDnnDims;

// Shape/layout metadata for an MKL-ML tensor: the MKL layout, the equivalent
// Tensorflow layout, and the TF->MKL dimension-order map. Owns the arrays and
// layouts it creates and releases them in the destructor.
class MklShape {
 public:
  MklShape() {}
  TF_DISALLOW_COPY_AND_ASSIGN(MklShape);  // Cannot copy

  // Frees owned size/stride/dim-map arrays and deletes both MKL layouts.
  ~MklShape() {
    if (sizes_) delete[] sizes_;
    if (strides_) delete[] strides_;
    if (mklLayout_) CHECK_EQ(dnnLayoutDelete_F32(mklLayout_), E_SUCCESS);
    if (tfLayout_) CHECK_EQ(dnnLayoutDelete_F32(tfLayout_), E_SUCCESS);
    if (tf_to_mkl_dim_map_) delete[] tf_to_mkl_dim_map_;
  }

  const bool IsMklTensor() const { return isMklTensor_; }

  void SetMklTensor(const bool isMklTensor) { isMklTensor_ = isMklTensor; }

  void SetDimensions(const size_t dimension) { dimension_ = dimension; }

  // Takes ownership of an already-created MKL layout.
  void SetMklLayout(dnnLayout_t mklLayout) { mklLayout_ = mklLayout; }

  // Creates the MKL layout from a dnn primitive and one of its resource types
  // (e.g. dnnResourceSrc). CHECK-fails on MKL error.
  void SetMklLayout(const void* primitive, size_t resourceType) {
    CHECK_EQ(
        dnnLayoutCreateFromPrimitive_F32(&mklLayout_, (dnnPrimitive_t)primitive,
                                         (dnnResourceType_t)resourceType),
        E_SUCCESS);
  }

  // Records the Tensorflow-side layout: copies sizes/strides into owned
  // arrays and creates the corresponding plain MKL layout for them.
  void SetTfLayout(const size_t dimension, const size_t* sizes,
                   const size_t* strides) {
    dimension_ = dimension;
    if (dimension > 0) {  // MKL doesn't support zero dimension tensors
      sizes_ = new size_t[dimension];
      strides_ = new size_t[dimension];
      for (int ii = 0; ii < dimension; ii++) {
        sizes_[ii] = sizes[ii];
        strides_[ii] = strides[ii];
      }
      CHECK_EQ(dnnLayoutCreate_F32(&tfLayout_, dimension, sizes, strides),
               E_SUCCESS);
    }
  }

  // Default case - MKL dim ordering is opposite of TF dim ordering
  // MKL -> (DIMS-1)...0 where (DIMS-1) is outermost dim and 0 is innermost dim
  // TF -> 0...(DIMS-1) where 0 is outermost dim and (DIMS-1) is innermost dim
  // For layers that rely on data_format semantics (conv, pooling etc.)
  // or operate only on certain dimensions (relu, concat, split etc.),
  // Mkl APIs might require us to reorder these dimensions. In such cases,
  // kernels should explicitly set this map
  void SetTfDimOrder(const size_t dimension) {
    CHECK(dimension == dimension_);
    if (tf_to_mkl_dim_map_ == nullptr) {
      tf_to_mkl_dim_map_ = new size_t[dimension];
    }
    for (size_t ii = 0; ii < dimension; ii++) {
      // default map simply reverses the dimension order
      tf_to_mkl_dim_map_[ii] = dimension - (ii + 1);
    }
  }

  // Sets an explicit TF->MKL dimension map supplied by the caller.
  void SetTfDimOrder(const size_t dimension, const size_t* tf_to_mkl_dim_map) {
    CHECK(dimension == dimension_);
    if (tf_to_mkl_dim_map_ == nullptr) {
      tf_to_mkl_dim_map_ = new size_t[dimension];
    }
    for (size_t ii = 0; ii < dimension; ii++) {
      tf_to_mkl_dim_map_[ii] = tf_to_mkl_dim_map[ii];
    }
  }

  // 4-D only: derives the TF->MKL dimension map from a TensorFormat
  // (NHWC / NCHW).
  void SetTfDimOrder(const size_t dimension, TensorFormat data_format) {
    CHECK_EQ(dimension, 4);
    CHECK(dimension == dimension_);
    if (tf_to_mkl_dim_map_ == nullptr) {
      tf_to_mkl_dim_map_ = new size_t[dimension];
    }
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDims::W;
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDims::H;
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDims::C;
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDims::N;
  }

  const dnnLayout_t GetMklLayout() const { return mklLayout_; }
  const dnnLayout_t GetTfLayout() const { return tfLayout_; }
  // Layout currently in effect: MKL layout for MKL tensors, TF layout otherwise.
  const dnnLayout_t GetCurLayout() const {
    return isMklTensor_ ? mklLayout_ : tfLayout_;
  }
  size_t GetDimension() const { return dimension_; }
  const size_t* GetSizes() const { return sizes_; }
  // Size of dimension 'index' in MKL order.
  int64 dim_size(int index) const { return sizes_[index]; }
  // Size of dimension 'index' in TF order (goes through the TF->MKL map).
  int64 tf_dim_size(int index) const {
    return sizes_[tf_to_mkl_dim_map_[index]];
  }
  const size_t* GetStrides() const { return strides_; }
  const size_t* GetTfToMklDimMap() const { return tf_to_mkl_dim_map_; }
  size_t tf_dim_idx(int index) const { return tf_to_mkl_dim_map_[index]; }

  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Channel dimension.
  bool IsMklChannelDim(int d) const { return tf_dim_idx(d) == MklDims::C; }
  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Batch dimension.
  bool IsMklBatchDim(int d) const { return tf_dim_idx(d) == MklDims::N; }
  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Width dimension.
  bool IsMklWidthDim(int d) const { return tf_dim_idx(d) == MklDims::W; }
  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Height dimension.
  bool IsMklHeightDim(int d) const { return tf_dim_idx(d) == MklDims::H; }

  // Check if the TF-Mkl dimension ordering map specifies if the input
  // tensor is in NCHW format.
  bool IsTensorInNCHWFormat() const {
    TensorFormat data_format = FORMAT_NCHW;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  // Check if the TF-Mkl dimension ordering map specifies if the input
  // tensor is in NHWC format.
  bool IsTensorInNHWCFormat() const {
    TensorFormat data_format = FORMAT_NHWC;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  // Converts the flat data from the current layout (MKL or TF) into
  // 'targetLayout', writing the converted data into 'output'.
  // CHECK-fails on any MKL conversion error.
  void GetConvertedFlatData(dnnLayout_t targetLayout, void* input,
                            void* output) const {
    dnnLayout_t curLayout;
    if (isMklTensor_)
      curLayout = mklLayout_;
    else
      curLayout = tfLayout_;
    dnnPrimitive_t convert;
    CHECK_EQ(dnnConversionCreate_F32(&convert, curLayout, targetLayout),
             E_SUCCESS);
    CHECK_EQ(dnnConversionExecute_F32(convert, input, output), E_SUCCESS);
    CHECK_EQ(dnnDelete_F32(convert), E_SUCCESS);
  }

  // The following methods are used for serializing and de-serializing the
  // contents of the mklshape object.
  // The data is serialized in this order
  // isMklTensor_
  // dimension_
  // sizes_
  // strides_
  // mklLayout_
  // tfLayout_
  // tf_to_mkl_dim_map_

#define SIZE_OF_MKL_DNN_BUF \
  (dnnLayoutSerializationBufferSize_F32())  // Size of buffer needed to
                                            // serialize dnn_layout pointer

// Size of buffer to hold the serialized object, the size is computed as
// follows sizeof(isMklTensor_) + sizeof(dimension_) + sizeof(sizes_) +
// sizeof(strides_)
// + sizeof(mklLayout_ buffer) + sizeof(tfLayout_ buffer)
// + sizeof(tf_to_mkl_dim_map_)
#define SIZE_OF_MKL_SERIAL_DATA(dims) \
  (2 * sizeof(size_t) + 3 * dims * sizeof(size_t) + 2 * SIZE_OF_MKL_DNN_BUF)

// First we need to define some macro for offsets into the serial buffer where
// different elements of Mklshape is written/read from
#define IS_MKL_TENSOR_OFFSET 0
// Location from start of buffer where isMklTensor_ is serialized
#define DIMS_OFFSET \
  (IS_MKL_TENSOR_OFFSET + sizeof(size_t))  // Location of dimension_
// Location of sizes. Note dim is not used here, left here
// to make macros consistent.
#define SIZES_OFFSET(dims) (DIMS_OFFSET + sizeof(size_t))
#define STRIDES_OFFSET(dims) \
  (SIZES_OFFSET(dims) + dims * sizeof(size_t))  // Location of strides
#define MKL_LAYOUT_OFFSET(dims) \
  (STRIDES_OFFSET(dims) + dims * sizeof(size_t))  // Location of mklLayout_
#define TF_LAYOUT_OFFSET(dims) \
  (MKL_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF)  // Location of tfLayout_
// Location of tf_to_mkl_dim_map_
#define TF_TO_MKL_DIM_MAP_OFFSET(dims) \
  (TF_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF)

  // TODO (agramesh1) make sure to create a const to share with rewrite pass id:4141
  // https://github.com/imdone/tensorflow/issues/4139
  // for min size of MKL metadata tensor.

  // Reconstructs this object from a buffer written by SerializeMklShape.
  // Only reads past the isMklTensor_ flag when the tensor is an MKL tensor.
  void DeSerializeMklShape(const unsigned char* buf, size_t buf_size) {
    CHECK(buf_size >= sizeof(size_t)) << "Bufsize too small in DeSerialize";
    // Make sure buffer holds at least isMklTensor_
    isMklTensor_ =
        *reinterpret_cast<const size_t*>(buf + IS_MKL_TENSOR_OFFSET) != 0;

    if (isMklTensor_) {  // If it is an MKL Tensor then read the rest
      dimension_ = *(reinterpret_cast<const size_t*>(buf + DIMS_OFFSET));
      CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_))
          << "Bufsize too small in DeSerialize";
      sizes_ = new size_t[dimension_];
      strides_ = new size_t[dimension_];
      tf_to_mkl_dim_map_ = new size_t[dimension_];
      for (int i = 0; i < dimension_; i++) {
        sizes_[i] =
            reinterpret_cast<const size_t*>(buf + SIZES_OFFSET(dimension_))[i];
        strides_[i] = reinterpret_cast<const size_t*>(
            buf + STRIDES_OFFSET(dimension_))[i];
        tf_to_mkl_dim_map_[i] = reinterpret_cast<const size_t*>(
            buf + TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i];
      }
      CHECK_EQ(dnnLayoutDeserialize_F32(&mklLayout_,
                                        buf + MKL_LAYOUT_OFFSET(dimension_)),
               E_SUCCESS);
      CHECK_EQ(dnnLayoutDeserialize_F32(&tfLayout_,
                                        buf + TF_LAYOUT_OFFSET(dimension_)),
               E_SUCCESS);
    }
  }

  // Writes this object into 'buf' using the offset macros above; the inverse
  // of DeSerializeMklShape.
  void SerializeMklShape(unsigned char* buf, size_t buf_size) const {
    CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_))
        << "Bufsize too small to Serialize";
    *reinterpret_cast<size_t*>(buf + IS_MKL_TENSOR_OFFSET) =
        isMklTensor_ ? 1 : 0;
    if (isMklTensor_) {
      *(reinterpret_cast<size_t*>(buf + DIMS_OFFSET)) = dimension_;
      for (int i = 0; i < dimension_; i++) {
        reinterpret_cast<size_t*>(buf + SIZES_OFFSET(dimension_))[i] =
            sizes_[i];
        reinterpret_cast<size_t*>(buf + STRIDES_OFFSET(dimension_))[i] =
            strides_[i];
        reinterpret_cast<size_t*>(buf +
                                  TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i] =
            tf_to_mkl_dim_map_[i];
      }
      CHECK_EQ(dnnLayoutSerialize_F32(mklLayout_,
                                      buf + MKL_LAYOUT_OFFSET(dimension_)),
               E_SUCCESS);
      CHECK_EQ(
          dnnLayoutSerialize_F32(tfLayout_, buf + TF_LAYOUT_OFFSET(dimension_)),
          E_SUCCESS);
    }
  }

 private:
  bool isMklTensor_ =
      false;  // Flag to indicate if the tensor is an MKL tensor or not
  dnnLayout_t mklLayout_ = nullptr;  // Pointer to the MKL layout
  dnnLayout_t tfLayout_ = nullptr;   // Pointer to layout of corresponding
  // Tensorflow tensor, used when conversion from MKL to standard tensor
  size_t dimension_ = 0;
  size_t* sizes_ = nullptr;    // Required by MKL for conversions
  size_t* strides_ = nullptr;  // Required by MKL for conversions
  size_t* tf_to_mkl_dim_map_ =
      nullptr;  // TF dimension corresponding to this MKL dimension
};

#ifndef INTEL_MKL_ML

// Forward decl
TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format);
memory::dims CalculateTFStrides(const memory::dims& dims_tf_order);
memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
                                        const memory::dims& strides,
                                        memory::data_type dtype);

// MKL-DNN counterpart of MklShape. All state lives in a single POD struct
// (MklShapeData) so serialization is a plain memcpy of the struct — note this
// makes the serialized form sensitive to the struct's in-memory layout.
class MklDnnShape {
 private:
  typedef struct {
    /// Flag to indicate if the tensor is an MKL tensor or not
    bool is_mkl_tensor_ = false;
    /// Number of dimensions in Tensorflow format
    size_t dimension_ = 0;
    /// Required by MKLDNN for conversions
    mkldnn_dims_t sizes_;  // Required by MKL for conversions
    memory::format tf_data_format_ = memory::format::format_undef;
    memory::data_type T_ = memory::data_type::data_undef;
    // MKL layout
    mkldnn_memory_desc_t mkl_md_;
    /// TF dimension corresponding to this MKL dimension
    mkldnn_dims_t map_;
  } MklShapeData;
  MklShapeData data_;

  // element type of the mkldnn_dims_t array (continues on the next line)
  typedef
std::remove_extent<mkldnn_dims_t>::type mkldnn_dim_t; #define INVALID_DIM_SIZE -1 public: MklDnnShape() { for (size_t i = 0; i < sizeof(data_.sizes_) / sizeof(data_.sizes_[0]); ++i) { data_.sizes_[i] = -1; } for (size_t i = 0; i < sizeof(data_.map_) / sizeof(data_.map_[0]); ++i) { data_.map_[i] = -1; } } ~MklDnnShape() {} TF_DISALLOW_COPY_AND_ASSIGN(MklDnnShape); // Cannot copy /// Helper function to compare memory::desc objects for MklDnn. /// May be this should go into MklDnn directly. inline bool CompareMklDnnLayouts(const memory::desc& md1, const memory::desc& md2) const { mkldnn_memory_desc_t mdd1 = md1.data; mkldnn_memory_desc_t mdd2 = md2.data; const char* d1 = reinterpret_cast<const char*>(&mdd1); const char* d2 = reinterpret_cast<const char*>(&mdd2); size_t md_size = sizeof(mdd1); for (size_t i = 0; i < md_size; i++) { if (*d1++ != *d2++) { return false; } } return true; } /// Equality function for MklDnnShape objects /// @return true if both are equal; false otherwise. inline bool operator==(const MklDnnShape& input_shape) const { if (this->IsMklTensor() != input_shape.IsMklTensor()) { return false; } // If input tensors are in Mkl layout, then we check for dimensions and // sizes. if (this->IsMklTensor()) { return this->GetTfShape() == input_shape.GetTfShape() && CompareMklDnnLayouts(this->GetMklLayout(), input_shape.GetMklLayout()); } return true; } /// Equality operator for MklDnnShape and TFShape. 
/// Returns: true if TF shapes for both are the same, false otherwise inline bool operator==(const TensorShape& input_shape) const { if (!this->IsMklTensor()) { return false; } return this->GetTfShape() == input_shape; } inline const bool IsMklTensor() const { return data_.is_mkl_tensor_; } inline void SetMklTensor(bool is_mkl_tensor) { data_.is_mkl_tensor_ = is_mkl_tensor; } inline void SetDimensions(const size_t dimension) { data_.dimension_ = dimension; } inline size_t GetDimension(char dimension) const { int index = GetMklDnnTensorDimIndex(dimension); CHECK(index >= 0 && index < this->GetDimension()) << "Invalid index from the dimension: " << index << ", " << dimension; return this->DimSize(index); } inline int32 GetMklDnnTensorDimIndex(char dimension) const { switch (dimension) { case 'N': return MklDnnDims::Dim_N; case 'C': return MklDnnDims::Dim_C; case 'H': return MklDnnDims::Dim_H; case 'W': return MklDnnDims::Dim_W; default: LOG(FATAL) << "Invalid dimension: " << dimension; return -1; // Avoid compiler warning about missing return value } } inline size_t GetDimension() const { return data_.dimension_; } inline const int* GetSizes() const { return reinterpret_cast<const int*>(&data_.sizes_[0]); } // Returns an mkldnn::memory::dims object that contains the sizes of this // MklDnnShape object. inline memory::dims GetSizesAsMklDnnDims() const { memory::dims retVal; if (data_.is_mkl_tensor_) { size_t dimensions = sizeof(data_.sizes_) / sizeof(data_.sizes_[0]); for (size_t i = 0; i < dimensions; i++) { if (data_.sizes_[i] != INVALID_DIM_SIZE) retVal.push_back(data_.sizes_[i]); } } else { CHECK_EQ(data_.is_mkl_tensor_, true); } return retVal; } inline int64 DimSize(int index) const { CHECK_LT(index, sizeof(data_.sizes_) / sizeof(data_.sizes_[0])); return data_.sizes_[index]; } /// Return TensorShape that describes the Tensorflow shape of the tensor /// represented by this MklShape. 
inline TensorShape GetTfShape() const { CHECK_EQ(data_.is_mkl_tensor_, true); std::vector<int32> shape(data_.dimension_, -1); if (data_.tf_data_format_ != memory::format::blocked) { for (size_t idx = 0; idx < data_.dimension_; ++idx) { shape[idx] = data_.sizes_[TfDimIdx(idx)]; } } else { // If Tensorflow shape is in Blocked format, then we don't have dimension // map for it. So we just create Tensorflow shape from sizes in the // specified order. for (size_t idx = 0; idx < data_.dimension_; ++idx) { shape[idx] = data_.sizes_[idx]; } } TensorShape ts; bool ret = TensorShapeUtils::MakeShape(shape, &ts).ok(); CHECK_EQ(ret, true); return ts; } inline void SetElemType(memory::data_type dt) { data_.T_ = dt; } inline const memory::data_type GetElemType() { return data_.T_; } inline void SetMklLayout(memory::primitive_desc* pd) { CHECK_NOTNULL(pd); data_.mkl_md_ = pd->desc().data; } inline void SetMklLayout(memory::desc* md) { CHECK_NOTNULL(md); data_.mkl_md_ = md->data; } inline const memory::desc GetMklLayout() const { return memory::desc(data_.mkl_md_); } inline memory::format GetTfDataFormat() const { return data_.tf_data_format_; } /// We don't create primitive_descriptor for TensorFlow layout now. /// We use lazy evaluation and create it only when needed. Input format can /// also be Blocked format. inline void SetTfLayout(size_t dims, const memory::dims& sizes, memory::format format) { CHECK_EQ(dims, sizes.size()); data_.dimension_ = dims; for (size_t ii = 0; ii < dims; ii++) { data_.sizes_[ii] = sizes[ii]; } data_.tf_data_format_ = format; if (format != memory::format::blocked) { SetTfDimOrder(dims, format); } } inline const memory::desc GetTfLayout() const { memory::dims dims; for (size_t ii = 0; ii < data_.dimension_; ii++) { dims.push_back(data_.sizes_[ii]); } // Create Blocked memory desc if input TF format was set like that. 
if (data_.tf_data_format_ == memory::format::blocked) { auto strides = CalculateTFStrides(dims); return CreateBlockedMemDescHelper(dims, strides, data_.T_); } else { return memory::desc(dims, data_.T_, data_.tf_data_format_); } } inline const memory::desc GetCurLayout() const { return IsMklTensor() ? GetMklLayout() : GetTfLayout(); } // nhasabni - I've removed SetTfDimOrder that was setting default order in // case of MKL-ML. We don't need a case of default dimension order because // when an operator that does not get data_format attribute gets all inputs // in Tensorflow format, it will produce output in Tensorflow format. inline void SetTfDimOrder(const size_t dimension, const mkldnn_dims_t map) { CHECK(dimension == data_.dimension_); for (size_t ii = 0; ii < dimension; ii++) { data_.map_[ii] = map[ii]; } } inline void SetTfDimOrder(const size_t dimension, TensorFormat data_format) { // TODO (nhasabni): Why do we restrict this to 4D? id:3491 // https://github.com/imdone/tensorflow/issues/3490 CHECK_EQ(dimension, 4); CHECK(dimension == data_.dimension_); data_.map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDnnDims::Dim_W; data_.map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDnnDims::Dim_H; data_.map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDnnDims::Dim_C; data_.map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDnnDims::Dim_N; } inline void SetTfDimOrder(const size_t dimension, memory::format format) { TensorFormat data_format = MklDnnDataFormatToTFDataFormat(format); SetTfDimOrder(dimension, data_format); } inline const mkldnn_dim_t* GetTfToMklDimMap() const { return &data_.map_[0]; } inline size_t TfDimIdx(int index) const { return data_.map_[index]; } inline int64 TfDimSize(int index) const { return data_.sizes_[TfDimIdx(index)]; } /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd' /// corresponds to MKL's Channel dimension. 
inline bool IsMklChannelDim(int d) const { return TfDimIdx(d) == MklDnnDims::Dim_C; } /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd' /// corresponds to MKL's Batch dimension. inline bool IsMklBatchDim(int d) const { return TfDimIdx(d) == MklDnnDims::Dim_N; } /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd' /// corresponds to MKL's Width dimension. inline bool IsMklWidthDim(int d) const { return TfDimIdx(d) == MklDnnDims::Dim_W; } /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd' /// corresponds to MKL's Height dimension. inline bool IsMklHeightDim(int d) const { return TfDimIdx(d) == MklDnnDims::Dim_H; } /// Check if the TF-Mkl dimension ordering map specifies if the input /// tensor is in NCHW format. inline bool IsTensorInNCHWFormat() const { TensorFormat data_format = FORMAT_NCHW; return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) && IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) && IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) && IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W'))); } /// Check if the TF-Mkl dimension ordering map specifies if the input /// tensor is in NHWC format. inline bool IsTensorInNHWCFormat() const { TensorFormat data_format = FORMAT_NHWC; return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) && IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) && IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) && IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W'))); } /// The following methods are used for serializing and de-serializing the /// contents of the mklshape object. 
/// The data is serialized in this order
/// is_mkl_tensor_ : dimension_ : sizes_ : map_: format_ : T_ : mkl_pd_;

/// Size of buffer to hold the serialized object, the size is computed by
/// following above mentioned order
inline size_t GetSerializeBufferSize() const { return sizeof(MklShapeData); }

// Serialize this shape into 'buf' by bitwise-copying the MklShapeData
// struct. 'buf_size' must be at least GetSerializeBufferSize().
void SerializeMklDnnShape(unsigned char* buf, size_t buf_size) const {
  CHECK(buf_size >= GetSerializeBufferSize())
      << "Buffer size is too small to SerializeMklDnnShape";
  *reinterpret_cast<MklShapeData*>(buf) = data_;
}

// Rebuild this shape from a buffer produced by SerializeMklDnnShape.
// Only the leading is_mkl_tensor_ flag is read for non-MKL tensors.
void DeSerializeMklDnnShape(const unsigned char* buf, size_t buf_size) {
  // Make sure buffer holds at least is_mkl_tensor_.
  CHECK(buf_size >= sizeof(data_.is_mkl_tensor_))
      << "Buffer size is too small in DeSerializeMklDnnShape";
  const bool is_mkl_tensor = *reinterpret_cast<const bool*>(buf);
  if (is_mkl_tensor) {  // If it is an MKL Tensor then read the rest
    CHECK(buf_size >= GetSerializeBufferSize())
        << "Buffer size is too small in DeSerializeMklDnnShape";
    data_ = *reinterpret_cast<const MklShapeData*>(buf);
  }
}
};

#endif

// List of MklShape objects. Used in Concat/Split layers.
typedef std::vector<MklShape> MklShapeList;
#ifndef INTEL_MKL_ML
typedef std::vector<MklDnnShape> MklDnnShapeList;
#endif

// Check if all tensors specified by MklShapes are MKL tensors.
inline bool AreAllMklTensors(const MklShapeList& shapes) {
  for (auto& s : shapes) {
    if (!s.IsMklTensor()) {
      return false;
    }
  }
  return true;
}

#ifdef INTEL_MKL_ML
// Convert an MKL-layout tensor back into a plain Tensorflow-layout tensor.
// Allocates a temp tensor with the TF shape and runs MKL's flat-data
// layout conversion into it.
template <typename T>
inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor,
                             const MklShape& mkl_shape) {
  Tensor output_tensor;
  TensorShape output_shape;

  for (size_t j = 0; j < mkl_shape.GetDimension(); j++) {
    // Outermost to innermost dimension
    output_shape.AddDim(mkl_shape.GetSizes()[mkl_shape.tf_dim_idx(j)]);
  }

  // Allocate output tensor.
  // NOTE(review): the Status returned by allocate_temp is ignored here;
  // an allocation failure would surface later as an invalid buffer --
  // confirm whether callers can tolerate that.
  context->allocate_temp(DataTypeToEnum<T>::v(), output_shape, &output_tensor);

  dnnLayout_t output_layout =
      static_cast<dnnLayout_t>(mkl_shape.GetTfLayout());
  void* input_buffer = const_cast<T*>(mkl_tensor.flat<T>().data());
  void* output_buffer = const_cast<T*>(output_tensor.flat<T>().data());

  if (mkl_tensor.NumElements() != 0) {
    mkl_shape.GetConvertedFlatData(output_layout, input_buffer, output_buffer);
  }
  return output_tensor;
}
#else
// MKL-DNN variant: conversion is not implemented yet; this CHECK-fails
// unconditionally if called.
template <typename T>
inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor,
                             const MklDnnShape& mkl_shape) {
  Tensor output_tensor;
  TensorShape output_shape;

  TF_CHECK_OK(
      Status(error::Code::UNIMPLEMENTED, "Unimplemented conversion function"));

  return output_tensor;
}
#endif

// Get the MKL shape from the second string tensor
inline void GetMklShape(OpKernelContext* ctext, int n, MklShape* mklshape) {
  mklshape->DeSerializeMklShape(
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
          .flat<uint8>()
          .data(),
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
          .flat<uint8>()
          .size() *
          sizeof(uint8));
}

#ifndef INTEL_MKL_ML
// MKL-DNN flavor: deserialize an MklDnnShape from input n's meta-data tensor.
inline void GetMklShape(OpKernelContext* ctext, int n, MklDnnShape* mklshape) {
  mklshape->DeSerializeMklDnnShape(
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
          .flat<uint8>()
          .data(),
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
          .flat<uint8>()
          .size() *
          sizeof(uint8));
}
#endif

// Gets the actual input
inline const Tensor& MklGetInput(OpKernelContext* ctext, int n) {
  return ctext->input(GetTensorDataIndex(n, ctext->num_inputs()));
}

// Look up the named input list on the kernel context.
inline void GetMklInputList(OpKernelContext* ctext, StringPiece name,
                            OpInputList* input_tensors) {
  CHECK_NOTNULL(input_tensors);
  ctext->input_list(name, input_tensors);
}

#ifdef INTEL_MKL_ML
// Deserialize every "mkl_<name>" meta-data tensor into mkl_shapes.
// Presumably mkl_shapes has already been sized to one entry per input --
// TODO confirm against callers.
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
                            MklShapeList* mkl_shapes) {
  OpInputList input_mkl_tensors;
  GetMklInputList(ctext, strings::StrCat("mkl_", name), &input_mkl_tensors);

  for (int i = 0; i <
input_mkl_tensors.size(); i++) {
    (*mkl_shapes)[i].DeSerializeMklShape(
        input_mkl_tensors[i].flat<uint8>().data(),
        input_mkl_tensors[i].flat<uint8>().size() * sizeof(uint8));
  }
}
#else
// MKL-DNN flavor of GetMklShapeList; same contract as the MKL-ML variant.
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
                            MklDnnShapeList* mkl_shapes) {
  OpInputList input_mkl_tensors;
  GetMklInputList(ctext, strings::StrCat("mkl_", name), &input_mkl_tensors);

  for (int i = 0; i < input_mkl_tensors.size(); i++) {
    (*mkl_shapes)[i].DeSerializeMklDnnShape(
        input_mkl_tensors[i].flat<uint8>().data(),
        input_mkl_tensors[i].flat<uint8>().size() * sizeof(uint8));
  }
}
#endif

#ifndef INTEL_MKL_ML
/// Get shape of input tensor pointed by 'input_idx' in TensorShape format.
/// If the input tensor is in MKL layout, then obtains TensorShape from
/// MklShape.
inline TensorShape GetTfShape(OpKernelContext* context, size_t input_idx) {
  // Sanity check.
  CHECK_NOTNULL(context);
  CHECK_LT(input_idx, context->num_inputs());

  MklDnnShape input_mkl_shape;
  GetMklShape(context, input_idx, &input_mkl_shape);
  if (input_mkl_shape.IsMklTensor()) {
    return input_mkl_shape.GetTfShape();
  } else {
    const Tensor& t = MklGetInput(context, input_idx);
    return t.shape();
  }
}
#endif

// Allocate the second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      const MklShape& mkl_shape) {
  Tensor* second_tensor = nullptr;
  TensorShape second_shape;
  second_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension()));
  OP_REQUIRES_OK(ctext, ctext->allocate_output(
                            GetTensorMetaDataIndex(n, ctext->num_outputs()),
                            second_shape, &second_tensor));
  mkl_shape.SerializeMklShape(
      second_tensor->flat<uint8>().data(),
      second_tensor->flat<uint8>().size() * sizeof(uint8));
}

#ifndef INTEL_MKL_ML
// Allocate the second output tensor that will contain
// the MKL shape serialized (MKL-DNN flavor).
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      const MklDnnShape& mkl_shape) {
  Tensor* second_tensor = nullptr;
  TensorShape second_shape;
  second_shape.AddDim(mkl_shape.GetSerializeBufferSize());
  OP_REQUIRES_OK(ctext, ctext->allocate_output(
                            GetTensorMetaDataIndex(n, ctext->num_outputs()),
                            second_shape, &second_tensor));
  mkl_shape.SerializeMklDnnShape(
      second_tensor->flat<uint8>().data(),
      second_tensor->flat<uint8>().size() * sizeof(uint8));
}
#endif

// Allocate the output tensor, create a second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      Tensor** output,
                                      const TensorShape& tf_shape,
                                      const MklShape& mkl_shape) {
  Tensor* second_tensor = nullptr;
  TensorShape second_shape;
  second_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension()));
  OP_REQUIRES_OK(
      ctext, ctext->allocate_output(GetTensorDataIndex(n, ctext->num_outputs()),
                                    tf_shape, output));
  OP_REQUIRES_OK(ctext, ctext->allocate_output(
                            GetTensorMetaDataIndex(n, ctext->num_outputs()),
                            second_shape, &second_tensor));
  mkl_shape.SerializeMklShape(
      second_tensor->flat<uint8>().data(),
      second_tensor->flat<uint8>().size() * sizeof(uint8));
}

#ifndef INTEL_MKL_ML
// Allocate the output tensor, create a second output tensor that will contain
// the MKL shape serialized (MKL-DNN flavor).
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      Tensor** output,
                                      const TensorShape& tf_shape,
                                      const MklDnnShape& mkl_shape) {
  Tensor* second_tensor = nullptr;
  TensorShape second_shape;
  second_shape.AddDim(mkl_shape.GetSerializeBufferSize());
  OP_REQUIRES_OK(
      ctext, ctext->allocate_output(GetTensorDataIndex(n, ctext->num_outputs()),
                                    tf_shape, output));
  OP_REQUIRES_OK(ctext, ctext->allocate_output(
                            GetTensorMetaDataIndex(n, ctext->num_outputs()),
                            second_shape, &second_tensor));
  mkl_shape.SerializeMklDnnShape(
      second_tensor->flat<uint8>().data(),
      second_tensor->flat<uint8>().size() * sizeof(uint8));
}
#endif

// Allocates a temp tensor and returns the data buffer for temporary storage.
// Currently
#ifndef INTEL_MKL_ML
// Sized from an MKL-DNN primitive descriptor; '+ 1' over-allocates by one
// element so a partial trailing element of size pd.get_size() still fits.
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           const memory::primitive_desc& pd, void** buf_out) {
  TensorShape tf_shape;

  tf_shape.AddDim(pd.get_size() / sizeof(T) + 1);
  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
                                                 tf_shape, tensor_out));
  *buf_out = static_cast<void*>(tensor_out->flat<T>().data());
}
#endif

// MKL-ML flavor: sized from a dnnLayout_t; always allocates float storage.
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           dnnLayout_t lt_buff, void** buf_out) {
  TensorShape tf_shape;

  tf_shape.AddDim(
      dnnLayoutGetMemorySize_F32(static_cast<dnnLayout_t>(lt_buff)) /
          sizeof(float) +
      1);
  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<float>::v(),
                                                 tf_shape, tensor_out));
  *buf_out = static_cast<void*>(tensor_out->flat<float>().data());
}

// Shape-only flavor: allocate a temp tensor of the given TF shape.
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           TensorShape tf_shape) {
  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
                                                 tf_shape, tensor_out));
}

// Fill 'strides' (4 entries) for a 4-D tensor with the given sizes.
inline void GetStridesFromSizes(TensorFormat data_format, size_t* strides,
                                const size_t* sizes) {
  // MKL requires strides in NCHW
  if (data_format == FORMAT_NHWC) {
    strides[0] = sizes[2];
    strides[1] = sizes[0] * sizes[2];
    strides[2] = 1;
    strides[3] = sizes[0] * sizes[1] * sizes[2];
  } else {
    strides[0] = 1;
    strides[1] = sizes[0];
    strides[2] = sizes[0] * sizes[1];
    strides[3] = sizes[0] * sizes[1] * sizes[2];
  }
}

// Translate a 4-D MKL shape's sizes into a TensorShape laid out per
// 'data_format_'. Fails the op if the MKL shape is not 4-D.
inline void MklSizesToTFSizes(OpKernelContext* context,
                              TensorFormat data_format_,
                              const MklShape& mkl_shape,
                              TensorShape* tf_shape) {
  size_t tf_dim = mkl_shape.GetDimension();
  const size_t* tf_sizes = mkl_shape.GetSizes();

  OP_REQUIRES(context, tf_dim == 4,
              errors::InvalidArgument("MKLSizesToTFSizes: size must be 4-dim"));
  std::vector<int32> sizes;

  sizes.push_back(tf_sizes[3]);

  if (data_format_ == FORMAT_NHWC) {
    sizes.push_back(tf_sizes[1]);
    sizes.push_back(tf_sizes[0]);
    sizes.push_back(tf_sizes[2]);
  } else {
    sizes.push_back(tf_sizes[2]);
    sizes.push_back(tf_sizes[1]);
    sizes.push_back(tf_sizes[0]);
  }

  OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(sizes, tf_shape));
}

// Map a dimension letter ('N'/'C'/'H'/'W') to its MklDims index.
// LOG(FATAL)s on any other letter.
inline int32 GetMklTensorDimIndex(char dimension) {
  switch (dimension) {
    case 'N':
      return MklDims::N;
    case 'C':
      return MklDims::C;
    case 'H':
      return MklDims::H;
    case 'W':
      return MklDims::W;
    default:
      LOG(FATAL) << "Invalid dimension: " << dimension;
      return -1;  // Avoid compiler warning about missing return value
  }
}

// Size of the named dimension ('N'/'C'/'H'/'W') of an MKL shape.
inline int64 GetMklTensorDim(const MklShape& mkl_shape, char dimension) {
  int index = GetMklTensorDimIndex(dimension);
  CHECK(index >= 0 && index < mkl_shape.GetDimension())
      << "Invalid index from the dimension: " << index << ", " << dimension;
  return mkl_shape.dim_size(index);
}

// Copy both the data tensor and its MKL meta-data tensor from input slot
// 'idx_in' to output slot 'idx_out' (shallow CopyFrom, no reallocation).
inline void CopyMklTensorInToOut(OpKernelContext* context, int idx_in,
                                 int idx_out) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
  int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs);

  const Tensor& data = context->input(idx_data_in);
  const Tensor& meta = context->input(idx_meta_in);
  Tensor output(data.dtype());
  Tensor meta_output(meta.dtype());

  // TODO (intel_tf): alternatively, call forward_input_to_output_with_shape(...) id:2694
  // https://github.com/imdone/tensorflow/issues/2693
  CHECK(output.CopyFrom(data, data.shape()));
  CHECK(meta_output.CopyFrom(meta, meta.shape()));
  context->set_output(idx_data_out, output);
  context->set_output(idx_meta_out, meta_output);
}

#ifdef INTEL_MKL_ML
// Copy a plain-TF input to an output under a new shape, emitting a dummy
// (non-MKL) meta-data tensor for the output slot.
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
                                         int idx_out,
                                         const TensorShape& shape) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  const Tensor& data = context->input(idx_data_in);
  MklShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);
  Tensor output(data.dtype());
  // TODO (intel_tf): alternatively, call forward_input_to_output_with_shape(...) id:3022
  // https://github.com/imdone/tensorflow/issues/3021
  CHECK(output.CopyFrom(data, shape));
  context->set_output(idx_data_out, output);
}
#else
// MKL-DNN flavor of CopyTfTensorInToOutWithShape; identical logic with an
// MklDnnShape meta-data tensor.
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
                                         int idx_out,
                                         const TensorShape& shape) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  const Tensor& data = context->input(idx_data_in);
  MklDnnShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);
  Tensor output(data.dtype());
  // TODO (intel_tf): alternatively, call forward_input_to_output_with_shape(...) id:3534
  // https://github.com/imdone/tensorflow/issues/3533
  CHECK(output.CopyFrom(data, shape));
  context->set_output(idx_data_out, output);
}
#endif

#ifdef INTEL_MKL_ML
// Forward a plain-TF input tensor to an output slot (ref-aware) and emit a
// dummy (non-MKL) meta-data tensor for that slot.
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
                                   int idx_out) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  MklShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);
  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
  }
}
#else
// MKL-DNN flavor of ForwardTfTensorInToOut.
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
                                   int idx_out) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  MklDnnShape dnn_shape_output;
  dnn_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, dnn_shape_output);
  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
  }
}
#endif

// Forward both the data tensor and its MKL meta-data tensor from input slot
// 'idx_in' to output slot 'idx_out' (ref-aware).
inline void ForwardMklTensorInToOut(OpKernelContext* context, int idx_in,
                                    int idx_out) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
  int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs);

  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
    context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
    context->set_output(idx_meta_out, context->input(idx_meta_in));
  }
}

#ifndef INTEL_MKL_ML
// Forward the data tensor (ref-aware) and emit the caller-provided
// MklDnnShape as the output's meta-data tensor.
inline void ForwardMklTensorInToOutWithMklShape(OpKernelContext* context,
                                                int idx_in, int idx_out,
                                                const MklDnnShape& mkl_shape) {
  int num_inputs = context->num_inputs();
  int num_outputs = context->num_outputs();
  int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);

  AllocateOutputSetMklShape(context, idx_out, mkl_shape);

  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
  }
}
#endif

// Forward the MKL shape ONLY (used in elementwise and other ops where
// we call the eigen implementation and MKL shape is not used)
// NOTE(review): parameter types mix 'uint32' and 'uint32_t'; harmless but
// worth unifying in a follow-up.
inline void ForwardMklMetaDataInToOut(OpKernelContext* context,
                                      uint32 idx_data_in,
                                      uint32_t idx_data_out) {
  uint32 idx_meta_in =
      GetTensorMetaDataIndex(idx_data_in, context->num_inputs());
  uint32 idx_meta_out =
      GetTensorMetaDataIndex(idx_data_out, context->num_outputs());

  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out);
  } else {
    context->set_output(idx_meta_out, context->input(idx_meta_in));
  }
}

// Set a dummy MKL shape (called when the output is in TF format)
inline void SetDummyMklShapeOutput(OpKernelContext* context,
                                   uint32 idx_data_out) {
  MklShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_data_out, mkl_shape_output);
}

#ifdef INTEL_MKL_ML
// We don't need these functions in MKLDNN. We have defined equality operator
// on MklDnnShape class directly.

// Checks if the TF shape for both MKL tensors is the same or not
// Returns: true if both TF shapes are the same, false otherwise
inline bool MklCompareShapes(const MklShape* input_shape_0,
                             const MklShape* input_shape_1) {
  // Check for number of dimensions
  if (input_shape_0->GetDimension() != input_shape_1->GetDimension()) {
    return false;
  }

  // Check size of each dimension
  size_t ndims = input_shape_0->GetDimension();
  for (size_t i = 0; i < ndims; i++) {
    if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) {
      return false;
    }
  }

  return true;
}

// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const MklShape* input_shape_0,
                             const TensorShape* input_shape_1) {
  // Check for number of dimensions
  if (input_shape_0->GetDimension() != input_shape_1->dims()) {
    return false;
  }

  // Check size of each dimension
  size_t ndims = input_shape_0->GetDimension();
  for (size_t i = 0; i < ndims; i++) {
    if (input_shape_0->tf_dim_size(i) != input_shape_1->dim_size(i)) {
      return false;
    }
  }

  return true;
}

// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const TensorShape* input_shape_0,
                             const MklShape* input_shape_1) {
  // Delegates to the (MklShape, TensorShape) overload above.
  return MklCompareShapes(input_shape_1, input_shape_0);
}

// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const TensorShape* input_shape_0,
                             const TensorShape* input_shape_1) {
  // Check for number of dimensions
  if (input_shape_0->dims() != input_shape_1->dims()) {
    return false;
  }

  // Check size of each dimension
  size_t ndims = input_shape_0->dims();
  for (size_t i = 0; i < ndims; i++) {
    if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) {
      return false;
    }
  }

  return true;
}
#endif

// These functions do not compile with MKL-DNN since mkl.h is missing.
// We may need to remove them later.
// TODO (intel_tf): Remove this routine when faster MKL layout conversion is id:4142
// https://github.com/imdone/tensorflow/issues/4140
// out.
// Transpose a float NHWC tensor into NCHW using MKL's somatcopy, batch by
// batch. 'output' is assumed to be preallocated with the same total size.
inline void MklNHWCToNCHW(const Tensor& input, Tensor** output) {
  const float* buf_in = input.flat<float>().data();
  float* buf_out = (*output)->flat<float>().data();

  int64 N = input.dim_size(0);
  int64 H = input.dim_size(1);
  int64 W = input.dim_size(2);
  int64 C = input.dim_size(3);
  int64 stride_n = H * W * C;
#pragma omp parallel for num_threads(16)
  for (int64 n = 0; n < N; ++n) {
    mkl_somatcopy('R', 'T', H * W, C, 1, buf_in + n * stride_n, C,
                  buf_out + n * stride_n, H * W);
  }
}

// Inverse of MklNHWCToNCHW: NCHW -> NHWC. Dimensions are read from the
// (preallocated) output tensor.
inline void MklNCHWToNHWC(const Tensor& input, Tensor** output) {
  const float* buf_in = input.flat<float>().data();
  float* buf_out = (*output)->flat<float>().data();

  int64 N = (*output)->dim_size(0);
  int64 H = (*output)->dim_size(1);
  int64 W = (*output)->dim_size(2);
  int64 C = (*output)->dim_size(3);
  int64 stride_n = H * W * C;
#pragma omp parallel for num_threads(16)
  for (int64 n = 0; n < N; ++n) {
    mkl_somatcopy('R', 'T', C, H * W, 1, buf_in + n * stride_n, H * W,
                  buf_out + n * stride_n, C);
  }
}

// -------------------------------------------------------------------

#ifndef INTEL_MKL_ML

/// Return MKL-DNN data type (memory::data_type) for input type T
///
/// @input None
/// @return memory::data_type corresponding to type T
template <typename T>
static memory::data_type MklDnnType();

/// Instantiation for float type. Add similar instantiations for other
/// type if needed.
template <>
memory::data_type MklDnnType<float>() {
  return memory::data_type::f32;
}

/// Map TensorFlow's data format into MKL-DNN data format
///
/// @input: TensorFlow data format
/// @return: memory::format corresponding to TensorFlow data format;
///          Fails with an error if invalid data format.
inline memory::format TFDataFormatToMklDnnDataFormat(TensorFormat format) {
  if (format == FORMAT_NHWC)
    return memory::format::nhwc;
  else if (format == FORMAT_NCHW)
    return memory::format::nchw;
  TF_CHECK_OK(Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
  // Return to get rid of compiler warning
  return memory::format::format_undef;
}

/// Map MKL-DNN data format to TensorFlow's data format
///
/// @input: memory::format
/// @return: Tensorflow data format corresponding to memory::format
///          Fails with an error if invalid data format.
inline TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format) {
  if (format == memory::format::nhwc)
    return FORMAT_NHWC;
  else if (format == memory::format::nchw)
    return FORMAT_NCHW;
  TF_CHECK_OK(Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));

  // Return to prevent compiler warnings, otherwise TF_CHECK_OK will ensure
  // that we don't come here.
  return FORMAT_NHWC;
}

/// Map TensorShape object into memory::dims required by MKL-DNN
///
/// This function will simply map input TensorShape into MKL-DNN dims
/// naively. So it will preserve the order of dimensions. E.g., if
/// input tensor is in NHWC format, then dims will be in NHWC format
/// also.
///
/// @input TensorShape object in shape
/// @return memory::dims corresponding to TensorShape
inline memory::dims TFShapeToMklDnnDims(const TensorShape& shape) {
  memory::dims dims(shape.dims());
  for (int d = 0; d < shape.dims(); ++d) {
    dims[d] = shape.dim_size(d);
  }
  return dims;
}

/// Map TensorShape object into memory::dims in NCHW format required by MKL-DNN
///
/// This function is a specific one than above function. It will map input
/// TensorShape into MKL-DNN dims in NCHW format. So it may not preserve the
/// order of dimensions. E.g., if input tensor is in NHWC format, then dims
/// will be in NCHW format, and not in NHWC format.
///
/// @input TensorShape object in shape
/// @return memory::dims in MKL-DNN required NCHW format
inline memory::dims TFShapeToMklDnnDimsInNCHW(const TensorShape& shape,
                                              TensorFormat format) {
  // Check validity of format.
  CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
           memory::format::format_undef);

  int n = shape.dim_size(GetTensorDimIndex(format, 'N'));
  int c = shape.dim_size(GetTensorDimIndex(format, 'C'));
  int h = shape.dim_size(GetTensorDimIndex(format, 'H'));
  int w = shape.dim_size(GetTensorDimIndex(format, 'W'));

  // MKL-DNN requires dimensions in NCHW format.
  return memory::dims({n, c, h, w});
}

/// Overloaded version of function above. Input parameters are
/// self-explanatory.
inline memory::dims MklDnnDimsInNCHW(const memory::dims& in_dims,
                                     TensorFormat format) {
  // Check validity of format.
  CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
           memory::format::format_undef);

  int n = in_dims[GetTensorDimIndex(format, 'N')];
  int c = in_dims[GetTensorDimIndex(format, 'C')];
  int h = in_dims[GetTensorDimIndex(format, 'H')];
  int w = in_dims[GetTensorDimIndex(format, 'W')];

  // MKL-DNN requires dimensions in NCHW format.
  return memory::dims({n, c, h, w});
}

/// Map MklDnn memory::dims object into TensorShape object.
///
/// This function will simply map input shape in MKL-DNN memory::dims format
/// in Tensorflow's TensorShape object by perserving dimension order.
///
/// @input MKL-DNN memory::dims object
/// @output TensorShape corresponding to memory::dims
inline TensorShape MklDnnDimsToTFShape(const memory::dims& dims) {
  std::vector<int32> shape(dims.size(), -1);
  for (int d = 0; d < dims.size(); d++) {
    shape[d] = dims[d];
  }

  TensorShape ret;
  CHECK_EQ(TensorShapeUtils::MakeShape(shape, &ret).ok(), true);
  return ret;
}

/// Function to calculate strides given tensor shape in Tensorflow order
/// E.g., if dims_tf_order is {1, 2, 3, 4}, then as per Tensorflow convention,
/// dimesion with size 1 is outermost dimension; while dimension with size 4 is
/// innermost dimension. So strides for this tensor would be {4 * 3 * 2,
/// 4 * 3, 4, 1}, i.e., {24, 12, 4, 1}.
///
/// @input Tensorflow shape in memory::dims type
/// @return memory::dims containing strides for the tensor.
inline memory::dims CalculateTFStrides(const memory::dims& dims_tf_order) {
  CHECK_GT(dims_tf_order.size(), 0);
  memory::dims strides(dims_tf_order.size());
  int last_dim_idx = dims_tf_order.size() - 1;
  // Innermost dimension has stride 1; each outer stride is the product of
  // all inner dimension sizes.
  strides[last_dim_idx] = 1;
  for (int d = last_dim_idx - 1; d >= 0; d--) {
    strides[d] = strides[d + 1] * dims_tf_order[d + 1];
  }
  return strides;
}

// Translate a Tensorflow Padding into an MKL-DNN padding_kind.
inline padding_kind TFPaddingToMklDnnPadding(Padding pad) {
  // MKL-DNN only supports zero padding.
  return padding_kind::zero;
}

/// Helper function to create memory descriptor in Blocked format
///
/// @input: Tensor dimensions
/// @input: strides corresponding to dimensions. One can use utility
///         function such as CalculateTFStrides to compute strides
///         for given dimensions.
/// @return: memory::desc object corresponding to blocked memory format
///          for given dimensions and strides.
inline memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
                                               const memory::dims& strides,
                                               memory::data_type dtype) {
  CHECK_EQ(dim.size(), strides.size());

  // We have to construct memory descriptor in a C style.
This is not at all // ideal but MKLDNN does not offer any API to construct descriptor in // blocked format except a copy constructor that accepts // mkldnn_memory_desc_t. mkldnn_memory_desc_t md; md.primitive_kind = mkldnn_memory; md.ndims = dim.size(); md.format = mkldnn_blocked; md.data_type = memory::convert_to_c(dtype); for (size_t i = 0; i < dim.size(); i++) { md.layout_desc.blocking.block_dims[i] = 1; md.layout_desc.blocking.strides[1][i] = 1; md.layout_desc.blocking.strides[0][i] = strides[i]; md.layout_desc.blocking.padding_dims[i] = dim[i]; md.layout_desc.blocking.offset_padding_to_data[i] = 0; md.dims[i] = dim[i]; } md.layout_desc.blocking.offset_padding = 0; return memory::desc(md); } /* * Class to represent all the resources corresponding to a tensor in TensorFlow * that are required to execute an operation (such as Convolution). */ template <typename T> class MklDnnData { private: /// MKL-DNN memory primitive for input user memory memory* user_memory_; /// MKL-DNN memory primitive in case input or output reorder is needed. memory* reorder_memory_; /// Operations memory descriptor memory::desc* op_md_; /// CPU engine on which operation will be executed const engine* cpu_engine_; public: explicit MklDnnData(const engine* e) : user_memory_(nullptr), reorder_memory_(nullptr), op_md_(nullptr), cpu_engine_(e) {} ~MklDnnData() { cpu_engine_ = nullptr; // We don't own this. delete (user_memory_); delete (reorder_memory_); delete (op_md_); } inline void* GetTensorBuffer(const Tensor* tensor) const { CHECK_NOTNULL(tensor); return const_cast<void*>( static_cast<const void*>(tensor->flat<T>().data())); } /// Set user memory primitive using specified dimensions, memory format and /// data_buffer. Function automatically uses element data type by using /// input type T used for creating call object. /// /// In a nutshell, function allows user to describe the input tensor to /// an operation. 
E.g., filter of Conv2D is of shape {1, 2, 3, 4}, and /// memory format HWIO, and the buffer that contains actual values is /// pointed by data_buffer. inline void SetUsrMem(const memory::dims& dim, memory::format fm, void* data_buffer = nullptr) { auto md = memory::desc(dim, MklDnnType<T>(), fm); SetUsrMem(md, data_buffer); } inline void SetUsrMem(const memory::dims& dim, memory::format fm, const Tensor* tensor) { CHECK_NOTNULL(tensor); SetUsrMem(dim, fm, GetTensorBuffer(tensor)); } /// Helper function to create memory descriptor in Blocked format /// /// @input: Tensor dimensions /// @input: strides corresponding to dimensions. One can use utility /// function such as CalculateTFStrides to compute strides /// for given dimensions. /// @return: memory::desc object corresponding to blocked memory format /// for given dimensions and strides. static inline memory::desc CreateBlockedMemDesc(const memory::dims& dim, const memory::dims& strides) { return CreateBlockedMemDescHelper(dim, strides, MklDnnType<T>()); } /// A version of SetUsrMem call that allows user to create memory in blocked /// format. So in addition to accepting dimensions, it also accepts strides. /// This allows user to create memory for tensor in a format that is not /// supported by MKLDNN. E.g., MKLDNN does not support tensor format for 6 /// dimensional tensor as a native format. But by using blocked format, a user /// can create memory for 6D tensor. 
inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides, void* data_buffer = nullptr) { CHECK_EQ(dim.size(), strides.size()); auto blocked_md = MklDnnData<T>::CreateBlockedMemDesc(dim, strides); SetUsrMem(blocked_md, data_buffer); } inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides, const Tensor* tensor) { CHECK_NOTNULL(tensor); SetUsrMem(dim, strides, GetTensorBuffer(tensor)); } /// A version of function to set user memory primitive that accepts memory /// descriptor directly, instead of accepting dimensions and format. This /// function is more generic that the one above, but the function above is /// sufficient in most cases. inline void SetUsrMem(const memory::desc& md, void* data_buffer = nullptr) { auto pd = memory::primitive_desc(md, *cpu_engine_); SetUsrMem(pd, data_buffer); } /// A version of SetUsrMem with memory descriptor and tensor inline void SetUsrMem(const memory::desc& md, const Tensor* tensor) { CHECK_NOTNULL(tensor); SetUsrMem(md, GetTensorBuffer(tensor)); } /// A version of function to set user memory primitive that accepts primitive /// descriptor directly, instead of accepting dimensions and format. This /// function is more generic that the one above, but the function above is /// sufficient in most cases. inline void SetUsrMem(const memory::primitive_desc& pd, void* data_buffer = nullptr) { CHECK_NOTNULL(cpu_engine_); // TODO (nhasabni): can we remove dynamic memory allocation? id:3493 // https://github.com/imdone/tensorflow/issues/3492 if (data_buffer) { user_memory_ = new memory(pd, data_buffer); } else { user_memory_ = new memory(pd); } } /// A version of SetUsrMem with primitive descriptor and tensor inline void SetUsrMem(const memory::primitive_desc& pd, const Tensor* tensor) { CHECK_NOTNULL(tensor); SetUsrMem(pd, GetTensorBuffer(tensor)); } /// Get function for user memory primitive. 
inline const memory* GetUsrMem() const { return user_memory_; } /// Get function for primitive descriptor of user memory primitive. inline const memory::primitive_desc GetUsrMemPrimDesc() const { CHECK_NOTNULL(user_memory_); return user_memory_->get_primitive_desc(); } /// Get function for descriptor of user memory. inline memory::desc GetUsrMemDesc() { // This is ugly. Why MKL-DNN does not provide desc() method of const type?? const memory::primitive_desc pd = GetUsrMemPrimDesc(); return const_cast<memory::primitive_desc*>(&pd)->desc(); } /// Get function for data buffer of user memory primitive. inline void* GetUsrMemDataHandle() const { CHECK_NOTNULL(user_memory_); return user_memory_->get_data_handle(); } /// Set function for data buffer of user memory primitive. inline void SetUsrMemDataHandle(void* data_buffer) { CHECK_NOTNULL(user_memory_); CHECK_NOTNULL(data_buffer); user_memory_->set_data_handle(data_buffer); } /// Set function for data buffer of user memory primitive. inline void SetUsrMemDataHandle(const Tensor* tensor) { CHECK_NOTNULL(user_memory_); CHECK_NOTNULL(tensor); user_memory_->set_data_handle(GetTensorBuffer(tensor)); } /// Get the memory primitive for input and output of an op. If inputs /// to an op require reorders, then this function returns memory primitive /// for reorder. Otherwise, it will return memory primitive for user memory. /// /// E.g., Conv2D(I, F) is a primitive with I and F being inputs. Then to /// execute Conv2D, we need memory primitive for I and F. Buf if reorder is /// required for I and F (say I_r is reorder primitive for I; F_r is reorder /// primitive for F), then we need I_r and F_r to perform Conv2D. inline const memory& GetOpMem() const { return reorder_memory_ ? *reorder_memory_ : *user_memory_; } /// Set memory descriptor of an operation in terms of dimensions and memory /// format. 
E.g., For Conv2D, the dimensions would be same as user dimensions /// but memory::format would be mkldnn::any because we want MKL-DNN to choose /// best layout/format for given input dimensions. inline void SetOpMemDesc(const memory::dims& dim, memory::format fm) { // TODO (nhasabni): can we remove dynamic memory allocation? id:2695 // https://github.com/imdone/tensorflow/issues/2694 op_md_ = new memory::desc(dim, MklDnnType<T>(), fm); } /// Get function for memory descriptor for an operation inline const memory::desc& GetOpMemDesc() const { return *op_md_; } /// Predicate that checks if we need to reorder user's memory into memory /// pointed by op_pd. /// /// @input: op_pd - memory primitive descriptor of the given input of an /// operation /// @return: true in case reorder of input is needed; false, otherwise. inline bool IsReorderNeeded(const memory::primitive_desc& op_pd) const { CHECK_NOTNULL(user_memory_); return op_pd != user_memory_->get_primitive_desc(); } /// Predicate that checks if we need to reorder user's memory into memory /// based on the provided format. /// /// @input: target_format - memory format of the given input of an /// operation /// @return: true in case reorder of input is needed; false, otherwise. inline bool IsReorderNeeded(const memory::format& target_format) const { CHECK_NOTNULL(user_memory_); return target_format != user_memory_->get_primitive_desc().desc().data.format; } /// Function to create a reorder from memory pointed by from to memory pointed /// by to. Returns created primitive. inline primitive CreateReorder(const memory* from, const memory* to) const { CHECK_NOTNULL(from); CHECK_NOTNULL(to); return reorder(*from, *to); } /// Function to handle input reordering /// /// Check if we need to reorder this input of an operation. /// Return true and allocate reorder memory primitive if reorder is needed. /// Otherwise, return false and do not allocate reorder memory primitive. 
/// /// To check if reorder is needed, this function compares memory primitive /// descriptor of an operation (op_pd) for the given input with the /// user-specified memory primitive descriptor. /// /// @input: op_pd - memory primitive descriptor of the given input of an /// operation /// @input: net - net to which to add reorder primitive in case it is needed. /// @return: true in case reorder of input is needed; false, otherwise. inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd, std::vector<primitive>* net) { CHECK_NOTNULL(net); CHECK_NOTNULL(user_memory_); if (IsReorderNeeded(op_pd)) { // TODO (nhasabni): can we remove dynamic memory allocation? id:3025 // https://github.com/imdone/tensorflow/issues/3024 reorder_memory_ = new memory(op_pd); net->push_back(CreateReorder(user_memory_, reorder_memory_)); return true; } return false; } /// Overloaded version of above function that accepts memory buffer /// where output of reorder needs to be stored. /// /// @input: op_pd - memory primitive descriptor of the given input of an /// operation /// @reorder_data_handle - memory buffer where output of reorder needs to be /// stored. Primitive does not check if buffer is /// enough size to write. /// @input: net - net to which to add reorder primitive in case it is needed. /// @return: true in case reorder of input is needed; false, otherwise. inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd, void* reorder_data_handle, std::vector<primitive>* net) { CHECK_NOTNULL(net); CHECK_NOTNULL(reorder_data_handle); CHECK_NOTNULL(user_memory_); if (IsReorderNeeded(op_pd)) { // TODO (nhasabni): can we remove dynamic memory allocation? 
id:3538 // https://github.com/imdone/tensorflow/issues/3537 reorder_memory_ = new memory(op_pd, reorder_data_handle); net->push_back(CreateReorder(user_memory_, reorder_memory_)); return true; } return false; } /// Another overloaded version of CheckReorderToOpMem that accepts Tensor /// where output of reorder needs to be stored. /// /// @input: op_pd - memory primitive descriptor of the given input of an /// operation /// @reorder_tensor - Tensor whose buffer is to be used to store output of /// reorder. Primitive does not check if buffer is /// enough size to write. /// @input: net - net to which to add reorder primitive in case it is needed. /// @return: true in case reorder of input is needed; false, otherwise. inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd, Tensor* reorder_tensor, std::vector<primitive>* net) { CHECK_NOTNULL(net); CHECK_NOTNULL(reorder_tensor); return CheckReorderToOpMem(op_pd, GetTensorBuffer(reorder_tensor), net); } /// Function to handle output reorder /// /// This function performs very similar functionality as input reordering /// function above. The only difference is that this function does not add /// reorder primitive to the net. The reason for this is: the reorder /// primitive for output needs to be added to the list only after operation /// has executed. But we need to prepare a temporary buffer in case output /// reorder is needed. And this temporary buffer will hold the output of /// an operation before it is fed to reorder primitive. /// /// @input memory primitive descriptor for the given output of an operation /// @return: true in case reorder of output is needed; false, otherwise. inline bool PrepareReorderToUserMemIfReq( const memory::primitive_desc& op_pd) { CHECK_NOTNULL(user_memory_); if (IsReorderNeeded(op_pd)) { // TODO (nhasabni): can we remove dynamic memory allocation? 
id:4143 // https://github.com/imdone/tensorflow/issues/4141 reorder_memory_ = new memory(op_pd); return true; } return false; } /// Function to actually insert reorder primitive in the net /// /// This function completes remaining part of output reordering. It inserts /// a reordering primitive from the temporary buffer that holds the output /// to the user-specified output buffer. /// /// @input: net - net to which to add reorder primitive inline void InsertReorderToUserMem(std::vector<primitive>* net) { CHECK_NOTNULL(net); CHECK_NOTNULL(user_memory_); CHECK_NOTNULL(reorder_memory_); net->push_back(CreateReorder(reorder_memory_, user_memory_)); } }; /// Base class for operations with reuse of DNN primitives /// class DnnOp { public: virtual ~DnnOp() {} // Dummy data. Its size, hard-coded as 256 here, does // not matter since MKL should never operate on this buffer. unsigned char DummyData[256]; }; const mkldnn::memory::dims NONE_DIMS = {}; // This constant is used to declare dummy buffer (size), for MKL primitives template <typename T> class DnnOpFactory { public: DnnOpFactory() {} ~DnnOpFactory() {} DnnOp* GetOp(const std::string& key) { auto stream_iter = DnnOpFactory<T>::GetHashMap().find(key); if (stream_iter == DnnOpFactory<T>::GetHashMap().end()) { return nullptr; } else { return stream_iter->second; } } void SetOp(const std::string& key, DnnOp* op) { auto stream_iter = DnnOpFactory<T>::GetHashMap().find(key); CHECK(stream_iter == DnnOpFactory<T>::GetHashMap().end()); DnnOpFactory<T>::GetHashMap()[key] = op; } private: static inline std::unordered_map<std::string, DnnOp*> &GetHashMap() { static thread_local std::unordered_map<std::string, DnnOp*> map_; return map_; } }; // utility class for creating keys of MKL primitive pool. 
/// Builds lookup keys for the MKL primitive pool by serializing the raw
/// bytes of each supplied component and separating components with a
/// one-character delimiter. Typical use: call AddAsKey(...) once per
/// attribute that identifies a primitive, then GetKey() to query or insert
/// into the pool's hash map.
class FactoryKeyCreator {
 public:
  FactoryKeyCreator() {
    // Pre-size the buffer so typical keys never trigger a reallocation.
    key_.reserve(kMaxKeyLength);
  }

  ~FactoryKeyCreator() {}

  /// Append a string component: the string's bytes, then the delimiter.
  void AddAsKey(const string &str) {
    auto buffer = reinterpret_cast<const char *>(str.c_str());
    Append(buffer, str.length());
  }

  /// Append a dims component element-by-element (each dimension serialized
  /// through the generic int overload, so each gets its own delimiter).
  void AddAsKey(const mkldnn::memory::dims &dims) {
    for (unsigned int i = 0; i < dims.size(); i++) {
      AddAsKey<int>(dims[i]);
    }
  }

  /// Append any value by its raw in-memory bytes.
  /// NOTE(review): byte-serialization assumes equal values of T have equal
  /// object representations (no variable padding) -- confirm before using
  /// this with new struct types.
  template <typename T>
  void AddAsKey(const T data) {
    auto buffer = reinterpret_cast<const char *>(&data);
    Append(buffer, sizeof(T));
  }

  /// Return the accumulated key (a copy).
  std::string GetKey() {
    return key_;
  }

 private:
  string key_;                    // accumulated key bytes
  const char delimiter = 'x';     // separator appended after each component
  const int kMaxKeyLength = 256;  // reserve() hint only; keys may grow larger

  // Append `len` raw bytes followed by the delimiter.
  void Append(const char* data, int len) {
    key_.append(data, len);
    key_.append(1, delimiter);
  }
};

#endif  // INTEL_MKL_DNN

}  // namespace tensorflow

#endif  // INTEL_MKL
#endif  // TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
DRB016-outputdep-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* The loop in this example cannot be parallelized.
This pattern has two pair of dependencies:
1. loop carried output dependence
x = .. :
2. loop carried true dependence due to:
.. = x;
x = ..;

Data race pairs: we allow two pairs to preserve the original code pattern.
1. x@73:12 vs. x@74:5
2. x@74:5 vs. x@74:5
*/
#include <stdio.h>

/* Shared array; each iteration stores the (racy) current value of x. */
int a[100];

int main()
{
  int len=100;
  int i,x=10;

  /* NOTE(review): the race below is INTENTIONAL. This is a DataRaceBench
     "orig-yes" kernel: x is shared across iterations, so a[i] = x reads a
     value written by another iteration (loop-carried true dependence) and
     x = i is an unsynchronized write from many threads (loop-carried output
     dependence). Do NOT "fix" it with private(x)/lastprivate(x) -- that
     would remove the defect the benchmark exists to expose, and would
     invalidate the x@73:12 / x@74:5 positions cited in the header comment. */
#pragma omp parallel for
  for (i=0;i<len;i++)
  {
    a[i] = x;  /* racy read of x (true dependence) */
    x=i;       /* racy write to x (output dependence) */
  }
  /* Final x is nondeterministic under the parallel loop (sequentially it
     would be len-1 = 99). */
  printf("x=%d",x);
  return 0;
}
saber_util.h
#ifndef ANAKIN_SABER_FUNCS_IMPL_SABER_UTIL_H
#define ANAKIN_SABER_FUNCS_IMPL_SABER_UTIL_H

#include <assert.h>
#include "saber/core/common.h"
#include "saber/core/tensor.h"
#include "saber/core/shape.h"

namespace anakin {
namespace saber {
namespace utils {

// Re-allocate `x` to `shape` iff its current storage is smaller than
// shape.count(). Returns true when a re-allocation happened, false when the
// existing buffer was already large enough.
template<typename opTensor>
static inline bool try_expand_tensor(opTensor& x, anakin::saber::Shape shape) {
    if (x.valid_size() < shape.count()) {
        x.re_alloc(shape, x.get_dtype());
        return true;
    }
    return false;
}

// Flat-size overload: grows `x` to a {1, 1, 1, size} NCHW shape when its
// current storage holds fewer than `size` elements.
template<typename opTensor>
static inline bool try_expand_tensor(opTensor& x, int size) {
    if (x.valid_size() < size) {
        anakin::saber::Shape shape({1, 1, 1, size}, Layout_NCHW);
        return try_expand_tensor(x, shape);
    }
    return false;
}

// Naive out-of-place matrix transpose: out (width x height) = in^T where
// `in` is height x width, both row-major. `in` and `out` must not alias.
template <typename DataType>
static inline void transpose(const DataType* in, int height, int width, DataType* out) {
    for (int i = 0; i < height; ++i) {
        for (int j = 0; j < width; ++j) {
            out[j * height + i] = in[i * width + j];
        }
    }
}

// Round k up to the nearest multiple of c (assumes c > 0, k >= 0).
inline int round_up(int k, int c) {
    return ((k + c - 1) / c) * c;
}

// Ceiling division: smallest q with q * c >= k (assumes c > 0, k >= 0).
inline int div_up(int k, int c) {
    return (k + c - 1) / c;
}

/* a bunch of std:: analogues to be compliant with any msvs version
 *
 * Rationale: msvs c++ (and even some c) headers contain a special pragma
 * that injects an msvs-version check into object files in order to avoid
 * abi-mismatches during static linking. This makes sense if e.g. std::
 * objects are passed through between application and library, which is not
 * the case for mkl-dnn (since there is no c++-rt dependent stuff,
 * ideally...).
*/ /* SFINAE helper -- analogue to std::enable_if */ class VectorPrint { public: template <typename Dtype> static void print_float(Dtype* target) { float* f = (float*)target; printf("size = %d\n", sizeof(Dtype)); for (int i = 0; i < sizeof(Dtype) / sizeof(float); i++) { printf(" %f ,", f[i]); } printf("\n"); } }; class AlignedUtils { public: template <typename Dtype> void aligned_last_dim(const Dtype* input, Dtype* output, int input_size, int ori_last_dim, int aligned_dim) { for (int row = 0; row < input_size / ori_last_dim; row++) { for (int col = ori_last_dim; col < aligned_dim; col++) { output[row * aligned_dim + col] = static_cast<Dtype>(0); } } for (int i = 0; i < input_size; i++) { int row = i / ori_last_dim; int col = i % ori_last_dim; output[row * aligned_dim + col] = input[i]; } } template <typename Dtype> void unaligned_last_dim(const Dtype* input, Dtype* output, int output_size, int ori_last_dim, int aligned_dim) { for (int i = 0; i < output_size; i++) { int row = i / ori_last_dim; int col = i % ori_last_dim; output[i] = input[row * aligned_dim + col]; } } }; class SeqSortedseqTranseUtil { public: SeqSortedseqTranseUtil(bool is_reverse = false, bool is_bi = false) : _is_reverse(is_reverse), _is_bi(is_bi) {}; void print_vec(int* in, int size, const char* perfix) { for (int i = 0; i < size; i++) { printf("[%s] %d = %d\n", perfix, i, in[i]); } } template <typename Dtype> void seq_2_sorted_seq(const Dtype* input, Dtype* output, int word_size) { // _map_vec.resize(word_sum); int word_sum = _map_vec.size(); // std::cout << "word_sum = " << word_sum << std::endl; for (int ori_word_id = 0; ori_word_id < word_sum; ++ori_word_id) { //can param int word_start = ori_word_id * word_size; int maped_id = _map_vec[ori_word_id]; int maped_start = maped_id * word_size; for (int word_vec_offset = 0; word_vec_offset < word_size; ++word_vec_offset) { // std::cout<<maped_start + word_vec_offset<<" --> "<<word_start + word_vec_offset<<" , = "<<input[maped_start + 
word_vec_offset]<<std::endl; output[maped_start + word_vec_offset] = input[word_start + word_vec_offset]; } } } template <typename Dtype> void hidden_2_sorted_hidden(const Dtype* input, Dtype* output, int hidden_size) { // _map_vec.resize(word_sum); int batch_size = _length_index.size(); // std::cout << "word_sum = " << word_sum << std::endl; for (int ori_word_id = 0; ori_word_id < batch_size; ++ori_word_id) { //can param int word_start = ori_word_id * hidden_size; int maped_id = _length_index[ori_word_id]; int maped_start = maped_id * hidden_size; for (int word_vec_offset = 0; word_vec_offset < hidden_size; ++word_vec_offset) { // std::cout<<maped_start + word_vec_offset<<" --> "<<word_start + word_vec_offset<<" , = "<<input[maped_start + word_vec_offset]<<std::endl; output[word_start + word_vec_offset] = input[maped_start + word_vec_offset]; } } } template <typename Dtype> void sorted_seq_2_seq(const Dtype* input, Dtype* output, int hidden_size) { int word_sum = _map_vec.size(); for (int ori_word_id = 0; ori_word_id < word_sum; ori_word_id++) { //can param int word_start = ori_word_id * hidden_size; int maped_id = _map_vec[ori_word_id]; int maped_start = maped_id * hidden_size; for (int word_vec_offset = 0; word_vec_offset < hidden_size; word_vec_offset++) { // std::cout<<ori_word_id+word_vec_offset<<" -> "<<maped_start+word_vec_offset<<std::endl; output[word_start + word_vec_offset] = input[maped_start + word_vec_offset]; } } } template <typename Dtype> void sorted_seq_2_seq(const Dtype* input, Dtype* output, int hidden_size, int alligned_hidden_size) { int word_sum = _map_vec.size(); for (int ori_word_id = 0; ori_word_id < word_sum; ori_word_id++) { //can param int word_start = ori_word_id * hidden_size; int maped_id = _map_vec[ori_word_id]; int maped_start = maped_id * alligned_hidden_size; for (int word_vec_offset = 0; word_vec_offset < hidden_size; word_vec_offset++) { // std::cout<<ori_word_id+word_vec_offset<<" -> "<<maped_start+word_vec_offset<<std::endl; 
output[word_start + word_vec_offset] = input[maped_start + word_vec_offset]; } } } /** * return whether need to transform * @param offset_vec * @param emit_offset_vec * @param emit_length * @return */ bool get_sorted_map(std::vector<int>& offset_vec, std::vector<int>& emit_offset_vec, int& emit_length, int skip_num = 0) { int batch_size = offset_vec.size() - 1; int word_sum = offset_vec[offset_vec.size() - 1]; std::vector<int>length_vec(batch_size); _length_index.resize(batch_size); if (skip_num > 1) { CHECK_EQ(batch_size, 1) << "only support batch = 1 in skip_mode"; CHECK_EQ(word_sum % skip_num, 0); int real_batch_size = skip_num; emit_length = word_sum / skip_num; emit_offset_vec.resize(emit_length + 1); emit_offset_vec[0] = 0; for (int i = 1; i <= emit_length; i++) { emit_offset_vec[i] = emit_offset_vec[i - 1] + skip_num; } return false; } if (batch_size == 1) { emit_length = offset_vec[1] - offset_vec[0]; emit_offset_vec.resize(emit_length + 1); for (int i = 0; i <= emit_length; i++) { emit_offset_vec[i] = i; } return false; } int max_len = 0; for (int i = 0; i < offset_vec.size() - 1; ++i) { int len = offset_vec[i + 1] - offset_vec[i]; max_len = max_len > len ? 
max_len : len; length_vec[i] = len; _length_index[i] = i; } emit_length = max_len; if (max_len == 1) { emit_offset_vec.push_back(0); emit_offset_vec.push_back(emit_length * batch_size); return false; } std::sort(_length_index.begin(), _length_index.end(), [&length_vec](int i1, int i2) { return length_vec[i1] > length_vec[i2]; }); emit_offset_vec.resize(max_len + 1); _map_vec.resize(word_sum); int target_word_id = 0; std::vector<int> length_vec_cnt = length_vec; int last_batch_size = batch_size; for (int word_id_in_seq = 0; word_id_in_seq < max_len; word_id_in_seq++) { emit_offset_vec[word_id_in_seq] = target_word_id; for (int batch_id = 0; batch_id < last_batch_size; batch_id++) { int old_batch_id = _length_index[batch_id]; if (length_vec_cnt[old_batch_id] > 0) { int inner_word_id_in_seq = word_id_in_seq; if (_is_reverse) { inner_word_id_in_seq = length_vec[old_batch_id] - 1 - word_id_in_seq; } int old_word_id = offset_vec[old_batch_id] + inner_word_id_in_seq; _map_vec[old_word_id] = target_word_id; // printf("map %d -> %d\n",old_word_id,target_word_id); length_vec_cnt[old_batch_id]--; target_word_id++; } else { last_batch_size--; break; } } } // print_vec(_map_vec.data(),word_sum,"map"); emit_offset_vec[max_len] = word_sum; return true; } private: // std::vector<int> _length_vec; std::vector<int> _length_index; std::vector<int> _map_vec; bool _is_reverse; bool _is_bi; }; /* analogue std::conditional */ template <bool, typename, typename> struct conditional {}; template <typename T, typename F> struct conditional<true, T, F> { typedef T type; }; template <typename T, typename F> struct conditional<false, T, F> { typedef F type; }; template <bool, typename, bool, typename, typename> struct conditional3 {}; template <typename T, typename FT, typename FF> struct conditional3<true, T, false, FT, FF> { typedef T type; }; template <typename T, typename FT, typename FF> struct conditional3<false, T, true, FT, FF> { typedef FT type; }; template <typename T, typename FT, 
typename FF> struct conditional3<false, T, false, FT, FF> { typedef FF type; }; template <bool, typename U, U, U> struct conditional_v {}; template <typename U, U t, U f> struct conditional_v<true, U, t, f> { static constexpr U value = t; }; template <typename U, U t, U f> struct conditional_v<false, U, t, f> { static constexpr U value = f; }; template<typename T> inline const T& min(const T& a, const T& b) { return a < b ? a : b; } template<typename T> inline const T& max(const T& a, const T& b) { return a > b ? a : b; } template <typename T> inline typename std::remove_reference<T>::type zero() { auto zero = typename std::remove_reference<T>::type(); return zero; } template <typename T, typename P> inline bool everyone_is(T val, P item) { return val == item; } template <typename T, typename P, typename... Args> inline bool everyone_is(T val, P item, Args... item_others) { return val == item && everyone_is(val, item_others...); } template <typename T, typename P> inline bool one_of(T val, P item) { return val == item; } template <typename T, typename P, typename... Args> inline bool one_of(T val, P item, Args... item_others) { return val == item || one_of(val, item_others...); } template <typename... Args> inline bool any_null(Args... 
ptrs) { return one_of(nullptr, ptrs...); } inline bool implication(bool cause, bool effect) { return !cause || effect; } template<typename T> inline void array_copy(T* dst, const T* src, size_t size) { for (size_t i = 0; i < size; ++i) { dst[i] = src[i]; } } template<typename T> inline bool array_cmp(const T* a1, const T* a2, size_t size) { for (size_t i = 0; i < size; ++i) if (a1[i] != a2[i]) { return false; } return true; } template<typename T, typename U> inline void array_set(T* arr, const U& val, size_t size) { for (size_t i = 0; i < size; ++i) { arr[i] = static_cast<T>(val); } } namespace product_impl { template<size_t> struct int2type {}; template <typename T> constexpr int product_impl(const T* arr, int2type<0>) { return arr[0]; } template <typename T, size_t num> inline T product_impl(const T* arr, int2type<num>) { return arr[0] * product_impl(arr + 1, int2type < num - 1 > ()); } } template <size_t num, typename T> inline T array_product(const T* arr) { return product_impl::product_impl(arr, product_impl::int2type < num - 1 > ()); } template<typename T, typename R = T> inline R array_product(const T* arr, size_t size) { R prod = 1; for (size_t i = 0; i < size; ++i) { prod *= arr[i]; } return prod; } template <typename T, typename U> inline typename std::remove_reference<T>::type div_up(const T a, const U b) { assert(b); return (a + b - 1) / b; } template <typename T, typename U> inline typename std::remove_reference<T>::type rnd_up(const T a, const U b) { return div_up(a, b) * b; } template <typename T, typename U> inline typename std::remove_reference<T>::type rnd_dn(const T a, const U b) { return (a / b) * b; } template <typename T, typename U, typename V> inline U this_block_size(const T offset, const U max, const V block_size) { assert(offset < max); // TODO (Roma): can't use nstl::max() due to circular dependency... 
we // need to fix this const T block_boundary = offset + block_size; if (block_boundary > max) { return max - offset; } else { return block_size; } } template <typename Telem, size_t Tdims> struct array_offset_calculator { template <typename... Targs> array_offset_calculator(Telem* base, Targs... Fargs) : _dims{ Fargs... } { _base_ptr = base; } template <typename... Targs> inline Telem& operator()(Targs... Fargs) { return *(_base_ptr + _offset(1, Fargs...)); } private: template <typename... Targs> inline size_t _offset(size_t const dimension, size_t element) { return element; } template <typename... Targs> inline size_t _offset(size_t const dimension, size_t theta, size_t element) { return element + (_dims[dimension] * theta); } template <typename... Targs> inline size_t _offset(size_t const dimension, size_t theta, size_t element, Targs... Fargs) { size_t t_prime = element + (_dims[dimension] * theta); return _offset(dimension + 1, t_prime, Fargs...); } Telem* _base_ptr; const int _dims[Tdims]; }; }//fin utils namespace template<typename T> struct is_integral { static constexpr bool value = false; }; template<> struct is_integral<int32_t> { static constexpr bool value = true; }; template<> struct is_integral<int16_t> { static constexpr bool value = true; }; template<> struct is_integral<int8_t> { static constexpr bool value = true; }; template<> struct is_integral<uint8_t> { static constexpr bool value = true; }; template <typename data_t, typename acc_t> inline typename std::enable_if < !is_integral<data_t>::value, typename std::remove_reference<data_t>::type >::type saturate(const acc_t& x) { return x; } template <typename data_t, typename acc_t> inline typename std::enable_if<is_integral<data_t>::value, typename std::remove_reference<data_t>::type>::type saturate(const acc_t& x) { acc_t v = x; if (v < (acc_t)std::numeric_limits<data_t>::lowest()) { v = (acc_t)std::numeric_limits<data_t>::lowest(); } if (v > (acc_t)std::numeric_limits<data_t>::max()) { v = 
(acc_t)std::numeric_limits<data_t>::max(); } return (typename std::remove_reference<data_t>::type)v; } template <typename out_t> inline out_t round_and_saturate(float f, round_mode rmode) { switch (rmode) { case round_mode::nearest: f = nearbyintf(f); break; case round_mode::down: f = floorf(f); break; } return saturate<out_t>(f); } /* Quantization with beta == 0 */ template <typename in_t, typename out_t> struct qz_b0 { out_t operator()(in_t in, float alpha, round_mode rmode) { return round_and_saturate<out_t>(alpha * in, rmode); } }; inline size_t datatype_size(DataType data_type) { switch (data_type) { case AK_FLOAT: return sizeof(float); case AK_INT32: return sizeof(int32_t); case AK_HALF: return sizeof(int16_t); case AK_INT8: return sizeof(int8_t); case AK_UINT8: return sizeof(uint8_t); case AK_INVALID: default: assert(!"unknown data_type"); } return 0; } /** returns floor(log2(v)), aka the position of the leftmost non-0 bit */ inline int ilog2q(size_t v) { if (v == 0) { return -1; } int p = 0; # define CP(pw) do { if (v >= (1ull << pw)) { v >>= pw; p += pw; } } while(0) CP(32); CP(16); CP(8); CP(4); CP(2); CP(1); # undef CP return p; } struct scratchpad_t { virtual ~scratchpad_t() {} virtual char* get() const = 0; }; template <typename T, typename U> inline void balance2D(U nthr, U ithr, T ny, T& ny_start, T& ny_end, T nx, T& nx_start, T& nx_end, T nx_divider) { const T grp_size = utils::div_up(nthr, nx_divider); const T grp_count = utils::div_up(nthr, grp_size); T grp = ithr / grp_size; T grp_ithr = ithr % grp_size; T grp_nthr = grp_size; T first_grps = nthr % grp_count; if (first_grps > 0 && grp >= first_grps) { ithr -= first_grps * grp_size; grp_nthr--; grp = ithr / grp_nthr + first_grps; grp_ithr = ithr % grp_nthr; } balance211(nx, grp_count, grp, nx_start, nx_end); balance211(ny, grp_nthr, grp_ithr, ny_start, ny_end); } template <typename T, typename U, typename V> inline U this_block_size(const T offset, const U max, const V block_size) { assert(offset 
< max); // TODO (Roma): can't use nstl::max() due to circular dependency... we // need to fix this const T block_boundary = offset + block_size; if (block_boundary > max) { return max - offset; } else { return block_size; } } template <typename HostType> static void reorder_nhwc_nchw(const Tensor<HostType>& input, Tensor<HostType>& output) { int n_value = input.num(); int c_value = input.channel(); int h_value = input.height(); int w_value = input.width(); if (input.get_layout() == Layout_NHWC && output.get_layout() == Layout_NCHW) { if (input.get_dtype() == AK_INT8 && output.get_dtype() == AK_FLOAT) { float* output_ptr = static_cast<float*>(output.mutable_data()); CHECK(input.get_scale().size() >= 1); float scale = input.get_scale()[0]; const int8_t* input_ptr = static_cast<const int8_t*>(input.data()); for (int n = 0; n < n_value; ++n) { for (int c = 0; c < c_value; ++c) { for (int h = 0; h < h_value; ++h) { for (int w = 0; w < w_value; ++w) { int in_index = n * h_value * w_value * c_value + h * w_value * c_value + w * c_value + c; int out_index = n * c_value * h_value * w_value + c * h_value * w_value + h * w_value + w; output_ptr[out_index] = input_ptr[in_index] * scale; } } } } } else if (input.get_dtype() == AK_UINT8 && output.get_dtype() == AK_FLOAT) { LOG(INFO) << "print uint 8"; CHECK(input.get_scale().size() >= 1); float scale = (input.get_scale()[0]) * (127.f / 255.f); LOG(INFO) << "scale = " << scale; double sum = 0.0; double max = 0.0; const uint8_t* input_ptr = static_cast<const uint8_t*>(input.data()); float* output_ptr = static_cast<float*>(output.mutable_data()); for (int n = 0; n < n_value; ++n) { for (int c = 0; c < c_value; ++c) { for (int h = 0; h < h_value; ++h) { for (int w = 0; w < w_value; ++w) { int in_index = n * h_value * w_value * c_value + h * w_value * c_value + w * c_value + c; int out_index = n * c_value * h_value * w_value + c * h_value * w_value + h * w_value + w; output_ptr[out_index] = (float)input_ptr[in_index] * scale; sum += 
output_ptr[out_index]; max = output_ptr[out_index] > max ? output_ptr[out_index] : max; } } } } LOG(INFO) << "avg = " << (sum / input.valid_size()) << "," << max; } else if (input.get_dtype() == AK_UINT8 && output.get_dtype() == AK_UINT8) { LOG(INFO) << "reorder uint 8"; uint8_t* output_ptr = static_cast<uint8_t*>(output.mutable_data()); const uint8_t* input_ptr = static_cast<const uint8_t*>(input.data()); for (int n = 0; n < n_value; ++n) { for (int c = 0; c < c_value; ++c) { for (int h = 0; h < h_value; ++h) { for (int w = 0; w < w_value; ++w) { int in_index = n * h_value * w_value * c_value + h * w_value * c_value + w * c_value + c; int out_index = n * c_value * h_value * w_value + c * h_value * w_value + h * w_value + w; output_ptr[out_index] = input_ptr[in_index]; } } } } } else if (input.get_dtype() == AK_FLOAT && output.get_dtype() == AK_FLOAT) { const float* input_ptr = static_cast<const float*>(input.data()); float* output_ptr = static_cast<float*>(output.mutable_data()); for (int n = 0; n < n_value; ++n) { for (int c = 0; c < c_value; ++c) { for (int h = 0; h < h_value; ++h) { for (int w = 0; w < w_value; ++w) { int in_index = n * h_value * w_value * c_value + h * w_value * c_value + w * c_value + c; int out_index = n * c_value * h_value * w_value + c * h_value * w_value + h * w_value + w; output_ptr[out_index] = input_ptr[in_index]; } } } } } else { LOG(FATAL) << "not support input type " << input.get_dtype(); } } else if (input.get_layout() == Layout_NCHW && output.get_layout() == Layout_NHWC) { if (input.get_dtype() == AK_FLOAT && output.get_dtype() == AK_FLOAT) { float* output_ptr = static_cast<float*>(output.mutable_data()); const float* input_ptr = static_cast<const float*>(input.data()); for (int n = 0; n < n_value; ++n) { for (int c = 0; c < c_value; ++c) { for (int h = 0; h < h_value; ++h) { for (int w = 0; w < w_value; ++w) { int in_index = n * c_value * h_value * w_value + c * h_value * w_value + h * w_value + w; int out_index = n * h_value * 
w_value * c_value + h * w_value * c_value + w * c_value + c; output_ptr[out_index] = input_ptr[in_index]; } } } } } else if (input.get_dtype() == AK_UINT8 && output.get_dtype() == AK_UINT8) { uint8_t* output_ptr = static_cast<uint8_t*>(output.mutable_data()); const uint8_t* input_ptr = static_cast<const uint8_t*>(input.data()); for (int n = 0; n < n_value; ++n) { for (int c = 0; c < c_value; ++c) { for (int h = 0; h < h_value; ++h) { for (int w = 0; w < w_value; ++w) { int in_index = n * c_value * h_value * w_value + c * h_value * w_value + h * w_value + w; int out_index = n * h_value * w_value * c_value + h * w_value * c_value + w * c_value + c; output_ptr[out_index] = input_ptr[in_index]; } } } } } else if (input.get_dtype() == AK_INT8 && output.get_dtype() == AK_INT8) { int8_t* output_ptr = static_cast<int8_t*>(output.mutable_data()); const int8_t* input_ptr = static_cast<const int8_t*>(input.data()); for (int n = 0; n < n_value; ++n) { for (int c = 0; c < c_value; ++c) { for (int h = 0; h < h_value; ++h) { for (int w = 0; w < w_value; ++w) { int in_index = n * c_value * h_value * w_value + c * h_value * w_value + h * w_value + w; int out_index = n * h_value * w_value * c_value + h * w_value * c_value + w * c_value + c; output_ptr[out_index] = input_ptr[in_index]; } } } } } else if (input.get_dtype() == AK_FLOAT && output.get_dtype() == AK_INT8) { CHECK(output.get_scale().size() >= 1); float scale = 1.f / (output.get_scale()[0]); int8_t* output_ptr = static_cast<int8_t*>(output.mutable_data()); const float* input_ptr = static_cast<const float*>(input.data()); for (int n = 0; n < n_value; ++n) { for (int c = 0; c < c_value; ++c) { for (int h = 0; h < h_value; ++h) { for (int w = 0; w < w_value; ++w) { int in_index = n * c_value * h_value * w_value + c * h_value * w_value + h * w_value + w; int out_index = n * h_value * w_value * c_value + h * w_value * c_value + w * c_value + c; output_ptr[out_index] = saturate<int8_t>(roundf(input_ptr[in_index] * scale)); } } } 
} } else if (input.get_dtype() == AK_FLOAT && output.get_dtype() == AK_UINT8) { CHECK(output.get_scale().size() >= 1); float scale = 1.f / (output.get_scale()[0]* (127.f / 255.f)); uint8_t* output_ptr = static_cast<uint8_t*>(output.mutable_data()); const float* input_ptr = static_cast<const float*>(input.data()); for (int n = 0; n < n_value; ++n) { for (int c = 0; c < c_value; ++c) { for (int h = 0; h < h_value; ++h) { for (int w = 0; w < w_value; ++w) { int in_index = n * c_value * h_value * w_value + c * h_value * w_value + h * w_value + w; int out_index = n * h_value * w_value * c_value + h * w_value * c_value + w * c_value + c; output_ptr[out_index] = saturate<uint8_t>(roundf(input_ptr[in_index] * scale)); } } } } }else { LOG(FATAL) << "not support in/ou type " << input.get_dtype() << "," << output.get_dtype(); } } else { LOG(FATAL) << "not support layout " << input.get_layout() << "," << output.get_layout(); } } template <typename HostType> static void reorder_nchwc_nchw(Tensor<HostType>& input, Tensor<HostType>& output) { if (input.valid_shape() == output.valid_shape()) { output.copy_from(input); return; } CHECK_EQ(input.get_dtype(), AK_FLOAT) << "only support float type"; LayoutType in_layout = input.get_layout(); LayoutType out_layout = output.get_layout(); bool is_nchwc_nchw = (in_layout == Layout_NCHW_C16R || in_layout == Layout_NCHW_C8R) && (out_layout == Layout_NCHW); bool is_nchw_nchwc = (out_layout == Layout_NCHW_C16R || out_layout == Layout_NCHW_C8R) && (in_layout == Layout_NCHW); CHECK(is_nchw_nchwc || is_nchwc_nchw) << "not support " << input.get_layout(); if (is_nchwc_nchw) { Shape shape = output.valid_shape(); int n_value = shape[0]; int c_value = shape[1]; int h_value = shape[2]; int w_value = shape[3]; Shape shape_input = input.valid_shape(); int aligned_length = shape_input.get_layout_aligned_length(); CHECK_GT(aligned_length, 0) << "input aligned should > 0"; int c_round_divk = shape_input[1]; c_round_divk = (shape_input.channel() + 
aligned_length - 1) / aligned_length; float* output_ptr = static_cast<float*>(output.mutable_data()); const float* input_ptr = static_cast<const float*>(input.data()); #pragma omp parallel for collapse(4) schedule(static) for (int n = 0; n < n_value; ++n) { for (int c = 0; c < c_value; ++c) { for (int h = 0; h < h_value; ++h) { //#pragma ivdep for (int w = 0; w < w_value; ++w) { int round_c = c / aligned_length; int remainder_c = c % aligned_length; int input_idx = n * c_round_divk * h_value * w_value * aligned_length + round_c * h_value * w_value * aligned_length + h * w_value * aligned_length + w * aligned_length + remainder_c; int output_idx = n * c_value * h_value * w_value + c * h_value * w_value + h * w_value + w ; *(output_ptr + output_idx) = input_ptr[input_idx]; } } } } } else if (is_nchw_nchwc) { Shape shape = input.valid_shape(); int n_value = shape[0], c_value = shape[1], h_value = shape[2], w_value = shape[3]; int aligned_length = output.valid_shape().get_layout_aligned_length(); CHECK_GT(aligned_length, 0) << "input aligned should > 0"; int c_round_divk = (c_value + aligned_length - 1) / aligned_length; float* output_ptr = static_cast<float*>(output.mutable_data()); const float* input_ptr = static_cast<const float*>(input.data()); #pragma omp parallel for collapse(5) schedule(static) for (int n = 0; n < n_value; ++n) { for (int c_idx = 0; c_idx < c_round_divk; ++c_idx) { for (int h = 0; h < h_value; ++h) { for (int w = 0; w < w_value; ++w) { for (int c = 0; c < aligned_length; ++c) { int input_idx = n * c_value * h_value * w_value + (c_idx * aligned_length + c) * h_value * w_value + h * w_value + w; int output_idx = n * c_round_divk * h_value * w_value * aligned_length + c_idx * h_value * w_value * aligned_length + h * w_value * aligned_length + w * aligned_length + c; *(output_ptr + output_idx) = ((c_idx * aligned_length + c) < c_value) ? 
* (input_ptr + input_idx) : 0; } } } } } } else { LOG(FATAL) << "not support this shape"; } } template <typename HostType> static void reorder_nchwc8_nchw(Tensor<HostType>& input, Tensor<HostType>& output) { CHECK_EQ(input.get_dtype(), AK_FLOAT) << "only support float type"; Shape shape = output.valid_shape(); int n_value = shape[0]; int c_value = shape[1]; int h_value = shape[2]; int w_value = shape[3]; Shape shape_input = input.valid_shape(); int c_round_div8 = shape_input[1]; if (input.get_layout() == Layout_NCHW_C8R) { c_round_div8 = (shape_input.channel() + 7) / 8; } float* output_ptr = static_cast<float*>(output.mutable_data()); const float* input_ptr = static_cast<const float*>(input.data()); #pragma omp parallel for collapse(4) schedule(static) for (int n = 0; n < n_value; ++n) { for (int c = 0; c < c_value; ++c) { for (int h = 0; h < h_value; ++h) { //#pragma ivdep for (int w = 0; w < w_value; ++w) { int round_c = c / 8; int remainder_c = c % 8; int input_idx = n * c_round_div8 * h_value * w_value * 8 + round_c * h_value * w_value * 8 + h * w_value * 8 + w * 8 + remainder_c; int output_idx = n * c_value * h_value * w_value + c * h_value * w_value + h * w_value + w ; *(output_ptr + output_idx) = input_ptr[input_idx]; } } } } } template <typename HOST_TYPE> inline void calibrate_int8c4_to_fp32_host(Tensor<HOST_TYPE>& host_tensor, const Tensor <HOST_TYPE>& int8_tensor) { CHECK_EQ(host_tensor.get_dtype(), AK_FLOAT); CHECK_EQ(host_tensor.get_layout(), Layout_NCHW); CHECK_EQ(int8_tensor.get_dtype(), AK_INT8); CHECK_EQ(int8_tensor.get_layout(), Layout_NCHW_C4); CHECK_EQ(host_tensor.valid_size(), int8_tensor.valid_size()); CHECK_GE(int8_tensor.get_scale().size(), 1); Shape out_stride = host_tensor.get_stride(); Shape in_shape = int8_tensor.valid_shape(); Shape out_shape = host_tensor.valid_shape(); int valid_width = in_shape.width(); int valid_height = in_shape.height(); int valid_channel_4 = in_shape.channel() / 4; int valid_num = in_shape.num(); int in_n_stride 
= in_shape[1] * in_shape[2] * in_shape[3] / 4; int in_c_stride = in_shape[2] * in_shape[3]; int in_h_stride = in_shape[3]; int in_w_stride = 1; int count = in_shape[0] * in_shape[1] * in_shape[2] * in_shape[3] / 4; const char* in_data = (const char*)int8_tensor.data(); float* out_data = (float*)host_tensor.mutable_data(); float scale = int8_tensor.get_scale()[0]; for (int gid = 0; gid < count; ++ gid) { float load0, load1, load2, load3; int read_w = (gid) % valid_width; int read_h = (gid / (in_h_stride)) % valid_height; int read_c = (gid / (in_c_stride)) % valid_channel_4; int read_n = (gid / (in_n_stride)) % valid_num; int in_offset = read_n * in_n_stride + read_c * in_c_stride + read_h * in_h_stride + read_w; int out_offset = read_n * out_stride[0] + read_c * (out_stride[1] << 2) + read_h * out_stride[2] + read_w * out_stride[3]; if (gid < count) { char readin0 = in_data[4 * in_offset + 0]; char readin1 = in_data[4 * in_offset + 1]; char readin2 = in_data[4 * in_offset + 2]; char readin3 = in_data[4 * in_offset + 3]; load0 = static_cast<float>(readin0); load1 = static_cast<float>(readin1); load2 = static_cast<float>(readin2); load3 = static_cast<float>(readin3); out_data[out_offset] = load0 * scale; out_offset += out_stride[1]; out_data[out_offset] = load1 * scale; out_offset += out_stride[1]; out_data[out_offset] = load2 * scale; out_offset += out_stride[1]; out_data[out_offset] = load3 * scale; } } } } } #endif //ANAKIN_SABER_FUNCS_IMPL_CUDA_SABER_UTIL_H
GB_binop__gt_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__gt_int64 // A.*B function (eWiseMult): GB_AemultB__gt_int64 // A*D function (colscale): GB_AxD__gt_int64 // D*A function (rowscale): GB_DxB__gt_int64 // C+=B function (dense accum): GB_Cdense_accumB__gt_int64 // C+=b function (dense accum): GB_Cdense_accumb__gt_int64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__gt_int64 // C=scalar+B GB_bind1st__gt_int64 // C=scalar+B' GB_bind1st_tran__gt_int64 // C=A+scalar GB_bind2nd__gt_int64 // C=A'+scalar GB_bind2nd_tran__gt_int64 // C type: bool // A type: int64_t // B,b type: int64_t // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] 
// cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x > y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GT || GxB_NO_INT64 || GxB_NO_GT_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__gt_int64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__gt_int64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__gt_int64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__gt_int64 ( GrB_Matrix C, const 
GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__gt_int64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__gt_int64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__gt_int64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, 
const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__gt_int64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int64_t bij = Bx [p] ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__gt_int64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int64_t aij = Ax [p] ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = (x > aij) ; \ 
} GrB_Info GB_bind1st_tran__gt_int64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB_bind2nd_tran__gt_int64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
sub_model_part_skin_detection_process.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Jordi Cotela // #if !defined(KRATOS_SUB_MODEL_PART_SKIN_DETECTION_PROCESS_H_INCLUDED) #define KRATOS_SUB_MODEL_PART_SKIN_DETECTION_PROCESS_H_INCLUDED // System includes #include <string> #include <iostream> // Project includes #include "skin_detection_process.h" namespace Kratos { ///@addtogroup KratosCore ///@{ ///@name Kratos Classes ///@{ /// Create a SubModelPart covering a part of the outside skin of the computation domain where a condition is met. /** For example, create the outer skin for the part of the domain belonging to a given SubModelPart. */ template<SizeType TDim> class KRATOS_API(KRATOS_CORE) SubModelPartSkinDetectionProcess: public SkinDetectionProcess<TDim> { KRATOS_DEFINE_LOCAL_FLAG( NODE_SELECTED ); // Internal class used to select which faces to create. class FaceSelector { public: KRATOS_CLASS_POINTER_DEFINITION(FaceSelector); virtual void Prepare(ModelPart& rMainModelPart) const = 0; virtual bool IsSelected(const Geometry<Node<3>>::PointsArrayType&) const = 0; }; // Select faces where all nodes belong to given SubModelPart. 
class SelectIfAllNodesOnSubModelPart: public FaceSelector { std::string mName; public: SelectIfAllNodesOnSubModelPart(const std::string& rName): mName(rName) {} void Prepare(ModelPart& rMainModelPart) const override { ModelPart& r_model_part = rMainModelPart.GetSubModelPart(mName); auto node_begin = r_model_part.NodesBegin(); const int num_nodes = r_model_part.NumberOfNodes(); #pragma omp parallel for for (int k = 0; k < num_nodes; k++) { (node_begin+k)->Set(SubModelPartSkinDetectionProcess::NODE_SELECTED); } } bool IsSelected(const Geometry<Node<3>>::PointsArrayType& rNodes) const override { bool select = true; for (auto i_node = rNodes.begin(); i_node != rNodes.end(); ++i_node) { select &= i_node->Is(SubModelPartSkinDetectionProcess::NODE_SELECTED); } return select; } }; public: ///@name Type Definitions ///@{ /// Pointer definition of SubModelPartSkinDetectionProcess KRATOS_CLASS_POINTER_DEFINITION(SubModelPartSkinDetectionProcess); using typename SkinDetectionProcess<TDim>::HashMapVectorIntType; using typename SkinDetectionProcess<TDim>::HashMapVectorIntIdsType; using typename SkinDetectionProcess<TDim>::VectorIndexType; using ConditionCheckType = bool(const Geometry<Node<3>>::PointsArrayType&); ///@} ///@name Life Cycle ///@{ /// Constructor SubModelPartSkinDetectionProcess(ModelPart& rModelPart, Parameters Settings); /// Deleted default constructor. SubModelPartSkinDetectionProcess() = delete; /// Deleted copy constructor. SubModelPartSkinDetectionProcess(SubModelPartSkinDetectionProcess const &rOther) = delete; /// Destructor. ~SubModelPartSkinDetectionProcess() override = default; ///@} ///@name Operators ///@{ /// Deleted sssignment operator. SubModelPartSkinDetectionProcess &operator=(SubModelPartSkinDetectionProcess const &rOther) = delete; ///@} ///@name Operations ///@{ void Execute() override; ///@} ///@name Input and output ///@{ std::string Info() const override { return "SkinDetectionProcess"; } /// Print information about this object. 
void PrintInfo(std::ostream& rOStream) const override { rOStream << "SkinDetectionProcess"; } /// Print object's data. void PrintData(std::ostream& rOStream) const override { } ///@} protected: ///@name Protected Operations ///@{ void CreateConditions( ModelPart& rMainModelPart, ModelPart& rSkinModelPart, HashMapVectorIntType& rInverseFaceMap, HashMapVectorIntIdsType& rPropertiesFaceMap, std::unordered_set<IndexType>& rNodesInTheSkin, const std::string& rConditionName) const override; Parameters GetDefaultSettings() const override; ///@} private: ///@name Member Variables ///@{ typename FaceSelector::Pointer mpFaceSelector; ///@} ///@name Private Operations ///@{ static bool FaceIsNeeded(const Geometry<Node<3>>::PointsArrayType&) { return true; } ///@} }; // Class SubModelPartSkinDetectionProcess ///@} ///@name Input and output ///@{ /// input stream function template<SizeType TDim> inline std::istream &operator>>(std::istream &rIStream, SubModelPartSkinDetectionProcess<TDim> &rThis); /// output stream function template<SizeType TDim> inline std::ostream &operator<<(std::ostream &rOStream, const SubModelPartSkinDetectionProcess<TDim> &rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; rThis.PrintData(rOStream); return rOStream; } ///@} ///@} } // namespace Kratos. #endif // KRATOS_SUB_MODEL_PART_SKIN_DETECTION_PROCESS_H_INCLUDED defined
target-10.c
/* { dg-do run } */

/* GCC libgomp regression test: checks that "#pragma omp target update"
   accepts a variable that was declared inside a declare-target region.
   The test passes if it compiles and runs to completion (exit code 0). */

/* Place v in device data environment: a device copy of v is created. */
#pragma omp declare target
extern int v;
#pragma omp end declare target

int v;

int
main ()
{
  /* Copy the host value of v to its device copy; no result is checked,
     only that the construct is accepted and executes. */
  #pragma omp target update to(v)
  return 0;
}
geeksforgeeks.c
// OpenMP header
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * OpenMP demo: every thread in the team prints a greeting with its own id,
 * and the master thread (id 0) additionally reports the team size.
 * Output order across threads is unspecified.
 */
int main(int argc, char* argv[])
{
    int thread_count;
    int thread_id;

    // Fork a team of threads; each thread gets private copies of both counters.
    #pragma omp parallel private(thread_count, thread_id)
    {
        // Identify this thread within the team.
        thread_id = omp_get_thread_num();
        printf("Welcome to GFG from thread = %d\n", thread_id);

        // Only the master thread queries and reports the team size.
        if (thread_id == 0) {
            thread_count = omp_get_num_threads();
            printf("Number of threads = %d\n", thread_count);
        }
    }

    return 0;
}
page_migration_test.c
#include "page_migration_test.h"

/* Global state for the benchmark: a linked list of per-NUMA-node bandwidth
   records kept sorted by bandwidth, plus the node-id table discovered from
   the membind mask. */
struct numa_node_bw * numa_node_list = NULL;
struct numa_node_bw * numa_list_head = NULL;  /* head of the sorted list */
int mem_types;
int max_node;
int numt;
int total_numa_nodes = 0;
int * numa_node_ids;          /* numa_node_ids[k] = k-th bound node's id */
struct bitmask * numa_nodes;
char ** mem_tech;             /* NOTE(review): never used in this file */
long double * means;          /* NOTE(review): never used in this file */
int * cluster_sizes;          /* NOTE(review): never used in this file */
char classes[3][8] = {"fast", "slow", "slowest"};
char * cpu_range;             /* taken verbatim from argv[1] */
long double ** tr_map;        /* tr_map[i][j]: avg page-migration bandwidth i->j (MB/s) */

/* Walk the bandwidth-sorted list and label each node "fast"/"slow"/"slowest".
   A node falls into the next class when its write-only bandwidth drops more
   than 20% below the previous node's.
   NOTE(review): dereferences numa_list_head unconditionally — assumes at
   least one node was recorded before this is called. */
void label_mem(){
    struct numa_node_bw * bw_it = numa_list_head;
    struct numa_node_bw * next_bw_it = bw_it->next;
    int i = 0;
    bw_it->mem_type = classes[i];
    while(next_bw_it != NULL){
        long double diff = bw_it->wr_only_avg - next_bw_it->wr_only_avg;
        long double perct = 0.2*bw_it->wr_only_avg;
        /* Demote to the next class on a >20% drop, capped at index 2. */
        if((diff > perct)&&((i+1)<3)){
            i++;
        }
        next_bw_it->mem_type = classes[i];
        bw_it = next_bw_it;
        next_bw_it= bw_it->next;
    }
}

/* Insert new_node into the global list, keeping it sorted in descending
   order of owtr_avg.  Assumes the list is non-empty (the first node is
   inserted directly by the caller, not through here).
   NOTE(review): owtr_avg is never accumulated in page_migration() below
   (the code that fed it is commented out), so in practice all keys are 0
   and insertion order decides the list order — confirm intent. */
void sort_list(struct numa_node_bw * new_node){
    struct numa_node_bw * bw_it = numa_list_head;
    struct numa_node_bw * prev_bw_it = NULL;
    while(bw_it != NULL){
        if((bw_it->owtr_avg < new_node->owtr_avg)){
            if(prev_bw_it == NULL){
                /* New maximum: becomes the list head. */
                new_node->next = bw_it;
                numa_list_head = new_node;
            }else{
                prev_bw_it->next = new_node;
                new_node->next = bw_it;
            }
            return;
        }
        prev_bw_it = bw_it;
        bw_it = bw_it->next;
    }
    /* Smallest key: append at the tail. */
    prev_bw_it->next = new_node;
    return;
}

/* Dump the migration-bandwidth matrix to stdout and one line per node
   ("<id> <class> <owtr_avg>") to the file "numa_class" in the CWD.
   NOTE(review): fopen's result is not checked — a read-only CWD would
   crash in fprintf; consider checking conf != NULL. */
void write_config_file(){
    FILE * conf;
    char fname[50];
    strcpy(fname, "numa_class");
    conf = fopen(fname, "w");
    struct numa_node_bw * bw_it = numa_list_head;
    printf("CPU ID\tSRC\tDEST\tPgMigration(Mb/s)\n");
    int n = 0;
    int m = 0;
    for(n=0; n < total_numa_nodes; n++){
        for(m=0; m < total_numa_nodes; m++){
            if(m!=n)
                printf("%s\t%d\t%d\t%Lf\n",cpu_range, n, m, tr_map[n][m]);
        }
    }
    while(bw_it != NULL){
        fprintf(conf, "%d %s %Lf\n", bw_it->numa_id, bw_it->mem_type, bw_it->owtr_avg);
        // printf("%s\t%d\t%s\t%LF\t%Lf\n", cpu_range, bw_it->numa_id, bw_it->mem_type, bw_it->wr_only_avg, bw_it->owtr_avg);
        bw_it = bw_it->next;
    }
    fclose(conf);
}

/* Benchmark driver.  For every NUMA node bound to this process:
 *   1. allocate three 64 MB double arrays on that node,
 *   2. time a parallel first-touch write pass (write-only bandwidth),
 *   3. time mbind(...MPOL_MF_MOVE) migrations of the three arrays to every
 *      other node, accumulating per-pair bandwidth into tr_map,
 *   4. average over 10 iterations and record the node in the sorted list.
 * Finally labels the nodes and writes the config file.
 *
 * NOTE(review): argv[1] is used without checking argc — caller must pass
 * the CPU-range string.
 * NOTE(review): mbind(2)'s 4th argument is `const unsigned long *nodemask`,
 * but `(const)nodeMask` passes the mask VALUE as the pointer — this looks
 * like a genuine bug (should be `&nodeMask` with maxnode = 8*sizeof(mask)+1);
 * `(const)` alone is also not a valid C cast.  Verify against the kernel API.
 * NOTE(review): the inner `if(j != numa_node_ids[i])` compares the list
 * INDEX j with a node ID — only correct when bound node ids are exactly
 * 0..total_numa_nodes-1.
 * NOTE(review): the `goto redo1`/`redo3` retries re-run a timing window when
 * the measured interval is <= 0 (empty stays 0.0), guarding against clock
 * anomalies. */
void page_migration(int argc, char ** argv){
    cpu_range=argv[1];
    max_node = numa_max_node() + 1;
    int cpu_count = numa_num_possible_cpus();
    numa_node_ids = (int*)malloc(sizeof(int)*max_node);
    struct bitmask * numa_nodes = numa_get_membind();
    int i = 0;
    unsigned long nodeMask;
    /* Collect the ids of all nodes this process may allocate on. */
    while(i < numa_nodes->size){
        if(numa_bitmask_isbitset(numa_nodes, i)){
            numa_node_ids[total_numa_nodes] = i;
            total_numa_nodes++;
        }
        i++;
    }
    //long double **tr_map;
    tr_map = (long double**)malloc(total_numa_nodes*sizeof(long double*));
    i = 0;
    while(i < total_numa_nodes){
        tr_map[i] = (long double*)malloc(total_numa_nodes*sizeof(long double));
        i++;
    }
    for(int p=0; p < total_numa_nodes; p++)
        for(int q=0; q < total_numa_nodes; q++)
            tr_map[p][q] = 0.0;
    int mbs = 64;                      /* working-set size per array, in MB */
    size_t size = mbs*1024*1024;
    int r_size = 16*32768;
    int c_size = 16*32768;
    double *a, *b, *c;
    clock_t start, end;
    struct timespec begin, stop;
    srand(clock());
    //sleep(5);
    i = 0;
    while(i < total_numa_nodes){
        int iters = 0;
        int stride;
        long double wr_only_avg=0.0;
        long double owtr_avg=0.0;
        long double accum;
        for( iters = 0; iters < 10; iters++){
            int j = 0;
            int k = 0;
            /* Place all three arrays on the source node under test. */
            a = (double*)numa_alloc_onnode(size, numa_node_ids[i]);
            b = (double*)numa_alloc_onnode(size, numa_node_ids[i]);
            c = (double*)numa_alloc_onnode(size, numa_node_ids[i]);
            // printf("Before Move Iter: %d A: %x, B: %x, C:%x\n",iters,a,b,c);
            long double empty=0.0;
            long double empty2=0.0;
redo1:
            /* Timed first-touch write pass: measures write-only bandwidth
               and faults the pages in on the source node. */
            clock_gettime( CLOCK_MONOTONIC, &begin);
#pragma omp parallel for
            for(j = 0;j < (size/sizeof(double));j++){
                a[j] = 1.0;
                b[j] = 2.0;
                c[j] = 3.0;
            }
            clock_gettime( CLOCK_MONOTONIC, &stop);
            accum = ( stop.tv_sec - begin.tv_sec ) + (long double)( stop.tv_nsec - begin.tv_nsec ) / (long double)BILLION;
            if(accum <= empty){
                goto redo1;
            }
            /* 3 arrays * 8 bytes/elem => 8*size bytes touched; 1.0E-06 -> MB. */
            wr_only_avg += ((8*size*1.0E-06)/(long double)(accum - empty));
redo3:
            /*#pragma omp parallel for
            for(j =0; j < (size/sizeof(double)); j++){
                a[j] = c[j] + b[j];
            }*/
            nodeMask = 1UL << numa_node_ids[i];
            j = 0;
            /* Migrate the arrays to each destination node and time it. */
            while(j < total_numa_nodes){
                // printf("Iter: %d NM: %ld\n", iters, nodeMask);
                if(j != numa_node_ids[i]){
                    clock_gettime( CLOCK_MONOTONIC, &begin);
                    mbind(a, size, MPOL_BIND, (const)nodeMask/*numa_nodes->maskp*/, numa_nodes->size, MPOL_MF_MOVE);
                    mbind(b, size, MPOL_BIND, (const)nodeMask/*numa_nodes->maskp*/, numa_nodes->size, MPOL_MF_MOVE);
                    mbind(c, size, MPOL_BIND, (const)nodeMask/*numa_nodes->maskp*/, numa_nodes->size, MPOL_MF_MOVE);
                    clock_gettime( CLOCK_MONOTONIC, &stop);
                    accum = ( stop.tv_sec - begin.tv_sec ) + (long double)( stop.tv_nsec - begin.tv_nsec ) / (long double)BILLION;
                    if(accum <= empty){
                        goto redo3;
                    }
                    tr_map[i][j] += ((3*size*1.0E-06)/(long double)(accum - empty));
                    // printf("%d %d %LF\n",i,j, ((3*size*1.0E-06)/(long double)(accum - empty)));
                }
                j++;
                /* Pre-compute the next destination's mask. */
                nodeMask = 1UL << numa_node_ids[(i+j)%total_numa_nodes];
            }
            // owtr_avg += ((3*size*1.0E-06)/(long double)(accum - empty));
            //printf("After Move Iter: %d A: %x, B: %x, C:%x\n",iters,a,b,c);
            numa_free(a, size);
            numa_free(b, size);
            numa_free(c, size);
        }
        //int n = 0;
        int m = 0;
        /* Average over the 10 iterations (hard-coded divisor matches the
           iters loop bound above — keep in sync). */
        //for(n=0; n < total_numa_nodes; n++){
            for(m=0; m < total_numa_nodes; m++){
                tr_map[i][m]/=10;
            }
        //}
        struct numa_node_bw * node_bw = (struct numa_node_bw *)malloc(sizeof(struct numa_node_bw));
        node_bw->numa_id = numa_node_ids[i];
        node_bw->wr_only_avg = wr_only_avg/10;
        node_bw->owtr_avg= owtr_avg/10;
        node_bw->next = NULL;
        if(numa_node_list == NULL){
            numa_node_list = node_bw;
            numa_list_head = numa_node_list;
        }
        else{
            sort_list(node_bw);
        }
        i++;
    }
    label_mem();
    write_config_file();
    i=0;
    while(i < total_numa_nodes) {
        free(tr_map[i]);
        i++;
    }
    free(tr_map);
}
lattice.c
/*******************************************************************************
** Lattice functions in C to improve performance
*******************************************************************************/

#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION

#include <Python.h> // includes stdio.h, string.h, errno.h, stdlib.h
#include <numpy/arrayobject.h>
#include <math.h>
#include "visclibs/array_utils.h"

/* Python 2 / Python 3 compatibility shims for the module-initialisation
   boilerplate: under Py3 a PyModuleDef + PyModule_Create is required,
   under Py2 Py_InitModule3 is used and init functions return void. */
#if PY_MAJOR_VERSION >= 3
#define MOD_ERROR_VAL NULL
#define MOD_SUCCESS_VAL(val) val
#define MOD_INIT(name) PyMODINIT_FUNC PyInit_##name(void)
#define MOD_DEF(ob, name, doc, methods) \
        static struct PyModuleDef moduledef = { \
            PyModuleDef_HEAD_INIT, name, doc, -1, methods, }; \
        ob = PyModule_Create(&moduledef);
#else
#define MOD_ERROR_VAL
#define MOD_SUCCESS_VAL(val)
#define MOD_INIT(name) void init##name(void)
#define MOD_DEF(ob, name, doc, methods) \
        ob = Py_InitModule3(name, methods, doc);
#endif

/* Forward declaration of the module's single exported function. */
static PyObject* wrapAtoms(PyObject*, PyObject*);

/*******************************************************************************
** List of python methods available in this module
*******************************************************************************/
static struct PyMethodDef module_methods[] = {
    {"wrapAtoms", wrapAtoms, METH_VARARGS, "Wrap atoms that have left the periodic cell"},
    {NULL, NULL, 0, NULL}
};

/*******************************************************************************
** Module initialisation function
*******************************************************************************/
MOD_INIT(_lattice)
{
    PyObject *mod;

    MOD_DEF(mod, "_lattice", "Lattice C extension", module_methods)
    if (mod == NULL)
        return MOD_ERROR_VAL;

    /* Mandatory NumPy C-API initialisation; must run before any
       PyArray_* usage in this module. */
    import_array();

    return MOD_SUCCESS_VAL(mod);
}

/* wrapAtoms(numAtoms, pos, cellDims, pbc)
 *
 * Wrap atom coordinates back into the periodic cell, IN PLACE, for each
 * axis whose periodic-boundary flag is set.  For axis k with pbc[k] true:
 *     pos[3*i+k] -= floor(pos[3*i+k] / cellDims[k]) * cellDims[k]
 * which maps the coordinate into [0, cellDims[k]).
 *
 * Returns None on success, or NULL (with the TypeError set by
 * PyArg_ParseTuple) if the arguments are not (int, ndarray, ndarray, ndarray).
 *
 * NOTE(review): DIND1/IIND1 come from visclibs/array_utils.h — presumably
 * double/int element accessors; verify against that header.  The code
 * assumes pos holds at least 3*numAtoms doubles, cellDims at least 3
 * doubles and pbc at least 3 ints — no dimension/dtype check is done here,
 * so a mismatched array from Python would be undefined behaviour; the
 * callers are trusted to pass well-formed arrays (TODO confirm).
 */
static PyObject*
wrapAtoms(PyObject *self, PyObject *args)
{
    int numAtoms;
    PyObject *result = NULL;
    PyArrayObject *pos = NULL;
    PyArrayObject *cellDims = NULL;
    PyArrayObject *pbc = NULL;

    /* parse and check arguments from Python */
    if (PyArg_ParseTuple(args, "iO!O!O!", &numAtoms, &PyArray_Type, &pos,
            &PyArray_Type, &cellDims, &PyArray_Type, &pbc))
    {
        int i;

        /* Each atom is independent, so the wrap is trivially parallel. */
        #pragma omp parallel for
        for (i = 0; i < numAtoms; i++)
        {
            int i3 = 3 * i;
            if (IIND1(pbc, 0))
                DIND1(pos, i3    ) = DIND1(pos, i3    ) - floor(DIND1(pos, i3    ) / DIND1(cellDims, 0)) * DIND1(cellDims, 0);
            if (IIND1(pbc, 1))
                DIND1(pos, i3 + 1) = DIND1(pos, i3 + 1) - floor(DIND1(pos, i3 + 1) / DIND1(cellDims, 1)) * DIND1(cellDims, 1);
            if (IIND1(pbc, 2))
                DIND1(pos, i3 + 2) = DIND1(pos, i3 + 2) - floor(DIND1(pos, i3 + 2) / DIND1(cellDims, 2)) * DIND1(cellDims, 2);
        }

        Py_INCREF(Py_None);
        result = Py_None;
    }

    return result;
}
omp_target_config.h
#ifndef OMP_TARGET_CONFIG_H_
#define OMP_TARGET_CONFIG_H_

/*
 * Run one small offloaded vector addition (z = x + y) so the one-time cost
 * of initializing the OpenMP target device is paid before any timed region.
 * The computed values are never read; only the side effect of touching the
 * device matters.
 *
 * Fixes vs. the previous version:
 *  - x, y and z were leaked on every call; they are now freed.
 *  - malloc results were dereferenced unchecked (UB on OOM); now checked.
 *  - the offload loop hard-coded 8 instead of using n; kept in sync via n.
 */
static void warm_up() {
    int i, n = 8;
    int *x = malloc(n * sizeof *x);
    int *y = malloc(n * sizeof *y);
    int *z = malloc(n * sizeof *z);

    /* Bail out cleanly if any allocation failed; free(NULL) is a no-op. */
    if (x == NULL || y == NULL || z == NULL) {
        free(x);
        free(y);
        free(z);
        return;
    }

    for (i = 0; i < n; i++) {
        x[i] = 1;
        y[i] = 1;
        z[i] = 0;
    }

#pragma omp target map(from:z[0:n]) map(to:y[0:n],x[0:n])
#pragma omp parallel for
    for (i = 0; i < n; ++i)  /* was a hard-coded 8 */
        z[i] = x[i] + y[i];

    /* Buffers were previously leaked; release them. */
    free(x);
    free(y);
    free(z);
}
#endif
GB_unaryop__identity_int32_fp64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_int32_fp64 // op(A') function: GB_tran__identity_int32_fp64 // C type: int32_t // A type: double // cast: int32_t cij ; GB_CAST_SIGNED(cij,aij,32) // unaryop: cij = aij #define GB_ATYPE \ double #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ int32_t z ; GB_CAST_SIGNED(z,x,32) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_int32_fp64 ( int32_t *restrict Cx, const double *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) 
{ GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_int32_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif